repo_name
string
path
string
copies
string
size
string
content
string
license
string
Split-Screen/android_kernel_samsung_manta
drivers/parisc/iosapic.c
4847
27607
/* ** I/O Sapic Driver - PCI interrupt line support ** ** (c) Copyright 1999 Grant Grundler ** (c) Copyright 1999 Hewlett-Packard Company ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU General Public License as published by ** the Free Software Foundation; either version 2 of the License, or ** (at your option) any later version. ** ** The I/O sapic driver manages the Interrupt Redirection Table which is ** the control logic to convert PCI line based interrupts into a Message ** Signaled Interrupt (aka Transaction Based Interrupt, TBI). ** ** Acronyms ** -------- ** HPA Hard Physical Address (aka MMIO address) ** IRQ Interrupt ReQuest. Implies Line based interrupt. ** IRT Interrupt Routing Table (provided by PAT firmware) ** IRdT Interrupt Redirection Table. IRQ line to TXN ADDR/DATA ** table which is implemented in I/O SAPIC. ** ISR Interrupt Service Routine. aka Interrupt handler. ** MSI Message Signaled Interrupt. PCI 2.2 functionality. ** aka Transaction Based Interrupt (or TBI). ** PA Precision Architecture. HP's RISC architecture. ** RISC Reduced Instruction Set Computer. ** ** ** What's a Message Signalled Interrupt? ** ------------------------------------- ** MSI is a write transaction which targets a processor and is similar ** to a processor write to memory or MMIO. MSIs can be generated by I/O ** devices as well as processors and require *architecture* to work. ** ** PA only supports MSI. So I/O subsystems must either natively generate ** MSIs (e.g. GSC or HP-PB) or convert line based interrupts into MSIs ** (e.g. PCI and EISA). IA64 supports MSIs via a "local SAPIC" which ** acts on behalf of a processor. ** ** MSI allows any I/O device to interrupt any processor. This makes ** load balancing of the interrupt processing possible on an SMP platform. ** Interrupts are also ordered WRT to DMA data. 
It's possible on I/O ** coherent systems to completely eliminate PIO reads from the interrupt ** path. The device and driver must be designed and implemented to ** guarantee all DMA has been issued (issues about atomicity here) ** before the MSI is issued. I/O status can then safely be read from ** DMA'd data by the ISR. ** ** ** PA Firmware ** ----------- ** PA-RISC platforms have two fundamentally different types of firmware. ** For PCI devices, "Legacy" PDC initializes the "INTERRUPT_LINE" register ** and BARs similar to a traditional PC BIOS. ** The newer "PAT" firmware supports PDC calls which return tables. ** PAT firmware only initializes the PCI Console and Boot interface. ** With these tables, the OS can program all other PCI devices. ** ** One such PAT PDC call returns the "Interrupt Routing Table" (IRT). ** The IRT maps each PCI slot's INTA-D "output" line to an I/O SAPIC ** input line. If the IRT is not available, this driver assumes ** INTERRUPT_LINE register has been programmed by firmware. The latter ** case also means online addition of PCI cards can NOT be supported ** even if HW support is present. ** ** All platforms with PAT firmware to date (Oct 1999) use one Interrupt ** Routing Table for the entire platform. ** ** Where's the iosapic? ** -------------------- ** I/O sapic is part of the "Core Electronics Complex". And on HP platforms ** it's integrated as part of the PCI bus adapter, "lba". So no bus walk ** will discover I/O Sapic. I/O Sapic driver learns about each device ** when lba driver advertises the presence of the I/O sapic by calling ** iosapic_register(). ** ** ** IRQ handling notes ** ------------------ ** The IO-SAPIC can indicate to the CPU which interrupt was asserted. ** So, unlike the GSC-ASIC and Dino, we allocate one CPU interrupt per ** IO-SAPIC interrupt and call the device driver's handler directly. ** The IO-SAPIC driver hijacks the CPU interrupt handler so it can ** issue the End Of Interrupt command to the IO-SAPIC. 
** ** Overview of exported iosapic functions ** -------------------------------------- ** (caveat: code isn't finished yet - this is just the plan) ** ** iosapic_init: ** o initialize globals (lock, etc) ** o try to read IRT. Presence of IRT determines if this is ** a PAT platform or not. ** ** iosapic_register(): ** o create iosapic_info instance data structure ** o allocate vector_info array for this iosapic ** o initialize vector_info - read corresponding IRdT? ** ** iosapic_xlate_pin: (only called by fixup_irq for PAT platform) ** o intr_pin = read cfg (INTERRUPT_PIN); ** o if (device under PCI-PCI bridge) ** translate slot/pin ** ** iosapic_fixup_irq: ** o if PAT platform (IRT present) ** intr_pin = iosapic_xlate_pin(isi,pcidev): ** intr_line = find IRT entry(isi, PCI_SLOT(pcidev), intr_pin) ** save IRT entry into vector_info later ** write cfg INTERRUPT_LINE (with intr_line)? ** else ** intr_line = pcidev->irq ** IRT pointer = NULL ** endif ** o locate vector_info (needs: isi, intr_line) ** o allocate processor "irq" and get txn_addr/data ** o request_irq(processor_irq, iosapic_interrupt, vector_info,...) ** ** iosapic_enable_irq: ** o clear any pending IRQ on that line ** o enable IRdT - call enable_irq(vector[line]->processor_irq) ** o write EOI in case line is already asserted. 
** ** iosapic_disable_irq: ** o disable IRdT - call disable_irq(vector[line]->processor_irq) */ /* FIXME: determine which include files are really needed */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <asm/byteorder.h> /* get in-line asm for swab */ #include <asm/pdc.h> #include <asm/pdcpat.h> #include <asm/page.h> #include <asm/io.h> /* read/write functions */ #ifdef CONFIG_SUPERIO #include <asm/superio.h> #endif #include <asm/ropes.h> #include "./iosapic_private.h" #define MODULE_NAME "iosapic" /* "local" compile flags */ #undef PCI_BRIDGE_FUNCS #undef DEBUG_IOSAPIC #undef DEBUG_IOSAPIC_IRT #ifdef DEBUG_IOSAPIC #define DBG(x...) printk(x) #else /* DEBUG_IOSAPIC */ #define DBG(x...) #endif /* DEBUG_IOSAPIC */ #ifdef DEBUG_IOSAPIC_IRT #define DBG_IRT(x...) printk(x) #else #define DBG_IRT(x...) #endif #ifdef CONFIG_64BIT #define COMPARE_IRTE_ADDR(irte, hpa) ((irte)->dest_iosapic_addr == (hpa)) #else #define COMPARE_IRTE_ADDR(irte, hpa) \ ((irte)->dest_iosapic_addr == ((hpa) | 0xffffffff00000000ULL)) #endif #define IOSAPIC_REG_SELECT 0x00 #define IOSAPIC_REG_WINDOW 0x10 #define IOSAPIC_REG_EOI 0x40 #define IOSAPIC_REG_VERSION 0x1 #define IOSAPIC_IRDT_ENTRY(idx) (0x10+(idx)*2) #define IOSAPIC_IRDT_ENTRY_HI(idx) (0x11+(idx)*2) static inline unsigned int iosapic_read(void __iomem *iosapic, unsigned int reg) { writel(reg, iosapic + IOSAPIC_REG_SELECT); return readl(iosapic + IOSAPIC_REG_WINDOW); } static inline void iosapic_write(void __iomem *iosapic, unsigned int reg, u32 val) { writel(reg, iosapic + IOSAPIC_REG_SELECT); writel(val, iosapic + IOSAPIC_REG_WINDOW); } #define IOSAPIC_VERSION_MASK 0x000000ff #define IOSAPIC_VERSION(ver) ((int) (ver & IOSAPIC_VERSION_MASK)) #define IOSAPIC_MAX_ENTRY_MASK 0x00ff0000 #define IOSAPIC_MAX_ENTRY_SHIFT 0x10 #define IOSAPIC_IRDT_MAX_ENTRY(ver) \ (int) (((ver) & IOSAPIC_MAX_ENTRY_MASK) >> 
IOSAPIC_MAX_ENTRY_SHIFT) /* bits in the "low" I/O Sapic IRdT entry */ #define IOSAPIC_IRDT_ENABLE 0x10000 #define IOSAPIC_IRDT_PO_LOW 0x02000 #define IOSAPIC_IRDT_LEVEL_TRIG 0x08000 #define IOSAPIC_IRDT_MODE_LPRI 0x00100 /* bits in the "high" I/O Sapic IRdT entry */ #define IOSAPIC_IRDT_ID_EID_SHIFT 0x10 static DEFINE_SPINLOCK(iosapic_lock); static inline void iosapic_eoi(void __iomem *addr, unsigned int data) { __raw_writel(data, addr); } /* ** REVISIT: future platforms may have more than one IRT. ** If so, the following three fields form a structure which ** then be linked into a list. Names are chosen to make searching ** for them easy - not necessarily accurate (eg "cell"). ** ** Alternative: iosapic_info could point to the IRT it's in. ** iosapic_register() could search a list of IRT's. */ static struct irt_entry *irt_cell; static size_t irt_num_entry; static struct irt_entry *iosapic_alloc_irt(int num_entries) { unsigned long a; /* The IRT needs to be 8-byte aligned for the PDC call. * Normally kmalloc would guarantee larger alignment, but * if CONFIG_DEBUG_SLAB is enabled, then we can get only * 4-byte alignment on 32-bit kernels */ a = (unsigned long)kmalloc(sizeof(struct irt_entry) * num_entries + 8, GFP_KERNEL); a = (a + 7UL) & ~7UL; return (struct irt_entry *)a; } /** * iosapic_load_irt - Fill in the interrupt routing table * @cell_num: The cell number of the CPU we're currently executing on * @irt: The address to place the new IRT at * @return The number of entries found * * The "Get PCI INT Routing Table Size" option returns the number of * entries in the PCI interrupt routing table for the cell specified * in the cell_number argument. The cell number must be for a cell * within the caller's protection domain. * * The "Get PCI INT Routing Table" option returns, for the cell * specified in the cell_number argument, the PCI interrupt routing * table in the caller allocated memory pointed to by mem_addr. 
* We assume the IRT only contains entries for I/O SAPIC and * calculate the size based on the size of I/O sapic entries. * * The PCI interrupt routing table entry format is derived from the * IA64 SAL Specification 2.4. The PCI interrupt routing table defines * the routing of PCI interrupt signals between the PCI device output * "pins" and the IO SAPICs' input "lines" (including core I/O PCI * devices). This table does NOT include information for devices/slots * behind PCI to PCI bridges. See PCI to PCI Bridge Architecture Spec. * for the architected method of routing of IRQ's behind PPB's. */ static int __init iosapic_load_irt(unsigned long cell_num, struct irt_entry **irt) { long status; /* PDC return value status */ struct irt_entry *table; /* start of interrupt routing tbl */ unsigned long num_entries = 0UL; BUG_ON(!irt); if (is_pdc_pat()) { /* Use pat pdc routine to get interrupt routing table size */ DBG("calling get_irt_size (cell %ld)\n", cell_num); status = pdc_pat_get_irt_size(&num_entries, cell_num); DBG("get_irt_size: %ld\n", status); BUG_ON(status != PDC_OK); BUG_ON(num_entries == 0); /* ** allocate memory for interrupt routing table ** This interface isn't really right. We are assuming ** the contents of the table are exclusively ** for I/O sapic devices. */ table = iosapic_alloc_irt(num_entries); if (table == NULL) { printk(KERN_WARNING MODULE_NAME ": read_irt : can " "not alloc mem for IRT\n"); return 0; } /* get PCI INT routing table */ status = pdc_pat_get_irt(table, cell_num); DBG("pdc_pat_get_irt: %ld\n", status); WARN_ON(status != PDC_OK); } else { /* ** C3000/J5000 (and similar) platforms with Sprockets PDC ** will return exactly one IRT for all iosapics. ** So if we have one, don't need to get it again. 
*/ if (irt_cell) return 0; /* Should be using the Elroy's HPA, but it's ignored anyway */ status = pdc_pci_irt_size(&num_entries, 0); DBG("pdc_pci_irt_size: %ld\n", status); if (status != PDC_OK) { /* Not a "legacy" system with I/O SAPIC either */ return 0; } BUG_ON(num_entries == 0); table = iosapic_alloc_irt(num_entries); if (!table) { printk(KERN_WARNING MODULE_NAME ": read_irt : can " "not alloc mem for IRT\n"); return 0; } /* HPA ignored by this call too. */ status = pdc_pci_irt(num_entries, 0, table); BUG_ON(status != PDC_OK); } /* return interrupt table address */ *irt = table; #ifdef DEBUG_IOSAPIC_IRT { struct irt_entry *p = table; int i; printk(MODULE_NAME " Interrupt Routing Table (cell %ld)\n", cell_num); printk(MODULE_NAME " start = 0x%p num_entries %ld entry_size %d\n", table, num_entries, (int) sizeof(struct irt_entry)); for (i = 0 ; i < num_entries ; i++, p++) { printk(MODULE_NAME " %02x %02x %02x %02x %02x %02x %02x %02x %08x%08x\n", p->entry_type, p->entry_length, p->interrupt_type, p->polarity_trigger, p->src_bus_irq_devno, p->src_bus_id, p->src_seg_id, p->dest_iosapic_intin, ((u32 *) p)[2], ((u32 *) p)[3] ); } } #endif /* DEBUG_IOSAPIC_IRT */ return num_entries; } void __init iosapic_init(void) { unsigned long cell = 0; DBG("iosapic_init()\n"); #ifdef __LP64__ if (is_pdc_pat()) { int status; struct pdc_pat_cell_num cell_info; status = pdc_pat_cell_get_number(&cell_info); if (status == PDC_OK) { cell = cell_info.cell_num; } } #endif /* get interrupt routing table for this cell */ irt_num_entry = iosapic_load_irt(cell, &irt_cell); if (irt_num_entry == 0) irt_cell = NULL; /* old PDC w/o iosapic */ } /* ** Return the IRT entry in case we need to look something else up. 
*/ static struct irt_entry * irt_find_irqline(struct iosapic_info *isi, u8 slot, u8 intr_pin) { struct irt_entry *i = irt_cell; int cnt; /* track how many entries we've looked at */ u8 irq_devno = (slot << IRT_DEV_SHIFT) | (intr_pin-1); DBG_IRT("irt_find_irqline() SLOT %d pin %d\n", slot, intr_pin); for (cnt=0; cnt < irt_num_entry; cnt++, i++) { /* ** Validate: entry_type, entry_length, interrupt_type ** ** Difference between validate vs compare is the former ** should print debug info and is not expected to "fail" ** on current platforms. */ if (i->entry_type != IRT_IOSAPIC_TYPE) { DBG_IRT(KERN_WARNING MODULE_NAME ":find_irqline(0x%p): skipping entry %d type %d\n", i, cnt, i->entry_type); continue; } if (i->entry_length != IRT_IOSAPIC_LENGTH) { DBG_IRT(KERN_WARNING MODULE_NAME ":find_irqline(0x%p): skipping entry %d length %d\n", i, cnt, i->entry_length); continue; } if (i->interrupt_type != IRT_VECTORED_INTR) { DBG_IRT(KERN_WARNING MODULE_NAME ":find_irqline(0x%p): skipping entry %d interrupt_type %d\n", i, cnt, i->interrupt_type); continue; } if (!COMPARE_IRTE_ADDR(i, isi->isi_hpa)) continue; if ((i->src_bus_irq_devno & IRT_IRQ_DEVNO_MASK) != irq_devno) continue; /* ** Ignore: src_bus_id and rc_seg_id correlate with ** iosapic_info->isi_hpa on HP platforms. ** If needed, pass in "PFA" (aka config space addr) ** instead of slot. */ /* Found it! */ return i; } printk(KERN_WARNING MODULE_NAME ": 0x%lx : no IRT entry for slot %d, pin %d\n", isi->isi_hpa, slot, intr_pin); return NULL; } /* ** xlate_pin() supports the skewing of IRQ lines done by subsidiary bridges. ** Legacy PDC already does this translation for us and stores it in INTR_LINE. ** ** PAT PDC needs to basically do what legacy PDC does: ** o read PIN ** o adjust PIN in case device is "behind" a PPB ** (eg 4-port 100BT and SCSI/LAN "Combo Card") ** o convert slot/pin to I/O SAPIC input line. 
** ** HP platforms only support: ** o one level of skewing for any number of PPBs ** o only support PCI-PCI Bridges. */ static struct irt_entry * iosapic_xlate_pin(struct iosapic_info *isi, struct pci_dev *pcidev) { u8 intr_pin, intr_slot; pci_read_config_byte(pcidev, PCI_INTERRUPT_PIN, &intr_pin); DBG_IRT("iosapic_xlate_pin(%s) SLOT %d pin %d\n", pcidev->slot_name, PCI_SLOT(pcidev->devfn), intr_pin); if (intr_pin == 0) { /* The device does NOT support/use IRQ lines. */ return NULL; } /* Check if pcidev behind a PPB */ if (pcidev->bus->parent) { /* Convert pcidev INTR_PIN into something we ** can lookup in the IRT. */ #ifdef PCI_BRIDGE_FUNCS /* ** Proposal #1: ** ** call implementation specific translation function ** This is architecturally "cleaner". HP-UX doesn't ** support other secondary bus types (eg. E/ISA) directly. ** May be needed for other processor (eg IA64) architectures ** or by some ambitous soul who wants to watch TV. */ if (pci_bridge_funcs->xlate_intr_line) { intr_pin = pci_bridge_funcs->xlate_intr_line(pcidev); } #else /* PCI_BRIDGE_FUNCS */ struct pci_bus *p = pcidev->bus; /* ** Proposal #2: ** The "pin" is skewed ((pin + dev - 1) % 4). ** ** This isn't very clean since I/O SAPIC must assume: ** - all platforms only have PCI busses. ** - only PCI-PCI bridge (eg not PCI-EISA, PCI-PCMCIA) ** - IRQ routing is only skewed once regardless of ** the number of PPB's between iosapic and device. ** (Bit3 expansion chassis follows this rule) ** ** Advantage is it's really easy to implement. */ intr_pin = pci_swizzle_interrupt_pin(pcidev, intr_pin); #endif /* PCI_BRIDGE_FUNCS */ /* * Locate the host slot of the PPB. 
*/ while (p->parent->parent) p = p->parent; intr_slot = PCI_SLOT(p->self->devfn); } else { intr_slot = PCI_SLOT(pcidev->devfn); } DBG_IRT("iosapic_xlate_pin: bus %d slot %d pin %d\n", pcidev->bus->secondary, intr_slot, intr_pin); return irt_find_irqline(isi, intr_slot, intr_pin); } static void iosapic_rd_irt_entry(struct vector_info *vi , u32 *dp0, u32 *dp1) { struct iosapic_info *isp = vi->iosapic; u8 idx = vi->irqline; *dp0 = iosapic_read(isp->addr, IOSAPIC_IRDT_ENTRY(idx)); *dp1 = iosapic_read(isp->addr, IOSAPIC_IRDT_ENTRY_HI(idx)); } static void iosapic_wr_irt_entry(struct vector_info *vi, u32 dp0, u32 dp1) { struct iosapic_info *isp = vi->iosapic; DBG_IRT("iosapic_wr_irt_entry(): irq %d hpa %lx 0x%x 0x%x\n", vi->irqline, isp->isi_hpa, dp0, dp1); iosapic_write(isp->addr, IOSAPIC_IRDT_ENTRY(vi->irqline), dp0); /* Read the window register to flush the writes down to HW */ dp0 = readl(isp->addr+IOSAPIC_REG_WINDOW); iosapic_write(isp->addr, IOSAPIC_IRDT_ENTRY_HI(vi->irqline), dp1); /* Read the window register to flush the writes down to HW */ dp1 = readl(isp->addr+IOSAPIC_REG_WINDOW); } /* ** set_irt prepares the data (dp0, dp1) according to the vector_info ** and target cpu (id_eid). dp0/dp1 are then used to program I/O SAPIC ** IRdT for the given "vector" (aka IRQ line). */ static void iosapic_set_irt_data( struct vector_info *vi, u32 *dp0, u32 *dp1) { u32 mode = 0; struct irt_entry *p = vi->irte; if ((p->polarity_trigger & IRT_PO_MASK) == IRT_ACTIVE_LO) mode |= IOSAPIC_IRDT_PO_LOW; if (((p->polarity_trigger >> IRT_EL_SHIFT) & IRT_EL_MASK) == IRT_LEVEL_TRIG) mode |= IOSAPIC_IRDT_LEVEL_TRIG; /* ** IA64 REVISIT ** PA doesn't support EXTINT or LPRIO bits. */ *dp0 = mode | (u32) vi->txn_data; /* ** Extracting id_eid isn't a real clean way of getting it. ** But the encoding is the same for both PA and IA64 platforms. */ if (is_pdc_pat()) { /* ** PAT PDC just hands it to us "right". ** txn_addr comes from cpu_data[x].txn_addr. 
*/ *dp1 = (u32) (vi->txn_addr); } else { /* ** eg if base_addr == 0xfffa0000), ** we want to get 0xa0ff0000. ** ** eid 0x0ff00000 -> 0x00ff0000 ** id 0x000ff000 -> 0xff000000 */ *dp1 = (((u32)vi->txn_addr & 0x0ff00000) >> 4) | (((u32)vi->txn_addr & 0x000ff000) << 12); } DBG_IRT("iosapic_set_irt_data(): 0x%x 0x%x\n", *dp0, *dp1); } static void iosapic_mask_irq(struct irq_data *d) { unsigned long flags; struct vector_info *vi = irq_data_get_irq_chip_data(d); u32 d0, d1; spin_lock_irqsave(&iosapic_lock, flags); iosapic_rd_irt_entry(vi, &d0, &d1); d0 |= IOSAPIC_IRDT_ENABLE; iosapic_wr_irt_entry(vi, d0, d1); spin_unlock_irqrestore(&iosapic_lock, flags); } static void iosapic_unmask_irq(struct irq_data *d) { struct vector_info *vi = irq_data_get_irq_chip_data(d); u32 d0, d1; /* data is initialized by fixup_irq */ WARN_ON(vi->txn_irq == 0); iosapic_set_irt_data(vi, &d0, &d1); iosapic_wr_irt_entry(vi, d0, d1); #ifdef DEBUG_IOSAPIC_IRT { u32 *t = (u32 *) ((ulong) vi->eoi_addr & ~0xffUL); printk("iosapic_enable_irq(): regs %p", vi->eoi_addr); for ( ; t < vi->eoi_addr; t++) printk(" %x", readl(t)); printk("\n"); } printk("iosapic_enable_irq(): sel "); { struct iosapic_info *isp = vi->iosapic; for (d0=0x10; d0<0x1e; d0++) { d1 = iosapic_read(isp->addr, d0); printk(" %x", d1); } } printk("\n"); #endif /* * Issuing I/O SAPIC an EOI causes an interrupt IFF IRQ line is * asserted. IRQ generally should not be asserted when a driver * enables their IRQ. It can lead to "interesting" race conditions * in the driver initialization sequence. 
*/ DBG(KERN_DEBUG "enable_irq(%d): eoi(%p, 0x%x)\n", d->irq, vi->eoi_addr, vi->eoi_data); iosapic_eoi(vi->eoi_addr, vi->eoi_data); } static void iosapic_eoi_irq(struct irq_data *d) { struct vector_info *vi = irq_data_get_irq_chip_data(d); iosapic_eoi(vi->eoi_addr, vi->eoi_data); cpu_eoi_irq(d); } #ifdef CONFIG_SMP static int iosapic_set_affinity_irq(struct irq_data *d, const struct cpumask *dest, bool force) { struct vector_info *vi = irq_data_get_irq_chip_data(d); u32 d0, d1, dummy_d0; unsigned long flags; int dest_cpu; dest_cpu = cpu_check_affinity(d, dest); if (dest_cpu < 0) return -1; cpumask_copy(d->affinity, cpumask_of(dest_cpu)); vi->txn_addr = txn_affinity_addr(d->irq, dest_cpu); spin_lock_irqsave(&iosapic_lock, flags); /* d1 contains the destination CPU, so only want to set that * entry */ iosapic_rd_irt_entry(vi, &d0, &d1); iosapic_set_irt_data(vi, &dummy_d0, &d1); iosapic_wr_irt_entry(vi, d0, d1); spin_unlock_irqrestore(&iosapic_lock, flags); return 0; } #endif static struct irq_chip iosapic_interrupt_type = { .name = "IO-SAPIC-level", .irq_unmask = iosapic_unmask_irq, .irq_mask = iosapic_mask_irq, .irq_ack = cpu_ack_irq, .irq_eoi = iosapic_eoi_irq, #ifdef CONFIG_SMP .irq_set_affinity = iosapic_set_affinity_irq, #endif }; int iosapic_fixup_irq(void *isi_obj, struct pci_dev *pcidev) { struct iosapic_info *isi = isi_obj; struct irt_entry *irte = NULL; /* only used if PAT PDC */ struct vector_info *vi; int isi_line; /* line used by device */ if (!isi) { printk(KERN_WARNING MODULE_NAME ": hpa not registered for %s\n", pci_name(pcidev)); return -1; } #ifdef CONFIG_SUPERIO /* * HACK ALERT! (non-compliant PCI device support) * * All SuckyIO interrupts are routed through the PIC's on function 1. * But SuckyIO OHCI USB controller gets an IRT entry anyway because * it advertises INT D for INT_PIN. Use that IRT entry to get the * SuckyIO interrupt routing for PICs on function 1 (*BLEECCHH*). 
*/ if (is_superio_device(pcidev)) { /* We must call superio_fixup_irq() to register the pdev */ pcidev->irq = superio_fixup_irq(pcidev); /* Don't return if need to program the IOSAPIC's IRT... */ if (PCI_FUNC(pcidev->devfn) != SUPERIO_USB_FN) return pcidev->irq; } #endif /* CONFIG_SUPERIO */ /* lookup IRT entry for isi/slot/pin set */ irte = iosapic_xlate_pin(isi, pcidev); if (!irte) { printk("iosapic: no IRTE for %s (IRQ not connected?)\n", pci_name(pcidev)); return -1; } DBG_IRT("iosapic_fixup_irq(): irte %p %x %x %x %x %x %x %x %x\n", irte, irte->entry_type, irte->entry_length, irte->polarity_trigger, irte->src_bus_irq_devno, irte->src_bus_id, irte->src_seg_id, irte->dest_iosapic_intin, (u32) irte->dest_iosapic_addr); isi_line = irte->dest_iosapic_intin; /* get vector info for this input line */ vi = isi->isi_vector + isi_line; DBG_IRT("iosapic_fixup_irq: line %d vi 0x%p\n", isi_line, vi); /* If this IRQ line has already been setup, skip it */ if (vi->irte) goto out; vi->irte = irte; /* * Allocate processor IRQ * * XXX/FIXME The txn_alloc_irq() code and related code should be * moved to enable_irq(). That way we only allocate processor IRQ * bits for devices that actually have drivers claiming them. * Right now we assign an IRQ to every PCI device present, * regardless of whether it's used or not. 
*/ vi->txn_irq = txn_alloc_irq(8); if (vi->txn_irq < 0) panic("I/O sapic: couldn't get TXN IRQ\n"); /* enable_irq() will use txn_* to program IRdT */ vi->txn_addr = txn_alloc_addr(vi->txn_irq); vi->txn_data = txn_alloc_data(vi->txn_irq); vi->eoi_addr = isi->addr + IOSAPIC_REG_EOI; vi->eoi_data = cpu_to_le32(vi->txn_data); cpu_claim_irq(vi->txn_irq, &iosapic_interrupt_type, vi); out: pcidev->irq = vi->txn_irq; DBG_IRT("iosapic_fixup_irq() %d:%d %x %x line %d irq %d\n", PCI_SLOT(pcidev->devfn), PCI_FUNC(pcidev->devfn), pcidev->vendor, pcidev->device, isi_line, pcidev->irq); return pcidev->irq; } /* ** squirrel away the I/O Sapic Version */ static unsigned int iosapic_rd_version(struct iosapic_info *isi) { return iosapic_read(isi->addr, IOSAPIC_REG_VERSION); } /* ** iosapic_register() is called by "drivers" with an integrated I/O SAPIC. ** Caller must be certain they have an I/O SAPIC and know its MMIO address. ** ** o allocate iosapic_info and add it to the list ** o read iosapic version and squirrel that away ** o read size of IRdT. ** o allocate and initialize isi_vector[] ** o allocate irq region */ void *iosapic_register(unsigned long hpa) { struct iosapic_info *isi = NULL; struct irt_entry *irte = irt_cell; struct vector_info *vip; int cnt; /* track how many entries we've looked at */ /* * Astro based platforms can only support PCI OLARD if they implement * PAT PDC. Legacy PDC omits LBAs with no PCI devices from the IRT. * Search the IRT and ignore iosapic's which aren't in the IRT. 
*/ for (cnt=0; cnt < irt_num_entry; cnt++, irte++) { WARN_ON(IRT_IOSAPIC_TYPE != irte->entry_type); if (COMPARE_IRTE_ADDR(irte, hpa)) break; } if (cnt >= irt_num_entry) { DBG("iosapic_register() ignoring 0x%lx (NOT FOUND)\n", hpa); return NULL; } isi = kzalloc(sizeof(struct iosapic_info), GFP_KERNEL); if (!isi) { BUG(); return NULL; } isi->addr = ioremap_nocache(hpa, 4096); isi->isi_hpa = hpa; isi->isi_version = iosapic_rd_version(isi); isi->isi_num_vectors = IOSAPIC_IRDT_MAX_ENTRY(isi->isi_version) + 1; vip = isi->isi_vector = kcalloc(isi->isi_num_vectors, sizeof(struct vector_info), GFP_KERNEL); if (vip == NULL) { kfree(isi); return NULL; } for (cnt=0; cnt < isi->isi_num_vectors; cnt++, vip++) { vip->irqline = (unsigned char) cnt; vip->iosapic = isi; } return isi; } #ifdef DEBUG_IOSAPIC static void iosapic_prt_irt(void *irt, long num_entry) { unsigned int i, *irp = (unsigned int *) irt; printk(KERN_DEBUG MODULE_NAME ": Interrupt Routing Table (%lx entries)\n", num_entry); for (i=0; i<num_entry; i++, irp += 4) { printk(KERN_DEBUG "%p : %2d %.8x %.8x %.8x %.8x\n", irp, i, irp[0], irp[1], irp[2], irp[3]); } } static void iosapic_prt_vi(struct vector_info *vi) { printk(KERN_DEBUG MODULE_NAME ": vector_info[%d] is at %p\n", vi->irqline, vi); printk(KERN_DEBUG "\t\tstatus: %.4x\n", vi->status); printk(KERN_DEBUG "\t\ttxn_irq: %d\n", vi->txn_irq); printk(KERN_DEBUG "\t\ttxn_addr: %lx\n", vi->txn_addr); printk(KERN_DEBUG "\t\ttxn_data: %lx\n", vi->txn_data); printk(KERN_DEBUG "\t\teoi_addr: %p\n", vi->eoi_addr); printk(KERN_DEBUG "\t\teoi_data: %x\n", vi->eoi_data); } static void iosapic_prt_isi(struct iosapic_info *isi) { printk(KERN_DEBUG MODULE_NAME ": io_sapic_info at %p\n", isi); printk(KERN_DEBUG "\t\tisi_hpa: %lx\n", isi->isi_hpa); printk(KERN_DEBUG "\t\tisi_status: %x\n", isi->isi_status); printk(KERN_DEBUG "\t\tisi_version: %x\n", isi->isi_version); printk(KERN_DEBUG "\t\tisi_vector: %p\n", isi->isi_vector); } #endif /* DEBUG_IOSAPIC */
gpl-2.0
crpalmer/dna-kernel-plus-upstream
arch/ia64/sn/pci/pci_dma.c
6895
13123
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2000,2002-2005 Silicon Graphics, Inc. All rights reserved. * * Routines for PCI DMA mapping. See Documentation/DMA-API.txt for * a description of how these routines should be used. */ #include <linux/gfp.h> #include <linux/module.h> #include <linux/dma-mapping.h> #include <asm/dma.h> #include <asm/sn/intr.h> #include <asm/sn/pcibus_provider_defs.h> #include <asm/sn/pcidev.h> #include <asm/sn/sn_sal.h> #define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg))) #define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG)) /** * sn_dma_supported - test a DMA mask * @dev: device to test * @mask: DMA mask to test * * Return whether the given PCI device DMA address mask can be supported * properly. For example, if your device can only drive the low 24-bits * during PCI bus mastering, then you would pass 0x00ffffff as the mask to * this function. Of course, SN only supports devices that have 32 or more * address bits when using the PMU. */ static int sn_dma_supported(struct device *dev, u64 mask) { BUG_ON(dev->bus != &pci_bus_type); if (mask < 0x7fffffff) return 0; return 1; } /** * sn_dma_set_mask - set the DMA mask * @dev: device to set * @dma_mask: new mask * * Set @dev's DMA mask if the hw supports it. */ int sn_dma_set_mask(struct device *dev, u64 dma_mask) { BUG_ON(dev->bus != &pci_bus_type); if (!sn_dma_supported(dev, dma_mask)) return 0; *dev->dma_mask = dma_mask; return 1; } EXPORT_SYMBOL(sn_dma_set_mask); /** * sn_dma_alloc_coherent - allocate memory for coherent DMA * @dev: device to allocate for * @size: size of the region * @dma_handle: DMA (bus) address * @flags: memory allocation flags * * dma_alloc_coherent() returns a pointer to a memory region suitable for * coherent DMA traffic to/from a PCI device. 
On SN platforms, this means * that @dma_handle will have the %PCIIO_DMA_CMD flag set. * * This interface is usually used for "command" streams (e.g. the command * queue for a SCSI controller). See Documentation/DMA-API.txt for * more information. */ static void *sn_dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t * dma_handle, gfp_t flags, struct dma_attrs *attrs) { void *cpuaddr; unsigned long phys_addr; int node; struct pci_dev *pdev = to_pci_dev(dev); struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); BUG_ON(dev->bus != &pci_bus_type); /* * Allocate the memory. */ node = pcibus_to_node(pdev->bus); if (likely(node >=0)) { struct page *p = alloc_pages_exact_node(node, flags, get_order(size)); if (likely(p)) cpuaddr = page_address(p); else return NULL; } else cpuaddr = (void *)__get_free_pages(flags, get_order(size)); if (unlikely(!cpuaddr)) return NULL; memset(cpuaddr, 0x0, size); /* physical addr. of the memory we just got */ phys_addr = __pa(cpuaddr); /* * 64 bit address translations should never fail. * 32 bit translations can fail if there are insufficient mapping * resources. */ *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size, SN_DMA_ADDR_PHYS); if (!*dma_handle) { printk(KERN_ERR "%s: out of ATEs\n", __func__); free_pages((unsigned long)cpuaddr, get_order(size)); return NULL; } return cpuaddr; } /** * sn_pci_free_coherent - free memory associated with coherent DMAable region * @dev: device to free for * @size: size to free * @cpu_addr: kernel virtual address to free * @dma_handle: DMA address associated with this region * * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping * any associated IOMMU mappings. 
*/ static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle, struct dma_attrs *attrs) { struct pci_dev *pdev = to_pci_dev(dev); struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); BUG_ON(dev->bus != &pci_bus_type); provider->dma_unmap(pdev, dma_handle, 0); free_pages((unsigned long)cpu_addr, get_order(size)); } /** * sn_dma_map_single_attrs - map a single page for DMA * @dev: device to map for * @cpu_addr: kernel virtual address of the region to map * @size: size of the region * @direction: DMA direction * @attrs: optional dma attributes * * Map the region pointed to by @cpu_addr for DMA and return the * DMA address. * * We map this to the one step pcibr_dmamap_trans interface rather than * the two step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have * no way of saving the dmamap handle from the alloc to later free * (which is pretty much unacceptable). * * mappings with the DMA_ATTR_WRITE_BARRIER get mapped with * dma_map_consistent() so that writes force a flush of pending DMA. * (See "SGI Altix Architecture Considerations for Linux Device Drivers", * Document Number: 007-4763-001) * * TODO: simplify our interface; * figure out how to save dmamap handle so can use two step. 
*/ static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { void *cpu_addr = page_address(page) + offset; dma_addr_t dma_addr; unsigned long phys_addr; struct pci_dev *pdev = to_pci_dev(dev); struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); int dmabarr; dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs); BUG_ON(dev->bus != &pci_bus_type); phys_addr = __pa(cpu_addr); if (dmabarr) dma_addr = provider->dma_map_consistent(pdev, phys_addr, size, SN_DMA_ADDR_PHYS); else dma_addr = provider->dma_map(pdev, phys_addr, size, SN_DMA_ADDR_PHYS); if (!dma_addr) { printk(KERN_ERR "%s: out of ATEs\n", __func__); return 0; } return dma_addr; } /** * sn_dma_unmap_single_attrs - unamp a DMA mapped page * @dev: device to sync * @dma_addr: DMA address to sync * @size: size of region * @direction: DMA direction * @attrs: optional dma attributes * * This routine is supposed to sync the DMA region specified * by @dma_handle into the coherence domain. On SN, we're always cache * coherent, so we just need to free any ATEs associated with this mapping. */ static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { struct pci_dev *pdev = to_pci_dev(dev); struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); BUG_ON(dev->bus != &pci_bus_type); provider->dma_unmap(pdev, dma_addr, dir); } /** * sn_dma_unmap_sg - unmap a DMA scatterlist * @dev: device to unmap * @sg: scatterlist to unmap * @nhwentries: number of scatterlist entries * @direction: DMA direction * @attrs: optional dma attributes * * Unmap a set of streaming mode DMA translations. 
*/ static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, int nhwentries, enum dma_data_direction dir, struct dma_attrs *attrs) { int i; struct pci_dev *pdev = to_pci_dev(dev); struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); struct scatterlist *sg; BUG_ON(dev->bus != &pci_bus_type); for_each_sg(sgl, sg, nhwentries, i) { provider->dma_unmap(pdev, sg->dma_address, dir); sg->dma_address = (dma_addr_t) NULL; sg->dma_length = 0; } } /** * sn_dma_map_sg - map a scatterlist for DMA * @dev: device to map for * @sg: scatterlist to map * @nhwentries: number of entries * @direction: direction of the DMA transaction * @attrs: optional dma attributes * * mappings with the DMA_ATTR_WRITE_BARRIER get mapped with * dma_map_consistent() so that writes force a flush of pending DMA. * (See "SGI Altix Architecture Considerations for Linux Device Drivers", * Document Number: 007-4763-001) * * Maps each entry of @sg for DMA. */ static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries, enum dma_data_direction dir, struct dma_attrs *attrs) { unsigned long phys_addr; struct scatterlist *saved_sg = sgl, *sg; struct pci_dev *pdev = to_pci_dev(dev); struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); int i; int dmabarr; dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs); BUG_ON(dev->bus != &pci_bus_type); /* * Setup a DMA address for each entry in the scatterlist. */ for_each_sg(sgl, sg, nhwentries, i) { dma_addr_t dma_addr; phys_addr = SG_ENT_PHYS_ADDRESS(sg); if (dmabarr) dma_addr = provider->dma_map_consistent(pdev, phys_addr, sg->length, SN_DMA_ADDR_PHYS); else dma_addr = provider->dma_map(pdev, phys_addr, sg->length, SN_DMA_ADDR_PHYS); sg->dma_address = dma_addr; if (!sg->dma_address) { printk(KERN_ERR "%s: out of ATEs\n", __func__); /* * Free any successfully allocated entries. 
*/ if (i > 0) sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs); return 0; } sg->dma_length = sg->length; } return nhwentries; } static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) { BUG_ON(dev->bus != &pci_bus_type); } static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) { BUG_ON(dev->bus != &pci_bus_type); } static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) { BUG_ON(dev->bus != &pci_bus_type); } static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) { BUG_ON(dev->bus != &pci_bus_type); } static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return 0; } u64 sn_dma_get_required_mask(struct device *dev) { return DMA_BIT_MASK(64); } EXPORT_SYMBOL_GPL(sn_dma_get_required_mask); char *sn_pci_get_legacy_mem(struct pci_bus *bus) { if (!SN_PCIBUS_BUSSOFT(bus)) return ERR_PTR(-ENODEV); return (char *)(SN_PCIBUS_BUSSOFT(bus)->bs_legacy_mem | __IA64_UNCACHED_OFFSET); } int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size) { unsigned long addr; int ret; struct ia64_sal_retval isrv; /* * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work * around hw issues at the pci bus level. SGI proms older than * 4.10 don't implement this. */ SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE, pci_domain_nr(bus), bus->number, 0, /* io */ 0, /* read */ port, size, __pa(val)); if (isrv.status == 0) return size; /* * If the above failed, retry using the SAL_PROBE call which should * be present in all proms (but which cannot work round PCI chipset * bugs). This code is retained for compatibility with old * pre-4.10 proms, and should be removed at some point in the future. 
*/ if (!SN_PCIBUS_BUSSOFT(bus)) return -ENODEV; addr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET; addr += port; ret = ia64_sn_probe_mem(addr, (long)size, (void *)val); if (ret == 2) return -EINVAL; if (ret == 1) *val = -1; return size; } int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size) { int ret = size; unsigned long paddr; unsigned long *addr; struct ia64_sal_retval isrv; /* * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work * around hw issues at the pci bus level. SGI proms older than * 4.10 don't implement this. */ SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE, pci_domain_nr(bus), bus->number, 0, /* io */ 1, /* write */ port, size, __pa(&val)); if (isrv.status == 0) return size; /* * If the above failed, retry using the SAL_PROBE call which should * be present in all proms (but which cannot work round PCI chipset * bugs). This code is retained for compatibility with old * pre-4.10 proms, and should be removed at some point in the future. */ if (!SN_PCIBUS_BUSSOFT(bus)) { ret = -ENODEV; goto out; } /* Put the phys addr in uncached space */ paddr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET; paddr += port; addr = (unsigned long *)paddr; switch (size) { case 1: *(volatile u8 *)(addr) = (u8)(val); break; case 2: *(volatile u16 *)(addr) = (u16)(val); break; case 4: *(volatile u32 *)(addr) = (u32)(val); break; default: ret = -EINVAL; break; } out: return ret; } static struct dma_map_ops sn_dma_ops = { .alloc = sn_dma_alloc_coherent, .free = sn_dma_free_coherent, .map_page = sn_dma_map_page, .unmap_page = sn_dma_unmap_page, .map_sg = sn_dma_map_sg, .unmap_sg = sn_dma_unmap_sg, .sync_single_for_cpu = sn_dma_sync_single_for_cpu, .sync_sg_for_cpu = sn_dma_sync_sg_for_cpu, .sync_single_for_device = sn_dma_sync_single_for_device, .sync_sg_for_device = sn_dma_sync_sg_for_device, .mapping_error = sn_dma_mapping_error, .dma_supported = sn_dma_supported, }; void sn_dma_init(void) { dma_ops = &sn_dma_ops; }
gpl-2.0
sub-b/android_kernel_samsung_matissewifi-old
drivers/scsi/mac53c94.c
8431
15374
/* * SCSI low-level driver for the 53c94 SCSI bus adaptor found * on Power Macintosh computers, controlling the external SCSI chain. * We assume the 53c94 is connected to a DBDMA (descriptor-based DMA) * controller. * * Paul Mackerras, August 1996. * Copyright (C) 1996 Paul Mackerras. */ #include <linux/kernel.h> #include <linux/delay.h> #include <linux/types.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/module.h> #include <asm/dbdma.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/macio.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include "mac53c94.h" enum fsc_phase { idle, selecting, dataing, completing, busfreeing, }; struct fsc_state { struct mac53c94_regs __iomem *regs; int intr; struct dbdma_regs __iomem *dma; int dmaintr; int clk_freq; struct Scsi_Host *host; struct scsi_cmnd *request_q; struct scsi_cmnd *request_qtail; struct scsi_cmnd *current_req; /* req we're currently working on */ enum fsc_phase phase; /* what we're currently trying to do */ struct dbdma_cmd *dma_cmds; /* space for dbdma commands, aligned */ void *dma_cmd_space; struct pci_dev *pdev; dma_addr_t dma_addr; struct macio_dev *mdev; }; static void mac53c94_init(struct fsc_state *); static void mac53c94_start(struct fsc_state *); static void mac53c94_interrupt(int, void *); static irqreturn_t do_mac53c94_interrupt(int, void *); static void cmd_done(struct fsc_state *, int result); static void set_dma_cmds(struct fsc_state *, struct scsi_cmnd *); static int mac53c94_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { struct fsc_state *state; #if 0 if (cmd->sc_data_direction == DMA_TO_DEVICE) { int i; printk(KERN_DEBUG "mac53c94_queue %p: command is", cmd); for (i = 0; i < cmd->cmd_len; ++i) 
printk(KERN_CONT " %.2x", cmd->cmnd[i]); printk(KERN_CONT "\n"); printk(KERN_DEBUG "use_sg=%d request_bufflen=%d request_buffer=%p\n", scsi_sg_count(cmd), scsi_bufflen(cmd), scsi_sglist(cmd)); } #endif cmd->scsi_done = done; cmd->host_scribble = NULL; state = (struct fsc_state *) cmd->device->host->hostdata; if (state->request_q == NULL) state->request_q = cmd; else state->request_qtail->host_scribble = (void *) cmd; state->request_qtail = cmd; if (state->phase == idle) mac53c94_start(state); return 0; } static DEF_SCSI_QCMD(mac53c94_queue) static int mac53c94_host_reset(struct scsi_cmnd *cmd) { struct fsc_state *state = (struct fsc_state *) cmd->device->host->hostdata; struct mac53c94_regs __iomem *regs = state->regs; struct dbdma_regs __iomem *dma = state->dma; unsigned long flags; spin_lock_irqsave(cmd->device->host->host_lock, flags); writel((RUN|PAUSE|FLUSH|WAKE) << 16, &dma->control); writeb(CMD_SCSI_RESET, &regs->command); /* assert RST */ udelay(100); /* leave it on for a while (>= 25us) */ writeb(CMD_RESET, &regs->command); udelay(20); mac53c94_init(state); writeb(CMD_NOP, &regs->command); spin_unlock_irqrestore(cmd->device->host->host_lock, flags); return SUCCESS; } static void mac53c94_init(struct fsc_state *state) { struct mac53c94_regs __iomem *regs = state->regs; struct dbdma_regs __iomem *dma = state->dma; int x; writeb(state->host->this_id | CF1_PAR_ENABLE, &regs->config1); writeb(TIMO_VAL(250), &regs->sel_timeout); /* 250ms */ writeb(CLKF_VAL(state->clk_freq), &regs->clk_factor); writeb(CF2_FEATURE_EN, &regs->config2); writeb(0, &regs->config3); writeb(0, &regs->sync_period); writeb(0, &regs->sync_offset); x = readb(&regs->interrupt); writel((RUN|PAUSE|FLUSH|WAKE) << 16, &dma->control); } /* * Start the next command for a 53C94. * Should be called with interrupts disabled. 
*/ static void mac53c94_start(struct fsc_state *state) { struct scsi_cmnd *cmd; struct mac53c94_regs __iomem *regs = state->regs; int i; if (state->phase != idle || state->current_req != NULL) panic("inappropriate mac53c94_start (state=%p)", state); if (state->request_q == NULL) return; state->current_req = cmd = state->request_q; state->request_q = (struct scsi_cmnd *) cmd->host_scribble; /* Off we go */ writeb(0, &regs->count_lo); writeb(0, &regs->count_mid); writeb(0, &regs->count_hi); writeb(CMD_NOP + CMD_DMA_MODE, &regs->command); udelay(1); writeb(CMD_FLUSH, &regs->command); udelay(1); writeb(cmd->device->id, &regs->dest_id); writeb(0, &regs->sync_period); writeb(0, &regs->sync_offset); /* load the command into the FIFO */ for (i = 0; i < cmd->cmd_len; ++i) writeb(cmd->cmnd[i], &regs->fifo); /* do select without ATN XXX */ writeb(CMD_SELECT, &regs->command); state->phase = selecting; set_dma_cmds(state, cmd); } static irqreturn_t do_mac53c94_interrupt(int irq, void *dev_id) { unsigned long flags; struct Scsi_Host *dev = ((struct fsc_state *) dev_id)->current_req->device->host; spin_lock_irqsave(dev->host_lock, flags); mac53c94_interrupt(irq, dev_id); spin_unlock_irqrestore(dev->host_lock, flags); return IRQ_HANDLED; } static void mac53c94_interrupt(int irq, void *dev_id) { struct fsc_state *state = (struct fsc_state *) dev_id; struct mac53c94_regs __iomem *regs = state->regs; struct dbdma_regs __iomem *dma = state->dma; struct scsi_cmnd *cmd = state->current_req; int nb, stat, seq, intr; static int mac53c94_errors; /* * Apparently, reading the interrupt register unlatches * the status and sequence step registers. 
*/ seq = readb(&regs->seqstep); stat = readb(&regs->status); intr = readb(&regs->interrupt); #if 0 printk(KERN_DEBUG "mac53c94_intr, intr=%x stat=%x seq=%x phase=%d\n", intr, stat, seq, state->phase); #endif if (intr & INTR_RESET) { /* SCSI bus was reset */ printk(KERN_INFO "external SCSI bus reset detected\n"); writeb(CMD_NOP, &regs->command); writel(RUN << 16, &dma->control); /* stop dma */ cmd_done(state, DID_RESET << 16); return; } if (intr & INTR_ILL_CMD) { printk(KERN_ERR "53c94: invalid cmd, intr=%x stat=%x seq=%x phase=%d\n", intr, stat, seq, state->phase); cmd_done(state, DID_ERROR << 16); return; } if (stat & STAT_ERROR) { #if 0 /* XXX these seem to be harmless? */ printk("53c94: bad error, intr=%x stat=%x seq=%x phase=%d\n", intr, stat, seq, state->phase); #endif ++mac53c94_errors; writeb(CMD_NOP + CMD_DMA_MODE, &regs->command); } if (cmd == 0) { printk(KERN_DEBUG "53c94: interrupt with no command active?\n"); return; } if (stat & STAT_PARITY) { printk(KERN_ERR "mac53c94: parity error\n"); cmd_done(state, DID_PARITY << 16); return; } switch (state->phase) { case selecting: if (intr & INTR_DISCONNECT) { /* selection timed out */ cmd_done(state, DID_BAD_TARGET << 16); return; } if (intr != INTR_BUS_SERV + INTR_DONE) { printk(KERN_DEBUG "got intr %x during selection\n", intr); cmd_done(state, DID_ERROR << 16); return; } if ((seq & SS_MASK) != SS_DONE) { printk(KERN_DEBUG "seq step %x after command\n", seq); cmd_done(state, DID_ERROR << 16); return; } writeb(CMD_NOP, &regs->command); /* set DMA controller going if any data to transfer */ if ((stat & (STAT_MSG|STAT_CD)) == 0 && (scsi_sg_count(cmd) > 0 || scsi_bufflen(cmd))) { nb = cmd->SCp.this_residual; if (nb > 0xfff0) nb = 0xfff0; cmd->SCp.this_residual -= nb; writeb(nb, &regs->count_lo); writeb(nb >> 8, &regs->count_mid); writeb(CMD_DMA_MODE + CMD_NOP, &regs->command); writel(virt_to_phys(state->dma_cmds), &dma->cmdptr); writel((RUN << 16) | RUN, &dma->control); writeb(CMD_DMA_MODE + CMD_XFER_DATA, 
&regs->command); state->phase = dataing; break; } else if ((stat & STAT_PHASE) == STAT_CD + STAT_IO) { /* up to status phase already */ writeb(CMD_I_COMPLETE, &regs->command); state->phase = completing; } else { printk(KERN_DEBUG "in unexpected phase %x after cmd\n", stat & STAT_PHASE); cmd_done(state, DID_ERROR << 16); return; } break; case dataing: if (intr != INTR_BUS_SERV) { printk(KERN_DEBUG "got intr %x before status\n", intr); cmd_done(state, DID_ERROR << 16); return; } if (cmd->SCp.this_residual != 0 && (stat & (STAT_MSG|STAT_CD)) == 0) { /* Set up the count regs to transfer more */ nb = cmd->SCp.this_residual; if (nb > 0xfff0) nb = 0xfff0; cmd->SCp.this_residual -= nb; writeb(nb, &regs->count_lo); writeb(nb >> 8, &regs->count_mid); writeb(CMD_DMA_MODE + CMD_NOP, &regs->command); writeb(CMD_DMA_MODE + CMD_XFER_DATA, &regs->command); break; } if ((stat & STAT_PHASE) != STAT_CD + STAT_IO) { printk(KERN_DEBUG "intr %x before data xfer complete\n", intr); } writel(RUN << 16, &dma->control); /* stop dma */ scsi_dma_unmap(cmd); /* should check dma status */ writeb(CMD_I_COMPLETE, &regs->command); state->phase = completing; break; case completing: if (intr != INTR_DONE) { printk(KERN_DEBUG "got intr %x on completion\n", intr); cmd_done(state, DID_ERROR << 16); return; } cmd->SCp.Status = readb(&regs->fifo); cmd->SCp.Message = readb(&regs->fifo); cmd->result = CMD_ACCEPT_MSG; writeb(CMD_ACCEPT_MSG, &regs->command); state->phase = busfreeing; break; case busfreeing: if (intr != INTR_DISCONNECT) { printk(KERN_DEBUG "got intr %x when expected disconnect\n", intr); } cmd_done(state, (DID_OK << 16) + (cmd->SCp.Message << 8) + cmd->SCp.Status); break; default: printk(KERN_DEBUG "don't know about phase %d\n", state->phase); } } static void cmd_done(struct fsc_state *state, int result) { struct scsi_cmnd *cmd; cmd = state->current_req; if (cmd != 0) { cmd->result = result; (*cmd->scsi_done)(cmd); state->current_req = NULL; } state->phase = idle; mac53c94_start(state); } /* 
* Set up DMA commands for transferring data. */ static void set_dma_cmds(struct fsc_state *state, struct scsi_cmnd *cmd) { int i, dma_cmd, total, nseg; struct scatterlist *scl; struct dbdma_cmd *dcmds; dma_addr_t dma_addr; u32 dma_len; nseg = scsi_dma_map(cmd); BUG_ON(nseg < 0); if (!nseg) return; dma_cmd = cmd->sc_data_direction == DMA_TO_DEVICE ? OUTPUT_MORE : INPUT_MORE; dcmds = state->dma_cmds; total = 0; scsi_for_each_sg(cmd, scl, nseg, i) { dma_addr = sg_dma_address(scl); dma_len = sg_dma_len(scl); if (dma_len > 0xffff) panic("mac53c94: scatterlist element >= 64k"); total += dma_len; st_le16(&dcmds->req_count, dma_len); st_le16(&dcmds->command, dma_cmd); st_le32(&dcmds->phy_addr, dma_addr); dcmds->xfer_status = 0; ++dcmds; } dma_cmd += OUTPUT_LAST - OUTPUT_MORE; st_le16(&dcmds[-1].command, dma_cmd); st_le16(&dcmds->command, DBDMA_STOP); cmd->SCp.this_residual = total; } static struct scsi_host_template mac53c94_template = { .proc_name = "53c94", .name = "53C94", .queuecommand = mac53c94_queue, .eh_host_reset_handler = mac53c94_host_reset, .can_queue = 1, .this_id = 7, .sg_tablesize = SG_ALL, .cmd_per_lun = 1, .use_clustering = DISABLE_CLUSTERING, }; static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *match) { struct device_node *node = macio_get_of_node(mdev); struct pci_dev *pdev = macio_get_pci_dev(mdev); struct fsc_state *state; struct Scsi_Host *host; void *dma_cmd_space; const unsigned char *clkprop; int proplen, rc = -ENODEV; if (macio_resource_count(mdev) != 2 || macio_irq_count(mdev) != 2) { printk(KERN_ERR "mac53c94: expected 2 addrs and intrs" " (got %d/%d)\n", macio_resource_count(mdev), macio_irq_count(mdev)); return -ENODEV; } if (macio_request_resources(mdev, "mac53c94") != 0) { printk(KERN_ERR "mac53c94: unable to request memory resources"); return -EBUSY; } host = scsi_host_alloc(&mac53c94_template, sizeof(struct fsc_state)); if (host == NULL) { printk(KERN_ERR "mac53c94: couldn't register host"); rc = -ENOMEM; goto 
out_release; } state = (struct fsc_state *) host->hostdata; macio_set_drvdata(mdev, state); state->host = host; state->pdev = pdev; state->mdev = mdev; state->regs = (struct mac53c94_regs __iomem *) ioremap(macio_resource_start(mdev, 0), 0x1000); state->intr = macio_irq(mdev, 0); state->dma = (struct dbdma_regs __iomem *) ioremap(macio_resource_start(mdev, 1), 0x1000); state->dmaintr = macio_irq(mdev, 1); if (state->regs == NULL || state->dma == NULL) { printk(KERN_ERR "mac53c94: ioremap failed for %s\n", node->full_name); goto out_free; } clkprop = of_get_property(node, "clock-frequency", &proplen); if (clkprop == NULL || proplen != sizeof(int)) { printk(KERN_ERR "%s: can't get clock frequency, " "assuming 25MHz\n", node->full_name); state->clk_freq = 25000000; } else state->clk_freq = *(int *)clkprop; /* Space for dma command list: +1 for stop command, * +1 to allow for aligning. * XXX FIXME: Use DMA consistent routines */ dma_cmd_space = kmalloc((host->sg_tablesize + 2) * sizeof(struct dbdma_cmd), GFP_KERNEL); if (dma_cmd_space == 0) { printk(KERN_ERR "mac53c94: couldn't allocate dma " "command space for %s\n", node->full_name); rc = -ENOMEM; goto out_free; } state->dma_cmds = (struct dbdma_cmd *)DBDMA_ALIGN(dma_cmd_space); memset(state->dma_cmds, 0, (host->sg_tablesize + 1) * sizeof(struct dbdma_cmd)); state->dma_cmd_space = dma_cmd_space; mac53c94_init(state); if (request_irq(state->intr, do_mac53c94_interrupt, 0, "53C94",state)) { printk(KERN_ERR "mac53C94: can't get irq %d for %s\n", state->intr, node->full_name); goto out_free_dma; } rc = scsi_add_host(host, &mdev->ofdev.dev); if (rc != 0) goto out_release_irq; scsi_scan_host(host); return 0; out_release_irq: free_irq(state->intr, state); out_free_dma: kfree(state->dma_cmd_space); out_free: if (state->dma != NULL) iounmap(state->dma); if (state->regs != NULL) iounmap(state->regs); scsi_host_put(host); out_release: macio_release_resources(mdev); return rc; } static int mac53c94_remove(struct macio_dev *mdev) 
{ struct fsc_state *fp = (struct fsc_state *)macio_get_drvdata(mdev); struct Scsi_Host *host = fp->host; scsi_remove_host(host); free_irq(fp->intr, fp); if (fp->regs) iounmap(fp->regs); if (fp->dma) iounmap(fp->dma); kfree(fp->dma_cmd_space); scsi_host_put(host); macio_release_resources(mdev); return 0; } static struct of_device_id mac53c94_match[] = { { .name = "53c94", }, {}, }; MODULE_DEVICE_TABLE (of, mac53c94_match); static struct macio_driver mac53c94_driver = { .driver = { .name = "mac53c94", .owner = THIS_MODULE, .of_match_table = mac53c94_match, }, .probe = mac53c94_probe, .remove = mac53c94_remove, }; static int __init init_mac53c94(void) { return macio_register_driver(&mac53c94_driver); } static void __exit exit_mac53c94(void) { return macio_unregister_driver(&mac53c94_driver); } module_init(init_mac53c94); module_exit(exit_mac53c94); MODULE_DESCRIPTION("PowerMac 53c94 SCSI driver"); MODULE_AUTHOR("Paul Mackerras <paulus@samba.org>"); MODULE_LICENSE("GPL");
gpl-2.0
Snuzzo/funky_jewel
arch/arm/mach-w90x900/nuc910.c
13039
1298
/* * linux/arch/arm/mach-w90x900/nuc910.c * * Based on linux/arch/arm/plat-s3c24xx/s3c244x.c by Ben Dooks * * Copyright (c) 2009 Nuvoton corporation. * * Wan ZongShun <mcuos.com@gmail.com> * * NUC910 cpu support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation;version 2 of the License. * */ #include <linux/platform_device.h> #include <asm/mach/map.h> #include <mach/hardware.h> #include "cpu.h" #include "clock.h" /* define specific CPU platform device */ static struct platform_device *nuc910_dev[] __initdata = { &nuc900_device_ts, &nuc900_device_rtc, &nuc900_device_lcd, &nuc900_device_kpi, }; /* define specific CPU platform io map */ static struct map_desc nuc910evb_iodesc[] __initdata = { IODESC_ENT(USBEHCIHOST), IODESC_ENT(USBOHCIHOST), IODESC_ENT(KPI), IODESC_ENT(USBDEV), IODESC_ENT(ADC), }; /*Init NUC910 evb io*/ void __init nuc910_map_io(void) { nuc900_map_io(nuc910evb_iodesc, ARRAY_SIZE(nuc910evb_iodesc)); } /*Init NUC910 clock*/ void __init nuc910_init_clocks(void) { nuc900_init_clocks(); } /*Init NUC910 board info*/ void __init nuc910_board_init(void) { nuc900_board_init(nuc910_dev, ARRAY_SIZE(nuc910_dev)); }
gpl-2.0
zlatinski/p-android-omap-3.4-new-ion-topic-sync-dma-buf-fence2
drivers/misc/cb710/debug.c
14063
3344
/* * cb710/debug.c * * Copyright by Michał Mirosław, 2008-2009 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/cb710.h> #include <linux/kernel.h> #include <linux/module.h> #define CB710_REG_COUNT 0x80 static const u16 allow[CB710_REG_COUNT/16] = { 0xFFF0, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFF0, 0xFFFF, 0xFFFF, 0xFFFF, }; static const char *const prefix[ARRAY_SIZE(allow)] = { "MMC", "MMC", "MMC", "MMC", "MS?", "MS?", "SM?", "SM?" }; static inline int allow_reg_read(unsigned block, unsigned offset, unsigned bits) { unsigned mask = (1 << bits/8) - 1; offset *= bits/8; return ((allow[block] >> offset) & mask) == mask; } #define CB710_READ_REGS_TEMPLATE(t) \ static void cb710_read_regs_##t(void __iomem *iobase, \ u##t *reg, unsigned select) \ { \ unsigned i, j; \ \ for (i = 0; i < ARRAY_SIZE(allow); ++i, reg += 16/(t/8)) { \ if (!(select & (1 << i))) \ continue; \ \ for (j = 0; j < 0x10/(t/8); ++j) { \ if (!allow_reg_read(i, j, t)) \ continue; \ reg[j] = ioread##t(iobase \ + (i << 4) + (j * (t/8))); \ } \ } \ } static const char cb710_regf_8[] = "%02X"; static const char cb710_regf_16[] = "%04X"; static const char cb710_regf_32[] = "%08X"; static const char cb710_xes[] = "xxxxxxxx"; #define CB710_DUMP_REGS_TEMPLATE(t) \ static void cb710_dump_regs_##t(struct device *dev, \ const u##t *reg, unsigned select) \ { \ const char *const xp = &cb710_xes[8 - t/4]; \ const char *const format = cb710_regf_##t; \ \ char msg[100], *p; \ unsigned i, j; \ \ for (i = 0; i < ARRAY_SIZE(allow); ++i, reg += 16/(t/8)) { \ if (!(select & (1 << i))) \ continue; \ p = msg; \ for (j = 0; j < 0x10/(t/8); ++j) { \ *p++ = ' '; \ if (j == 8/(t/8)) \ *p++ = ' '; \ if (allow_reg_read(i, j, t)) \ p += sprintf(p, format, reg[j]); \ else \ p += sprintf(p, "%s", xp); \ } \ dev_dbg(dev, "%s 0x%02X %s\n", prefix[i], i << 4, msg); \ } \ } #define 
CB710_READ_AND_DUMP_REGS_TEMPLATE(t) \ static void cb710_read_and_dump_regs_##t(struct cb710_chip *chip, \ unsigned select) \ { \ u##t regs[CB710_REG_COUNT/sizeof(u##t)]; \ \ memset(&regs, 0, sizeof(regs)); \ cb710_read_regs_##t(chip->iobase, regs, select); \ cb710_dump_regs_##t(cb710_chip_dev(chip), regs, select); \ } #define CB710_REG_ACCESS_TEMPLATES(t) \ CB710_READ_REGS_TEMPLATE(t) \ CB710_DUMP_REGS_TEMPLATE(t) \ CB710_READ_AND_DUMP_REGS_TEMPLATE(t) CB710_REG_ACCESS_TEMPLATES(8) CB710_REG_ACCESS_TEMPLATES(16) CB710_REG_ACCESS_TEMPLATES(32) void cb710_dump_regs(struct cb710_chip *chip, unsigned select) { if (!(select & CB710_DUMP_REGS_MASK)) select = CB710_DUMP_REGS_ALL; if (!(select & CB710_DUMP_ACCESS_MASK)) select |= CB710_DUMP_ACCESS_8; if (select & CB710_DUMP_ACCESS_32) cb710_read_and_dump_regs_32(chip, select); if (select & CB710_DUMP_ACCESS_16) cb710_read_and_dump_regs_16(chip, select); if (select & CB710_DUMP_ACCESS_8) cb710_read_and_dump_regs_8(chip, select); } EXPORT_SYMBOL_GPL(cb710_dump_regs);
gpl-2.0
carbonsoft/kernel
arch/arm/mach-nomadik/timer.c
496
4315
/* * linux/arch/arm/mach-nomadik/timer.c * * Copyright (C) 2008 STMicroelectronics * Copyright (C) 2009 Alessandro Rubini, somewhat based on at91sam926x * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/clockchips.h> #include <linux/jiffies.h> #include <asm/mach/time.h> #include <mach/mtu.h> #define TIMER_CTRL 0x80 /* No divisor */ #define TIMER_PERIODIC 0x40 #define TIMER_SZ32BIT 0x02 /* Initial value for SRC control register: all timers use MXTAL/8 source */ #define SRC_CR_INIT_MASK 0x00007fff #define SRC_CR_INIT_VAL 0x2aaa8000 static u32 nmdk_count; /* accumulated count */ static u32 nmdk_cycle; /* write-once */ static __iomem void *mtu_base; /* * clocksource: the MTU device is a decrementing counters, so we negate * the value being read. */ static cycle_t nmdk_read_timer(struct clocksource *cs) { u32 count = readl(mtu_base + MTU_VAL(0)); return nmdk_count + nmdk_cycle - count; } static struct clocksource nmdk_clksrc = { .name = "mtu_0", .rating = 120, .read = nmdk_read_timer, .shift = 20, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; /* * Clockevent device: currently only periodic mode is supported */ static void nmdk_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev) { unsigned long flags; switch (mode) { case CLOCK_EVT_MODE_PERIODIC: /* enable interrupts -- and count current value? 
*/ raw_local_irq_save(flags); writel(readl(mtu_base + MTU_IMSC) | 1, mtu_base + MTU_IMSC); raw_local_irq_restore(flags); break; case CLOCK_EVT_MODE_ONESHOT: BUG(); /* Not supported, yet */ /* FALLTHROUGH */ case CLOCK_EVT_MODE_SHUTDOWN: case CLOCK_EVT_MODE_UNUSED: /* disable irq */ raw_local_irq_save(flags); writel(readl(mtu_base + MTU_IMSC) & ~1, mtu_base + MTU_IMSC); raw_local_irq_restore(flags); break; case CLOCK_EVT_MODE_RESUME: break; } } static struct clock_event_device nmdk_clkevt = { .name = "mtu_0", .features = CLOCK_EVT_FEAT_PERIODIC, .shift = 32, .rating = 100, .set_mode = nmdk_clkevt_mode, }; /* * IRQ Handler for the timer 0 of the MTU block. The irq is not shared * as we are the only users of mtu0 by now. */ static irqreturn_t nmdk_timer_interrupt(int irq, void *dev_id) { /* ack: "interrupt clear register" */ writel( 1 << 0, mtu_base + MTU_ICR); /* we can't count lost ticks, unfortunately */ nmdk_count += nmdk_cycle; nmdk_clkevt.event_handler(&nmdk_clkevt); return IRQ_HANDLED; } /* * Set up timer interrupt, and return the current time in seconds. 
*/ static struct irqaction nmdk_timer_irq = { .name = "Nomadik Timer Tick", .flags = IRQF_DISABLED | IRQF_TIMER, .handler = nmdk_timer_interrupt, }; static void nmdk_timer_reset(void) { u32 cr; writel(0, mtu_base + MTU_CR(0)); /* off */ /* configure load and background-load, and fire it up */ writel(nmdk_cycle, mtu_base + MTU_LR(0)); writel(nmdk_cycle, mtu_base + MTU_BGLR(0)); cr = MTU_CRn_PERIODIC | MTU_CRn_PRESCALE_1 | MTU_CRn_32BITS; writel(cr, mtu_base + MTU_CR(0)); writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(0)); } static void __init nmdk_timer_init(void) { u32 src_cr; unsigned long rate; int bits; rate = CLOCK_TICK_RATE; /* 2.4MHz */ nmdk_cycle = (rate + HZ/2) / HZ; /* Configure timer sources in "system reset controller" ctrl reg */ src_cr = readl(io_p2v(NOMADIK_SRC_BASE)); src_cr &= SRC_CR_INIT_MASK; src_cr |= SRC_CR_INIT_VAL; writel(src_cr, io_p2v(NOMADIK_SRC_BASE)); /* Save global pointer to mtu, used by functions above */ mtu_base = io_p2v(NOMADIK_MTU0_BASE); /* Init the timer and register clocksource */ nmdk_timer_reset(); nmdk_clksrc.mult = clocksource_hz2mult(rate, nmdk_clksrc.shift); bits = 8*sizeof(nmdk_count); nmdk_clksrc.mask = CLOCKSOURCE_MASK(bits); clocksource_register(&nmdk_clksrc); /* Register irq and clockevents */ setup_irq(IRQ_MTU0, &nmdk_timer_irq); nmdk_clkevt.mult = div_sc(rate, NSEC_PER_SEC, nmdk_clkevt.shift); nmdk_clkevt.cpumask = cpumask_of(0); clockevents_register_device(&nmdk_clkevt); } struct sys_timer nomadik_timer = { .init = nmdk_timer_init, };
gpl-2.0
herophj/linux_kerner_2_6
arch/arm/mach-pxa/pcm990-baseboard.c
496
13663
/* * arch/arm/mach-pxa/pcm990-baseboard.c * Support for the Phytec phyCORE-PXA270 Development Platform (PCM-990). * * Refer * http://www.phytec.com/products/rdk/ARM-XScale/phyCORE-XScale-PXA270.html * for additional hardware info * * Author: Juergen Kilb * Created: April 05, 2005 * Copyright: Phytec Messtechnik GmbH * e-Mail: armlinux@phytec.de * * based on Intel Mainstone Board * * Copyright 2007 Juergen Beisert @ Pengutronix (j.beisert@pengutronix.de) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/irq.h> #include <linux/platform_device.h> #include <linux/i2c.h> #include <linux/pwm_backlight.h> #include <media/soc_camera.h> #include <asm/gpio.h> #include <plat/i2c.h> #include <mach/camera.h> #include <asm/mach/map.h> #include <mach/pxa27x.h> #include <mach/audio.h> #include <mach/mmc.h> #include <mach/ohci.h> #include <mach/pcm990_baseboard.h> #include <mach/pxafb.h> #include "devices.h" #include "generic.h" static unsigned long pcm990_pin_config[] __initdata = { /* MMC */ GPIO32_MMC_CLK, GPIO112_MMC_CMD, GPIO92_MMC_DAT_0, GPIO109_MMC_DAT_1, GPIO110_MMC_DAT_2, GPIO111_MMC_DAT_3, /* USB */ GPIO88_USBH1_PWR, GPIO89_USBH1_PEN, /* PWM0 */ GPIO16_PWM0_OUT, /* I2C */ GPIO117_I2C_SCL, GPIO118_I2C_SDA, }; /* * pcm990_lcd_power - control power supply to the LCD * @on: 0 = switch off, 1 = switch on * * Called by the pxafb driver */ #ifndef CONFIG_PCM990_DISPLAY_NONE static void pcm990_lcd_power(int on, struct fb_var_screeninfo *var) { if (on) { /* enable LCD-Latches * power on LCD */ __PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG3) = PCM990_CTRL_LCDPWR + PCM990_CTRL_LCDON; } else { /* disable LCD-Latches * power off LCD */ __PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG3) = 0x00; } } #endif #if defined(CONFIG_PCM990_DISPLAY_SHARP) static struct pxafb_mode_info fb_info_sharp_lq084v1dg21 = { .pixclock = 28000, 
.xres = 640, .yres = 480, .bpp = 16, .hsync_len = 20, .left_margin = 103, .right_margin = 47, .vsync_len = 6, .upper_margin = 28, .lower_margin = 5, .sync = 0, .cmap_greyscale = 0, }; static struct pxafb_mach_info pcm990_fbinfo __initdata = { .modes = &fb_info_sharp_lq084v1dg21, .num_modes = 1, .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL, .pxafb_lcd_power = pcm990_lcd_power, }; #elif defined(CONFIG_PCM990_DISPLAY_NEC) struct pxafb_mode_info fb_info_nec_nl6448bc20_18d = { .pixclock = 39720, .xres = 640, .yres = 480, .bpp = 16, .hsync_len = 32, .left_margin = 16, .right_margin = 48, .vsync_len = 2, .upper_margin = 12, .lower_margin = 17, .sync = 0, .cmap_greyscale = 0, }; static struct pxafb_mach_info pcm990_fbinfo __initdata = { .modes = &fb_info_nec_nl6448bc20_18d, .num_modes = 1, .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL, .pxafb_lcd_power = pcm990_lcd_power, }; #endif static struct platform_pwm_backlight_data pcm990_backlight_data = { .pwm_id = 0, .max_brightness = 1023, .dft_brightness = 1023, .pwm_period_ns = 78770, }; static struct platform_device pcm990_backlight_device = { .name = "pwm-backlight", .dev = { .parent = &pxa27x_device_pwm0.dev, .platform_data = &pcm990_backlight_data, }, }; /* * The PCM-990 development baseboard uses PCM-027's hardware in the * following way: * * - LCD support is in use * - GPIO16 is output for back light on/off with PWM * - GPIO58 ... 
GPIO73 are outputs for display data * - GPIO74 is output output for LCDFCLK * - GPIO75 is output for LCDLCLK * - GPIO76 is output for LCDPCLK * - GPIO77 is output for LCDBIAS * - MMC support is in use * - GPIO32 is output for MMCCLK * - GPIO92 is MMDAT0 * - GPIO109 is MMDAT1 * - GPIO110 is MMCS0 * - GPIO111 is MMCS1 * - GPIO112 is MMCMD * - IDE/CF card is in use * - GPIO48 is output /POE * - GPIO49 is output /PWE * - GPIO50 is output /PIOR * - GPIO51 is output /PIOW * - GPIO54 is output /PCE2 * - GPIO55 is output /PREG * - GPIO56 is input /PWAIT * - GPIO57 is output /PIOS16 * - GPIO79 is output PSKTSEL * - GPIO85 is output /PCE1 * - FFUART is in use * - GPIO34 is input FFRXD * - GPIO35 is input FFCTS * - GPIO36 is input FFDCD * - GPIO37 is input FFDSR * - GPIO38 is input FFRI * - GPIO39 is output FFTXD * - GPIO40 is output FFDTR * - GPIO41 is output FFRTS * - BTUART is in use * - GPIO42 is input BTRXD * - GPIO43 is output BTTXD * - GPIO44 is input BTCTS * - GPIO45 is output BTRTS * - IRUART is in use * - GPIO46 is input STDRXD * - GPIO47 is output STDTXD * - AC97 is in use*) * - GPIO28 is input AC97CLK * - GPIO29 is input AC97DatIn * - GPIO30 is output AC97DatO * - GPIO31 is output AC97SYNC * - GPIO113 is output AC97_RESET * - SSP is in use * - GPIO23 is output SSPSCLK * - GPIO24 is output chip select to Max7301 * - GPIO25 is output SSPTXD * - GPIO26 is input SSPRXD * - GPIO27 is input for Max7301 IRQ * - GPIO53 is input SSPSYSCLK * - SSP3 is in use * - GPIO81 is output SSPTXD3 * - GPIO82 is input SSPRXD3 * - GPIO83 is output SSPSFRM * - GPIO84 is output SSPCLK3 * * Otherwise claimed GPIOs: * GPIO1 -> IRQ from user switch * GPIO9 -> IRQ from power management * GPIO10 -> IRQ from WML9712 AC97 controller * GPIO11 -> IRQ from IDE controller * GPIO12 -> IRQ from CF controller * GPIO13 -> IRQ from CF controller * GPIO14 -> GPIO free * GPIO15 -> /CS1 selects baseboard's Control CPLD (U7, 16 bit wide data path) * GPIO19 -> GPIO free * GPIO20 -> /SDCS2 * GPIO21 -> /CS3 PC 
card socket select * GPIO33 -> /CS5 network controller select * GPIO78 -> /CS2 (16 bit wide data path) * GPIO80 -> /CS4 (16 bit wide data path) * GPIO86 -> GPIO free * GPIO87 -> GPIO free * GPIO90 -> LED0 on CPU module * GPIO91 -> LED1 on CPI module * GPIO117 -> SCL * GPIO118 -> SDA */ static unsigned long pcm990_irq_enabled; static void pcm990_mask_ack_irq(unsigned int irq) { int pcm990_irq = (irq - PCM027_IRQ(0)); PCM990_INTMSKENA = (pcm990_irq_enabled &= ~(1 << pcm990_irq)); } static void pcm990_unmask_irq(unsigned int irq) { int pcm990_irq = (irq - PCM027_IRQ(0)); /* the irq can be acknowledged only if deasserted, so it's done here */ PCM990_INTSETCLR |= 1 << pcm990_irq; PCM990_INTMSKENA = (pcm990_irq_enabled |= (1 << pcm990_irq)); } static struct irq_chip pcm990_irq_chip = { .mask_ack = pcm990_mask_ack_irq, .unmask = pcm990_unmask_irq, }; static void pcm990_irq_handler(unsigned int irq, struct irq_desc *desc) { unsigned long pending = (~PCM990_INTSETCLR) & pcm990_irq_enabled; do { GEDR(PCM990_CTRL_INT_IRQ_GPIO) = GPIO_bit(PCM990_CTRL_INT_IRQ_GPIO); if (likely(pending)) { irq = PCM027_IRQ(0) + __ffs(pending); generic_handle_irq(irq); } pending = (~PCM990_INTSETCLR) & pcm990_irq_enabled; } while (pending); } static void __init pcm990_init_irq(void) { int irq; /* setup extra PCM990 irqs */ for (irq = PCM027_IRQ(0); irq <= PCM027_IRQ(3); irq++) { set_irq_chip(irq, &pcm990_irq_chip); set_irq_handler(irq, handle_level_irq); set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); } PCM990_INTMSKENA = 0x00; /* disable all Interrupts */ PCM990_INTSETCLR = 0xFF; set_irq_chained_handler(PCM990_CTRL_INT_IRQ, pcm990_irq_handler); set_irq_type(PCM990_CTRL_INT_IRQ, PCM990_CTRL_INT_IRQ_EDGE); } static int pcm990_mci_init(struct device *dev, irq_handler_t mci_detect_int, void *data) { int err; err = request_irq(PCM027_MMCDET_IRQ, mci_detect_int, IRQF_DISABLED, "MMC card detect", data); if (err) printk(KERN_ERR "pcm990_mci_init: MMC/SD: can't request MMC " "card detect IRQ\n"); return 
err; } static void pcm990_mci_setpower(struct device *dev, unsigned int vdd) { struct pxamci_platform_data *p_d = dev->platform_data; if ((1 << vdd) & p_d->ocr_mask) __PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG5) = PCM990_CTRL_MMC2PWR; else __PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG5) = ~PCM990_CTRL_MMC2PWR; } static void pcm990_mci_exit(struct device *dev, void *data) { free_irq(PCM027_MMCDET_IRQ, data); } #define MSECS_PER_JIFFY (1000/HZ) static struct pxamci_platform_data pcm990_mci_platform_data = { .detect_delay = 250 / MSECS_PER_JIFFY, .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, .init = pcm990_mci_init, .setpower = pcm990_mci_setpower, .exit = pcm990_mci_exit, .gpio_card_detect = -1, .gpio_card_ro = -1, .gpio_power = -1, }; static struct pxaohci_platform_data pcm990_ohci_platform_data = { .port_mode = PMM_PERPORT_MODE, .flags = ENABLE_PORT1 | POWER_CONTROL_LOW | POWER_SENSE_LOW, .power_on_delay = 10, }; /* * PXA27x Camera specific stuff */ #if defined(CONFIG_VIDEO_PXA27x) || defined(CONFIG_VIDEO_PXA27x_MODULE) static unsigned long pcm990_camera_pin_config[] = { /* CIF */ GPIO98_CIF_DD_0, GPIO105_CIF_DD_1, GPIO104_CIF_DD_2, GPIO103_CIF_DD_3, GPIO95_CIF_DD_4, GPIO94_CIF_DD_5, GPIO93_CIF_DD_6, GPIO108_CIF_DD_7, GPIO107_CIF_DD_8, GPIO106_CIF_DD_9, GPIO42_CIF_MCLK, GPIO45_CIF_PCLK, GPIO43_CIF_FV, GPIO44_CIF_LV, }; static int pcm990_pxacamera_init(struct device *dev) { pxa2xx_mfp_config(ARRAY_AND_SIZE(pcm990_camera_pin_config)); return 0; } /* * CICR4: PCLK_EN: Pixel clock is supplied by the sensor * MCLK_EN: Master clock is generated by PXA * PCP: Data sampled on the falling edge of pixel clock */ struct pxacamera_platform_data pcm990_pxacamera_platform_data = { .init = pcm990_pxacamera_init, .flags = PXA_CAMERA_MASTER | PXA_CAMERA_DATAWIDTH_8 | PXA_CAMERA_DATAWIDTH_10 | PXA_CAMERA_PCLK_EN | PXA_CAMERA_MCLK_EN/* | PXA_CAMERA_PCP*/, .mclk_10khz = 1000, }; #include <linux/i2c/pca953x.h> static struct pca953x_platform_data pca9536_data = { .gpio_base 
= NR_BUILTIN_GPIO, }; static int gpio_bus_switch = -EINVAL; static int pcm990_camera_set_bus_param(struct soc_camera_link *link, unsigned long flags) { if (gpio_bus_switch < 0) { if (flags == SOCAM_DATAWIDTH_10) return 0; else return -EINVAL; } if (flags & SOCAM_DATAWIDTH_8) gpio_set_value(gpio_bus_switch, 1); else gpio_set_value(gpio_bus_switch, 0); return 0; } static unsigned long pcm990_camera_query_bus_param(struct soc_camera_link *link) { int ret; if (gpio_bus_switch < 0) { ret = gpio_request(NR_BUILTIN_GPIO, "camera"); if (!ret) { gpio_bus_switch = NR_BUILTIN_GPIO; gpio_direction_output(gpio_bus_switch, 0); } } if (gpio_bus_switch >= 0) return SOCAM_DATAWIDTH_8 | SOCAM_DATAWIDTH_10; else return SOCAM_DATAWIDTH_10; } static void pcm990_camera_free_bus(struct soc_camera_link *link) { if (gpio_bus_switch < 0) return; gpio_free(gpio_bus_switch); gpio_bus_switch = -EINVAL; } /* Board I2C devices. */ static struct i2c_board_info __initdata pcm990_i2c_devices[] = { { /* Must initialize before the camera(s) */ I2C_BOARD_INFO("pca9536", 0x41), .platform_data = &pca9536_data, }, }; static struct i2c_board_info pcm990_camera_i2c[] = { { I2C_BOARD_INFO("mt9v022", 0x48), }, { I2C_BOARD_INFO("mt9m001", 0x5d), }, }; static struct soc_camera_link iclink[] = { { .bus_id = 0, /* Must match with the camera ID */ .board_info = &pcm990_camera_i2c[0], .i2c_adapter_id = 0, .query_bus_param = pcm990_camera_query_bus_param, .set_bus_param = pcm990_camera_set_bus_param, .free_bus = pcm990_camera_free_bus, .module_name = "mt9v022", }, { .bus_id = 0, /* Must match with the camera ID */ .board_info = &pcm990_camera_i2c[1], .i2c_adapter_id = 0, .query_bus_param = pcm990_camera_query_bus_param, .set_bus_param = pcm990_camera_set_bus_param, .free_bus = pcm990_camera_free_bus, .module_name = "mt9m001", }, }; static struct platform_device pcm990_camera[] = { { .name = "soc-camera-pdrv", .id = 0, .dev = { .platform_data = &iclink[0], }, }, { .name = "soc-camera-pdrv", .id = 1, .dev = { 
.platform_data = &iclink[1], }, }, }; #endif /* CONFIG_VIDEO_PXA27x ||CONFIG_VIDEO_PXA27x_MODULE */ /* * enable generic access to the base board control CPLDs U6 and U7 */ static struct map_desc pcm990_io_desc[] __initdata = { { .virtual = PCM990_CTRL_BASE, .pfn = __phys_to_pfn(PCM990_CTRL_PHYS), .length = PCM990_CTRL_SIZE, .type = MT_DEVICE /* CPLD */ }, { .virtual = PCM990_CF_PLD_BASE, .pfn = __phys_to_pfn(PCM990_CF_PLD_PHYS), .length = PCM990_CF_PLD_SIZE, .type = MT_DEVICE /* CPLD */ } }; /* * system init for baseboard usage. Will be called by pcm027 init. * * Add platform devices present on this baseboard and init * them from CPU side as far as required to use them later on */ void __init pcm990_baseboard_init(void) { pxa2xx_mfp_config(ARRAY_AND_SIZE(pcm990_pin_config)); /* register CPLD access */ iotable_init(ARRAY_AND_SIZE(pcm990_io_desc)); /* register CPLD's IRQ controller */ pcm990_init_irq(); #ifndef CONFIG_PCM990_DISPLAY_NONE set_pxa_fb_info(&pcm990_fbinfo); #endif platform_device_register(&pcm990_backlight_device); /* MMC */ pxa_set_mci_info(&pcm990_mci_platform_data); /* USB host */ pxa_set_ohci_info(&pcm990_ohci_platform_data); pxa_set_i2c_info(NULL); pxa_set_ac97_info(NULL); #if defined(CONFIG_VIDEO_PXA27x) || defined(CONFIG_VIDEO_PXA27x_MODULE) pxa_set_camera_info(&pcm990_pxacamera_platform_data); i2c_register_board_info(0, ARRAY_AND_SIZE(pcm990_i2c_devices)); platform_device_register(&pcm990_camera[0]); platform_device_register(&pcm990_camera[1]); #endif printk(KERN_INFO "PCM-990 Evaluation baseboard initialized\n"); }
gpl-2.0
tyler6389/android_kernel_samsung_superiorlteskt
arch/arm/mach-exynos/setup-fimd.c
496
1662
/* linux/arch/arm/mach-exynos/setup-fimd.c * * Copyright (c) 2009-2011 Samsung Electronics Co., Ltd. * http://www.samsung.com * * Base Exynos4 FIMD configuration * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/fb.h> #include <linux/gpio.h> #include <linux/clk.h> #include <plat/fb.h> #include <plat/gpio-cfg.h> #include <plat/clock.h> #include <mach/regs-clock.h> #include <mach/map.h> void exynos4_fimd_cfg_gpios(unsigned int base, unsigned int nr, unsigned int cfg, s5p_gpio_drvstr_t drvstr) { s3c_gpio_cfgrange_nopull(base, nr, cfg); for (; nr > 0; nr--, base++) s5p_gpio_set_drvstr(base, drvstr); } int __init exynos4_fimd_setup_clock(struct device *dev, const char *bus_clk, const char *parent, unsigned long clk_rate) { struct clk *clk_parent; struct clk *sclk; sclk = clk_get(dev, bus_clk); if (IS_ERR(sclk)) return PTR_ERR(sclk); clk_parent = clk_get(NULL, parent); if (IS_ERR(clk_parent)) { clk_put(sclk); return PTR_ERR(clk_parent); } if (clk_set_parent(sclk, clk_parent)) { pr_err("Unable to set parent %s of clock %s.\n", clk_parent->name, sclk->name); clk_put(sclk); clk_put(clk_parent); return PTR_ERR(sclk); } if (!clk_rate) clk_rate = 87000000UL; if (clk_set_rate(sclk, clk_rate)) { pr_err("%s rate change failed: %lu\n", sclk->name, clk_rate); clk_put(sclk); clk_put(clk_parent); return PTR_ERR(sclk); } clk_put(sclk); clk_put(clk_parent); return 0; }
gpl-2.0
franciscofranco/manta
net/l2tp/l2tp_eth.c
1520
7430
/* * L2TPv3 ethernet pseudowire driver * * Copyright (c) 2008,2009,2010 Katalix Systems Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/socket.h> #include <linux/hash.h> #include <linux/l2tp.h> #include <linux/in.h> #include <linux/etherdevice.h> #include <linux/spinlock.h> #include <net/sock.h> #include <net/ip.h> #include <net/icmp.h> #include <net/udp.h> #include <net/inet_common.h> #include <net/inet_hashtables.h> #include <net/tcp_states.h> #include <net/protocol.h> #include <net/xfrm.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include "l2tp_core.h" /* Default device name. May be overridden by name specified by user */ #define L2TP_ETH_DEV_NAME "l2tpeth%d" /* via netdev_priv() */ struct l2tp_eth { struct net_device *dev; struct sock *tunnel_sock; struct l2tp_session *session; struct list_head list; }; /* via l2tp_session_priv() */ struct l2tp_eth_sess { struct net_device *dev; }; /* per-net private data for this module */ static unsigned int l2tp_eth_net_id; struct l2tp_eth_net { struct list_head l2tp_eth_dev_list; spinlock_t l2tp_eth_lock; }; static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net) { return net_generic(net, l2tp_eth_net_id); } static int l2tp_eth_dev_init(struct net_device *dev) { struct l2tp_eth *priv = netdev_priv(dev); priv->dev = dev; eth_hw_addr_random(dev); memset(&dev->broadcast[0], 0xff, 6); return 0; } static void l2tp_eth_dev_uninit(struct net_device *dev) { struct l2tp_eth *priv = netdev_priv(dev); struct l2tp_eth_net *pn = l2tp_eth_pernet(dev_net(dev)); spin_lock(&pn->l2tp_eth_lock); list_del_init(&priv->list); spin_unlock(&pn->l2tp_eth_lock); dev_put(dev); } static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device 
*dev) { struct l2tp_eth *priv = netdev_priv(dev); struct l2tp_session *session = priv->session; l2tp_xmit_skb(session, skb, session->hdr_len); dev->stats.tx_bytes += skb->len; dev->stats.tx_packets++; return 0; } static struct net_device_ops l2tp_eth_netdev_ops = { .ndo_init = l2tp_eth_dev_init, .ndo_uninit = l2tp_eth_dev_uninit, .ndo_start_xmit = l2tp_eth_dev_xmit, }; static void l2tp_eth_dev_setup(struct net_device *dev) { ether_setup(dev); dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &l2tp_eth_netdev_ops; dev->destructor = free_netdev; } static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len) { struct l2tp_eth_sess *spriv = l2tp_session_priv(session); struct net_device *dev = spriv->dev; if (session->debug & L2TP_MSG_DATA) { unsigned int length; int offset; u8 *ptr = skb->data; length = min(32u, skb->len); if (!pskb_may_pull(skb, length)) goto error; printk(KERN_DEBUG "%s: eth recv: ", session->name); offset = 0; do { printk(" %02X", ptr[offset]); } while (++offset < length); printk("\n"); } if (!pskb_may_pull(skb, ETH_HLEN)) goto error; secpath_reset(skb); /* checksums verified by L2TP */ skb->ip_summed = CHECKSUM_NONE; skb_dst_drop(skb); nf_reset(skb); if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) { dev->stats.rx_packets++; dev->stats.rx_bytes += data_len; } else dev->stats.rx_errors++; return; error: dev->stats.rx_errors++; kfree_skb(skb); } static void l2tp_eth_delete(struct l2tp_session *session) { struct l2tp_eth_sess *spriv; struct net_device *dev; if (session) { spriv = l2tp_session_priv(session); dev = spriv->dev; if (dev) { unregister_netdev(dev); spriv->dev = NULL; module_put(THIS_MODULE); } } } #if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE) static void l2tp_eth_show(struct seq_file *m, void *arg) { struct l2tp_session *session = arg; struct l2tp_eth_sess *spriv = l2tp_session_priv(session); struct net_device *dev = spriv->dev; seq_printf(m, " interface %s\n", dev->name); 
} #endif static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) { struct net_device *dev; char name[IFNAMSIZ]; struct l2tp_tunnel *tunnel; struct l2tp_session *session; struct l2tp_eth *priv; struct l2tp_eth_sess *spriv; int rc; struct l2tp_eth_net *pn; tunnel = l2tp_tunnel_find(net, tunnel_id); if (!tunnel) { rc = -ENODEV; goto out; } session = l2tp_session_find(net, tunnel, session_id); if (session) { rc = -EEXIST; goto out; } if (cfg->ifname) { dev = dev_get_by_name(net, cfg->ifname); if (dev) { dev_put(dev); rc = -EEXIST; goto out; } strlcpy(name, cfg->ifname, IFNAMSIZ); } else strcpy(name, L2TP_ETH_DEV_NAME); session = l2tp_session_create(sizeof(*spriv), tunnel, session_id, peer_session_id, cfg); if (!session) { rc = -ENOMEM; goto out; } dev = alloc_netdev(sizeof(*priv), name, l2tp_eth_dev_setup); if (!dev) { rc = -ENOMEM; goto out_del_session; } dev_net_set(dev, net); if (session->mtu == 0) session->mtu = dev->mtu - session->hdr_len; dev->mtu = session->mtu; dev->needed_headroom += session->hdr_len; priv = netdev_priv(dev); priv->dev = dev; priv->session = session; INIT_LIST_HEAD(&priv->list); priv->tunnel_sock = tunnel->sock; session->recv_skb = l2tp_eth_dev_recv; session->session_close = l2tp_eth_delete; #if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE) session->show = l2tp_eth_show; #endif spriv = l2tp_session_priv(session); spriv->dev = dev; rc = register_netdev(dev); if (rc < 0) goto out_del_dev; __module_get(THIS_MODULE); /* Must be done after register_netdev() */ strlcpy(session->ifname, dev->name, IFNAMSIZ); dev_hold(dev); pn = l2tp_eth_pernet(dev_net(dev)); spin_lock(&pn->l2tp_eth_lock); list_add(&priv->list, &pn->l2tp_eth_dev_list); spin_unlock(&pn->l2tp_eth_lock); return 0; out_del_dev: free_netdev(dev); spriv->dev = NULL; out_del_session: l2tp_session_delete(session); out: return rc; } static __net_init int l2tp_eth_init_net(struct net *net) { struct 
l2tp_eth_net *pn = net_generic(net, l2tp_eth_net_id); INIT_LIST_HEAD(&pn->l2tp_eth_dev_list); spin_lock_init(&pn->l2tp_eth_lock); return 0; } static struct pernet_operations l2tp_eth_net_ops = { .init = l2tp_eth_init_net, .id = &l2tp_eth_net_id, .size = sizeof(struct l2tp_eth_net), }; static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = { .session_create = l2tp_eth_create, .session_delete = l2tp_session_delete, }; static int __init l2tp_eth_init(void) { int err = 0; err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops); if (err) goto out; err = register_pernet_device(&l2tp_eth_net_ops); if (err) goto out_unreg; printk(KERN_INFO "L2TP ethernet pseudowire support (L2TPv3)\n"); return 0; out_unreg: l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH); out: return err; } static void __exit l2tp_eth_exit(void) { unregister_pernet_device(&l2tp_eth_net_ops); l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH); } module_init(l2tp_eth_init); module_exit(l2tp_eth_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); MODULE_DESCRIPTION("L2TP ethernet pseudowire driver"); MODULE_VERSION("1.0");
gpl-2.0
jrior001/android_kernel_asus_Z00D
drivers/net/wireless/ath/ath5k/ahb.c
2288
6350
/* * Copyright (c) 2008-2009 Atheros Communications Inc. * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org> * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/nl80211.h> #include <linux/platform_device.h> #include <linux/etherdevice.h> #include <linux/export.h> #include <ar231x_platform.h> #include "ath5k.h" #include "debug.h" #include "base.h" #include "reg.h" /* return bus cachesize in 4B word units */ static void ath5k_ahb_read_cachesize(struct ath_common *common, int *csz) { *csz = L1_CACHE_BYTES >> 2; } static bool ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data) { struct ath5k_hw *ah = common->priv; struct platform_device *pdev = to_platform_device(ah->dev); struct ar231x_board_config *bcfg = pdev->dev.platform_data; u16 *eeprom, *eeprom_end; bcfg = pdev->dev.platform_data; eeprom = (u16 *) bcfg->radio; eeprom_end = ((void *) bcfg->config) + BOARD_CONFIG_BUFSZ; eeprom += off; if (eeprom > eeprom_end) return false; *data = *eeprom; return true; } int ath5k_hw_read_srev(struct ath5k_hw *ah) { struct platform_device *pdev = to_platform_device(ah->dev); struct ar231x_board_config *bcfg = pdev->dev.platform_data; ah->ah_mac_srev = bcfg->devid; return 0; } static int ath5k_ahb_eeprom_read_mac(struct ath5k_hw *ah, u8 
*mac) { struct platform_device *pdev = to_platform_device(ah->dev); struct ar231x_board_config *bcfg = pdev->dev.platform_data; u8 *cfg_mac; if (to_platform_device(ah->dev)->id == 0) cfg_mac = bcfg->config->wlan0_mac; else cfg_mac = bcfg->config->wlan1_mac; memcpy(mac, cfg_mac, ETH_ALEN); return 0; } static const struct ath_bus_ops ath_ahb_bus_ops = { .ath_bus_type = ATH_AHB, .read_cachesize = ath5k_ahb_read_cachesize, .eeprom_read = ath5k_ahb_eeprom_read, .eeprom_read_mac = ath5k_ahb_eeprom_read_mac, }; /*Initialization*/ static int ath_ahb_probe(struct platform_device *pdev) { struct ar231x_board_config *bcfg = pdev->dev.platform_data; struct ath5k_hw *ah; struct ieee80211_hw *hw; struct resource *res; void __iomem *mem; int irq; int ret = 0; u32 reg; if (!pdev->dev.platform_data) { dev_err(&pdev->dev, "no platform data specified\n"); ret = -EINVAL; goto err_out; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "no memory resource found\n"); ret = -ENXIO; goto err_out; } mem = ioremap_nocache(res->start, resource_size(res)); if (mem == NULL) { dev_err(&pdev->dev, "ioremap failed\n"); ret = -ENOMEM; goto err_out; } res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (res == NULL) { dev_err(&pdev->dev, "no IRQ resource found\n"); ret = -ENXIO; goto err_iounmap; } irq = res->start; hw = ieee80211_alloc_hw(sizeof(struct ath5k_hw), &ath5k_hw_ops); if (hw == NULL) { dev_err(&pdev->dev, "no memory for ieee80211_hw\n"); ret = -ENOMEM; goto err_iounmap; } ah = hw->priv; ah->hw = hw; ah->dev = &pdev->dev; ah->iobase = mem; ah->irq = irq; ah->devid = bcfg->devid; if (bcfg->devid >= AR5K_SREV_AR2315_R6) { /* Enable WMAC AHB arbitration */ reg = ioread32((void __iomem *) AR5K_AR2315_AHB_ARB_CTL); reg |= AR5K_AR2315_AHB_ARB_CTL_WLAN; iowrite32(reg, (void __iomem *) AR5K_AR2315_AHB_ARB_CTL); /* Enable global WMAC swapping */ reg = ioread32((void __iomem *) AR5K_AR2315_BYTESWAP); reg |= AR5K_AR2315_BYTESWAP_WMAC; iowrite32(reg, 
(void __iomem *) AR5K_AR2315_BYTESWAP); } else { /* Enable WMAC DMA access (assuming 5312 or 231x*/ /* TODO: check other platforms */ reg = ioread32((void __iomem *) AR5K_AR5312_ENABLE); if (to_platform_device(ah->dev)->id == 0) reg |= AR5K_AR5312_ENABLE_WLAN0; else reg |= AR5K_AR5312_ENABLE_WLAN1; iowrite32(reg, (void __iomem *) AR5K_AR5312_ENABLE); /* * On a dual-band AR5312, the multiband radio is only * used as pass-through. Disable 2 GHz support in the * driver for it */ if (to_platform_device(ah->dev)->id == 0 && (bcfg->config->flags & (BD_WLAN0 | BD_WLAN1)) == (BD_WLAN1 | BD_WLAN0)) ah->ah_capabilities.cap_needs_2GHz_ovr = true; else ah->ah_capabilities.cap_needs_2GHz_ovr = false; } ret = ath5k_init_ah(ah, &ath_ahb_bus_ops); if (ret != 0) { dev_err(&pdev->dev, "failed to attach device, err=%d\n", ret); ret = -ENODEV; goto err_free_hw; } platform_set_drvdata(pdev, hw); return 0; err_free_hw: ieee80211_free_hw(hw); platform_set_drvdata(pdev, NULL); err_iounmap: iounmap(mem); err_out: return ret; } static int ath_ahb_remove(struct platform_device *pdev) { struct ar231x_board_config *bcfg = pdev->dev.platform_data; struct ieee80211_hw *hw = platform_get_drvdata(pdev); struct ath5k_hw *ah; u32 reg; if (!hw) return 0; ah = hw->priv; if (bcfg->devid >= AR5K_SREV_AR2315_R6) { /* Disable WMAC AHB arbitration */ reg = ioread32((void __iomem *) AR5K_AR2315_AHB_ARB_CTL); reg &= ~AR5K_AR2315_AHB_ARB_CTL_WLAN; iowrite32(reg, (void __iomem *) AR5K_AR2315_AHB_ARB_CTL); } else { /*Stop DMA access */ reg = ioread32((void __iomem *) AR5K_AR5312_ENABLE); if (to_platform_device(ah->dev)->id == 0) reg &= ~AR5K_AR5312_ENABLE_WLAN0; else reg &= ~AR5K_AR5312_ENABLE_WLAN1; iowrite32(reg, (void __iomem *) AR5K_AR5312_ENABLE); } ath5k_deinit_ah(ah); iounmap(ah->iobase); platform_set_drvdata(pdev, NULL); ieee80211_free_hw(hw); return 0; } static struct platform_driver ath_ahb_driver = { .probe = ath_ahb_probe, .remove = ath_ahb_remove, .driver = { .name = "ar231x-wmac", .owner = 
THIS_MODULE, }, }; module_platform_driver(ath_ahb_driver);
gpl-2.0
dukie/sun4i-kernel
drivers/spi/ep93xx_spi.c
2544
25157
/* * Driver for Cirrus Logic EP93xx SPI controller. * * Copyright (c) 2010 Mika Westerberg * * Explicit FIFO handling code was inspired by amba-pl022 driver. * * Chip select support using other than built-in GPIOs by H. Hartley Sweeten. * * For more information about the SPI controller see documentation on Cirrus * Logic web site: * http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/io.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/bitops.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/workqueue.h> #include <linux/sched.h> #include <linux/spi/spi.h> #include <mach/ep93xx_spi.h> #define SSPCR0 0x0000 #define SSPCR0_MODE_SHIFT 6 #define SSPCR0_SCR_SHIFT 8 #define SSPCR1 0x0004 #define SSPCR1_RIE BIT(0) #define SSPCR1_TIE BIT(1) #define SSPCR1_RORIE BIT(2) #define SSPCR1_LBM BIT(3) #define SSPCR1_SSE BIT(4) #define SSPCR1_MS BIT(5) #define SSPCR1_SOD BIT(6) #define SSPDR 0x0008 #define SSPSR 0x000c #define SSPSR_TFE BIT(0) #define SSPSR_TNF BIT(1) #define SSPSR_RNE BIT(2) #define SSPSR_RFF BIT(3) #define SSPSR_BSY BIT(4) #define SSPCPSR 0x0010 #define SSPIIR 0x0014 #define SSPIIR_RIS BIT(0) #define SSPIIR_TIS BIT(1) #define SSPIIR_RORIS BIT(2) #define SSPICR SSPIIR /* timeout in milliseconds */ #define SPI_TIMEOUT 5 /* maximum depth of RX/TX FIFO */ #define SPI_FIFO_SIZE 8 /** * struct ep93xx_spi - EP93xx SPI controller structure * @lock: spinlock that protects concurrent accesses to fields @running, * @current_msg and @msg_queue * @pdev: pointer to platform device * @clk: clock for the controller * @regs_base: pointer to ioremap()'d registers * @irq: IRQ number used by the driver * @min_rate: minimum clock rate (in Hz) supported by the controller * 
@max_rate: maximum clock rate (in Hz) supported by the controller * @running: is the queue running * @wq: workqueue used by the driver * @msg_work: work that is queued for the driver * @wait: wait here until given transfer is completed * @msg_queue: queue for the messages * @current_msg: message that is currently processed (or %NULL if none) * @tx: current byte in transfer to transmit * @rx: current byte in transfer to receive * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one * frame decreases this level and sending one frame increases it. * * This structure holds EP93xx SPI controller specific information. When * @running is %true, driver accepts transfer requests from protocol drivers. * @current_msg is used to hold pointer to the message that is currently * processed. If @current_msg is %NULL, it means that no processing is going * on. * * Most of the fields are only written once and they can be accessed without * taking the @lock. Fields that are accessed concurrently are: @current_msg, * @running, and @msg_queue. */ struct ep93xx_spi { spinlock_t lock; const struct platform_device *pdev; struct clk *clk; void __iomem *regs_base; int irq; unsigned long min_rate; unsigned long max_rate; bool running; struct workqueue_struct *wq; struct work_struct msg_work; struct completion wait; struct list_head msg_queue; struct spi_message *current_msg; size_t tx; size_t rx; size_t fifo_level; }; /** * struct ep93xx_spi_chip - SPI device hardware settings * @spi: back pointer to the SPI device * @rate: max rate in hz this chip supports * @div_cpsr: cpsr (pre-scaler) divider * @div_scr: scr divider * @dss: bits per word (4 - 16 bits) * @ops: private chip operations * * This structure is used to store hardware register specific settings for each * SPI device. Settings are written to hardware by function * ep93xx_spi_chip_setup(). 
*/ struct ep93xx_spi_chip { const struct spi_device *spi; unsigned long rate; u8 div_cpsr; u8 div_scr; u8 dss; struct ep93xx_spi_chip_ops *ops; }; /* converts bits per word to CR0.DSS value */ #define bits_per_word_to_dss(bpw) ((bpw) - 1) static inline void ep93xx_spi_write_u8(const struct ep93xx_spi *espi, u16 reg, u8 value) { __raw_writeb(value, espi->regs_base + reg); } static inline u8 ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg) { return __raw_readb(spi->regs_base + reg); } static inline void ep93xx_spi_write_u16(const struct ep93xx_spi *espi, u16 reg, u16 value) { __raw_writew(value, espi->regs_base + reg); } static inline u16 ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg) { return __raw_readw(spi->regs_base + reg); } static int ep93xx_spi_enable(const struct ep93xx_spi *espi) { u8 regval; int err; err = clk_enable(espi->clk); if (err) return err; regval = ep93xx_spi_read_u8(espi, SSPCR1); regval |= SSPCR1_SSE; ep93xx_spi_write_u8(espi, SSPCR1, regval); return 0; } static void ep93xx_spi_disable(const struct ep93xx_spi *espi) { u8 regval; regval = ep93xx_spi_read_u8(espi, SSPCR1); regval &= ~SSPCR1_SSE; ep93xx_spi_write_u8(espi, SSPCR1, regval); clk_disable(espi->clk); } static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi) { u8 regval; regval = ep93xx_spi_read_u8(espi, SSPCR1); regval |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE); ep93xx_spi_write_u8(espi, SSPCR1, regval); } static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi) { u8 regval; regval = ep93xx_spi_read_u8(espi, SSPCR1); regval &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE); ep93xx_spi_write_u8(espi, SSPCR1, regval); } /** * ep93xx_spi_calc_divisors() - calculates SPI clock divisors * @espi: ep93xx SPI controller struct * @chip: divisors are calculated for this chip * @rate: desired SPI output clock rate * * Function calculates cpsr (clock pre-scaler) and scr divisors based on * given @rate and places them to @chip->div_cpsr and 
@chip->div_scr. If, * for some reason, divisors cannot be calculated nothing is stored and * %-EINVAL is returned. */ static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi, struct ep93xx_spi_chip *chip, unsigned long rate) { unsigned long spi_clk_rate = clk_get_rate(espi->clk); int cpsr, scr; /* * Make sure that max value is between values supported by the * controller. Note that minimum value is already checked in * ep93xx_spi_transfer(). */ rate = clamp(rate, espi->min_rate, espi->max_rate); /* * Calculate divisors so that we can get speed according the * following formula: * rate = spi_clock_rate / (cpsr * (1 + scr)) * * cpsr must be even number and starts from 2, scr can be any number * between 0 and 255. */ for (cpsr = 2; cpsr <= 254; cpsr += 2) { for (scr = 0; scr <= 255; scr++) { if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) { chip->div_scr = (u8)scr; chip->div_cpsr = (u8)cpsr; return 0; } } } return -EINVAL; } static void ep93xx_spi_cs_control(struct spi_device *spi, bool control) { struct ep93xx_spi_chip *chip = spi_get_ctldata(spi); int value = (spi->mode & SPI_CS_HIGH) ? control : !control; if (chip->ops && chip->ops->cs_control) chip->ops->cs_control(spi, value); } /** * ep93xx_spi_setup() - setup an SPI device * @spi: SPI device to setup * * This function sets up SPI device mode, speed etc. Can be called multiple * times for a single device. Returns %0 in case of success, negative error in * case of failure. When this function returns success, the device is * deselected. 
*/ static int ep93xx_spi_setup(struct spi_device *spi) { struct ep93xx_spi *espi = spi_master_get_devdata(spi->master); struct ep93xx_spi_chip *chip; if (spi->bits_per_word < 4 || spi->bits_per_word > 16) { dev_err(&espi->pdev->dev, "invalid bits per word %d\n", spi->bits_per_word); return -EINVAL; } chip = spi_get_ctldata(spi); if (!chip) { dev_dbg(&espi->pdev->dev, "initial setup for %s\n", spi->modalias); chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; chip->spi = spi; chip->ops = spi->controller_data; if (chip->ops && chip->ops->setup) { int ret = chip->ops->setup(spi); if (ret) { kfree(chip); return ret; } } spi_set_ctldata(spi, chip); } if (spi->max_speed_hz != chip->rate) { int err; err = ep93xx_spi_calc_divisors(espi, chip, spi->max_speed_hz); if (err != 0) { spi_set_ctldata(spi, NULL); kfree(chip); return err; } chip->rate = spi->max_speed_hz; } chip->dss = bits_per_word_to_dss(spi->bits_per_word); ep93xx_spi_cs_control(spi, false); return 0; } /** * ep93xx_spi_transfer() - queue message to be transferred * @spi: target SPI device * @msg: message to be transferred * * This function is called by SPI device drivers when they are going to transfer * a new message. It simply puts the message in the queue and schedules * workqueue to perform the actual transfer later on. * * Returns %0 on success and negative error in case of failure. */ static int ep93xx_spi_transfer(struct spi_device *spi, struct spi_message *msg) { struct ep93xx_spi *espi = spi_master_get_devdata(spi->master); struct spi_transfer *t; unsigned long flags; if (!msg || !msg->complete) return -EINVAL; /* first validate each transfer */ list_for_each_entry(t, &msg->transfers, transfer_list) { if (t->bits_per_word) { if (t->bits_per_word < 4 || t->bits_per_word > 16) return -EINVAL; } if (t->speed_hz && t->speed_hz < espi->min_rate) return -EINVAL; } /* * Now that we own the message, let's initialize it so that it is * suitable for us. 
We use @msg->status to signal whether there was * error in transfer and @msg->state is used to hold pointer to the * current transfer (or %NULL if no active current transfer). */ msg->state = NULL; msg->status = 0; msg->actual_length = 0; spin_lock_irqsave(&espi->lock, flags); if (!espi->running) { spin_unlock_irqrestore(&espi->lock, flags); return -ESHUTDOWN; } list_add_tail(&msg->queue, &espi->msg_queue); queue_work(espi->wq, &espi->msg_work); spin_unlock_irqrestore(&espi->lock, flags); return 0; } /** * ep93xx_spi_cleanup() - cleans up master controller specific state * @spi: SPI device to cleanup * * This function releases master controller specific state for given @spi * device. */ static void ep93xx_spi_cleanup(struct spi_device *spi) { struct ep93xx_spi_chip *chip; chip = spi_get_ctldata(spi); if (chip) { if (chip->ops && chip->ops->cleanup) chip->ops->cleanup(spi); spi_set_ctldata(spi, NULL); kfree(chip); } } /** * ep93xx_spi_chip_setup() - configures hardware according to given @chip * @espi: ep93xx SPI controller struct * @chip: chip specific settings * * This function sets up the actual hardware registers with settings given in * @chip. Note that no validation is done so make sure that callers validate * settings before calling this. */ static void ep93xx_spi_chip_setup(const struct ep93xx_spi *espi, const struct ep93xx_spi_chip *chip) { u16 cr0; cr0 = chip->div_scr << SSPCR0_SCR_SHIFT; cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT; cr0 |= chip->dss; dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n", chip->spi->mode, chip->div_cpsr, chip->div_scr, chip->dss); dev_dbg(&espi->pdev->dev, "setup: cr0 %#x", cr0); ep93xx_spi_write_u8(espi, SSPCPSR, chip->div_cpsr); ep93xx_spi_write_u16(espi, SSPCR0, cr0); } static inline int bits_per_word(const struct ep93xx_spi *espi) { struct spi_message *msg = espi->current_msg; struct spi_transfer *t = msg->state; return t->bits_per_word ? 
t->bits_per_word : msg->spi->bits_per_word; } static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t) { if (bits_per_word(espi) > 8) { u16 tx_val = 0; if (t->tx_buf) tx_val = ((u16 *)t->tx_buf)[espi->tx]; ep93xx_spi_write_u16(espi, SSPDR, tx_val); espi->tx += sizeof(tx_val); } else { u8 tx_val = 0; if (t->tx_buf) tx_val = ((u8 *)t->tx_buf)[espi->tx]; ep93xx_spi_write_u8(espi, SSPDR, tx_val); espi->tx += sizeof(tx_val); } } static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t) { if (bits_per_word(espi) > 8) { u16 rx_val; rx_val = ep93xx_spi_read_u16(espi, SSPDR); if (t->rx_buf) ((u16 *)t->rx_buf)[espi->rx] = rx_val; espi->rx += sizeof(rx_val); } else { u8 rx_val; rx_val = ep93xx_spi_read_u8(espi, SSPDR); if (t->rx_buf) ((u8 *)t->rx_buf)[espi->rx] = rx_val; espi->rx += sizeof(rx_val); } } /** * ep93xx_spi_read_write() - perform next RX/TX transfer * @espi: ep93xx SPI controller struct * * This function transfers next bytes (or half-words) to/from RX/TX FIFOs. If * called several times, the whole transfer will be completed. Returns * %-EINPROGRESS when current transfer was not yet completed otherwise %0. * * When this function is finished, RX FIFO should be empty and TX FIFO should be * full. 
*/ static int ep93xx_spi_read_write(struct ep93xx_spi *espi) { struct spi_message *msg = espi->current_msg; struct spi_transfer *t = msg->state; /* read as long as RX FIFO has frames in it */ while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) { ep93xx_do_read(espi, t); espi->fifo_level--; } /* write as long as TX FIFO has room */ while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) { ep93xx_do_write(espi, t); espi->fifo_level++; } if (espi->rx == t->len) { msg->actual_length += t->len; return 0; } return -EINPROGRESS; } /** * ep93xx_spi_process_transfer() - processes one SPI transfer * @espi: ep93xx SPI controller struct * @msg: current message * @t: transfer to process * * This function processes one SPI transfer given in @t. Function waits until * transfer is complete (may sleep) and updates @msg->status based on whether * transfer was successfully processed or not. */ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi, struct spi_message *msg, struct spi_transfer *t) { struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi); msg->state = t; /* * Handle any transfer specific settings if needed. We use * temporary chip settings here and restore original later when * the transfer is finished. */ if (t->speed_hz || t->bits_per_word) { struct ep93xx_spi_chip tmp_chip = *chip; if (t->speed_hz) { int err; err = ep93xx_spi_calc_divisors(espi, &tmp_chip, t->speed_hz); if (err) { dev_err(&espi->pdev->dev, "failed to adjust speed\n"); msg->status = err; return; } } if (t->bits_per_word) tmp_chip.dss = bits_per_word_to_dss(t->bits_per_word); /* * Set up temporary new hw settings for this transfer. */ ep93xx_spi_chip_setup(espi, &tmp_chip); } espi->rx = 0; espi->tx = 0; /* * Now everything is set up for the current transfer. We prime the TX * FIFO, enable interrupts, and wait for the transfer to complete. 
*/ if (ep93xx_spi_read_write(espi)) { ep93xx_spi_enable_interrupts(espi); wait_for_completion(&espi->wait); } /* * In case of error during transmit, we bail out from processing * the message. */ if (msg->status) return; /* * After this transfer is finished, perform any possible * post-transfer actions requested by the protocol driver. */ if (t->delay_usecs) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(usecs_to_jiffies(t->delay_usecs)); } if (t->cs_change) { if (!list_is_last(&t->transfer_list, &msg->transfers)) { /* * In case protocol driver is asking us to drop the * chipselect briefly, we let the scheduler to handle * any "delay" here. */ ep93xx_spi_cs_control(msg->spi, false); cond_resched(); ep93xx_spi_cs_control(msg->spi, true); } } if (t->speed_hz || t->bits_per_word) ep93xx_spi_chip_setup(espi, chip); } /* * ep93xx_spi_process_message() - process one SPI message * @espi: ep93xx SPI controller struct * @msg: message to process * * This function processes a single SPI message. We go through all transfers in * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is * asserted during the whole message (unless per transfer cs_change is set). * * @msg->status contains %0 in case of success or negative error code in case of * failure. */ static void ep93xx_spi_process_message(struct ep93xx_spi *espi, struct spi_message *msg) { unsigned long timeout; struct spi_transfer *t; int err; /* * Enable the SPI controller and its clock. */ err = ep93xx_spi_enable(espi); if (err) { dev_err(&espi->pdev->dev, "failed to enable SPI controller\n"); msg->status = err; return; } /* * Just to be sure: flush any data from RX FIFO. */ timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT); while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) { if (time_after(jiffies, timeout)) { dev_warn(&espi->pdev->dev, "timeout while flushing RX FIFO\n"); msg->status = -ETIMEDOUT; return; } ep93xx_spi_read_u16(espi, SSPDR); } /* * We explicitly handle FIFO level. 
This way we don't have to check TX * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns. */ espi->fifo_level = 0; /* * Update SPI controller registers according to spi device and assert * the chipselect. */ ep93xx_spi_chip_setup(espi, spi_get_ctldata(msg->spi)); ep93xx_spi_cs_control(msg->spi, true); list_for_each_entry(t, &msg->transfers, transfer_list) { ep93xx_spi_process_transfer(espi, msg, t); if (msg->status) break; } /* * Now the whole message is transferred (or failed for some reason). We * deselect the device and disable the SPI controller. */ ep93xx_spi_cs_control(msg->spi, false); ep93xx_spi_disable(espi); } #define work_to_espi(work) (container_of((work), struct ep93xx_spi, msg_work)) /** * ep93xx_spi_work() - EP93xx SPI workqueue worker function * @work: work struct * * Workqueue worker function. This function is called when there are new * SPI messages to be processed. Message is taken out from the queue and then * passed to ep93xx_spi_process_message(). * * After message is transferred, protocol driver is notified by calling * @msg->complete(). In case of error, @msg->status is set to negative error * number, otherwise it contains zero (and @msg->actual_length is updated). */ static void ep93xx_spi_work(struct work_struct *work) { struct ep93xx_spi *espi = work_to_espi(work); struct spi_message *msg; spin_lock_irq(&espi->lock); if (!espi->running || espi->current_msg || list_empty(&espi->msg_queue)) { spin_unlock_irq(&espi->lock); return; } msg = list_first_entry(&espi->msg_queue, struct spi_message, queue); list_del_init(&msg->queue); espi->current_msg = msg; spin_unlock_irq(&espi->lock); ep93xx_spi_process_message(espi, msg); /* * Update the current message and re-schedule ourselves if there are * more messages in the queue. 
*/ spin_lock_irq(&espi->lock); espi->current_msg = NULL; if (espi->running && !list_empty(&espi->msg_queue)) queue_work(espi->wq, &espi->msg_work); spin_unlock_irq(&espi->lock); /* notify the protocol driver that we are done with this message */ msg->complete(msg->context); } static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id) { struct ep93xx_spi *espi = dev_id; u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR); /* * If we got ROR (receive overrun) interrupt we know that something is * wrong. Just abort the message. */ if (unlikely(irq_status & SSPIIR_RORIS)) { /* clear the overrun interrupt */ ep93xx_spi_write_u8(espi, SSPICR, 0); dev_warn(&espi->pdev->dev, "receive overrun, aborting the message\n"); espi->current_msg->status = -EIO; } else { /* * Interrupt is either RX (RIS) or TX (TIS). For both cases we * simply execute next data transfer. */ if (ep93xx_spi_read_write(espi)) { /* * In normal case, there still is some processing left * for current transfer. Let's wait for the next * interrupt then. */ return IRQ_HANDLED; } } /* * Current transfer is finished, either with error or with success. In * any case we disable interrupts and notify the worker to handle * any post-processing of the message. 
*/ ep93xx_spi_disable_interrupts(espi); complete(&espi->wait); return IRQ_HANDLED; } static int __init ep93xx_spi_probe(struct platform_device *pdev) { struct spi_master *master; struct ep93xx_spi_info *info; struct ep93xx_spi *espi; struct resource *res; int error; info = pdev->dev.platform_data; master = spi_alloc_master(&pdev->dev, sizeof(*espi)); if (!master) { dev_err(&pdev->dev, "failed to allocate spi master\n"); return -ENOMEM; } master->setup = ep93xx_spi_setup; master->transfer = ep93xx_spi_transfer; master->cleanup = ep93xx_spi_cleanup; master->bus_num = pdev->id; master->num_chipselect = info->num_chipselect; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; platform_set_drvdata(pdev, master); espi = spi_master_get_devdata(master); espi->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(espi->clk)) { dev_err(&pdev->dev, "unable to get spi clock\n"); error = PTR_ERR(espi->clk); goto fail_release_master; } spin_lock_init(&espi->lock); init_completion(&espi->wait); /* * Calculate maximum and minimum supported clock rates * for the controller. 
*/ espi->max_rate = clk_get_rate(espi->clk) / 2; espi->min_rate = clk_get_rate(espi->clk) / (254 * 256); espi->pdev = pdev; espi->irq = platform_get_irq(pdev, 0); if (espi->irq < 0) { error = -EBUSY; dev_err(&pdev->dev, "failed to get irq resources\n"); goto fail_put_clock; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "unable to get iomem resource\n"); error = -ENODEV; goto fail_put_clock; } res = request_mem_region(res->start, resource_size(res), pdev->name); if (!res) { dev_err(&pdev->dev, "unable to request iomem resources\n"); error = -EBUSY; goto fail_put_clock; } espi->regs_base = ioremap(res->start, resource_size(res)); if (!espi->regs_base) { dev_err(&pdev->dev, "failed to map resources\n"); error = -ENODEV; goto fail_free_mem; } error = request_irq(espi->irq, ep93xx_spi_interrupt, 0, "ep93xx-spi", espi); if (error) { dev_err(&pdev->dev, "failed to request irq\n"); goto fail_unmap_regs; } espi->wq = create_singlethread_workqueue("ep93xx_spid"); if (!espi->wq) { dev_err(&pdev->dev, "unable to create workqueue\n"); goto fail_free_irq; } INIT_WORK(&espi->msg_work, ep93xx_spi_work); INIT_LIST_HEAD(&espi->msg_queue); espi->running = true; /* make sure that the hardware is disabled */ ep93xx_spi_write_u8(espi, SSPCR1, 0); error = spi_register_master(master); if (error) { dev_err(&pdev->dev, "failed to register SPI master\n"); goto fail_free_queue; } dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n", (unsigned long)res->start, espi->irq); return 0; fail_free_queue: destroy_workqueue(espi->wq); fail_free_irq: free_irq(espi->irq, espi); fail_unmap_regs: iounmap(espi->regs_base); fail_free_mem: release_mem_region(res->start, resource_size(res)); fail_put_clock: clk_put(espi->clk); fail_release_master: spi_master_put(master); platform_set_drvdata(pdev, NULL); return error; } static int __exit ep93xx_spi_remove(struct platform_device *pdev) { struct spi_master *master = platform_get_drvdata(pdev); struct 
ep93xx_spi *espi = spi_master_get_devdata(master); struct resource *res; spin_lock_irq(&espi->lock); espi->running = false; spin_unlock_irq(&espi->lock); destroy_workqueue(espi->wq); /* * Complete remaining messages with %-ESHUTDOWN status. */ spin_lock_irq(&espi->lock); while (!list_empty(&espi->msg_queue)) { struct spi_message *msg; msg = list_first_entry(&espi->msg_queue, struct spi_message, queue); list_del_init(&msg->queue); msg->status = -ESHUTDOWN; spin_unlock_irq(&espi->lock); msg->complete(msg->context); spin_lock_irq(&espi->lock); } spin_unlock_irq(&espi->lock); free_irq(espi->irq, espi); iounmap(espi->regs_base); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); clk_put(espi->clk); platform_set_drvdata(pdev, NULL); spi_unregister_master(master); return 0; } static struct platform_driver ep93xx_spi_driver = { .driver = { .name = "ep93xx-spi", .owner = THIS_MODULE, }, .remove = __exit_p(ep93xx_spi_remove), }; static int __init ep93xx_spi_init(void) { return platform_driver_probe(&ep93xx_spi_driver, ep93xx_spi_probe); } module_init(ep93xx_spi_init); static void __exit ep93xx_spi_exit(void) { platform_driver_unregister(&ep93xx_spi_driver); } module_exit(ep93xx_spi_exit); MODULE_DESCRIPTION("EP93xx SPI Controller driver"); MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:ep93xx-spi");
gpl-2.0
dkhoi1997/android_kernel_samsung_aries
drivers/usb/host/ohci-ep93xx.c
4592
5037
/* * OHCI HCD (Host Controller Driver) for USB. * * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at> * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net> * (C) Copyright 2002 Hewlett-Packard Company * * Bus Glue for ep93xx. * * Written by Christopher Hoover <ch@hpl.hp.com> * Based on fragments of previous driver by Russell King et al. * * Modified for LH7A404 from ohci-sa1111.c * by Durgesh Pattamatta <pattamattad@sharpsec.com> * * Modified for pxa27x from ohci-lh7a404.c * by Nick Bane <nick@cecomputing.co.uk> 26-8-2004 * * Modified for ep93xx from ohci-pxa27x.c * by Lennert Buytenhek <buytenh@wantstofly.org> 28-2-2006 * Based on an earlier driver by Ray Lehtiniemi * * This file is licenced under the GPL. */ #include <linux/clk.h> #include <linux/device.h> #include <linux/signal.h> #include <linux/platform_device.h> static struct clk *usb_host_clock; static void ep93xx_start_hc(struct device *dev) { clk_enable(usb_host_clock); } static void ep93xx_stop_hc(struct device *dev) { clk_disable(usb_host_clock); } static int usb_hcd_ep93xx_probe(const struct hc_driver *driver, struct platform_device *pdev) { int retval; struct usb_hcd *hcd; if (pdev->resource[1].flags != IORESOURCE_IRQ) { dbg("resource[1] is not IORESOURCE_IRQ"); return -ENOMEM; } hcd = usb_create_hcd(driver, &pdev->dev, "ep93xx"); if (hcd == NULL) return -ENOMEM; hcd->rsrc_start = pdev->resource[0].start; hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1; if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { usb_put_hcd(hcd); retval = -EBUSY; goto err1; } hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); if (hcd->regs == NULL) { dbg("ioremap failed"); retval = -ENOMEM; goto err2; } usb_host_clock = clk_get(&pdev->dev, NULL); if (IS_ERR(usb_host_clock)) { dbg("clk_get failed"); retval = PTR_ERR(usb_host_clock); goto err3; } ep93xx_start_hc(&pdev->dev); ohci_hcd_init(hcd_to_ohci(hcd)); retval = usb_add_hcd(hcd, pdev->resource[1].start, 
IRQF_DISABLED); if (retval == 0) return retval; ep93xx_stop_hc(&pdev->dev); err3: iounmap(hcd->regs); err2: release_mem_region(hcd->rsrc_start, hcd->rsrc_len); err1: usb_put_hcd(hcd); return retval; } static void usb_hcd_ep93xx_remove(struct usb_hcd *hcd, struct platform_device *pdev) { usb_remove_hcd(hcd); ep93xx_stop_hc(&pdev->dev); clk_put(usb_host_clock); iounmap(hcd->regs); release_mem_region(hcd->rsrc_start, hcd->rsrc_len); usb_put_hcd(hcd); } static int __devinit ohci_ep93xx_start(struct usb_hcd *hcd) { struct ohci_hcd *ohci = hcd_to_ohci(hcd); int ret; if ((ret = ohci_init(ohci)) < 0) return ret; if ((ret = ohci_run(ohci)) < 0) { err("can't start %s", hcd->self.bus_name); ohci_stop(hcd); return ret; } return 0; } static struct hc_driver ohci_ep93xx_hc_driver = { .description = hcd_name, .product_desc = "EP93xx OHCI", .hcd_priv_size = sizeof(struct ohci_hcd), .irq = ohci_irq, .flags = HCD_USB11 | HCD_MEMORY, .start = ohci_ep93xx_start, .stop = ohci_stop, .shutdown = ohci_shutdown, .urb_enqueue = ohci_urb_enqueue, .urb_dequeue = ohci_urb_dequeue, .endpoint_disable = ohci_endpoint_disable, .get_frame_number = ohci_get_frame, .hub_status_data = ohci_hub_status_data, .hub_control = ohci_hub_control, #ifdef CONFIG_PM .bus_suspend = ohci_bus_suspend, .bus_resume = ohci_bus_resume, #endif .start_port_reset = ohci_start_port_reset, }; extern int usb_disabled(void); static int ohci_hcd_ep93xx_drv_probe(struct platform_device *pdev) { int ret; ret = -ENODEV; if (!usb_disabled()) ret = usb_hcd_ep93xx_probe(&ohci_ep93xx_hc_driver, pdev); return ret; } static int ohci_hcd_ep93xx_drv_remove(struct platform_device *pdev) { struct usb_hcd *hcd = platform_get_drvdata(pdev); usb_hcd_ep93xx_remove(hcd, pdev); return 0; } #ifdef CONFIG_PM static int ohci_hcd_ep93xx_drv_suspend(struct platform_device *pdev, pm_message_t state) { struct usb_hcd *hcd = platform_get_drvdata(pdev); struct ohci_hcd *ohci = hcd_to_ohci(hcd); if (time_before(jiffies, ohci->next_statechange)) msleep(5); 
ohci->next_statechange = jiffies; ep93xx_stop_hc(&pdev->dev); hcd->state = HC_STATE_SUSPENDED; return 0; } static int ohci_hcd_ep93xx_drv_resume(struct platform_device *pdev) { struct usb_hcd *hcd = platform_get_drvdata(pdev); struct ohci_hcd *ohci = hcd_to_ohci(hcd); if (time_before(jiffies, ohci->next_statechange)) msleep(5); ohci->next_statechange = jiffies; ep93xx_start_hc(&pdev->dev); ohci_finish_controller_resume(hcd); return 0; } #endif static struct platform_driver ohci_hcd_ep93xx_driver = { .probe = ohci_hcd_ep93xx_drv_probe, .remove = ohci_hcd_ep93xx_drv_remove, .shutdown = usb_hcd_platform_shutdown, #ifdef CONFIG_PM .suspend = ohci_hcd_ep93xx_drv_suspend, .resume = ohci_hcd_ep93xx_drv_resume, #endif .driver = { .name = "ep93xx-ohci", .owner = THIS_MODULE, }, }; MODULE_ALIAS("platform:ep93xx-ohci");
gpl-2.0
LCameron/linux-xlnx
net/wireless/wext-proc.c
4592
4064
/* * This file implement the Wireless Extensions proc API. * * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com> * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved. * * (As all part of the Linux kernel, this file is GPL) */ /* * The /proc/net/wireless file is a human readable user-space interface * exporting various wireless specific statistics from the wireless devices. * This is the most popular part of the Wireless Extensions ;-) * * This interface is a pure clone of /proc/net/dev (in net/core/dev.c). * The content of the file is basically the content of "struct iw_statistics". */ #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/wireless.h> #include <linux/netdevice.h> #include <linux/rtnetlink.h> #include <net/iw_handler.h> #include <net/wext.h> static void wireless_seq_printf_stats(struct seq_file *seq, struct net_device *dev) { /* Get stats from the driver */ struct iw_statistics *stats = get_wireless_stats(dev); static struct iw_statistics nullstats = {}; /* show device if it's wireless regardless of current stats */ if (!stats) { #ifdef CONFIG_WIRELESS_EXT if (dev->wireless_handlers) stats = &nullstats; #endif #ifdef CONFIG_CFG80211 if (dev->ieee80211_ptr) stats = &nullstats; #endif } if (stats) { seq_printf(seq, "%6s: %04x %3d%c %3d%c %3d%c %6d %6d %6d " "%6d %6d %6d\n", dev->name, stats->status, stats->qual.qual, stats->qual.updated & IW_QUAL_QUAL_UPDATED ? '.' : ' ', ((__s32) stats->qual.level) - ((stats->qual.updated & IW_QUAL_DBM) ? 0x100 : 0), stats->qual.updated & IW_QUAL_LEVEL_UPDATED ? '.' : ' ', ((__s32) stats->qual.noise) - ((stats->qual.updated & IW_QUAL_DBM) ? 0x100 : 0), stats->qual.updated & IW_QUAL_NOISE_UPDATED ? '.' 
: ' ', stats->discard.nwid, stats->discard.code, stats->discard.fragment, stats->discard.retries, stats->discard.misc, stats->miss.beacon); if (stats != &nullstats) stats->qual.updated &= ~IW_QUAL_ALL_UPDATED; } } /* ---------------------------------------------------------------- */ /* * Print info for /proc/net/wireless (print all entries) */ static int wireless_dev_seq_show(struct seq_file *seq, void *v) { might_sleep(); if (v == SEQ_START_TOKEN) seq_printf(seq, "Inter-| sta-| Quality | Discarded " "packets | Missed | WE\n" " face | tus | link level noise | nwid " "crypt frag retry misc | beacon | %d\n", WIRELESS_EXT); else wireless_seq_printf_stats(seq, v); return 0; } static void *wireless_dev_seq_start(struct seq_file *seq, loff_t *pos) { struct net *net = seq_file_net(seq); loff_t off; struct net_device *dev; rtnl_lock(); if (!*pos) return SEQ_START_TOKEN; off = 1; for_each_netdev(net, dev) if (off++ == *pos) return dev; return NULL; } static void *wireless_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct net *net = seq_file_net(seq); ++*pos; return v == SEQ_START_TOKEN ? 
first_net_device(net) : next_net_device(v); } static void wireless_dev_seq_stop(struct seq_file *seq, void *v) { rtnl_unlock(); } static const struct seq_operations wireless_seq_ops = { .start = wireless_dev_seq_start, .next = wireless_dev_seq_next, .stop = wireless_dev_seq_stop, .show = wireless_dev_seq_show, }; static int seq_open_wireless(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &wireless_seq_ops, sizeof(struct seq_net_private)); } static const struct file_operations wireless_seq_fops = { .owner = THIS_MODULE, .open = seq_open_wireless, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; int __net_init wext_proc_init(struct net *net) { /* Create /proc/net/wireless entry */ if (!proc_create("wireless", S_IRUGO, net->proc_net, &wireless_seq_fops)) return -ENOMEM; return 0; } void __net_exit wext_proc_exit(struct net *net) { remove_proc_entry("wireless", net->proc_net); }
gpl-2.0
rcoscali/nethunter-kernel-samsung-tuna
drivers/misc/apanic.c
4592
14545
/* drivers/misc/apanic.c * * Copyright (C) 2009 Google, Inc. * Author: San Mehat <san@android.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/wakelock.h> #include <linux/platform_device.h> #include <linux/uaccess.h> #include <linux/mtd/mtd.h> #include <linux/notifier.h> #include <linux/mtd/mtd.h> #include <linux/debugfs.h> #include <linux/fs.h> #include <linux/proc_fs.h> #include <linux/mutex.h> #include <linux/workqueue.h> #include <linux/preempt.h> extern void ram_console_enable_console(int); struct panic_header { u32 magic; #define PANIC_MAGIC 0xdeadf00d u32 version; #define PHDR_VERSION 0x01 u32 console_offset; u32 console_length; u32 threads_offset; u32 threads_length; }; struct apanic_data { struct mtd_info *mtd; struct panic_header curr; void *bounce; struct proc_dir_entry *apanic_console; struct proc_dir_entry *apanic_threads; }; static struct apanic_data drv_ctx; static struct work_struct proc_removal_work; static DEFINE_MUTEX(drv_mutex); static unsigned int *apanic_bbt; static unsigned int apanic_erase_blocks; static unsigned int apanic_good_blocks; static void set_bb(unsigned int block, unsigned int *bbt) { unsigned int flag = 1; BUG_ON(block >= apanic_erase_blocks); flag = flag << (block%32); apanic_bbt[block/32] |= flag; apanic_good_blocks--; } static 
unsigned int get_bb(unsigned int block, unsigned int *bbt) { unsigned int flag; BUG_ON(block >= apanic_erase_blocks); flag = 1 << (block%32); return apanic_bbt[block/32] & flag; } static void alloc_bbt(struct mtd_info *mtd, unsigned int *bbt) { int bbt_size; apanic_erase_blocks = (mtd->size)>>(mtd->erasesize_shift); bbt_size = (apanic_erase_blocks+32)/32; apanic_bbt = kmalloc(bbt_size*4, GFP_KERNEL); memset(apanic_bbt, 0, bbt_size*4); apanic_good_blocks = apanic_erase_blocks; } static void scan_bbt(struct mtd_info *mtd, unsigned int *bbt) { int i; for (i = 0; i < apanic_erase_blocks; i++) { if (mtd->block_isbad(mtd, i*mtd->erasesize)) set_bb(i, apanic_bbt); } } #define APANIC_INVALID_OFFSET 0xFFFFFFFF static unsigned int phy_offset(struct mtd_info *mtd, unsigned int offset) { unsigned int logic_block = offset>>(mtd->erasesize_shift); unsigned int phy_block; unsigned good_block = 0; for (phy_block = 0; phy_block < apanic_erase_blocks; phy_block++) { if (!get_bb(phy_block, apanic_bbt)) good_block++; if (good_block == (logic_block + 1)) break; } if (good_block != (logic_block + 1)) return APANIC_INVALID_OFFSET; return offset + ((phy_block-logic_block)<<mtd->erasesize_shift); } static void apanic_erase_callback(struct erase_info *done) { wait_queue_head_t *wait_q = (wait_queue_head_t *) done->priv; wake_up(wait_q); } static int apanic_proc_read(char *buffer, char **start, off_t offset, int count, int *peof, void *dat) { struct apanic_data *ctx = &drv_ctx; size_t file_length; off_t file_offset; unsigned int page_no; off_t page_offset; int rc; size_t len; if (!count) return 0; mutex_lock(&drv_mutex); switch ((int) dat) { case 1: /* apanic_console */ file_length = ctx->curr.console_length; file_offset = ctx->curr.console_offset; break; case 2: /* apanic_threads */ file_length = ctx->curr.threads_length; file_offset = ctx->curr.threads_offset; break; default: pr_err("Bad dat (%d)\n", (int) dat); mutex_unlock(&drv_mutex); return -EINVAL; } if ((offset + count) > 
file_length) { mutex_unlock(&drv_mutex); return 0; } /* We only support reading a maximum of a flash page */ if (count > ctx->mtd->writesize) count = ctx->mtd->writesize; page_no = (file_offset + offset) / ctx->mtd->writesize; page_offset = (file_offset + offset) % ctx->mtd->writesize; if (phy_offset(ctx->mtd, (page_no * ctx->mtd->writesize)) == APANIC_INVALID_OFFSET) { pr_err("apanic: reading an invalid address\n"); mutex_unlock(&drv_mutex); return -EINVAL; } rc = ctx->mtd->read(ctx->mtd, phy_offset(ctx->mtd, (page_no * ctx->mtd->writesize)), ctx->mtd->writesize, &len, ctx->bounce); if (page_offset) count -= page_offset; memcpy(buffer, ctx->bounce + page_offset, count); *start = count; if ((offset + count) == file_length) *peof = 1; mutex_unlock(&drv_mutex); return count; } static void mtd_panic_erase(void) { struct apanic_data *ctx = &drv_ctx; struct erase_info erase; DECLARE_WAITQUEUE(wait, current); wait_queue_head_t wait_q; int rc, i; init_waitqueue_head(&wait_q); erase.mtd = ctx->mtd; erase.callback = apanic_erase_callback; erase.len = ctx->mtd->erasesize; erase.priv = (u_long)&wait_q; for (i = 0; i < ctx->mtd->size; i += ctx->mtd->erasesize) { erase.addr = i; set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&wait_q, &wait); if (get_bb(erase.addr>>ctx->mtd->erasesize_shift, apanic_bbt)) { printk(KERN_WARNING "apanic: Skipping erase of bad " "block @%llx\n", erase.addr); set_current_state(TASK_RUNNING); remove_wait_queue(&wait_q, &wait); continue; } rc = ctx->mtd->erase(ctx->mtd, &erase); if (rc) { set_current_state(TASK_RUNNING); remove_wait_queue(&wait_q, &wait); printk(KERN_ERR "apanic: Erase of 0x%llx, 0x%llx failed\n", (unsigned long long) erase.addr, (unsigned long long) erase.len); if (rc == -EIO) { if (ctx->mtd->block_markbad(ctx->mtd, erase.addr)) { printk(KERN_ERR "apanic: Err marking blk bad\n"); goto out; } printk(KERN_INFO "apanic: Marked a bad block" " @%llx\n", erase.addr); set_bb(erase.addr>>ctx->mtd->erasesize_shift, apanic_bbt); 
continue; } goto out; } schedule(); remove_wait_queue(&wait_q, &wait); } printk(KERN_DEBUG "apanic: %s partition erased\n", CONFIG_APANIC_PLABEL); out: return; } static void apanic_remove_proc_work(struct work_struct *work) { struct apanic_data *ctx = &drv_ctx; mutex_lock(&drv_mutex); mtd_panic_erase(); memset(&ctx->curr, 0, sizeof(struct panic_header)); if (ctx->apanic_console) { remove_proc_entry("apanic_console", NULL); ctx->apanic_console = NULL; } if (ctx->apanic_threads) { remove_proc_entry("apanic_threads", NULL); ctx->apanic_threads = NULL; } mutex_unlock(&drv_mutex); } static int apanic_proc_write(struct file *file, const char __user *buffer, unsigned long count, void *data) { schedule_work(&proc_removal_work); return count; } static void mtd_panic_notify_add(struct mtd_info *mtd) { struct apanic_data *ctx = &drv_ctx; struct panic_header *hdr = ctx->bounce; size_t len; int rc; int proc_entry_created = 0; if (strcmp(mtd->name, CONFIG_APANIC_PLABEL)) return; ctx->mtd = mtd; alloc_bbt(mtd, apanic_bbt); scan_bbt(mtd, apanic_bbt); if (apanic_good_blocks == 0) { printk(KERN_ERR "apanic: no any good blocks?!\n"); goto out_err; } rc = mtd->read(mtd, phy_offset(mtd, 0), mtd->writesize, &len, ctx->bounce); if (rc && rc == -EBADMSG) { printk(KERN_WARNING "apanic: Bad ECC on block 0 (ignored)\n"); } else if (rc && rc != -EUCLEAN) { printk(KERN_ERR "apanic: Error reading block 0 (%d)\n", rc); goto out_err; } if (len != mtd->writesize) { printk(KERN_ERR "apanic: Bad read size (%d)\n", rc); goto out_err; } printk(KERN_INFO "apanic: Bound to mtd partition '%s'\n", mtd->name); if (hdr->magic != PANIC_MAGIC) { printk(KERN_INFO "apanic: No panic data available\n"); mtd_panic_erase(); return; } if (hdr->version != PHDR_VERSION) { printk(KERN_INFO "apanic: Version mismatch (%d != %d)\n", hdr->version, PHDR_VERSION); mtd_panic_erase(); return; } memcpy(&ctx->curr, hdr, sizeof(struct panic_header)); printk(KERN_INFO "apanic: c(%u, %u) t(%u, %u)\n", hdr->console_offset, 
hdr->console_length, hdr->threads_offset, hdr->threads_length); if (hdr->console_length) { ctx->apanic_console = create_proc_entry("apanic_console", S_IFREG | S_IRUGO, NULL); if (!ctx->apanic_console) printk(KERN_ERR "%s: failed creating procfile\n", __func__); else { ctx->apanic_console->read_proc = apanic_proc_read; ctx->apanic_console->write_proc = apanic_proc_write; ctx->apanic_console->size = hdr->console_length; ctx->apanic_console->data = (void *) 1; proc_entry_created = 1; } } if (hdr->threads_length) { ctx->apanic_threads = create_proc_entry("apanic_threads", S_IFREG | S_IRUGO, NULL); if (!ctx->apanic_threads) printk(KERN_ERR "%s: failed creating procfile\n", __func__); else { ctx->apanic_threads->read_proc = apanic_proc_read; ctx->apanic_threads->write_proc = apanic_proc_write; ctx->apanic_threads->size = hdr->threads_length; ctx->apanic_threads->data = (void *) 2; proc_entry_created = 1; } } if (!proc_entry_created) mtd_panic_erase(); return; out_err: ctx->mtd = NULL; } static void mtd_panic_notify_remove(struct mtd_info *mtd) { struct apanic_data *ctx = &drv_ctx; if (mtd == ctx->mtd) { ctx->mtd = NULL; printk(KERN_INFO "apanic: Unbound from %s\n", mtd->name); } } static struct mtd_notifier mtd_panic_notifier = { .add = mtd_panic_notify_add, .remove = mtd_panic_notify_remove, }; static int in_panic = 0; static int apanic_writeflashpage(struct mtd_info *mtd, loff_t to, const u_char *buf) { int rc; size_t wlen; int panic = in_interrupt() | in_atomic(); if (panic && !mtd->panic_write) { printk(KERN_EMERG "%s: No panic_write available\n", __func__); return 0; } else if (!panic && !mtd->write) { printk(KERN_EMERG "%s: No write available\n", __func__); return 0; } to = phy_offset(mtd, to); if (to == APANIC_INVALID_OFFSET) { printk(KERN_EMERG "apanic: write to invalid address\n"); return 0; } if (panic) rc = mtd->panic_write(mtd, to, mtd->writesize, &wlen, buf); else rc = mtd->write(mtd, to, mtd->writesize, &wlen, buf); if (rc) { printk(KERN_EMERG "%s: Error 
writing data to flash (%d)\n", __func__, rc); return rc; } return wlen; } extern int log_buf_copy(char *dest, int idx, int len); extern void log_buf_clear(void); /* * Writes the contents of the console to the specified offset in flash. * Returns number of bytes written */ static int apanic_write_console(struct mtd_info *mtd, unsigned int off) { struct apanic_data *ctx = &drv_ctx; int saved_oip; int idx = 0; int rc, rc2; unsigned int last_chunk = 0; while (!last_chunk) { saved_oip = oops_in_progress; oops_in_progress = 1; rc = log_buf_copy(ctx->bounce, idx, mtd->writesize); if (rc < 0) break; if (rc != mtd->writesize) last_chunk = rc; oops_in_progress = saved_oip; if (rc <= 0) break; if (rc != mtd->writesize) memset(ctx->bounce + rc, 0, mtd->writesize - rc); rc2 = apanic_writeflashpage(mtd, off, ctx->bounce); if (rc2 <= 0) { printk(KERN_EMERG "apanic: Flash write failed (%d)\n", rc2); return idx; } if (!last_chunk) idx += rc2; else idx += last_chunk; off += rc2; } return idx; } static int apanic(struct notifier_block *this, unsigned long event, void *ptr) { struct apanic_data *ctx = &drv_ctx; struct panic_header *hdr = (struct panic_header *) ctx->bounce; int console_offset = 0; int console_len = 0; int threads_offset = 0; int threads_len = 0; int rc; if (in_panic) return NOTIFY_DONE; in_panic = 1; #ifdef CONFIG_PREEMPT /* Ensure that cond_resched() won't try to preempt anybody */ add_preempt_count(PREEMPT_ACTIVE); #endif touch_softlockup_watchdog(); if (!ctx->mtd) goto out; if (ctx->curr.magic) { printk(KERN_EMERG "Crash partition in use!\n"); goto out; } console_offset = ctx->mtd->writesize; /* * Write out the console */ console_len = apanic_write_console(ctx->mtd, console_offset); if (console_len < 0) { printk(KERN_EMERG "Error writing console to panic log! 
(%d)\n", console_len); console_len = 0; } /* * Write out all threads */ threads_offset = ALIGN(console_offset + console_len, ctx->mtd->writesize); if (!threads_offset) threads_offset = ctx->mtd->writesize; ram_console_enable_console(0); log_buf_clear(); show_state_filter(0); threads_len = apanic_write_console(ctx->mtd, threads_offset); if (threads_len < 0) { printk(KERN_EMERG "Error writing threads to panic log! (%d)\n", threads_len); threads_len = 0; } /* * Finally write the panic header */ memset(ctx->bounce, 0, PAGE_SIZE); hdr->magic = PANIC_MAGIC; hdr->version = PHDR_VERSION; hdr->console_offset = console_offset; hdr->console_length = console_len; hdr->threads_offset = threads_offset; hdr->threads_length = threads_len; rc = apanic_writeflashpage(ctx->mtd, 0, ctx->bounce); if (rc <= 0) { printk(KERN_EMERG "apanic: Header write failed (%d)\n", rc); goto out; } printk(KERN_EMERG "apanic: Panic dump sucessfully written to flash\n"); out: #ifdef CONFIG_PREEMPT sub_preempt_count(PREEMPT_ACTIVE); #endif in_panic = 0; return NOTIFY_DONE; } static struct notifier_block panic_blk = { .notifier_call = apanic, }; static int panic_dbg_get(void *data, u64 *val) { apanic(NULL, 0, NULL); return 0; } static int panic_dbg_set(void *data, u64 val) { BUG(); return -1; } DEFINE_SIMPLE_ATTRIBUTE(panic_dbg_fops, panic_dbg_get, panic_dbg_set, "%llu\n"); int __init apanic_init(void) { register_mtd_user(&mtd_panic_notifier); atomic_notifier_chain_register(&panic_notifier_list, &panic_blk); debugfs_create_file("apanic", 0644, NULL, NULL, &panic_dbg_fops); memset(&drv_ctx, 0, sizeof(drv_ctx)); drv_ctx.bounce = (void *) __get_free_page(GFP_KERNEL); INIT_WORK(&proc_removal_work, apanic_remove_proc_work); printk(KERN_INFO "Android kernel panic handler initialized (bind=%s)\n", CONFIG_APANIC_PLABEL); return 0; } module_init(apanic_init);
gpl-2.0
CyanideL/android_kernel_samsung_hlte
arch/um/os-Linux/registers.c
5360
1158
/* * Copyright (C) 2004 PathScale, Inc * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <errno.h> #include <string.h> #include <sys/ptrace.h> #include "sysdep/ptrace.h" #include "sysdep/ptrace_user.h" #include "registers.h" int save_registers(int pid, struct uml_pt_regs *regs) { int err; err = ptrace(PTRACE_GETREGS, pid, 0, regs->gp); if (err < 0) return -errno; return 0; } int restore_registers(int pid, struct uml_pt_regs *regs) { int err; err = ptrace(PTRACE_SETREGS, pid, 0, regs->gp); if (err < 0) return -errno; return 0; } /* This is set once at boot time and not changed thereafter */ static unsigned long exec_regs[MAX_REG_NR]; static unsigned long exec_fp_regs[FP_SIZE]; int init_registers(int pid) { int err; err = ptrace(PTRACE_GETREGS, pid, 0, exec_regs); if (err < 0) return -errno; arch_init_registers(pid); get_fp_registers(pid, exec_fp_regs); return 0; } void get_safe_registers(unsigned long *regs, unsigned long *fp_regs) { memcpy(regs, exec_regs, sizeof(exec_regs)); if (fp_regs) memcpy(fp_regs, exec_fp_regs, sizeof(exec_fp_regs)); }
gpl-2.0
CyanogenMod/android_kernel_asus_tf701t
drivers/mfd/davinci_voicecodec.c
7920
4979
/* * DaVinci Voice Codec Core Interface for TI platforms * * Copyright (C) 2010 Texas Instruments, Inc * * Author: Miguel Aguilar <miguel.aguilar@ridgerun.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/clk.h> #include <sound/pcm.h> #include <linux/mfd/davinci_voicecodec.h> u32 davinci_vc_read(struct davinci_vc *davinci_vc, int reg) { return __raw_readl(davinci_vc->base + reg); } void davinci_vc_write(struct davinci_vc *davinci_vc, int reg, u32 val) { __raw_writel(val, davinci_vc->base + reg); } static int __init davinci_vc_probe(struct platform_device *pdev) { struct davinci_vc *davinci_vc; struct resource *res, *mem; struct mfd_cell *cell = NULL; int ret; davinci_vc = kzalloc(sizeof(struct davinci_vc), GFP_KERNEL); if (!davinci_vc) { dev_dbg(&pdev->dev, "could not allocate memory for private data\n"); return -ENOMEM; } davinci_vc->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(davinci_vc->clk)) { dev_dbg(&pdev->dev, "could not get the clock for voice codec\n"); ret = -ENODEV; goto fail1; } clk_enable(davinci_vc->clk); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "no mem resource\n"); ret = -ENODEV; goto fail2; } 
davinci_vc->pbase = res->start; davinci_vc->base_size = resource_size(res); mem = request_mem_region(davinci_vc->pbase, davinci_vc->base_size, pdev->name); if (!mem) { dev_err(&pdev->dev, "VCIF region already claimed\n"); ret = -EBUSY; goto fail2; } davinci_vc->base = ioremap(davinci_vc->pbase, davinci_vc->base_size); if (!davinci_vc->base) { dev_err(&pdev->dev, "can't ioremap mem resource.\n"); ret = -ENOMEM; goto fail3; } res = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (!res) { dev_err(&pdev->dev, "no DMA resource\n"); ret = -ENXIO; goto fail4; } davinci_vc->davinci_vcif.dma_tx_channel = res->start; davinci_vc->davinci_vcif.dma_tx_addr = (dma_addr_t)(io_v2p(davinci_vc->base) + DAVINCI_VC_WFIFO); res = platform_get_resource(pdev, IORESOURCE_DMA, 1); if (!res) { dev_err(&pdev->dev, "no DMA resource\n"); ret = -ENXIO; goto fail4; } davinci_vc->davinci_vcif.dma_rx_channel = res->start; davinci_vc->davinci_vcif.dma_rx_addr = (dma_addr_t)(io_v2p(davinci_vc->base) + DAVINCI_VC_RFIFO); davinci_vc->dev = &pdev->dev; davinci_vc->pdev = pdev; /* Voice codec interface client */ cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL]; cell->name = "davinci-vcif"; cell->platform_data = davinci_vc; cell->pdata_size = sizeof(*davinci_vc); /* Voice codec CQ93VC client */ cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL]; cell->name = "cq93vc-codec"; cell->platform_data = davinci_vc; cell->pdata_size = sizeof(*davinci_vc); ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells, DAVINCI_VC_CELLS, NULL, 0); if (ret != 0) { dev_err(&pdev->dev, "fail to register client devices\n"); goto fail4; } return 0; fail4: iounmap(davinci_vc->base); fail3: release_mem_region(davinci_vc->pbase, davinci_vc->base_size); fail2: clk_disable(davinci_vc->clk); clk_put(davinci_vc->clk); davinci_vc->clk = NULL; fail1: kfree(davinci_vc); return ret; } static int __devexit davinci_vc_remove(struct platform_device *pdev) { struct davinci_vc *davinci_vc = platform_get_drvdata(pdev); 
mfd_remove_devices(&pdev->dev); iounmap(davinci_vc->base); release_mem_region(davinci_vc->pbase, davinci_vc->base_size); clk_disable(davinci_vc->clk); clk_put(davinci_vc->clk); davinci_vc->clk = NULL; kfree(davinci_vc); return 0; } static struct platform_driver davinci_vc_driver = { .driver = { .name = "davinci_voicecodec", .owner = THIS_MODULE, }, .remove = __devexit_p(davinci_vc_remove), }; static int __init davinci_vc_init(void) { return platform_driver_probe(&davinci_vc_driver, davinci_vc_probe); } module_init(davinci_vc_init); static void __exit davinci_vc_exit(void) { platform_driver_unregister(&davinci_vc_driver); } module_exit(davinci_vc_exit); MODULE_AUTHOR("Miguel Aguilar"); MODULE_DESCRIPTION("Texas Instruments DaVinci Voice Codec Core Interface"); MODULE_LICENSE("GPL");
gpl-2.0
linino/linux
drivers/atm/adummy.c
12784
4120
/* * adummy.c: a dummy ATM driver */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/byteorder.h> #include <asm/uaccess.h> #include <linux/atmdev.h> #include <linux/atm.h> #include <linux/sonet.h> /* version definition */ #define DRV_VERSION "1.0" #define DEV_LABEL "adummy" #define ADUMMY_DEV(dev) ((struct adummy_dev *) (dev)->dev_data) struct adummy_dev { struct atm_dev *atm_dev; struct list_head entry; }; /* globals */ static LIST_HEAD(adummy_devs); static ssize_t __set_signal(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct atm_dev *atm_dev = container_of(dev, struct atm_dev, class_dev); int signal; if (sscanf(buf, "%d", &signal) == 1) { if (signal < ATM_PHY_SIG_LOST || signal > ATM_PHY_SIG_FOUND) signal = ATM_PHY_SIG_UNKNOWN; atm_dev_signal_change(atm_dev, signal); return 1; } return -EINVAL; } static ssize_t __show_signal(struct device *dev, struct device_attribute *attr, char *buf) { struct atm_dev *atm_dev = container_of(dev, struct atm_dev, class_dev); return sprintf(buf, "%d\n", atm_dev->signal); } static DEVICE_ATTR(signal, 0644, __show_signal, __set_signal); static struct attribute *adummy_attrs[] = { &dev_attr_signal.attr, NULL }; static struct attribute_group adummy_group_attrs = { .name = NULL, /* We want them in dev's root folder */ .attrs = adummy_attrs }; static int __init adummy_start(struct atm_dev *dev) { dev->ci_range.vpi_bits = 4; dev->ci_range.vci_bits = 12; return 0; } static int adummy_open(struct atm_vcc *vcc) { short vpi = vcc->vpi; int vci = vcc->vci; if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC) return 0; set_bit(ATM_VF_ADDR, &vcc->flags); set_bit(ATM_VF_READY, &vcc->flags); return 0; } static void 
adummy_close(struct atm_vcc *vcc) { clear_bit(ATM_VF_READY, &vcc->flags); clear_bit(ATM_VF_ADDR, &vcc->flags); } static int adummy_send(struct atm_vcc *vcc, struct sk_buff *skb) { if (vcc->pop) vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); atomic_inc(&vcc->stats->tx); return 0; } static int adummy_proc_read(struct atm_dev *dev, loff_t *pos, char *page) { int left = *pos; if (!left--) return sprintf(page, "version %s\n", DRV_VERSION); return 0; } static struct atmdev_ops adummy_ops = { .open = adummy_open, .close = adummy_close, .send = adummy_send, .proc_read = adummy_proc_read, .owner = THIS_MODULE }; static int __init adummy_init(void) { struct atm_dev *atm_dev; struct adummy_dev *adummy_dev; int err = 0; printk(KERN_ERR "adummy: version %s\n", DRV_VERSION); adummy_dev = kzalloc(sizeof(struct adummy_dev), GFP_KERNEL); if (!adummy_dev) { printk(KERN_ERR DEV_LABEL ": kzalloc() failed\n"); err = -ENOMEM; goto out; } atm_dev = atm_dev_register(DEV_LABEL, NULL, &adummy_ops, -1, NULL); if (!atm_dev) { printk(KERN_ERR DEV_LABEL ": atm_dev_register() failed\n"); err = -ENODEV; goto out_kfree; } adummy_dev->atm_dev = atm_dev; atm_dev->dev_data = adummy_dev; if (sysfs_create_group(&atm_dev->class_dev.kobj, &adummy_group_attrs)) dev_err(&atm_dev->class_dev, "Could not register attrs for adummy\n"); if (adummy_start(atm_dev)) { printk(KERN_ERR DEV_LABEL ": adummy_start() failed\n"); err = -ENODEV; goto out_unregister; } list_add(&adummy_dev->entry, &adummy_devs); out: return err; out_unregister: atm_dev_deregister(atm_dev); out_kfree: kfree(adummy_dev); goto out; } static void __exit adummy_cleanup(void) { struct adummy_dev *adummy_dev, *next; list_for_each_entry_safe(adummy_dev, next, &adummy_devs, entry) { atm_dev_deregister(adummy_dev->atm_dev); kfree(adummy_dev); } } module_init(adummy_init); module_exit(adummy_cleanup); MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>"); MODULE_DESCRIPTION("dummy ATM driver"); MODULE_LICENSE("GPL");
gpl-2.0
psyke83/kernel_huawei_u8160
arch/alpha/kernel/core_polaris.c
13808
4523
/* * linux/arch/alpha/kernel/core_polaris.c * * POLARIS chip-specific code */ #define __EXTERN_INLINE inline #include <asm/io.h> #include <asm/core_polaris.h> #undef __EXTERN_INLINE #include <linux/types.h> #include <linux/pci.h> #include <linux/sched.h> #include <linux/init.h> #include <asm/ptrace.h> #include "proto.h" #include "pci_impl.h" /* * BIOS32-style PCI interface: */ #define DEBUG_CONFIG 0 #if DEBUG_CONFIG # define DBG_CFG(args) printk args #else # define DBG_CFG(args) #endif /* * Given a bus, device, and function number, compute resulting * configuration space address. This is fairly straightforward * on POLARIS, since the chip itself generates Type 0 or Type 1 * cycles automatically depending on the bus number (Bus 0 is * hardwired to Type 0, all others are Type 1. Peer bridges * are not supported). * * All types: * * 3 3 3 3|3 3 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 * 9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |1|1|1|1|1|0|0|1|1|1|1|1|1|1|1|0|B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|x|x| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 23:16 bus number (8 bits = 128 possible buses) * 15:11 Device number (5 bits) * 10:8 function number * 7:2 register number * * Notes: * The function number selects which function of a multi-function device * (e.g., scsi and ethernet). * * The register selects a DWORD (32 bit) register offset. Hence it * doesn't get shifted by 2 bits as we want to "drop" the bottom two * bits. */ static int mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, unsigned long *pci_addr, u8 *type1) { u8 bus = pbus->number; *type1 = (bus == 0) ? 
0 : 1; *pci_addr = (bus << 16) | (device_fn << 8) | (where) | POLARIS_DENSE_CONFIG_BASE; DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x," " returning address 0x%p\n" bus, device_fn, where, *pci_addr)); return 0; } static int polaris_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { unsigned long addr; unsigned char type1; if (mk_conf_addr(bus, devfn, where, &addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: *value = __kernel_ldbu(*(vucp)addr); break; case 2: *value = __kernel_ldwu(*(vusp)addr); break; case 4: *value = *(vuip)addr; break; } return PCIBIOS_SUCCESSFUL; } static int polaris_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { unsigned long addr; unsigned char type1; if (mk_conf_addr(bus, devfn, where, &addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: __kernel_stb(value, *(vucp)addr); mb(); __kernel_ldbu(*(vucp)addr); break; case 2: __kernel_stw(value, *(vusp)addr); mb(); __kernel_ldwu(*(vusp)addr); break; case 4: *(vuip)addr = value; mb(); *(vuip)addr; break; } return PCIBIOS_SUCCESSFUL; } struct pci_ops polaris_pci_ops = { .read = polaris_read_config, .write = polaris_write_config, }; void __init polaris_init_arch(void) { struct pci_controller *hose; /* May need to initialize error reporting (see PCICTL0/1), but * for now assume that the firmware has done the right thing * already. */ #if 0 printk("polaris_init_arch(): trusting firmware for setup\n"); #endif /* * Create our single hose. */ pci_isa_hose = hose = alloc_pci_controller(); hose->io_space = &ioport_resource; hose->mem_space = &iomem_resource; hose->index = 0; hose->sparse_mem_base = 0; hose->dense_mem_base = POLARIS_DENSE_MEM_BASE - IDENT_ADDR; hose->sparse_io_base = 0; hose->dense_io_base = POLARIS_DENSE_IO_BASE - IDENT_ADDR; hose->sg_isa = hose->sg_pci = NULL; /* The I/O window is fixed at 2G @ 2G. 
*/ __direct_map_base = 0x80000000; __direct_map_size = 0x80000000; } static inline void polaris_pci_clr_err(void) { *(vusp)POLARIS_W_STATUS; /* Write 1's to settable bits to clear errors */ *(vusp)POLARIS_W_STATUS = 0x7800; mb(); *(vusp)POLARIS_W_STATUS; } void polaris_machine_check(unsigned long vector, unsigned long la_ptr) { /* Clear the error before any reporting. */ mb(); mb(); draina(); polaris_pci_clr_err(); wrmces(0x7); mb(); process_mcheck_info(vector, la_ptr, "POLARIS", mcheck_expected(0)); }
gpl-2.0
nitrogen-os-devices/nitrogen_kernel_lge_hammerhead
arch/alpha/kernel/core_polaris.c
13808
4523
/* * linux/arch/alpha/kernel/core_polaris.c * * POLARIS chip-specific code */ #define __EXTERN_INLINE inline #include <asm/io.h> #include <asm/core_polaris.h> #undef __EXTERN_INLINE #include <linux/types.h> #include <linux/pci.h> #include <linux/sched.h> #include <linux/init.h> #include <asm/ptrace.h> #include "proto.h" #include "pci_impl.h" /* * BIOS32-style PCI interface: */ #define DEBUG_CONFIG 0 #if DEBUG_CONFIG # define DBG_CFG(args) printk args #else # define DBG_CFG(args) #endif /* * Given a bus, device, and function number, compute resulting * configuration space address. This is fairly straightforward * on POLARIS, since the chip itself generates Type 0 or Type 1 * cycles automatically depending on the bus number (Bus 0 is * hardwired to Type 0, all others are Type 1. Peer bridges * are not supported). * * All types: * * 3 3 3 3|3 3 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 * 9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |1|1|1|1|1|0|0|1|1|1|1|1|1|1|1|0|B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|x|x| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 23:16 bus number (8 bits = 128 possible buses) * 15:11 Device number (5 bits) * 10:8 function number * 7:2 register number * * Notes: * The function number selects which function of a multi-function device * (e.g., scsi and ethernet). * * The register selects a DWORD (32 bit) register offset. Hence it * doesn't get shifted by 2 bits as we want to "drop" the bottom two * bits. */ static int mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, unsigned long *pci_addr, u8 *type1) { u8 bus = pbus->number; *type1 = (bus == 0) ? 
0 : 1; *pci_addr = (bus << 16) | (device_fn << 8) | (where) | POLARIS_DENSE_CONFIG_BASE; DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x," " returning address 0x%p\n" bus, device_fn, where, *pci_addr)); return 0; } static int polaris_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { unsigned long addr; unsigned char type1; if (mk_conf_addr(bus, devfn, where, &addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: *value = __kernel_ldbu(*(vucp)addr); break; case 2: *value = __kernel_ldwu(*(vusp)addr); break; case 4: *value = *(vuip)addr; break; } return PCIBIOS_SUCCESSFUL; } static int polaris_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { unsigned long addr; unsigned char type1; if (mk_conf_addr(bus, devfn, where, &addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: __kernel_stb(value, *(vucp)addr); mb(); __kernel_ldbu(*(vucp)addr); break; case 2: __kernel_stw(value, *(vusp)addr); mb(); __kernel_ldwu(*(vusp)addr); break; case 4: *(vuip)addr = value; mb(); *(vuip)addr; break; } return PCIBIOS_SUCCESSFUL; } struct pci_ops polaris_pci_ops = { .read = polaris_read_config, .write = polaris_write_config, }; void __init polaris_init_arch(void) { struct pci_controller *hose; /* May need to initialize error reporting (see PCICTL0/1), but * for now assume that the firmware has done the right thing * already. */ #if 0 printk("polaris_init_arch(): trusting firmware for setup\n"); #endif /* * Create our single hose. */ pci_isa_hose = hose = alloc_pci_controller(); hose->io_space = &ioport_resource; hose->mem_space = &iomem_resource; hose->index = 0; hose->sparse_mem_base = 0; hose->dense_mem_base = POLARIS_DENSE_MEM_BASE - IDENT_ADDR; hose->sparse_io_base = 0; hose->dense_io_base = POLARIS_DENSE_IO_BASE - IDENT_ADDR; hose->sg_isa = hose->sg_pci = NULL; /* The I/O window is fixed at 2G @ 2G. 
*/ __direct_map_base = 0x80000000; __direct_map_size = 0x80000000; } static inline void polaris_pci_clr_err(void) { *(vusp)POLARIS_W_STATUS; /* Write 1's to settable bits to clear errors */ *(vusp)POLARIS_W_STATUS = 0x7800; mb(); *(vusp)POLARIS_W_STATUS; } void polaris_machine_check(unsigned long vector, unsigned long la_ptr) { /* Clear the error before any reporting. */ mb(); mb(); draina(); polaris_pci_clr_err(); wrmces(0x7); mb(); process_mcheck_info(vector, la_ptr, "POLARIS", mcheck_expected(0)); }
gpl-2.0
BIBIMAINETTIDEV/imx6m300-linux-3.10.17
scripts/kconfig/confdata.c
241
25401
/* * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org> * Released under the terms of the GNU GPL v2.0. */ #include <sys/stat.h> #include <ctype.h> #include <errno.h> #include <fcntl.h> #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <unistd.h> #include "lkc.h" static void conf_warning(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); static void conf_message(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); static const char *conf_filename; static int conf_lineno, conf_warnings, conf_unsaved; const char conf_defname[] = "arch/$ARCH/defconfig"; static void conf_warning(const char *fmt, ...) { va_list ap; va_start(ap, fmt); fprintf(stderr, "%s:%d:warning: ", conf_filename, conf_lineno); vfprintf(stderr, fmt, ap); fprintf(stderr, "\n"); va_end(ap); conf_warnings++; } static void conf_default_message_callback(const char *fmt, va_list ap) { printf("#\n# "); vprintf(fmt, ap); printf("\n#\n"); } static void (*conf_message_callback) (const char *fmt, va_list ap) = conf_default_message_callback; void conf_set_message_callback(void (*fn) (const char *fmt, va_list ap)) { conf_message_callback = fn; } static void conf_message(const char *fmt, ...) { va_list ap; va_start(ap, fmt); if (conf_message_callback) conf_message_callback(fmt, ap); } const char *conf_get_configname(void) { char *name = getenv("KCONFIG_CONFIG"); return name ? name : ".config"; } const char *conf_get_autoconfig_name(void) { char *name = getenv("KCONFIG_AUTOCONFIG"); return name ? 
name : "include/config/auto.conf"; } static char *conf_expand_value(const char *in) { struct symbol *sym; const char *src; static char res_value[SYMBOL_MAXLENGTH]; char *dst, name[SYMBOL_MAXLENGTH]; res_value[0] = 0; dst = name; while ((src = strchr(in, '$'))) { strncat(res_value, in, src - in); src++; dst = name; while (isalnum(*src) || *src == '_') *dst++ = *src++; *dst = 0; sym = sym_lookup(name, 0); sym_calc_value(sym); strcat(res_value, sym_get_string_value(sym)); in = src; } strcat(res_value, in); return res_value; } char *conf_get_default_confname(void) { struct stat buf; static char fullname[PATH_MAX+1]; char *env, *name; name = conf_expand_value(conf_defname); env = getenv(SRCTREE); if (env) { sprintf(fullname, "%s/%s", env, name); if (!stat(fullname, &buf)) return fullname; } return name; } static int conf_set_sym_val(struct symbol *sym, int def, int def_flags, char *p) { char *p2; switch (sym->type) { case S_TRISTATE: if (p[0] == 'm') { sym->def[def].tri = mod; sym->flags |= def_flags; break; } /* fall through */ case S_BOOLEAN: if (p[0] == 'y') { sym->def[def].tri = yes; sym->flags |= def_flags; break; } if (p[0] == 'n') { sym->def[def].tri = no; sym->flags |= def_flags; break; } conf_warning("symbol value '%s' invalid for %s", p, sym->name); return 1; case S_OTHER: if (*p != '"') { for (p2 = p; *p2 && !isspace(*p2); p2++) ; sym->type = S_STRING; goto done; } /* fall through */ case S_STRING: if (*p++ != '"') break; for (p2 = p; (p2 = strpbrk(p2, "\"\\")); p2++) { if (*p2 == '"') { *p2 = 0; break; } memmove(p2, p2 + 1, strlen(p2)); } if (!p2) { conf_warning("invalid string found"); return 1; } /* fall through */ case S_INT: case S_HEX: done: if (sym_string_valid(sym, p)) { sym->def[def].val = strdup(p); sym->flags |= def_flags; } else { conf_warning("symbol value '%s' invalid for %s", p, sym->name); return 1; } break; default: ; } return 0; } #define LINE_GROWTH 16 static int add_byte(int c, char **lineptr, size_t slen, size_t *n) { char *nline; size_t 
new_size = slen + 1; if (new_size > *n) { new_size += LINE_GROWTH - 1; new_size *= 2; nline = realloc(*lineptr, new_size); if (!nline) return -1; *lineptr = nline; *n = new_size; } (*lineptr)[slen] = c; return 0; } static ssize_t compat_getline(char **lineptr, size_t *n, FILE *stream) { char *line = *lineptr; size_t slen = 0; for (;;) { int c = getc(stream); switch (c) { case '\n': if (add_byte(c, &line, slen, n) < 0) goto e_out; slen++; /* fall through */ case EOF: if (add_byte('\0', &line, slen, n) < 0) goto e_out; *lineptr = line; if (slen == 0) return -1; return slen; default: if (add_byte(c, &line, slen, n) < 0) goto e_out; slen++; } } e_out: line[slen-1] = '\0'; *lineptr = line; return -1; } int conf_read_simple(const char *name, int def) { FILE *in = NULL; char *line = NULL; size_t line_asize = 0; char *p, *p2; struct symbol *sym; int i, def_flags; if (name) { in = zconf_fopen(name); } else { struct property *prop; name = conf_get_configname(); in = zconf_fopen(name); if (in) goto load; sym_add_change_count(1); if (!sym_defconfig_list) { if (modules_sym) sym_calc_value(modules_sym); return 1; } for_all_defaults(sym_defconfig_list, prop) { if (expr_calc_value(prop->visible.expr) == no || prop->expr->type != E_SYMBOL) continue; name = conf_expand_value(prop->expr->left.sym->name); in = zconf_fopen(name); if (in) { conf_message(_("using defaults found in %s"), name); goto load; } } } if (!in) return 1; load: conf_filename = name; conf_lineno = 0; conf_warnings = 0; conf_unsaved = 0; def_flags = SYMBOL_DEF << def; for_all_symbols(i, sym) { sym->flags |= SYMBOL_CHANGED; sym->flags &= ~(def_flags|SYMBOL_VALID); if (sym_is_choice(sym)) sym->flags |= def_flags; switch (sym->type) { case S_INT: case S_HEX: case S_STRING: if (sym->def[def].val) free(sym->def[def].val); /* fall through */ default: sym->def[def].val = NULL; sym->def[def].tri = no; } } while (compat_getline(&line, &line_asize, in) != -1) { conf_lineno++; sym = NULL; if (line[0] == '#') { if (memcmp(line 
+ 2, CONFIG_, strlen(CONFIG_))) continue; p = strchr(line + 2 + strlen(CONFIG_), ' '); if (!p) continue; *p++ = 0; if (strncmp(p, "is not set", 10)) continue; if (def == S_DEF_USER) { sym = sym_find(line + 2 + strlen(CONFIG_)); if (!sym) { sym_add_change_count(1); goto setsym; } } else { sym = sym_lookup(line + 2 + strlen(CONFIG_), 0); if (sym->type == S_UNKNOWN) sym->type = S_BOOLEAN; } if (sym->flags & def_flags) { conf_warning("override: reassigning to symbol %s", sym->name); } switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: sym->def[def].tri = no; sym->flags |= def_flags; break; default: ; } } else if (memcmp(line, CONFIG_, strlen(CONFIG_)) == 0) { p = strchr(line + strlen(CONFIG_), '='); if (!p) continue; *p++ = 0; p2 = strchr(p, '\n'); if (p2) { *p2-- = 0; if (*p2 == '\r') *p2 = 0; } if (def == S_DEF_USER) { sym = sym_find(line + strlen(CONFIG_)); if (!sym) { sym_add_change_count(1); goto setsym; } } else { sym = sym_lookup(line + strlen(CONFIG_), 0); if (sym->type == S_UNKNOWN) sym->type = S_OTHER; } if (sym->flags & def_flags) { conf_warning("override: reassigning to symbol %s", sym->name); } if (conf_set_sym_val(sym, def, def_flags, p)) continue; } else { if (line[0] != '\r' && line[0] != '\n') conf_warning("unexpected data"); continue; } setsym: if (sym && sym_is_choice_value(sym)) { struct symbol *cs = prop_get_symbol(sym_get_choice_prop(sym)); switch (sym->def[def].tri) { case no: break; case mod: if (cs->def[def].tri == yes) { conf_warning("%s creates inconsistent choice state", sym->name); cs->flags &= ~def_flags; } break; case yes: if (cs->def[def].tri != no) conf_warning("override: %s changes choice state", sym->name); cs->def[def].val = sym; break; } cs->def[def].tri = EXPR_OR(cs->def[def].tri, sym->def[def].tri); } } free(line); fclose(in); if (modules_sym) sym_calc_value(modules_sym); return 0; } int conf_read(const char *name) { struct symbol *sym; int i; sym_set_change_count(0); if (conf_read_simple(name, S_DEF_USER)) return 1; 
for_all_symbols(i, sym) { sym_calc_value(sym); if (sym_is_choice(sym) || (sym->flags & SYMBOL_AUTO)) continue; if (sym_has_value(sym) && (sym->flags & SYMBOL_WRITE)) { /* check that calculated value agrees with saved value */ switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: if (sym->def[S_DEF_USER].tri != sym_get_tristate_value(sym)) break; if (!sym_is_choice(sym)) continue; /* fall through */ default: if (!strcmp(sym->curr.val, sym->def[S_DEF_USER].val)) continue; break; } } else if (!sym_has_value(sym) && !(sym->flags & SYMBOL_WRITE)) /* no previous value and not saved */ continue; conf_unsaved++; /* maybe print value in verbose mode... */ } for_all_symbols(i, sym) { if (sym_has_value(sym) && !sym_is_choice_value(sym)) { /* Reset values of generates values, so they'll appear * as new, if they should become visible, but that * doesn't quite work if the Kconfig and the saved * configuration disagree. */ if (sym->visible == no && !conf_unsaved) sym->flags &= ~SYMBOL_DEF_USER; switch (sym->type) { case S_STRING: case S_INT: case S_HEX: /* Reset a string value if it's out of range */ if (sym_string_within_range(sym, sym->def[S_DEF_USER].val)) break; sym->flags &= ~(SYMBOL_VALID|SYMBOL_DEF_USER); conf_unsaved++; break; default: break; } } } sym_add_change_count(conf_warnings || conf_unsaved); return 0; } /* * Kconfig configuration printer * * This printer is used when generating the resulting configuration after * kconfig invocation and `defconfig' files. Unset symbol might be omitted by * passing a non-NULL argument to the printer. 
* */ static void kconfig_print_symbol(FILE *fp, struct symbol *sym, const char *value, void *arg) { switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: if (*value == 'n') { bool skip_unset = (arg != NULL); if (!skip_unset) fprintf(fp, "# %s%s is not set\n", CONFIG_, sym->name); return; } break; default: break; } fprintf(fp, "%s%s=%s\n", CONFIG_, sym->name, value); } static void kconfig_print_comment(FILE *fp, const char *value, void *arg) { const char *p = value; size_t l; for (;;) { l = strcspn(p, "\n"); fprintf(fp, "#"); if (l) { fprintf(fp, " "); xfwrite(p, l, 1, fp); p += l; } fprintf(fp, "\n"); if (*p++ == '\0') break; } } static struct conf_printer kconfig_printer_cb = { .print_symbol = kconfig_print_symbol, .print_comment = kconfig_print_comment, }; /* * Header printer * * This printer is used when generating the `include/generated/autoconf.h' file. */ static void header_print_symbol(FILE *fp, struct symbol *sym, const char *value, void *arg) { switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: { const char *suffix = ""; switch (*value) { case 'n': break; case 'm': suffix = "_MODULE"; /* fall through */ default: fprintf(fp, "#define %s%s%s 1\n", CONFIG_, sym->name, suffix); } break; } case S_HEX: { const char *prefix = ""; if (value[0] != '0' || (value[1] != 'x' && value[1] != 'X')) prefix = "0x"; fprintf(fp, "#define %s%s %s%s\n", CONFIG_, sym->name, prefix, value); break; } case S_STRING: case S_INT: fprintf(fp, "#define %s%s %s\n", CONFIG_, sym->name, value); break; default: break; } } static void header_print_comment(FILE *fp, const char *value, void *arg) { const char *p = value; size_t l; fprintf(fp, "/*\n"); for (;;) { l = strcspn(p, "\n"); fprintf(fp, " *"); if (l) { fprintf(fp, " "); xfwrite(p, l, 1, fp); p += l; } fprintf(fp, "\n"); if (*p++ == '\0') break; } fprintf(fp, " */\n"); } static struct conf_printer header_printer_cb = { .print_symbol = header_print_symbol, .print_comment = header_print_comment, }; /* * Tristate printer * * This 
printer is used when generating the `include/config/tristate.conf' file. */ static void tristate_print_symbol(FILE *fp, struct symbol *sym, const char *value, void *arg) { if (sym->type == S_TRISTATE && *value != 'n') fprintf(fp, "%s%s=%c\n", CONFIG_, sym->name, (char)toupper(*value)); } static struct conf_printer tristate_printer_cb = { .print_symbol = tristate_print_symbol, .print_comment = kconfig_print_comment, }; static void conf_write_symbol(FILE *fp, struct symbol *sym, struct conf_printer *printer, void *printer_arg) { const char *str; switch (sym->type) { case S_OTHER: case S_UNKNOWN: break; case S_STRING: str = sym_get_string_value(sym); str = sym_escape_string_value(str); printer->print_symbol(fp, sym, str, printer_arg); free((void *)str); break; default: str = sym_get_string_value(sym); printer->print_symbol(fp, sym, str, printer_arg); } } static void conf_write_heading(FILE *fp, struct conf_printer *printer, void *printer_arg) { char buf[256]; snprintf(buf, sizeof(buf), "\n" "Automatically generated file; DO NOT EDIT.\n" "%s\n", rootmenu.prompt->text); printer->print_comment(fp, buf, printer_arg); } /* * Write out a minimal config. * All values that has default values are skipped as this is redundant. 
*/ int conf_write_defconfig(const char *filename) { struct symbol *sym; struct menu *menu; FILE *out; out = fopen(filename, "w"); if (!out) return 1; sym_clear_all_valid(); /* Traverse all menus to find all relevant symbols */ menu = rootmenu.list; while (menu != NULL) { sym = menu->sym; if (sym == NULL) { if (!menu_is_visible(menu)) goto next_menu; } else if (!sym_is_choice(sym)) { sym_calc_value(sym); if (!(sym->flags & SYMBOL_WRITE)) goto next_menu; sym->flags &= ~SYMBOL_WRITE; /* If we cannot change the symbol - skip */ if (!sym_is_changable(sym)) goto next_menu; /* If symbol equals to default value - skip */ if (strcmp(sym_get_string_value(sym), sym_get_string_default(sym)) == 0) goto next_menu; /* * If symbol is a choice value and equals to the * default for a choice - skip. * But only if value is bool and equal to "y" and * choice is not "optional". * (If choice is "optional" then all values can be "n") */ if (sym_is_choice_value(sym)) { struct symbol *cs; struct symbol *ds; cs = prop_get_symbol(sym_get_choice_prop(sym)); ds = sym_choice_default(cs); if (!sym_is_optional(cs) && sym == ds) { if ((sym->type == S_BOOLEAN) && sym_get_tristate_value(sym) == yes) goto next_menu; } } conf_write_symbol(out, sym, &kconfig_printer_cb, NULL); } next_menu: if (menu->list != NULL) { menu = menu->list; } else if (menu->next != NULL) { menu = menu->next; } else { while ((menu = menu->parent)) { if (menu->next != NULL) { menu = menu->next; break; } } } } fclose(out); return 0; } int conf_write(const char *name) { FILE *out; struct symbol *sym; struct menu *menu; const char *basename; const char *str; char dirname[PATH_MAX+1], tmpname[PATH_MAX+1], newname[PATH_MAX+1]; char *env; dirname[0] = 0; if (name && name[0]) { struct stat st; char *slash; if (!stat(name, &st) && S_ISDIR(st.st_mode)) { strcpy(dirname, name); strcat(dirname, "/"); basename = conf_get_configname(); } else if ((slash = strrchr(name, '/'))) { int size = slash - name + 1; memcpy(dirname, name, size); 
dirname[size] = 0; if (slash[1]) basename = slash + 1; else basename = conf_get_configname(); } else basename = name; } else basename = conf_get_configname(); sprintf(newname, "%s%s", dirname, basename); env = getenv("KCONFIG_OVERWRITECONFIG"); if (!env || !*env) { sprintf(tmpname, "%s.tmpconfig.%d", dirname, (int)getpid()); out = fopen(tmpname, "w"); } else { *tmpname = 0; out = fopen(newname, "w"); } if (!out) return 1; conf_write_heading(out, &kconfig_printer_cb, NULL); if (!conf_get_changed()) sym_clear_all_valid(); menu = rootmenu.list; while (menu) { sym = menu->sym; if (!sym) { if (!menu_is_visible(menu)) goto next; str = menu_get_prompt(menu); fprintf(out, "\n" "#\n" "# %s\n" "#\n", str); } else if (!(sym->flags & SYMBOL_CHOICE)) { sym_calc_value(sym); if (!(sym->flags & SYMBOL_WRITE)) goto next; sym->flags &= ~SYMBOL_WRITE; conf_write_symbol(out, sym, &kconfig_printer_cb, NULL); } next: if (menu->list) { menu = menu->list; continue; } if (menu->next) menu = menu->next; else while ((menu = menu->parent)) { if (menu->next) { menu = menu->next; break; } } } fclose(out); if (*tmpname) { strcat(dirname, basename); strcat(dirname, ".old"); rename(newname, dirname); if (rename(tmpname, newname)) return 1; } conf_message(_("configuration written to %s"), newname); sym_set_change_count(0); return 0; } static int conf_split_config(void) { const char *name; char path[PATH_MAX+1]; char *s, *d, c; struct symbol *sym; struct stat sb; int res, i, fd; name = conf_get_autoconfig_name(); conf_read_simple(name, S_DEF_AUTO); if (chdir("include/config")) return 1; res = 0; for_all_symbols(i, sym) { sym_calc_value(sym); if ((sym->flags & SYMBOL_AUTO) || !sym->name) continue; if (sym->flags & SYMBOL_WRITE) { if (sym->flags & SYMBOL_DEF_AUTO) { /* * symbol has old and new value, * so compare them... 
*/ switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: if (sym_get_tristate_value(sym) == sym->def[S_DEF_AUTO].tri) continue; break; case S_STRING: case S_HEX: case S_INT: if (!strcmp(sym_get_string_value(sym), sym->def[S_DEF_AUTO].val)) continue; break; default: break; } } else { /* * If there is no old value, only 'no' (unset) * is allowed as new value. */ switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: if (sym_get_tristate_value(sym) == no) continue; break; default: break; } } } else if (!(sym->flags & SYMBOL_DEF_AUTO)) /* There is neither an old nor a new value. */ continue; /* else * There is an old value, but no new value ('no' (unset) * isn't saved in auto.conf, so the old value is always * different from 'no'). */ /* Replace all '_' and append ".h" */ s = sym->name; d = path; while ((c = *s++)) { c = tolower(c); *d++ = (c == '_') ? '/' : c; } strcpy(d, ".h"); /* Assume directory path already exists. */ fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644); if (fd == -1) { if (errno != ENOENT) { res = 1; break; } /* * Create directory components, * unless they exist already. */ d = path; while ((d = strchr(d, '/'))) { *d = 0; if (stat(path, &sb) && mkdir(path, 0755)) { res = 1; goto out; } *d++ = '/'; } /* Try it again. 
*/ fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644); if (fd == -1) { res = 1; break; } } close(fd); } out: if (chdir("../..")) return 1; return res; } int conf_write_autoconf(void) { struct symbol *sym; const char *name; FILE *out, *tristate, *out_h; int i; sym_clear_all_valid(); file_write_dep("include/config/auto.conf.cmd"); if (conf_split_config()) return 1; out = fopen(".tmpconfig", "w"); if (!out) return 1; tristate = fopen(".tmpconfig_tristate", "w"); if (!tristate) { fclose(out); return 1; } out_h = fopen(".tmpconfig.h", "w"); if (!out_h) { fclose(out); fclose(tristate); return 1; } conf_write_heading(out, &kconfig_printer_cb, NULL); conf_write_heading(tristate, &tristate_printer_cb, NULL); conf_write_heading(out_h, &header_printer_cb, NULL); for_all_symbols(i, sym) { sym_calc_value(sym); if (!(sym->flags & SYMBOL_WRITE) || !sym->name) continue; /* write symbol to auto.conf, tristate and header files */ conf_write_symbol(out, sym, &kconfig_printer_cb, (void *)1); conf_write_symbol(tristate, sym, &tristate_printer_cb, (void *)1); conf_write_symbol(out_h, sym, &header_printer_cb, NULL); } fclose(out); fclose(tristate); fclose(out_h); name = getenv("KCONFIG_AUTOHEADER"); if (!name) name = "include/generated/autoconf.h"; if (rename(".tmpconfig.h", name)) return 1; name = getenv("KCONFIG_TRISTATE"); if (!name) name = "include/config/tristate.conf"; if (rename(".tmpconfig_tristate", name)) return 1; name = conf_get_autoconfig_name(); /* * This must be the last step, kbuild has a dependency on auto.conf * and this marks the successful completion of the previous steps. 
*/ if (rename(".tmpconfig", name)) return 1; return 0; } static int sym_change_count; static void (*conf_changed_callback)(void); void sym_set_change_count(int count) { int _sym_change_count = sym_change_count; sym_change_count = count; if (conf_changed_callback && (bool)_sym_change_count != (bool)count) conf_changed_callback(); } void sym_add_change_count(int count) { sym_set_change_count(count + sym_change_count); } bool conf_get_changed(void) { return sym_change_count; } void conf_set_changed_callback(void (*fn)(void)) { conf_changed_callback = fn; } static void randomize_choice_values(struct symbol *csym) { struct property *prop; struct symbol *sym; struct expr *e; int cnt, def; /* * If choice is mod then we may have more items selected * and if no then no-one. * In both cases stop. */ if (csym->curr.tri != yes) return; prop = sym_get_choice_prop(csym); /* count entries in choice block */ cnt = 0; expr_list_for_each_sym(prop->expr, e, sym) cnt++; /* * find a random value and set it to yes, * set the rest to no so we have only one set */ def = (rand() % cnt); cnt = 0; expr_list_for_each_sym(prop->expr, e, sym) { if (def == cnt++) { sym->def[S_DEF_USER].tri = yes; csym->def[S_DEF_USER].val = sym; } else { sym->def[S_DEF_USER].tri = no; } } csym->flags |= SYMBOL_DEF_USER; /* clear VALID to get value calculated */ csym->flags &= ~(SYMBOL_VALID); } static void set_all_choice_values(struct symbol *csym) { struct property *prop; struct symbol *sym; struct expr *e; prop = sym_get_choice_prop(csym); /* * Set all non-assinged choice values to no */ expr_list_for_each_sym(prop->expr, e, sym) { if (!sym_has_value(sym)) sym->def[S_DEF_USER].tri = no; } csym->flags |= SYMBOL_DEF_USER; /* clear VALID to get value calculated */ csym->flags &= ~(SYMBOL_VALID); } void conf_set_all_new_symbols(enum conf_def_mode mode) { struct symbol *sym, *csym; int i, cnt, pby, pty, ptm; /* pby: probability of boolean = y * pty: probability of tristate = y * ptm: probability of tristate = m */ 
pby = 50; pty = ptm = 33; /* can't go as the default in switch-case * below, otherwise gcc whines about * -Wmaybe-uninitialized */ if (mode == def_random) { int n, p[3]; char *env = getenv("KCONFIG_PROBABILITY"); n = 0; while( env && *env ) { char *endp; int tmp = strtol( env, &endp, 10 ); if( tmp >= 0 && tmp <= 100 ) { p[n++] = tmp; } else { errno = ERANGE; perror( "KCONFIG_PROBABILITY" ); exit( 1 ); } env = (*endp == ':') ? endp+1 : endp; if( n >=3 ) { break; } } switch( n ) { case 1: pby = p[0]; ptm = pby/2; pty = pby-ptm; break; case 2: pty = p[0]; ptm = p[1]; pby = pty + ptm; break; case 3: pby = p[0]; pty = p[1]; ptm = p[2]; break; } if( pty+ptm > 100 ) { errno = ERANGE; perror( "KCONFIG_PROBABILITY" ); exit( 1 ); } } for_all_symbols(i, sym) { if (sym_has_value(sym) || (sym->flags & SYMBOL_VALID)) continue; switch (sym_get_type(sym)) { case S_BOOLEAN: case S_TRISTATE: switch (mode) { case def_yes: sym->def[S_DEF_USER].tri = yes; break; case def_mod: sym->def[S_DEF_USER].tri = mod; break; case def_no: sym->def[S_DEF_USER].tri = no; break; case def_random: sym->def[S_DEF_USER].tri = no; cnt = rand() % 100; if (sym->type == S_TRISTATE) { if (cnt < pty) sym->def[S_DEF_USER].tri = yes; else if (cnt < (pty+ptm)) sym->def[S_DEF_USER].tri = mod; } else if (cnt < pby) sym->def[S_DEF_USER].tri = yes; break; default: continue; } if (!(sym_is_choice(sym) && mode == def_random)) sym->flags |= SYMBOL_DEF_USER; break; default: break; } } sym_clear_all_valid(); /* * We have different type of choice blocks. * If curr.tri equals to mod then we can select several * choice symbols in one block. * In this case we do nothing. * If curr.tri equals yes then only one symbol can be * selected in a choice block and we set it to yes, * and the rest to no. */ for_all_symbols(i, csym) { if (sym_has_value(csym) || !sym_is_choice(csym)) continue; sym_calc_value(csym); if (mode == def_random) randomize_choice_values(csym); else set_all_choice_values(csym); } }
gpl-2.0
Bdaman80/BDA-ACTV
arch/powerpc/kvm/emulate.c
497
10622
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * Copyright IBM Corp. 2007 * * Authors: Hollis Blanchard <hollisb@us.ibm.com> */ #include <linux/jiffies.h> #include <linux/timer.h> #include <linux/types.h> #include <linux/string.h> #include <linux/kvm_host.h> #include <asm/reg.h> #include <asm/time.h> #include <asm/byteorder.h> #include <asm/kvm_ppc.h> #include <asm/disassemble.h> #include "timing.h" #include "trace.h" #define OP_TRAP 3 #define OP_31_XOP_LWZX 23 #define OP_31_XOP_LBZX 87 #define OP_31_XOP_STWX 151 #define OP_31_XOP_STBX 215 #define OP_31_XOP_STBUX 247 #define OP_31_XOP_LHZX 279 #define OP_31_XOP_LHZUX 311 #define OP_31_XOP_MFSPR 339 #define OP_31_XOP_STHX 407 #define OP_31_XOP_STHUX 439 #define OP_31_XOP_MTSPR 467 #define OP_31_XOP_DCBI 470 #define OP_31_XOP_LWBRX 534 #define OP_31_XOP_TLBSYNC 566 #define OP_31_XOP_STWBRX 662 #define OP_31_XOP_LHBRX 790 #define OP_31_XOP_STHBRX 918 #define OP_LWZ 32 #define OP_LWZU 33 #define OP_LBZ 34 #define OP_LBZU 35 #define OP_STW 36 #define OP_STWU 37 #define OP_STB 38 #define OP_STBU 39 #define OP_LHZ 40 #define OP_LHZU 41 #define OP_STH 44 #define OP_STHU 45 void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) { if (vcpu->arch.tcr & TCR_DIE) { /* The decrementer ticks at the same rate as the timebase, so * that's how we convert the guest DEC value to the number of * host ticks. 
*/ unsigned long nr_jiffies; nr_jiffies = vcpu->arch.dec / tb_ticks_per_jiffy; mod_timer(&vcpu->arch.dec_timer, get_jiffies_64() + nr_jiffies); } else { del_timer(&vcpu->arch.dec_timer); } } /* XXX to do: * lhax * lhaux * lswx * lswi * stswx * stswi * lha * lhau * lmw * stmw * * XXX is_bigendian should depend on MMU mapping or MSR[LE] */ /* XXX Should probably auto-generate instruction decoding for a particular core * from opcode tables in the future. */ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) { u32 inst = vcpu->arch.last_inst; u32 ea; int ra; int rb; int rs; int rt; int sprn; enum emulation_result emulated = EMULATE_DONE; int advance = 1; /* this default type might be overwritten by subcategories */ kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); switch (get_op(inst)) { case OP_TRAP: vcpu->arch.esr |= ESR_PTR; kvmppc_core_queue_program(vcpu); advance = 0; break; case 31: switch (get_xop(inst)) { case OP_31_XOP_LWZX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); break; case OP_31_XOP_LBZX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); break; case OP_31_XOP_STWX: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 4, 1); break; case OP_31_XOP_STBX: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 1, 1); break; case OP_31_XOP_STBUX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); ea = vcpu->arch.gpr[rb]; if (ra) ea += vcpu->arch.gpr[ra]; emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 1, 1); vcpu->arch.gpr[rs] = ea; break; case OP_31_XOP_LHZX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); break; case OP_31_XOP_LHZUX: rt = get_rt(inst); ra = get_ra(inst); rb = get_rb(inst); ea = vcpu->arch.gpr[rb]; if (ra) ea += vcpu->arch.gpr[ra]; emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); vcpu->arch.gpr[ra] = ea; break; case OP_31_XOP_MFSPR: sprn = get_sprn(inst); rt = get_rt(inst); 
switch (sprn) { case SPRN_SRR0: vcpu->arch.gpr[rt] = vcpu->arch.srr0; break; case SPRN_SRR1: vcpu->arch.gpr[rt] = vcpu->arch.srr1; break; case SPRN_PVR: vcpu->arch.gpr[rt] = mfspr(SPRN_PVR); break; case SPRN_PIR: vcpu->arch.gpr[rt] = mfspr(SPRN_PIR); break; /* Note: mftb and TBRL/TBWL are user-accessible, so * the guest can always access the real TB anyways. * In fact, we probably will never see these traps. */ case SPRN_TBWL: vcpu->arch.gpr[rt] = mftbl(); break; case SPRN_TBWU: vcpu->arch.gpr[rt] = mftbu(); break; case SPRN_SPRG0: vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break; case SPRN_SPRG1: vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break; case SPRN_SPRG2: vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break; case SPRN_SPRG3: vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break; /* Note: SPRG4-7 are user-readable, so we don't get * a trap. */ default: emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt); if (emulated == EMULATE_FAIL) { printk("mfspr: unknown spr %x\n", sprn); vcpu->arch.gpr[rt] = 0; } break; } break; case OP_31_XOP_STHX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 2, 1); break; case OP_31_XOP_STHUX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); ea = vcpu->arch.gpr[rb]; if (ra) ea += vcpu->arch.gpr[ra]; emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 2, 1); vcpu->arch.gpr[ra] = ea; break; case OP_31_XOP_MTSPR: sprn = get_sprn(inst); rs = get_rs(inst); switch (sprn) { case SPRN_SRR0: vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break; case SPRN_SRR1: vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break; /* XXX We need to context-switch the timebase for * watchdog and FIT. 
*/ case SPRN_TBWL: break; case SPRN_TBWU: break; case SPRN_DEC: vcpu->arch.dec = vcpu->arch.gpr[rs]; kvmppc_emulate_dec(vcpu); break; case SPRN_SPRG0: vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break; case SPRN_SPRG1: vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break; case SPRN_SPRG2: vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break; case SPRN_SPRG3: vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break; default: emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs); if (emulated == EMULATE_FAIL) printk("mtspr: unknown spr %x\n", sprn); break; } break; case OP_31_XOP_DCBI: /* Do nothing. The guest is performing dcbi because * hardware DMA is not snooped by the dcache, but * emulated DMA either goes through the dcache as * normal writes, or the host kernel has handled dcache * coherence. */ break; case OP_31_XOP_LWBRX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); break; case OP_31_XOP_TLBSYNC: break; case OP_31_XOP_STWBRX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 4, 0); break; case OP_31_XOP_LHBRX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); break; case OP_31_XOP_STHBRX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 2, 0); break; default: /* Attempt core-specific emulation below. 
*/ emulated = EMULATE_FAIL; } break; case OP_LWZ: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); break; case OP_LWZU: ra = get_ra(inst); rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; break; case OP_LBZ: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); break; case OP_LBZU: ra = get_ra(inst); rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; break; case OP_STW: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 4, 1); break; case OP_STWU: ra = get_ra(inst); rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 4, 1); vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; break; case OP_STB: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 1, 1); break; case OP_STBU: ra = get_ra(inst); rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 1, 1); vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; break; case OP_LHZ: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); break; case OP_LHZU: ra = get_ra(inst); rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; break; case OP_STH: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 2, 1); break; case OP_STHU: ra = get_ra(inst); rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 2, 1); vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; break; default: emulated = EMULATE_FAIL; } if (emulated == EMULATE_FAIL) { emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance); if (emulated == EMULATE_FAIL) { advance = 0; printk(KERN_ERR "Couldn't emulate instruction 0x%08x " "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst)); } } trace_kvm_ppc_instr(inst, vcpu->arch.pc, emulated); if (advance) 
vcpu->arch.pc += 4; /* Advance past emulated instruction. */ return emulated; }
gpl-2.0
RonGokhale/lge-kernel-pecan
arch/powerpc/mm/tlb_hash64.c
497
6583
/* * This file contains the routines for flushing entries from the * TLB and MMU hash table. * * Derived from arch/ppc64/mm/init.c: * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) * and Cort Dougan (PReP) (cort@cs.nmt.edu) * Copyright (C) 1996 Paul Mackerras * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * Dave Engebretsen <engebret@us.ibm.com> * Rework for PPC64 port. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/percpu.h> #include <linux/hardirq.h> #include <asm/pgalloc.h> #include <asm/tlbflush.h> #include <asm/tlb.h> #include <asm/bug.h> DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch); /* * A linux PTE was changed and the corresponding hash table entry * neesd to be flushed. This function will either perform the flush * immediately or will batch it up if the current CPU has an active * batch on it. * * Must be called from within some kind of spinlock/non-preempt region... */ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long pte, int huge) { struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); unsigned long vsid, vaddr; unsigned int psize; int ssize; real_pte_t rpte; int i; i = batch->index; /* We mask the address for the base page size. Huge pages will * have applied their own masking already */ addr &= PAGE_MASK; /* Get page size (maybe move back to caller). 
* * NOTE: when using special 64K mappings in 4K environment like * for SPEs, we obtain the page size from the slice, which thus * must still exist (and thus the VMA not reused) at the time * of this call */ if (huge) { #ifdef CONFIG_HUGETLB_PAGE psize = get_slice_psize(mm, addr); #else BUG(); psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */ #endif } else psize = pte_pagesize_index(mm, addr, pte); /* Build full vaddr */ if (!is_kernel_addr(addr)) { ssize = user_segment_size(addr); vsid = get_vsid(mm->context.id, addr, ssize); WARN_ON(vsid == 0); } else { vsid = get_kernel_vsid(addr, mmu_kernel_ssize); ssize = mmu_kernel_ssize; } vaddr = hpt_va(addr, vsid, ssize); rpte = __real_pte(__pte(pte), ptep); /* * Check if we have an active batch on this CPU. If not, just * flush now and return. For now, we don global invalidates * in that case, might be worth testing the mm cpu mask though * and decide to use local invalidates instead... */ if (!batch->active) { flush_hash_page(vaddr, rpte, psize, ssize, 0); return; } /* * This can happen when we are in the middle of a TLB batch and * we encounter memory pressure (eg copy_page_range when it tries * to allocate a new pte). If we have to reclaim memory and end * up scanning and resetting referenced bits then our batch context * will change mid stream. * * We also need to ensure only one page size is present in a given * batch */ if (i != 0 && (mm != batch->mm || batch->psize != psize || batch->ssize != ssize)) { __flush_tlb_pending(batch); i = 0; } if (i == 0) { batch->mm = mm; batch->psize = psize; batch->ssize = ssize; } batch->pte[i] = rpte; batch->vaddr[i] = vaddr; batch->index = ++i; if (i >= PPC64_TLB_BATCH_NR) __flush_tlb_pending(batch); } /* * This function is called when terminating an mmu batch or when a batch * is full. It will perform the flush of all the entries currently stored * in a batch. * * Must be called from within some kind of spinlock/non-preempt region... 
*/ void __flush_tlb_pending(struct ppc64_tlb_batch *batch) { const struct cpumask *tmp; int i, local = 0; i = batch->index; tmp = cpumask_of(smp_processor_id()); if (cpumask_equal(mm_cpumask(batch->mm), tmp)) local = 1; if (i == 1) flush_hash_page(batch->vaddr[0], batch->pte[0], batch->psize, batch->ssize, local); else flush_hash_range(i, local); batch->index = 0; } void tlb_flush(struct mmu_gather *tlb) { struct ppc64_tlb_batch *tlbbatch = &__get_cpu_var(ppc64_tlb_batch); /* If there's a TLB batch pending, then we must flush it because the * pages are going to be freed and we really don't want to have a CPU * access a freed page because it has a stale TLB */ if (tlbbatch->index) __flush_tlb_pending(tlbbatch); /* Push out batch of freed page tables */ pte_free_finish(); } /** * __flush_hash_table_range - Flush all HPTEs for a given address range * from the hash table (and the TLB). But keeps * the linux PTEs intact. * * @mm : mm_struct of the target address space (generally init_mm) * @start : starting address * @end : ending address (not included in the flush) * * This function is mostly to be used by some IO hotplug code in order * to remove all hash entries from a given address range used to map IO * space on a removed PCI-PCI bidge without tearing down the full mapping * since 64K pages may overlap with other bridges when using 64K pages * with 4K HW pages on IO space. * * Because of that usage pattern, it's only available with CONFIG_HOTPLUG * and is implemented for small size rather than speed. */ #ifdef CONFIG_HOTPLUG void __flush_hash_table_range(struct mm_struct *mm, unsigned long start, unsigned long end) { unsigned long flags; start = _ALIGN_DOWN(start, PAGE_SIZE); end = _ALIGN_UP(end, PAGE_SIZE); BUG_ON(!mm->pgd); /* Note: Normally, we should only ever use a batch within a * PTE locked section. 
This violates the rule, but will work * since we don't actually modify the PTEs, we just flush the * hash while leaving the PTEs intact (including their reference * to being hashed). This is not the most performance oriented * way to do things but is fine for our needs here. */ local_irq_save(flags); arch_enter_lazy_mmu_mode(); for (; start < end; start += PAGE_SIZE) { pte_t *ptep = find_linux_pte(mm->pgd, start); unsigned long pte; if (ptep == NULL) continue; pte = pte_val(*ptep); if (!(pte & _PAGE_HASHPTE)) continue; hpte_need_flush(mm, start, ptep, pte, 0); } arch_leave_lazy_mmu_mode(); local_irq_restore(flags); } #endif /* CONFIG_HOTPLUG */
gpl-2.0
OldDroid/android_kernel_samsung_tblte
arch/x86/kernel/aperture_64.c
2289
14238
/* * Firmware replacement code. * * Work around broken BIOSes that don't set an aperture, only set the * aperture in the AGP bridge, or set too small aperture. * * If all fails map the aperture over some low memory. This is cheaper than * doing bounce buffering. The memory is lost. This is done at early boot * because only the bootmem allocator can allocate 32+MB. * * Copyright 2002 Andi Kleen, SuSE Labs. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> #include <linux/memblock.h> #include <linux/mmzone.h> #include <linux/pci_ids.h> #include <linux/pci.h> #include <linux/bitops.h> #include <linux/ioport.h> #include <linux/suspend.h> #include <asm/e820.h> #include <asm/io.h> #include <asm/iommu.h> #include <asm/gart.h> #include <asm/pci-direct.h> #include <asm/dma.h> #include <asm/amd_nb.h> #include <asm/x86_init.h> /* * Using 512M as goal, in case kexec will load kernel_big * that will do the on-position decompress, and could overlap with * with the gart aperture that is used. * Sequence: * kernel_small * ==> kexec (with kdump trigger path or gart still enabled) * ==> kernel_small (gart area become e820_reserved) * ==> kexec (with kdump trigger path or gart still enabled) * ==> kerne_big (uncompressed size will be big than 64M or 128M) * So don't use 512M below as gart iommu, leave the space for kernel * code for safe. 
*/ #define GART_MIN_ADDR (512ULL << 20) #define GART_MAX_ADDR (1ULL << 32) int gart_iommu_aperture; int gart_iommu_aperture_disabled __initdata; int gart_iommu_aperture_allowed __initdata; int fallback_aper_order __initdata = 1; /* 64MB */ int fallback_aper_force __initdata; int fix_aperture __initdata = 1; static struct resource gart_resource = { .name = "GART", .flags = IORESOURCE_MEM, }; static void __init insert_aperture_resource(u32 aper_base, u32 aper_size) { gart_resource.start = aper_base; gart_resource.end = aper_base + aper_size - 1; insert_resource(&iomem_resource, &gart_resource); } /* This code runs before the PCI subsystem is initialized, so just access the northbridge directly. */ static u32 __init allocate_aperture(void) { u32 aper_size; unsigned long addr; /* aper_size should <= 1G */ if (fallback_aper_order > 5) fallback_aper_order = 5; aper_size = (32 * 1024 * 1024) << fallback_aper_order; /* * Aperture has to be naturally aligned. This means a 2GB aperture * won't have much chance of finding a place in the lower 4GB of * memory. Unfortunately we cannot move it up because that would * make the IOMMU useless. 
*/ addr = memblock_find_in_range(GART_MIN_ADDR, GART_MAX_ADDR, aper_size, aper_size); if (!addr) { printk(KERN_ERR "Cannot allocate aperture memory hole (%lx,%uK)\n", addr, aper_size>>10); return 0; } memblock_reserve(addr, aper_size); printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n", aper_size >> 10, addr); insert_aperture_resource((u32)addr, aper_size); register_nosave_region(addr >> PAGE_SHIFT, (addr+aper_size) >> PAGE_SHIFT); return (u32)addr; } /* Find a PCI capability */ static u32 __init find_cap(int bus, int slot, int func, int cap) { int bytes; u8 pos; if (!(read_pci_config_16(bus, slot, func, PCI_STATUS) & PCI_STATUS_CAP_LIST)) return 0; pos = read_pci_config_byte(bus, slot, func, PCI_CAPABILITY_LIST); for (bytes = 0; bytes < 48 && pos >= 0x40; bytes++) { u8 id; pos &= ~3; id = read_pci_config_byte(bus, slot, func, pos+PCI_CAP_LIST_ID); if (id == 0xff) break; if (id == cap) return pos; pos = read_pci_config_byte(bus, slot, func, pos+PCI_CAP_LIST_NEXT); } return 0; } /* Read a standard AGPv3 bridge header */ static u32 __init read_agp(int bus, int slot, int func, int cap, u32 *order) { u32 apsize; u32 apsizereg; int nbits; u32 aper_low, aper_hi; u64 aper; u32 old_order; printk(KERN_INFO "AGP bridge at %02x:%02x:%02x\n", bus, slot, func); apsizereg = read_pci_config_16(bus, slot, func, cap + 0x14); if (apsizereg == 0xffffffff) { printk(KERN_ERR "APSIZE in AGP bridge unreadable\n"); return 0; } /* old_order could be the value from NB gart setting */ old_order = *order; apsize = apsizereg & 0xfff; /* Some BIOS use weird encodings not in the AGPv3 table. */ if (apsize & 0xff) apsize |= 0xf00; nbits = hweight16(apsize); *order = 7 - nbits; if ((int)*order < 0) /* < 32MB */ *order = 0; aper_low = read_pci_config(bus, slot, func, 0x10); aper_hi = read_pci_config(bus, slot, func, 0x14); aper = (aper_low & ~((1<<22)-1)) | ((u64)aper_hi << 32); /* * On some sick chips, APSIZE is 0. 
It means it wants 4G * so let double check that order, and lets trust AMD NB settings: */ printk(KERN_INFO "Aperture from AGP @ %Lx old size %u MB\n", aper, 32 << old_order); if (aper + (32ULL<<(20 + *order)) > 0x100000000ULL) { printk(KERN_INFO "Aperture size %u MB (APSIZE %x) is not right, using settings from NB\n", 32 << *order, apsizereg); *order = old_order; } printk(KERN_INFO "Aperture from AGP @ %Lx size %u MB (APSIZE %x)\n", aper, 32 << *order, apsizereg); if (!aperture_valid(aper, (32*1024*1024) << *order, 32<<20)) return 0; return (u32)aper; } /* * Look for an AGP bridge. Windows only expects the aperture in the * AGP bridge and some BIOS forget to initialize the Northbridge too. * Work around this here. * * Do an PCI bus scan by hand because we're running before the PCI * subsystem. * * All AMD AGP bridges are AGPv3 compliant, so we can do this scan * generically. It's probably overkill to always scan all slots because * the AGP bridges should be always an own bus on the HT hierarchy, * but do it here for future safety. */ static u32 __init search_agp_bridge(u32 *order, int *valid_agp) { int bus, slot, func; /* Poor man's PCI discovery */ for (bus = 0; bus < 256; bus++) { for (slot = 0; slot < 32; slot++) { for (func = 0; func < 8; func++) { u32 class, cap; u8 type; class = read_pci_config(bus, slot, func, PCI_CLASS_REVISION); if (class == 0xffffffff) break; switch (class >> 16) { case PCI_CLASS_BRIDGE_HOST: case PCI_CLASS_BRIDGE_OTHER: /* needed? */ /* AGP bridge? */ cap = find_cap(bus, slot, func, PCI_CAP_ID_AGP); if (!cap) break; *valid_agp = 1; return read_agp(bus, slot, func, cap, order); } /* No multi-function device? 
*/ type = read_pci_config_byte(bus, slot, func, PCI_HEADER_TYPE); if (!(type & 0x80)) break; } } } printk(KERN_INFO "No AGP bridge found\n"); return 0; } static int gart_fix_e820 __initdata = 1; static int __init parse_gart_mem(char *p) { if (!p) return -EINVAL; if (!strncmp(p, "off", 3)) gart_fix_e820 = 0; else if (!strncmp(p, "on", 2)) gart_fix_e820 = 1; return 0; } early_param("gart_fix_e820", parse_gart_mem); void __init early_gart_iommu_check(void) { /* * in case it is enabled before, esp for kexec/kdump, * previous kernel already enable that. memset called * by allocate_aperture/__alloc_bootmem_nopanic cause restart. * or second kernel have different position for GART hole. and new * kernel could use hole as RAM that is still used by GART set by * first kernel * or BIOS forget to put that in reserved. * try to update e820 to make that region as reserved. */ u32 agp_aper_order = 0; int i, fix, slot, valid_agp = 0; u32 ctl; u32 aper_size = 0, aper_order = 0, last_aper_order = 0; u64 aper_base = 0, last_aper_base = 0; int aper_enabled = 0, last_aper_enabled = 0, last_valid = 0; if (!early_pci_allowed()) return; /* This is mostly duplicate of iommu_hole_init */ search_agp_bridge(&agp_aper_order, &valid_agp); fix = 0; for (i = 0; amd_nb_bus_dev_ranges[i].dev_limit; i++) { int bus; int dev_base, dev_limit; bus = amd_nb_bus_dev_ranges[i].bus; dev_base = amd_nb_bus_dev_ranges[i].dev_base; dev_limit = amd_nb_bus_dev_ranges[i].dev_limit; for (slot = dev_base; slot < dev_limit; slot++) { if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00))) continue; ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL); aper_enabled = ctl & GARTEN; aper_order = (ctl >> 1) & 7; aper_size = (32 * 1024 * 1024) << aper_order; aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff; aper_base <<= 25; if (last_valid) { if ((aper_order != last_aper_order) || (aper_base != last_aper_base) || (aper_enabled != last_aper_enabled)) { fix = 1; break; } } last_aper_order 
= aper_order; last_aper_base = aper_base; last_aper_enabled = aper_enabled; last_valid = 1; } } if (!fix && !aper_enabled) return; if (!aper_base || !aper_size || aper_base + aper_size > 0x100000000UL) fix = 1; if (gart_fix_e820 && !fix && aper_enabled) { if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) { /* reserve it, so we can reuse it in second kernel */ printk(KERN_INFO "update e820 for GART\n"); e820_add_region(aper_base, aper_size, E820_RESERVED); update_e820(); } } if (valid_agp) return; /* disable them all at first */ for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) { int bus; int dev_base, dev_limit; bus = amd_nb_bus_dev_ranges[i].bus; dev_base = amd_nb_bus_dev_ranges[i].dev_base; dev_limit = amd_nb_bus_dev_ranges[i].dev_limit; for (slot = dev_base; slot < dev_limit; slot++) { if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00))) continue; ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL); ctl &= ~GARTEN; write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl); } } } static int __initdata printed_gart_size_msg; int __init gart_iommu_hole_init(void) { u32 agp_aper_base = 0, agp_aper_order = 0; u32 aper_size, aper_alloc = 0, aper_order = 0, last_aper_order = 0; u64 aper_base, last_aper_base = 0; int fix, slot, valid_agp = 0; int i, node; if (gart_iommu_aperture_disabled || !fix_aperture || !early_pci_allowed()) return -ENODEV; printk(KERN_INFO "Checking aperture...\n"); if (!fallback_aper_force) agp_aper_base = search_agp_bridge(&agp_aper_order, &valid_agp); fix = 0; node = 0; for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) { int bus; int dev_base, dev_limit; u32 ctl; bus = amd_nb_bus_dev_ranges[i].bus; dev_base = amd_nb_bus_dev_ranges[i].dev_base; dev_limit = amd_nb_bus_dev_ranges[i].dev_limit; for (slot = dev_base; slot < dev_limit; slot++) { if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00))) continue; iommu_detected = 1; gart_iommu_aperture = 1; x86_init.iommu.iommu_init = gart_iommu_init; ctl = 
read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL); /* * Before we do anything else disable the GART. It may * still be enabled if we boot into a crash-kernel here. * Reconfiguring the GART while it is enabled could have * unknown side-effects. */ ctl &= ~GARTEN; write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl); aper_order = (ctl >> 1) & 7; aper_size = (32 * 1024 * 1024) << aper_order; aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff; aper_base <<= 25; printk(KERN_INFO "Node %d: aperture @ %Lx size %u MB\n", node, aper_base, aper_size >> 20); node++; if (!aperture_valid(aper_base, aper_size, 64<<20)) { if (valid_agp && agp_aper_base && agp_aper_base == aper_base && agp_aper_order == aper_order) { /* the same between two setting from NB and agp */ if (!no_iommu && max_pfn > MAX_DMA32_PFN && !printed_gart_size_msg) { printk(KERN_ERR "you are using iommu with agp, but GART size is less than 64M\n"); printk(KERN_ERR "please increase GART size in your BIOS setup\n"); printk(KERN_ERR "if BIOS doesn't have that option, contact your HW vendor!\n"); printed_gart_size_msg = 1; } } else { fix = 1; goto out; } } if ((last_aper_order && aper_order != last_aper_order) || (last_aper_base && aper_base != last_aper_base)) { fix = 1; goto out; } last_aper_order = aper_order; last_aper_base = aper_base; } } out: if (!fix && !fallback_aper_force) { if (last_aper_base) { unsigned long n = (32 * 1024 * 1024) << last_aper_order; insert_aperture_resource((u32)last_aper_base, n); return 1; } return 0; } if (!fallback_aper_force) { aper_alloc = agp_aper_base; aper_order = agp_aper_order; } if (aper_alloc) { /* Got the aperture from the AGP bridge */ } else if ((!no_iommu && max_pfn > MAX_DMA32_PFN) || force_iommu || valid_agp || fallback_aper_force) { printk(KERN_INFO "Your BIOS doesn't leave a aperture memory hole\n"); printk(KERN_INFO "Please enable the IOMMU option in the BIOS setup\n"); printk(KERN_INFO "This costs you %d MB of RAM\n", 32 << 
fallback_aper_order); aper_order = fallback_aper_order; aper_alloc = allocate_aperture(); if (!aper_alloc) { /* * Could disable AGP and IOMMU here, but it's * probably not worth it. But the later users * cannot deal with bad apertures and turning * on the aperture over memory causes very * strange problems, so it's better to panic * early. */ panic("Not enough memory for aperture"); } } else { return 0; } /* Fix up the north bridges */ for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) { int bus, dev_base, dev_limit; /* * Don't enable translation yet but enable GART IO and CPU * accesses and set DISTLBWALKPRB since GART table memory is UC. */ u32 ctl = aper_order << 1; bus = amd_nb_bus_dev_ranges[i].bus; dev_base = amd_nb_bus_dev_ranges[i].dev_base; dev_limit = amd_nb_bus_dev_ranges[i].dev_limit; for (slot = dev_base; slot < dev_limit; slot++) { if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00))) continue; write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl); write_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE, aper_alloc >> 25); } } set_up_gart_resume(aper_order, aper_alloc); return 1; }
gpl-2.0
locusf/linux
arch/mips/pci/pci-sb1250.c
2545
7687
/* * Copyright (C) 2001,2002,2003 Broadcom Corporation * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* * BCM1250-specific PCI support * * This module provides the glue between Linux's PCI subsystem * and the hardware. We basically provide glue for accessing * configuration space, and set up the translation for I/O * space accesses. * * To access configuration space, we use ioremap. In the 32-bit * kernel, this consumes either 4 or 8 page table pages, and 16MB of * kernel mapped memory. Hopefully neither of these should be a huge * problem. 
*/ #include <linux/types.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/console.h> #include <linux/tty.h> #include <linux/vt.h> #include <asm/io.h> #include <asm/sibyte/sb1250_defs.h> #include <asm/sibyte/sb1250_regs.h> #include <asm/sibyte/sb1250_scd.h> #include <asm/sibyte/board.h> /* * Macros for calculating offsets into config space given a device * structure or dev/fun/reg */ #define CFGOFFSET(bus, devfn, where) (((bus)<<16) + ((devfn)<<8) + (where)) #define CFGADDR(bus, devfn, where) CFGOFFSET((bus)->number, (devfn), where) static void *cfg_space; #define PCI_BUS_ENABLED 1 #define LDT_BUS_ENABLED 2 #define PCI_DEVICE_MODE 4 static int sb1250_bus_status; #define PCI_BRIDGE_DEVICE 0 #define LDT_BRIDGE_DEVICE 1 #ifdef CONFIG_SIBYTE_HAS_LDT /* * HT's level-sensitive interrupts require EOI, which is generated * through a 4MB memory-mapped region */ unsigned long ldt_eoi_space; #endif /* * Read/write 32-bit values in config space. */ static inline u32 READCFG32(u32 addr) { return *(u32 *) (cfg_space + (addr & ~3)); } static inline void WRITECFG32(u32 addr, u32 data) { *(u32 *) (cfg_space + (addr & ~3)) = data; } int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { return dev->irq; } /* Do platform specific device initialization at pci_enable_device() time */ int pcibios_plat_dev_init(struct pci_dev *dev) { return 0; } /* * Some checks before doing config cycles: * In PCI Device Mode, hide everything on bus 0 except the LDT host * bridge. Otherwise, access is controlled by bridge MasterEn bits. 
*/ static int sb1250_pci_can_access(struct pci_bus *bus, int devfn) { u32 devno; if (!(sb1250_bus_status & (PCI_BUS_ENABLED | PCI_DEVICE_MODE))) return 0; if (bus->number == 0) { devno = PCI_SLOT(devfn); if (devno == LDT_BRIDGE_DEVICE) return (sb1250_bus_status & LDT_BUS_ENABLED) != 0; else if (sb1250_bus_status & PCI_DEVICE_MODE) return 0; else return 1; } else return 1; } /* * Read/write access functions for various sizes of values * in config space. Return all 1's for disallowed accesses * for a kludgy but adequate simulation of master aborts. */ static int sb1250_pcibios_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 * val) { u32 data = 0; if ((size == 2) && (where & 1)) return PCIBIOS_BAD_REGISTER_NUMBER; else if ((size == 4) && (where & 3)) return PCIBIOS_BAD_REGISTER_NUMBER; if (sb1250_pci_can_access(bus, devfn)) data = READCFG32(CFGADDR(bus, devfn, where)); else data = 0xFFFFFFFF; if (size == 1) *val = (data >> ((where & 3) << 3)) & 0xff; else if (size == 2) *val = (data >> ((where & 3) << 3)) & 0xffff; else *val = data; return PCIBIOS_SUCCESSFUL; } static int sb1250_pcibios_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { u32 cfgaddr = CFGADDR(bus, devfn, where); u32 data = 0; if ((size == 2) && (where & 1)) return PCIBIOS_BAD_REGISTER_NUMBER; else if ((size == 4) && (where & 3)) return PCIBIOS_BAD_REGISTER_NUMBER; if (!sb1250_pci_can_access(bus, devfn)) return PCIBIOS_BAD_REGISTER_NUMBER; data = READCFG32(cfgaddr); if (size == 1) data = (data & ~(0xff << ((where & 3) << 3))) | (val << ((where & 3) << 3)); else if (size == 2) data = (data & ~(0xffff << ((where & 3) << 3))) | (val << ((where & 3) << 3)); else data = val; WRITECFG32(cfgaddr, data); return PCIBIOS_SUCCESSFUL; } struct pci_ops sb1250_pci_ops = { .read = sb1250_pcibios_read, .write = sb1250_pcibios_write, }; static struct resource sb1250_mem_resource = { .name = "SB1250 PCI MEM", .start = 0x40000000UL, .end = 0x5fffffffUL, .flags = 
IORESOURCE_MEM, }; static struct resource sb1250_io_resource = { .name = "SB1250 PCI I/O", .start = 0x00000000UL, .end = 0x01ffffffUL, .flags = IORESOURCE_IO, }; struct pci_controller sb1250_controller = { .pci_ops = &sb1250_pci_ops, .mem_resource = &sb1250_mem_resource, .io_resource = &sb1250_io_resource, }; static int __init sb1250_pcibios_init(void) { void __iomem *io_map_base; uint32_t cmdreg; uint64_t reg; /* CFE will assign PCI resources */ pci_set_flags(PCI_PROBE_ONLY); /* Avoid ISA compat ranges. */ PCIBIOS_MIN_IO = 0x00008000UL; PCIBIOS_MIN_MEM = 0x01000000UL; /* Set I/O resource limits. */ ioport_resource.end = 0x01ffffffUL; /* 32MB accessible by sb1250 */ iomem_resource.end = 0xffffffffUL; /* no HT support yet */ cfg_space = ioremap(A_PHYS_LDTPCI_CFG_MATCH_BITS, 16 * 1024 * 1024); /* * See if the PCI bus has been configured by the firmware. */ reg = __raw_readq(IOADDR(A_SCD_SYSTEM_CFG)); if (!(reg & M_SYS_PCI_HOST)) { sb1250_bus_status |= PCI_DEVICE_MODE; } else { cmdreg = READCFG32(CFGOFFSET (0, PCI_DEVFN(PCI_BRIDGE_DEVICE, 0), PCI_COMMAND)); if (!(cmdreg & PCI_COMMAND_MASTER)) { printk ("PCI: Skipping PCI probe. Bus is not initialized.\n"); iounmap(cfg_space); return 0; } sb1250_bus_status |= PCI_BUS_ENABLED; } /* * Establish mappings in KSEG2 (kernel virtual) to PCI I/O * space. Use "match bytes" policy to make everything look * little-endian. So, you need to also set * CONFIG_SWAP_IO_SPACE, but this is the combination that * works correctly with most of Linux's drivers. * XXX ehs: Should this happen in PCI Device mode? */ io_map_base = ioremap(A_PHYS_LDTPCI_IO_MATCH_BYTES, 1024 * 1024); sb1250_controller.io_map_base = (unsigned long)io_map_base; set_io_port_base((unsigned long)io_map_base); #ifdef CONFIG_SIBYTE_HAS_LDT /* * Also check the LDT bridge's enable, just in case we didn't * initialize that one. 
*/ cmdreg = READCFG32(CFGOFFSET(0, PCI_DEVFN(LDT_BRIDGE_DEVICE, 0), PCI_COMMAND)); if (cmdreg & PCI_COMMAND_MASTER) { sb1250_bus_status |= LDT_BUS_ENABLED; /* * Need bits 23:16 to convey vector number. Note that * this consumes 4MB of kernel-mapped memory * (Kseg2/Kseg3) for 32-bit kernel. */ ldt_eoi_space = (unsigned long) ioremap(A_PHYS_LDT_SPECIAL_MATCH_BYTES, 4 * 1024 * 1024); } #endif register_pci_controller(&sb1250_controller); #ifdef CONFIG_VGA_CONSOLE console_lock(); do_take_over_console(&vga_con, 0, MAX_NR_CONSOLES - 1, 1); console_unlock(); #endif return 0; } arch_initcall(sb1250_pcibios_init);
gpl-2.0
Nick73/King_Kernel
mm/fadvise.c
3569
3624
/* * mm/fadvise.c * * Copyright (C) 2002, Linus Torvalds * * 11Jan2003 Andrew Morton * Initial version. */ #include <linux/kernel.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/backing-dev.h> #include <linux/pagevec.h> #include <linux/fadvise.h> #include <linux/writeback.h> #include <linux/syscalls.h> #include <asm/unistd.h> /* * POSIX_FADV_WILLNEED could set PG_Referenced, and POSIX_FADV_NOREUSE could * deactivate the pages and clear PG_Referenced. */ SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice) { struct file *file = fget(fd); struct address_space *mapping; struct backing_dev_info *bdi; loff_t endbyte; /* inclusive */ pgoff_t start_index; pgoff_t end_index; unsigned long nrpages; int ret = 0; if (!file) return -EBADF; if (S_ISFIFO(file->f_path.dentry->d_inode->i_mode)) { ret = -ESPIPE; goto out; } mapping = file->f_mapping; if (!mapping || len < 0) { ret = -EINVAL; goto out; } if (mapping->a_ops->get_xip_mem) { switch (advice) { case POSIX_FADV_NORMAL: case POSIX_FADV_RANDOM: case POSIX_FADV_SEQUENTIAL: case POSIX_FADV_WILLNEED: case POSIX_FADV_NOREUSE: case POSIX_FADV_DONTNEED: /* no bad return value, but ignore advice */ break; default: ret = -EINVAL; } goto out; } /* Careful about overflows. 
Len == 0 means "as much as possible" */ endbyte = offset + len; if (!len || endbyte < len) endbyte = -1; else endbyte--; /* inclusive */ bdi = mapping->backing_dev_info; switch (advice) { case POSIX_FADV_NORMAL: file->f_ra.ra_pages = bdi->ra_pages; spin_lock(&file->f_lock); file->f_mode &= ~FMODE_RANDOM; spin_unlock(&file->f_lock); break; case POSIX_FADV_RANDOM: spin_lock(&file->f_lock); file->f_mode |= FMODE_RANDOM; spin_unlock(&file->f_lock); break; case POSIX_FADV_SEQUENTIAL: file->f_ra.ra_pages = bdi->ra_pages * 2; spin_lock(&file->f_lock); file->f_mode &= ~FMODE_RANDOM; spin_unlock(&file->f_lock); break; case POSIX_FADV_WILLNEED: if (!mapping->a_ops->readpage) { ret = -EINVAL; break; } /* First and last PARTIAL page! */ start_index = offset >> PAGE_CACHE_SHIFT; end_index = endbyte >> PAGE_CACHE_SHIFT; /* Careful about overflow on the "+1" */ nrpages = end_index - start_index + 1; if (!nrpages) nrpages = ~0UL; ret = force_page_cache_readahead(mapping, file, start_index, nrpages); if (ret > 0) ret = 0; break; case POSIX_FADV_NOREUSE: break; case POSIX_FADV_DONTNEED: if (!bdi_write_congested(mapping->backing_dev_info)) filemap_flush(mapping); /* First and last FULL page! 
*/ start_index = (offset+(PAGE_CACHE_SIZE-1)) >> PAGE_CACHE_SHIFT; end_index = (endbyte >> PAGE_CACHE_SHIFT); if (end_index >= start_index) invalidate_mapping_pages(mapping, start_index, end_index); break; default: ret = -EINVAL; } out: fput(file); return ret; } #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS asmlinkage long SyS_fadvise64_64(long fd, loff_t offset, loff_t len, long advice) { return SYSC_fadvise64_64((int) fd, offset, len, (int) advice); } SYSCALL_ALIAS(sys_fadvise64_64, SyS_fadvise64_64); #endif #ifdef __ARCH_WANT_SYS_FADVISE64 SYSCALL_DEFINE(fadvise64)(int fd, loff_t offset, size_t len, int advice) { return sys_fadvise64_64(fd, offset, len, advice); } #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS asmlinkage long SyS_fadvise64(long fd, loff_t offset, long len, long advice) { return SYSC_fadvise64((int) fd, offset, (size_t)len, (int)advice); } SYSCALL_ALIAS(sys_fadvise64, SyS_fadvise64); #endif #endif
gpl-2.0
AdrianoMartins/android_kernel_semc_msm7x30
mm/fadvise.c
3569
3624
/* * mm/fadvise.c * * Copyright (C) 2002, Linus Torvalds * * 11Jan2003 Andrew Morton * Initial version. */ #include <linux/kernel.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/backing-dev.h> #include <linux/pagevec.h> #include <linux/fadvise.h> #include <linux/writeback.h> #include <linux/syscalls.h> #include <asm/unistd.h> /* * POSIX_FADV_WILLNEED could set PG_Referenced, and POSIX_FADV_NOREUSE could * deactivate the pages and clear PG_Referenced. */ SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice) { struct file *file = fget(fd); struct address_space *mapping; struct backing_dev_info *bdi; loff_t endbyte; /* inclusive */ pgoff_t start_index; pgoff_t end_index; unsigned long nrpages; int ret = 0; if (!file) return -EBADF; if (S_ISFIFO(file->f_path.dentry->d_inode->i_mode)) { ret = -ESPIPE; goto out; } mapping = file->f_mapping; if (!mapping || len < 0) { ret = -EINVAL; goto out; } if (mapping->a_ops->get_xip_mem) { switch (advice) { case POSIX_FADV_NORMAL: case POSIX_FADV_RANDOM: case POSIX_FADV_SEQUENTIAL: case POSIX_FADV_WILLNEED: case POSIX_FADV_NOREUSE: case POSIX_FADV_DONTNEED: /* no bad return value, but ignore advice */ break; default: ret = -EINVAL; } goto out; } /* Careful about overflows. 
Len == 0 means "as much as possible" */ endbyte = offset + len; if (!len || endbyte < len) endbyte = -1; else endbyte--; /* inclusive */ bdi = mapping->backing_dev_info; switch (advice) { case POSIX_FADV_NORMAL: file->f_ra.ra_pages = bdi->ra_pages; spin_lock(&file->f_lock); file->f_mode &= ~FMODE_RANDOM; spin_unlock(&file->f_lock); break; case POSIX_FADV_RANDOM: spin_lock(&file->f_lock); file->f_mode |= FMODE_RANDOM; spin_unlock(&file->f_lock); break; case POSIX_FADV_SEQUENTIAL: file->f_ra.ra_pages = bdi->ra_pages * 2; spin_lock(&file->f_lock); file->f_mode &= ~FMODE_RANDOM; spin_unlock(&file->f_lock); break; case POSIX_FADV_WILLNEED: if (!mapping->a_ops->readpage) { ret = -EINVAL; break; } /* First and last PARTIAL page! */ start_index = offset >> PAGE_CACHE_SHIFT; end_index = endbyte >> PAGE_CACHE_SHIFT; /* Careful about overflow on the "+1" */ nrpages = end_index - start_index + 1; if (!nrpages) nrpages = ~0UL; ret = force_page_cache_readahead(mapping, file, start_index, nrpages); if (ret > 0) ret = 0; break; case POSIX_FADV_NOREUSE: break; case POSIX_FADV_DONTNEED: if (!bdi_write_congested(mapping->backing_dev_info)) filemap_flush(mapping); /* First and last FULL page! 
*/ start_index = (offset+(PAGE_CACHE_SIZE-1)) >> PAGE_CACHE_SHIFT; end_index = (endbyte >> PAGE_CACHE_SHIFT); if (end_index >= start_index) invalidate_mapping_pages(mapping, start_index, end_index); break; default: ret = -EINVAL; } out: fput(file); return ret; } #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS asmlinkage long SyS_fadvise64_64(long fd, loff_t offset, loff_t len, long advice) { return SYSC_fadvise64_64((int) fd, offset, len, (int) advice); } SYSCALL_ALIAS(sys_fadvise64_64, SyS_fadvise64_64); #endif #ifdef __ARCH_WANT_SYS_FADVISE64 SYSCALL_DEFINE(fadvise64)(int fd, loff_t offset, size_t len, int advice) { return sys_fadvise64_64(fd, offset, len, advice); } #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS asmlinkage long SyS_fadvise64(long fd, loff_t offset, long len, long advice) { return SYSC_fadvise64((int) fd, offset, (size_t)len, (int)advice); } SYSCALL_ALIAS(sys_fadvise64, SyS_fadvise64); #endif #endif
gpl-2.0
DroidThug/Trident
arch/m68k/hp300/time.c
4337
1954
/* * linux/arch/m68k/hp300/time.c * * Copyright (C) 1998 Philip Blundell <philb@gnu.org> * * This file contains the HP300-specific time handling code. */ #include <asm/ptrace.h> #include <linux/types.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/kernel_stat.h> #include <linux/interrupt.h> #include <asm/machdep.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/traps.h> #include <asm/blinken.h> /* Clock hardware definitions */ #define CLOCKBASE 0xf05f8000 #define CLKCR1 0x1 #define CLKCR2 0x3 #define CLKCR3 CLKCR1 #define CLKSR CLKCR2 #define CLKMSB1 0x5 #define CLKMSB2 0x9 #define CLKMSB3 0xD /* This is for machines which generate the exact clock. */ #define USECS_PER_JIFFY (1000000/HZ) #define INTVAL ((10000 / 4) - 1) static irqreturn_t hp300_tick(int irq, void *dev_id) { unsigned long tmp; irq_handler_t vector = dev_id; in_8(CLOCKBASE + CLKSR); asm volatile ("movpw %1@(5),%0" : "=d" (tmp) : "a" (CLOCKBASE)); /* Turn off the network and SCSI leds */ blinken_leds(0, 0xe0); return vector(irq, NULL); } u32 hp300_gettimeoffset(void) { /* Read current timer 1 value */ unsigned char lsb, msb1, msb2; unsigned short ticks; msb1 = in_8(CLOCKBASE + 5); lsb = in_8(CLOCKBASE + 7); msb2 = in_8(CLOCKBASE + 5); if (msb1 != msb2) /* A carry happened while we were reading. Read it again */ lsb = in_8(CLOCKBASE + 7); ticks = INTVAL - ((msb2 << 8) | lsb); return ((USECS_PER_JIFFY * ticks) / INTVAL) * 1000; } void __init hp300_sched_init(irq_handler_t vector) { out_8(CLOCKBASE + CLKCR2, 0x1); /* select CR1 */ out_8(CLOCKBASE + CLKCR1, 0x1); /* reset */ asm volatile(" movpw %0,%1@(5)" : : "d" (INTVAL), "a" (CLOCKBASE)); if (request_irq(IRQ_AUTO_6, hp300_tick, 0, "timer tick", vector)) pr_err("Couldn't register timer interrupt\n"); out_8(CLOCKBASE + CLKCR2, 0x1); /* select CR1 */ out_8(CLOCKBASE + CLKCR1, 0x40); /* enable irq */ }
gpl-2.0
JB1tz/kernel-msm
lib/plist.c
4593
4839
/* * lib/plist.c * * Descending-priority-sorted double-linked list * * (C) 2002-2003 Intel Corp * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>. * * 2001-2005 (c) MontaVista Software, Inc. * Daniel Walker <dwalker@mvista.com> * * (C) 2005 Thomas Gleixner <tglx@linutronix.de> * * Simplifications of the original code by * Oleg Nesterov <oleg@tv-sign.ru> * * Licensed under the FSF's GNU Public License v2 or later. * * Based on simple lists (include/linux/list.h). * * This file contains the add / del functions which are considered to * be too large to inline. See include/linux/plist.h for further * information. */ #include <linux/bug.h> #include <linux/plist.h> #include <linux/spinlock.h> #ifdef CONFIG_DEBUG_PI_LIST static struct plist_head test_head; static void plist_check_prev_next(struct list_head *t, struct list_head *p, struct list_head *n) { WARN(n->prev != p || p->next != n, "top: %p, n: %p, p: %p\n" "prev: %p, n: %p, p: %p\n" "next: %p, n: %p, p: %p\n", t, t->next, t->prev, p, p->next, p->prev, n, n->next, n->prev); } static void plist_check_list(struct list_head *top) { struct list_head *prev = top, *next = top->next; plist_check_prev_next(top, prev, next); while (next != top) { prev = next; next = prev->next; plist_check_prev_next(top, prev, next); } } static void plist_check_head(struct plist_head *head) { if (!plist_head_empty(head)) plist_check_list(&plist_first(head)->prio_list); plist_check_list(&head->node_list); } #else # define plist_check_head(h) do { } while (0) #endif /** * plist_add - add @node to @head * * @node: &struct plist_node pointer * @head: &struct plist_head pointer */ void plist_add(struct plist_node *node, struct plist_head *head) { struct plist_node *first, *iter, *prev = NULL; struct list_head *node_next = &head->node_list; plist_check_head(head); WARN_ON(!plist_node_empty(node)); WARN_ON(!list_empty(&node->prio_list)); if (plist_head_empty(head)) goto ins_node; first = iter = plist_first(head); do { if (node->prio < 
iter->prio) { node_next = &iter->node_list; break; } prev = iter; iter = list_entry(iter->prio_list.next, struct plist_node, prio_list); } while (iter != first); if (!prev || prev->prio != node->prio) list_add_tail(&node->prio_list, &iter->prio_list); ins_node: list_add_tail(&node->node_list, node_next); plist_check_head(head); } /** * plist_del - Remove a @node from plist. * * @node: &struct plist_node pointer - entry to be removed * @head: &struct plist_head pointer - list head */ void plist_del(struct plist_node *node, struct plist_head *head) { plist_check_head(head); if (!list_empty(&node->prio_list)) { if (node->node_list.next != &head->node_list) { struct plist_node *next; next = list_entry(node->node_list.next, struct plist_node, node_list); /* add the next plist_node into prio_list */ if (list_empty(&next->prio_list)) list_add(&next->prio_list, &node->prio_list); } list_del_init(&node->prio_list); } list_del_init(&node->node_list); plist_check_head(head); } #ifdef CONFIG_DEBUG_PI_LIST #include <linux/sched.h> #include <linux/module.h> #include <linux/init.h> static struct plist_node __initdata test_node[241]; static void __init plist_test_check(int nr_expect) { struct plist_node *first, *prio_pos, *node_pos; if (plist_head_empty(&test_head)) { BUG_ON(nr_expect != 0); return; } prio_pos = first = plist_first(&test_head); plist_for_each(node_pos, &test_head) { if (nr_expect-- < 0) break; if (node_pos == first) continue; if (node_pos->prio == prio_pos->prio) { BUG_ON(!list_empty(&node_pos->prio_list)); continue; } BUG_ON(prio_pos->prio > node_pos->prio); BUG_ON(prio_pos->prio_list.next != &node_pos->prio_list); prio_pos = node_pos; } BUG_ON(nr_expect != 0); BUG_ON(prio_pos->prio_list.next != &first->prio_list); } static int __init plist_test(void) { int nr_expect = 0, i, loop; unsigned int r = local_clock(); printk(KERN_INFO "start plist test\n"); plist_head_init(&test_head); for (i = 0; i < ARRAY_SIZE(test_node); i++) plist_node_init(test_node + i, 0); for 
(loop = 0; loop < 1000; loop++) { r = r * 193939 % 47629; i = r % ARRAY_SIZE(test_node); if (plist_node_empty(test_node + i)) { r = r * 193939 % 47629; test_node[i].prio = r % 99; plist_add(test_node + i, &test_head); nr_expect++; } else { plist_del(test_node + i, &test_head); nr_expect--; } plist_test_check(nr_expect); } for (i = 0; i < ARRAY_SIZE(test_node); i++) { if (plist_node_empty(test_node + i)) continue; plist_del(test_node + i, &test_head); nr_expect--; plist_test_check(nr_expect); } printk(KERN_INFO "end plist test\n"); return 0; } module_init(plist_test); #endif
gpl-2.0
davidftv/rk3x_kernel_3.0.36
drivers/isdn/hisax/jade_irq.c
5105
6381
/* $Id: jade_irq.c,v 1.7.2.4 2004/02/11 13:21:34 keil Exp $ * * Low level JADE IRQ stuff (derived from original hscx_irq.c) * * Author Roland Klabunde * Copyright by Roland Klabunde <R.Klabunde@Berkom.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ static inline void waitforCEC(struct IsdnCardState *cs, int jade, int reg) { int to = 50; int mask = (reg == jade_HDLC_XCMD ? jadeSTAR_XCEC : jadeSTAR_RCEC); while ((READJADE(cs, jade, jade_HDLC_STAR) & mask) && to) { udelay(1); to--; } if (!to) printk(KERN_WARNING "HiSax: waitforCEC (jade) timeout\n"); } static inline void waitforXFW(struct IsdnCardState *cs, int jade) { /* Does not work on older jade versions, don't care */ } static inline void WriteJADECMDR(struct IsdnCardState *cs, int jade, int reg, u_char data) { waitforCEC(cs, jade, reg); WRITEJADE(cs, jade, reg, data); } static void jade_empty_fifo(struct BCState *bcs, int count) { u_char *ptr; struct IsdnCardState *cs = bcs->cs; if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO)) debugl1(cs, "jade_empty_fifo"); if (bcs->hw.hscx.rcvidx + count > HSCX_BUFMAX) { if (cs->debug & L1_DEB_WARN) debugl1(cs, "jade_empty_fifo: incoming packet too large"); WriteJADECMDR(cs, bcs->hw.hscx.hscx, jade_HDLC_RCMD, jadeRCMD_RMC); bcs->hw.hscx.rcvidx = 0; return; } ptr = bcs->hw.hscx.rcvbuf + bcs->hw.hscx.rcvidx; bcs->hw.hscx.rcvidx += count; READJADEFIFO(cs, bcs->hw.hscx.hscx, ptr, count); WriteJADECMDR(cs, bcs->hw.hscx.hscx, jade_HDLC_RCMD, jadeRCMD_RMC); if (cs->debug & L1_DEB_HSCX_FIFO) { char *t = bcs->blog; t += sprintf(t, "jade_empty_fifo %c cnt %d", bcs->hw.hscx.hscx ? 
'B' : 'A', count); QuickHex(t, ptr, count); debugl1(cs, bcs->blog); } } static void jade_fill_fifo(struct BCState *bcs) { struct IsdnCardState *cs = bcs->cs; int more, count; int fifo_size = 32; u_char *ptr; if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO)) debugl1(cs, "jade_fill_fifo"); if (!bcs->tx_skb) return; if (bcs->tx_skb->len <= 0) return; more = (bcs->mode == L1_MODE_TRANS) ? 1 : 0; if (bcs->tx_skb->len > fifo_size) { more = !0; count = fifo_size; } else count = bcs->tx_skb->len; waitforXFW(cs, bcs->hw.hscx.hscx); ptr = bcs->tx_skb->data; skb_pull(bcs->tx_skb, count); bcs->tx_cnt -= count; bcs->hw.hscx.count += count; WRITEJADEFIFO(cs, bcs->hw.hscx.hscx, ptr, count); WriteJADECMDR(cs, bcs->hw.hscx.hscx, jade_HDLC_XCMD, more ? jadeXCMD_XF : (jadeXCMD_XF|jadeXCMD_XME)); if (cs->debug & L1_DEB_HSCX_FIFO) { char *t = bcs->blog; t += sprintf(t, "jade_fill_fifo %c cnt %d", bcs->hw.hscx.hscx ? 'B' : 'A', count); QuickHex(t, ptr, count); debugl1(cs, bcs->blog); } } static void jade_interrupt(struct IsdnCardState *cs, u_char val, u_char jade) { u_char r; struct BCState *bcs = cs->bcs + jade; struct sk_buff *skb; int fifo_size = 32; int count; int i_jade = (int) jade; /* To satisfy the compiler */ if (!test_bit(BC_FLG_INIT, &bcs->Flag)) return; if (val & 0x80) { /* RME */ r = READJADE(cs, i_jade, jade_HDLC_RSTA); if ((r & 0xf0) != 0xa0) { if (!(r & 0x80)) if (cs->debug & L1_DEB_WARN) debugl1(cs, "JADE %s invalid frame", (jade ? 
"B":"A")); if ((r & 0x40) && bcs->mode) if (cs->debug & L1_DEB_WARN) debugl1(cs, "JADE %c RDO mode=%d", 'A'+jade, bcs->mode); if (!(r & 0x20)) if (cs->debug & L1_DEB_WARN) debugl1(cs, "JADE %c CRC error", 'A'+jade); WriteJADECMDR(cs, jade, jade_HDLC_RCMD, jadeRCMD_RMC); } else { count = READJADE(cs, i_jade, jade_HDLC_RBCL) & 0x1F; if (count == 0) count = fifo_size; jade_empty_fifo(bcs, count); if ((count = bcs->hw.hscx.rcvidx - 1) > 0) { if (cs->debug & L1_DEB_HSCX_FIFO) debugl1(cs, "HX Frame %d", count); if (!(skb = dev_alloc_skb(count))) printk(KERN_WARNING "JADE %s receive out of memory\n", (jade ? "B":"A")); else { memcpy(skb_put(skb, count), bcs->hw.hscx.rcvbuf, count); skb_queue_tail(&bcs->rqueue, skb); } } } bcs->hw.hscx.rcvidx = 0; schedule_event(bcs, B_RCVBUFREADY); } if (val & 0x40) { /* RPF */ jade_empty_fifo(bcs, fifo_size); if (bcs->mode == L1_MODE_TRANS) { /* receive audio data */ if (!(skb = dev_alloc_skb(fifo_size))) printk(KERN_WARNING "HiSax: receive out of memory\n"); else { memcpy(skb_put(skb, fifo_size), bcs->hw.hscx.rcvbuf, fifo_size); skb_queue_tail(&bcs->rqueue, skb); } bcs->hw.hscx.rcvidx = 0; schedule_event(bcs, B_RCVBUFREADY); } } if (val & 0x10) { /* XPR */ if (bcs->tx_skb) { if (bcs->tx_skb->len) { jade_fill_fifo(bcs); return; } else { if (test_bit(FLG_LLI_L1WAKEUP,&bcs->st->lli.flag) && (PACKET_NOACK != bcs->tx_skb->pkt_type)) { u_long flags; spin_lock_irqsave(&bcs->aclock, flags); bcs->ackcnt += bcs->hw.hscx.count; spin_unlock_irqrestore(&bcs->aclock, flags); schedule_event(bcs, B_ACKPENDING); } dev_kfree_skb_irq(bcs->tx_skb); bcs->hw.hscx.count = 0; bcs->tx_skb = NULL; } } if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) { bcs->hw.hscx.count = 0; test_and_set_bit(BC_FLG_BUSY, &bcs->Flag); jade_fill_fifo(bcs); } else { test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); schedule_event(bcs, B_XMTBUFREADY); } } } static inline void jade_int_main(struct IsdnCardState *cs, u_char val, int jade) { struct BCState *bcs; bcs = cs->bcs + jade; if (val 
& jadeISR_RFO) { /* handled with RDO */ val &= ~jadeISR_RFO; } if (val & jadeISR_XDU) { /* relevant in HDLC mode only */ /* don't reset XPR here */ if (bcs->mode == 1) jade_fill_fifo(bcs); else { /* Here we lost an TX interrupt, so * restart transmitting the whole frame. */ if (bcs->tx_skb) { skb_push(bcs->tx_skb, bcs->hw.hscx.count); bcs->tx_cnt += bcs->hw.hscx.count; bcs->hw.hscx.count = 0; } WriteJADECMDR(cs, bcs->hw.hscx.hscx, jade_HDLC_XCMD, jadeXCMD_XRES); if (cs->debug & L1_DEB_WARN) debugl1(cs, "JADE %c EXIR %x Lost TX", 'A'+jade, val); } } if (val & (jadeISR_RME|jadeISR_RPF|jadeISR_XPR)) { if (cs->debug & L1_DEB_HSCX) debugl1(cs, "JADE %c interrupt %x", 'A'+jade, val); jade_interrupt(cs, val, jade); } }
gpl-2.0
voodik/android_kernel_hardkernel_odroidxu
drivers/platform/x86/dell-wmi-aio.c
8177
4322
/* * WMI hotkeys support for Dell All-In-One series * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/input.h> #include <linux/input/sparse-keymap.h> #include <acpi/acpi_drivers.h> #include <linux/acpi.h> #include <linux/string.h> MODULE_DESCRIPTION("WMI hotkeys driver for Dell All-In-One series"); MODULE_LICENSE("GPL"); #define EVENT_GUID1 "284A0E6B-380E-472A-921F-E52786257FB4" #define EVENT_GUID2 "02314822-307C-4F66-BF0E-48AEAEB26CC8" static const char *dell_wmi_aio_guids[] = { EVENT_GUID1, EVENT_GUID2, NULL }; MODULE_ALIAS("wmi:"EVENT_GUID1); MODULE_ALIAS("wmi:"EVENT_GUID2); static const struct key_entry dell_wmi_aio_keymap[] = { { KE_KEY, 0xc0, { KEY_VOLUMEUP } }, { KE_KEY, 0xc1, { KEY_VOLUMEDOWN } }, { KE_END, 0 } }; static struct input_dev *dell_wmi_aio_input_dev; static void dell_wmi_aio_notify(u32 value, void *context) { struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; acpi_status status; status = wmi_get_event_data(value, &response); if (status != AE_OK) { pr_info("bad event status 0x%x\n", status); return; } obj = (union acpi_object *)response.pointer; if (obj) { unsigned int scancode; switch (obj->type) { case 
ACPI_TYPE_INTEGER: /* Most All-In-One correctly return integer scancode */ scancode = obj->integer.value; sparse_keymap_report_event(dell_wmi_aio_input_dev, scancode, 1, true); break; case ACPI_TYPE_BUFFER: /* Broken machines return the scancode in a buffer */ if (obj->buffer.pointer && obj->buffer.length > 0) { scancode = obj->buffer.pointer[0]; sparse_keymap_report_event( dell_wmi_aio_input_dev, scancode, 1, true); } break; } } kfree(obj); } static int __init dell_wmi_aio_input_setup(void) { int err; dell_wmi_aio_input_dev = input_allocate_device(); if (!dell_wmi_aio_input_dev) return -ENOMEM; dell_wmi_aio_input_dev->name = "Dell AIO WMI hotkeys"; dell_wmi_aio_input_dev->phys = "wmi/input0"; dell_wmi_aio_input_dev->id.bustype = BUS_HOST; err = sparse_keymap_setup(dell_wmi_aio_input_dev, dell_wmi_aio_keymap, NULL); if (err) { pr_err("Unable to setup input device keymap\n"); goto err_free_dev; } err = input_register_device(dell_wmi_aio_input_dev); if (err) { pr_info("Unable to register input device\n"); goto err_free_keymap; } return 0; err_free_keymap: sparse_keymap_free(dell_wmi_aio_input_dev); err_free_dev: input_free_device(dell_wmi_aio_input_dev); return err; } static const char *dell_wmi_aio_find(void) { int i; for (i = 0; dell_wmi_aio_guids[i] != NULL; i++) if (wmi_has_guid(dell_wmi_aio_guids[i])) return dell_wmi_aio_guids[i]; return NULL; } static int __init dell_wmi_aio_init(void) { int err; const char *guid; guid = dell_wmi_aio_find(); if (!guid) { pr_warn("No known WMI GUID found\n"); return -ENXIO; } err = dell_wmi_aio_input_setup(); if (err) return err; err = wmi_install_notify_handler(guid, dell_wmi_aio_notify, NULL); if (err) { pr_err("Unable to register notify handler - %d\n", err); sparse_keymap_free(dell_wmi_aio_input_dev); input_unregister_device(dell_wmi_aio_input_dev); return err; } return 0; } static void __exit dell_wmi_aio_exit(void) { const char *guid; guid = dell_wmi_aio_find(); wmi_remove_notify_handler(guid); 
sparse_keymap_free(dell_wmi_aio_input_dev); input_unregister_device(dell_wmi_aio_input_dev); } module_init(dell_wmi_aio_init); module_exit(dell_wmi_aio_exit);
gpl-2.0
anoane/HTC-HD2-kernel-3.4
arch/sh/boards/mach-migor/lcd_qvga.c
9201
5008
/* * Support for SuperH MigoR Quarter VGA LCD Panel * * Copyright (C) 2008 Magnus Damm * * Based on lcd_powertip.c from Kenati Technologies Pvt Ltd. * Copyright (c) 2007 Ujjwal Pande <ujjwal@kenati.com>, * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/delay.h> #include <linux/err.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/gpio.h> #include <video/sh_mobile_lcdc.h> #include <cpu/sh7722.h> #include <mach/migor.h> /* LCD Module is a PH240320T according to board schematics. This module * is made up of a 240x320 LCD hooked up to a R61505U (or HX8347-A01?) * Driver IC. This IC is connected to the SH7722 built-in LCDC using a * SYS-80 interface configured in 16 bit mode. * * Index 0: "Device Code Read" returns 0x1505. */ static void reset_lcd_module(void) { gpio_set_value(GPIO_PTH2, 0); mdelay(2); gpio_set_value(GPIO_PTH2, 1); mdelay(1); } /* DB0-DB7 are connected to D1-D8, and DB8-DB15 to D10-D17 */ static unsigned long adjust_reg18(unsigned short data) { unsigned long tmp1, tmp2; tmp1 = (data<<1 | 0x00000001) & 0x000001FF; tmp2 = (data<<2 | 0x00000200) & 0x0003FE00; return tmp1 | tmp2; } static void write_reg(void *sys_ops_handle, struct sh_mobile_lcdc_sys_bus_ops *sys_ops, unsigned short reg, unsigned short data) { sys_ops->write_index(sys_ops_handle, adjust_reg18(reg << 8 | data)); } static void write_reg16(void *sys_ops_handle, struct sh_mobile_lcdc_sys_bus_ops *sys_ops, unsigned short reg, unsigned short data) { sys_ops->write_index(sys_ops_handle, adjust_reg18(reg)); sys_ops->write_data(sys_ops_handle, adjust_reg18(data)); } static unsigned long read_reg16(void *sys_ops_handle, struct sh_mobile_lcdc_sys_bus_ops *sys_ops, unsigned short reg) { unsigned long data; sys_ops->write_index(sys_ops_handle, adjust_reg18(reg)); data = 
sys_ops->read_data(sys_ops_handle); return ((data >> 1) & 0xff) | ((data >> 2) & 0xff00); } static void migor_lcd_qvga_seq(void *sys_ops_handle, struct sh_mobile_lcdc_sys_bus_ops *sys_ops, unsigned short const *data, int no_data) { int i; for (i = 0; i < no_data; i += 2) write_reg16(sys_ops_handle, sys_ops, data[i], data[i + 1]); } static const unsigned short sync_data[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const unsigned short magic0_data[] = { 0x0060, 0x2700, 0x0008, 0x0808, 0x0090, 0x001A, 0x0007, 0x0001, 0x0017, 0x0001, 0x0019, 0x0000, 0x0010, 0x17B0, 0x0011, 0x0116, 0x0012, 0x0198, 0x0013, 0x1400, 0x0029, 0x000C, 0x0012, 0x01B8, }; static const unsigned short magic1_data[] = { 0x0030, 0x0307, 0x0031, 0x0303, 0x0032, 0x0603, 0x0033, 0x0202, 0x0034, 0x0202, 0x0035, 0x0202, 0x0036, 0x1F1F, 0x0037, 0x0303, 0x0038, 0x0303, 0x0039, 0x0603, 0x003A, 0x0202, 0x003B, 0x0102, 0x003C, 0x0204, 0x003D, 0x0000, 0x0001, 0x0100, 0x0002, 0x0300, 0x0003, 0x5028, 0x0020, 0x00ef, 0x0021, 0x0000, 0x0004, 0x0000, 0x0009, 0x0000, 0x000A, 0x0008, 0x000C, 0x0000, 0x000D, 0x0000, 0x0015, 0x8000, }; static const unsigned short magic2_data[] = { 0x0061, 0x0001, 0x0092, 0x0100, 0x0093, 0x0001, 0x0007, 0x0021, }; static const unsigned short magic3_data[] = { 0x0010, 0x16B0, 0x0011, 0x0111, 0x0007, 0x0061, }; int migor_lcd_qvga_setup(void *sohandle, struct sh_mobile_lcdc_sys_bus_ops *so) { unsigned long xres = 320; unsigned long yres = 240; int k; reset_lcd_module(); migor_lcd_qvga_seq(sohandle, so, sync_data, ARRAY_SIZE(sync_data)); if (read_reg16(sohandle, so, 0) != 0x1505) return -ENODEV; pr_info("Migo-R QVGA LCD Module detected.\n"); migor_lcd_qvga_seq(sohandle, so, sync_data, ARRAY_SIZE(sync_data)); write_reg16(sohandle, so, 0x00A4, 0x0001); mdelay(10); migor_lcd_qvga_seq(sohandle, so, magic0_data, ARRAY_SIZE(magic0_data)); mdelay(100); migor_lcd_qvga_seq(sohandle, so, magic1_data, ARRAY_SIZE(magic1_data)); write_reg16(sohandle, so, 0x0050, 0xef 
- (yres - 1)); write_reg16(sohandle, so, 0x0051, 0x00ef); write_reg16(sohandle, so, 0x0052, 0x0000); write_reg16(sohandle, so, 0x0053, xres - 1); migor_lcd_qvga_seq(sohandle, so, magic2_data, ARRAY_SIZE(magic2_data)); mdelay(10); migor_lcd_qvga_seq(sohandle, so, magic3_data, ARRAY_SIZE(magic3_data)); mdelay(40); /* clear GRAM to avoid displaying garbage */ write_reg16(sohandle, so, 0x0020, 0x0000); /* horiz addr */ write_reg16(sohandle, so, 0x0021, 0x0000); /* vert addr */ for (k = 0; k < (xres * 256); k++) /* yes, 256 words per line */ write_reg16(sohandle, so, 0x0022, 0x0000); write_reg16(sohandle, so, 0x0020, 0x0000); /* reset horiz addr */ write_reg16(sohandle, so, 0x0021, 0x0000); /* reset vert addr */ write_reg16(sohandle, so, 0x0007, 0x0173); mdelay(40); /* enable display */ write_reg(sohandle, so, 0x00, 0x22); mdelay(100); return 0; }
gpl-2.0
lyapota/s7e_nougat
drivers/net/wan/hdlc_raw_eth.c
9201
3146
/* * Generic HDLC support routines for Linux * HDLC Ethernet emulation support * * Copyright (C) 2002-2006 Krzysztof Halasa <khc@pm.waw.pl> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License * as published by the Free Software Foundation. */ #include <linux/errno.h> #include <linux/etherdevice.h> #include <linux/gfp.h> #include <linux/hdlc.h> #include <linux/if_arp.h> #include <linux/inetdevice.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pkt_sched.h> #include <linux/poll.h> #include <linux/rtnetlink.h> #include <linux/skbuff.h> static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr); static netdev_tx_t eth_tx(struct sk_buff *skb, struct net_device *dev) { int pad = ETH_ZLEN - skb->len; if (pad > 0) { /* Pad the frame with zeros */ int len = skb->len; if (skb_tailroom(skb) < pad) if (pskb_expand_head(skb, 0, pad, GFP_ATOMIC)) { dev->stats.tx_dropped++; dev_kfree_skb(skb); return 0; } skb_put(skb, pad); memset(skb->data + len, 0, pad); } return dev_to_hdlc(dev)->xmit(skb, dev); } static struct hdlc_proto proto = { .type_trans = eth_type_trans, .xmit = eth_tx, .ioctl = raw_eth_ioctl, .module = THIS_MODULE, }; static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr) { raw_hdlc_proto __user *raw_s = ifr->ifr_settings.ifs_ifsu.raw_hdlc; const size_t size = sizeof(raw_hdlc_proto); raw_hdlc_proto new_settings; hdlc_device *hdlc = dev_to_hdlc(dev); int result, old_qlen; switch (ifr->ifr_settings.type) { case IF_GET_PROTO: if (dev_to_hdlc(dev)->proto != &proto) return -EINVAL; ifr->ifr_settings.type = IF_PROTO_HDLC_ETH; if (ifr->ifr_settings.size < size) { ifr->ifr_settings.size = size; /* data size wanted */ return -ENOBUFS; } if (copy_to_user(raw_s, hdlc->state, size)) return -EFAULT; return 0; case IF_PROTO_HDLC_ETH: if (!capable(CAP_NET_ADMIN)) return -EPERM; if (dev->flags & IFF_UP) return -EBUSY; if 
(copy_from_user(&new_settings, raw_s, size)) return -EFAULT; if (new_settings.encoding == ENCODING_DEFAULT) new_settings.encoding = ENCODING_NRZ; if (new_settings.parity == PARITY_DEFAULT) new_settings.parity = PARITY_CRC16_PR1_CCITT; result = hdlc->attach(dev, new_settings.encoding, new_settings.parity); if (result) return result; result = attach_hdlc_protocol(dev, &proto, sizeof(raw_hdlc_proto)); if (result) return result; memcpy(hdlc->state, &new_settings, size); old_qlen = dev->tx_queue_len; ether_setup(dev); dev->tx_queue_len = old_qlen; eth_hw_addr_random(dev); netif_dormant_off(dev); return 0; } return -EINVAL; } static int __init mod_init(void) { register_hdlc_protocol(&proto); return 0; } static void __exit mod_exit(void) { unregister_hdlc_protocol(&proto); } module_init(mod_init); module_exit(mod_exit); MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); MODULE_DESCRIPTION("Ethernet encapsulation support for generic HDLC"); MODULE_LICENSE("GPL v2");
gpl-2.0
Koumajutsu/KoumaKernel-3.0.89_N8013
net/netfilter/xt_helper.c
13297
2494
/* iptables module to match on related connections */ /* * (C) 2001 Martin Josefsson <gandalf@wlug.westbo.se> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/skbuff.h> #include <linux/netfilter.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_helper.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_helper.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Martin Josefsson <gandalf@netfilter.org>"); MODULE_DESCRIPTION("Xtables: Related connection matching"); MODULE_ALIAS("ipt_helper"); MODULE_ALIAS("ip6t_helper"); static bool helper_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_helper_info *info = par->matchinfo; const struct nf_conn *ct; const struct nf_conn_help *master_help; const struct nf_conntrack_helper *helper; enum ip_conntrack_info ctinfo; bool ret = info->invert; ct = nf_ct_get(skb, &ctinfo); if (!ct || !ct->master) return ret; master_help = nfct_help(ct->master); if (!master_help) return ret; /* rcu_read_lock()ed by nf_hook_slow */ helper = rcu_dereference(master_help->helper); if (!helper) return ret; if (info->name[0] == '\0') ret = !ret; else ret ^= !strncmp(helper->name, info->name, strlen(helper->name)); return ret; } static int helper_mt_check(const struct xt_mtchk_param *par) { struct xt_helper_info *info = par->matchinfo; int ret; ret = nf_ct_l3proto_try_module_get(par->family); if (ret < 0) { pr_info("cannot load conntrack support for proto=%u\n", par->family); return ret; } info->name[29] = '\0'; return 0; } static void helper_mt_destroy(const struct xt_mtdtor_param *par) { nf_ct_l3proto_module_put(par->family); } static struct xt_match helper_mt_reg __read_mostly = { .name = "helper", .revision = 0, 
.family = NFPROTO_UNSPEC, .checkentry = helper_mt_check, .match = helper_mt, .destroy = helper_mt_destroy, .matchsize = sizeof(struct xt_helper_info), .me = THIS_MODULE, }; static int __init helper_mt_init(void) { return xt_register_match(&helper_mt_reg); } static void __exit helper_mt_exit(void) { xt_unregister_match(&helper_mt_reg); } module_init(helper_mt_init); module_exit(helper_mt_exit);
gpl-2.0
vmobi-d2vmu/android_kernel_samsung_d2vmu
net/802/p8023.c
14065
1687
/* * NET3: 802.3 data link hooks used for IPX 802.3 * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * 802.3 isn't really a protocol data link layer. Some old IPX stuff * uses it however. Note that there is only one 802.3 protocol layer * in the system. We don't currently support different protocols * running raw 802.3 on different devices. Thankfully nobody else * has done anything like the old IPX. */ #include <linux/in.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <net/datalink.h> #include <net/p8022.h> /* * Place an 802.3 header on a packet. The driver will do the mac * addresses, we just need to give it the buffer length. */ static int p8023_request(struct datalink_proto *dl, struct sk_buff *skb, unsigned char *dest_node) { struct net_device *dev = skb->dev; dev_hard_header(skb, dev, ETH_P_802_3, dest_node, NULL, skb->len); return dev_queue_xmit(skb); } /* * Create an 802.3 client. Note there can be only one 802.3 client */ struct datalink_proto *make_8023_client(void) { struct datalink_proto *proto = kmalloc(sizeof(*proto), GFP_ATOMIC); if (proto) { proto->header_length = 0; proto->request = p8023_request; } return proto; } /* * Destroy the 802.3 client. */ void destroy_8023_client(struct datalink_proto *dl) { kfree(dl); } EXPORT_SYMBOL(destroy_8023_client); EXPORT_SYMBOL(make_8023_client); MODULE_LICENSE("GPL");
gpl-2.0
pombredanne/bcm11351
net/802/p8023.c
14065
1687
/* * NET3: 802.3 data link hooks used for IPX 802.3 * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * 802.3 isn't really a protocol data link layer. Some old IPX stuff * uses it however. Note that there is only one 802.3 protocol layer * in the system. We don't currently support different protocols * running raw 802.3 on different devices. Thankfully nobody else * has done anything like the old IPX. */ #include <linux/in.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <net/datalink.h> #include <net/p8022.h> /* * Place an 802.3 header on a packet. The driver will do the mac * addresses, we just need to give it the buffer length. */ static int p8023_request(struct datalink_proto *dl, struct sk_buff *skb, unsigned char *dest_node) { struct net_device *dev = skb->dev; dev_hard_header(skb, dev, ETH_P_802_3, dest_node, NULL, skb->len); return dev_queue_xmit(skb); } /* * Create an 802.3 client. Note there can be only one 802.3 client */ struct datalink_proto *make_8023_client(void) { struct datalink_proto *proto = kmalloc(sizeof(*proto), GFP_ATOMIC); if (proto) { proto->header_length = 0; proto->request = p8023_request; } return proto; } /* * Destroy the 802.3 client. */ void destroy_8023_client(struct datalink_proto *dl) { kfree(dl); } EXPORT_SYMBOL(destroy_8023_client); EXPORT_SYMBOL(make_8023_client); MODULE_LICENSE("GPL");
gpl-2.0
goodhanrry/N915S_goodHanrry_kernel
net/ipv6/ip6_tunnel.c
242
44184
/* * IPv6 tunneling device * Linux INET6 implementation * * Authors: * Ville Nuorvala <vnuorval@tcs.hut.fi> * Yasuyuki Kozakai <kozakai@linux-ipv6.org> * * Based on: * linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c * * RFC 2473 * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/sockios.h> #include <linux/icmp.h> #include <linux/if.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/if_tunnel.h> #include <linux/net.h> #include <linux/in6.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/icmpv6.h> #include <linux/init.h> #include <linux/route.h> #include <linux/rtnetlink.h> #include <linux/netfilter_ipv6.h> #include <linux/slab.h> #include <linux/hash.h> #include <asm/uaccess.h> #include <linux/atomic.h> #include <net/icmp.h> #include <net/ip.h> #include <net/ip_tunnels.h> #include <net/ipv6.h> #include <net/ip6_route.h> #include <net/addrconf.h> #include <net/ip6_tunnel.h> #include <net/xfrm.h> #include <net/dsfield.h> #include <net/inet_ecn.h> #include <net/net_namespace.h> #include <net/netns/generic.h> MODULE_AUTHOR("Ville Nuorvala"); MODULE_DESCRIPTION("IPv6 tunneling device"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETDEV("ip6tnl0"); #ifdef IP6_TNL_DEBUG #define IP6_TNL_TRACE(x...) pr_debug("%s:" x "\n", __func__) #else #define IP6_TNL_TRACE(x...) 
do {;} while(0) #endif #define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK) #define IPV6_TCLASS_SHIFT 20 #define HASH_SIZE_SHIFT 5 #define HASH_SIZE (1 << HASH_SIZE_SHIFT) static bool log_ecn_error = true; module_param(log_ecn_error, bool, 0644); MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2) { u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2); return hash_32(hash, HASH_SIZE_SHIFT); } static int ip6_tnl_dev_init(struct net_device *dev); static void ip6_tnl_dev_setup(struct net_device *dev); static struct rtnl_link_ops ip6_link_ops __read_mostly; static int ip6_tnl_net_id __read_mostly; struct ip6_tnl_net { /* the IPv6 tunnel fallback device */ struct net_device *fb_tnl_dev; /* lists for storing tunnels in use */ struct ip6_tnl __rcu *tnls_r_l[HASH_SIZE]; struct ip6_tnl __rcu *tnls_wc[1]; struct ip6_tnl __rcu **tnls[2]; }; static struct net_device_stats *ip6_get_stats(struct net_device *dev) { struct pcpu_tstats sum = { 0 }; int i; for_each_possible_cpu(i) { const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); sum.rx_packets += tstats->rx_packets; sum.rx_bytes += tstats->rx_bytes; sum.tx_packets += tstats->tx_packets; sum.tx_bytes += tstats->tx_bytes; } dev->stats.rx_packets = sum.rx_packets; dev->stats.rx_bytes = sum.rx_bytes; dev->stats.tx_packets = sum.tx_packets; dev->stats.tx_bytes = sum.tx_bytes; return &dev->stats; } /* * Locking : hash tables are protected by RCU and RTNL */ struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t) { struct dst_entry *dst = t->dst_cache; if (dst && dst->obsolete && dst->ops->check(dst, t->dst_cookie) == NULL) { t->dst_cache = NULL; dst_release(dst); return NULL; } return dst; } EXPORT_SYMBOL_GPL(ip6_tnl_dst_check); void ip6_tnl_dst_reset(struct ip6_tnl *t) { dst_release(t->dst_cache); t->dst_cache = NULL; } EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset); void ip6_tnl_dst_store(struct ip6_tnl *t, 
struct dst_entry *dst) { struct rt6_info *rt = (struct rt6_info *) dst; t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0; dst_release(t->dst_cache); t->dst_cache = dst; } EXPORT_SYMBOL_GPL(ip6_tnl_dst_store); /** * ip6_tnl_lookup - fetch tunnel matching the end-point addresses * @remote: the address of the tunnel exit-point * @local: the address of the tunnel entry-point * * Return: * tunnel matching given end-points if found, * else fallback tunnel if its device is up, * else %NULL **/ #define for_each_ip6_tunnel_rcu(start) \ for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) static struct ip6_tnl * ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local) { unsigned int hash = HASH(remote, local); struct ip6_tnl *t; struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) { if (ipv6_addr_equal(local, &t->parms.laddr) && ipv6_addr_equal(remote, &t->parms.raddr) && (t->dev->flags & IFF_UP)) return t; } t = rcu_dereference(ip6n->tnls_wc[0]); if (t && (t->dev->flags & IFF_UP)) return t; return NULL; } /** * ip6_tnl_bucket - get head of list matching given tunnel parameters * @p: parameters containing tunnel end-points * * Description: * ip6_tnl_bucket() returns the head of the list matching the * &struct in6_addr entries laddr and raddr in @p. 
* * Return: head of IPv6 tunnel list **/ static struct ip6_tnl __rcu ** ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p) { const struct in6_addr *remote = &p->raddr; const struct in6_addr *local = &p->laddr; unsigned int h = 0; int prio = 0; if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) { prio = 1; h = HASH(remote, local); } return &ip6n->tnls[prio][h]; } /** * ip6_tnl_link - add tunnel to hash table * @t: tunnel to be added **/ static void ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t) { struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms); rcu_assign_pointer(t->next , rtnl_dereference(*tp)); rcu_assign_pointer(*tp, t); } /** * ip6_tnl_unlink - remove tunnel from hash table * @t: tunnel to be removed **/ static void ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t) { struct ip6_tnl __rcu **tp; struct ip6_tnl *iter; for (tp = ip6_tnl_bucket(ip6n, &t->parms); (iter = rtnl_dereference(*tp)) != NULL; tp = &iter->next) { if (t == iter) { rcu_assign_pointer(*tp, t->next); break; } } } static void ip6_dev_free(struct net_device *dev) { free_percpu(dev->tstats); free_netdev(dev); } static int ip6_tnl_create2(struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); struct net *net = dev_net(dev); struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); int err; t = netdev_priv(dev); err = ip6_tnl_dev_init(dev); if (err < 0) goto out; err = register_netdevice(dev); if (err < 0) goto out; strcpy(t->parms.name, dev->name); dev->rtnl_link_ops = &ip6_link_ops; dev_hold(dev); ip6_tnl_link(ip6n, t); return 0; out: return err; } /** * ip6_tnl_create - create a new tunnel * @p: tunnel parameters * @pt: pointer to new tunnel * * Description: * Create tunnel matching given parameters. 
* * Return: * created tunnel or NULL **/ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) { struct net_device *dev; struct ip6_tnl *t; char name[IFNAMSIZ]; int err; if (p->name[0]) strlcpy(name, p->name, IFNAMSIZ); else sprintf(name, "ip6tnl%%d"); dev = alloc_netdev(sizeof (*t), name, ip6_tnl_dev_setup); if (dev == NULL) goto failed; dev_net_set(dev, net); t = netdev_priv(dev); t->parms = *p; err = ip6_tnl_create2(dev); if (err < 0) goto failed_free; return t; failed_free: ip6_dev_free(dev); failed: return NULL; } /** * ip6_tnl_locate - find or create tunnel matching given parameters * @p: tunnel parameters * @create: != 0 if allowed to create new tunnel if no match found * * Description: * ip6_tnl_locate() first tries to locate an existing tunnel * based on @parms. If this is unsuccessful, but @create is set a new * tunnel device is created and registered for use. * * Return: * matching tunnel or NULL **/ static struct ip6_tnl *ip6_tnl_locate(struct net *net, struct __ip6_tnl_parm *p, int create) { const struct in6_addr *remote = &p->raddr; const struct in6_addr *local = &p->laddr; struct ip6_tnl __rcu **tp; struct ip6_tnl *t; struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); for (tp = ip6_tnl_bucket(ip6n, p); (t = rtnl_dereference(*tp)) != NULL; tp = &t->next) { if (ipv6_addr_equal(local, &t->parms.laddr) && ipv6_addr_equal(remote, &t->parms.raddr)) return t; } if (!create) return NULL; return ip6_tnl_create(net, p); } /** * ip6_tnl_dev_uninit - tunnel device uninitializer * @dev: the device to be destroyed * * Description: * ip6_tnl_dev_uninit() removes tunnel from its list **/ static void ip6_tnl_dev_uninit(struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); struct net *net = dev_net(dev); struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); if (dev == ip6n->fb_tnl_dev) RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL); else ip6_tnl_unlink(ip6n, t); ip6_tnl_dst_reset(t); dev_put(dev); } /** * 
parse_tvl_tnl_enc_lim - handle encapsulation limit option * @skb: received socket buffer * * Return: * 0 if none was found, * else index to encapsulation limit **/ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw) { const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw; __u8 nexthdr = ipv6h->nexthdr; __u16 off = sizeof (*ipv6h); while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) { __u16 optlen = 0; struct ipv6_opt_hdr *hdr; if (raw + off + sizeof (*hdr) > skb->data && !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr))) break; hdr = (struct ipv6_opt_hdr *) (raw + off); if (nexthdr == NEXTHDR_FRAGMENT) { struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr; if (frag_hdr->frag_off) break; optlen = 8; } else if (nexthdr == NEXTHDR_AUTH) { optlen = (hdr->hdrlen + 2) << 2; } else { optlen = ipv6_optlen(hdr); } if (nexthdr == NEXTHDR_DEST) { __u16 i = off + 2; while (1) { struct ipv6_tlv_tnl_enc_lim *tel; /* No more room for encapsulation limit */ if (i + sizeof (*tel) > off + optlen) break; tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i]; /* return index of option if found and valid */ if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT && tel->length == 1) return i; /* else jump to next option */ if (tel->type) i += tel->length + 2; else i++; } } nexthdr = hdr->nexthdr; off += optlen; } return 0; } EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim); /** * ip6_tnl_err - tunnel error handler * * Description: * ip6_tnl_err() should handle errors in the tunnel according * to the specifications in RFC 2473. 
**/ static int ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, u8 *type, u8 *code, int *msg, __u32 *info, int offset) { const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) skb->data; struct ip6_tnl *t; int rel_msg = 0; u8 rel_type = ICMPV6_DEST_UNREACH; u8 rel_code = ICMPV6_ADDR_UNREACH; __u32 rel_info = 0; __u16 len; int err = -ENOENT; /* If the packet doesn't contain the original IPv6 header we are in trouble since we might need the source address for further processing of the error. */ rcu_read_lock(); if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr)) == NULL) goto out; if (t->parms.proto != ipproto && t->parms.proto != 0) goto out; err = 0; switch (*type) { __u32 teli; struct ipv6_tlv_tnl_enc_lim *tel; __u32 mtu; case ICMPV6_DEST_UNREACH: net_warn_ratelimited("%s: Path to destination invalid or inactive!\n", t->parms.name); rel_msg = 1; break; case ICMPV6_TIME_EXCEED: if ((*code) == ICMPV6_EXC_HOPLIMIT) { net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", t->parms.name); rel_msg = 1; } break; case ICMPV6_PARAMPROB: teli = 0; if ((*code) == ICMPV6_HDR_FIELD) teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data); if (teli && teli == *info - 2) { tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; if (tel->encap_limit == 0) { net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", t->parms.name); rel_msg = 1; } } else { net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n", t->parms.name); } break; case ICMPV6_PKT_TOOBIG: mtu = *info - offset; if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; t->dev->mtu = mtu; if ((len = sizeof (*ipv6h) + ntohs(ipv6h->payload_len)) > mtu) { rel_type = ICMPV6_PKT_TOOBIG; rel_code = 0; rel_info = mtu; rel_msg = 1; } break; } *type = rel_type; *code = rel_code; *info = rel_info; *msg = rel_msg; out: rcu_read_unlock(); return err; } static int ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 
code, int offset, __be32 info) { int rel_msg = 0; u8 rel_type = type; u8 rel_code = code; __u32 rel_info = ntohl(info); int err; struct sk_buff *skb2; const struct iphdr *eiph; struct rtable *rt; struct flowi4 fl4; err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code, &rel_msg, &rel_info, offset); if (err < 0) return err; if (rel_msg == 0) return 0; switch (rel_type) { case ICMPV6_DEST_UNREACH: if (rel_code != ICMPV6_ADDR_UNREACH) return 0; rel_type = ICMP_DEST_UNREACH; rel_code = ICMP_HOST_UNREACH; break; case ICMPV6_PKT_TOOBIG: if (rel_code != 0) return 0; rel_type = ICMP_DEST_UNREACH; rel_code = ICMP_FRAG_NEEDED; break; case NDISC_REDIRECT: rel_type = ICMP_REDIRECT; rel_code = ICMP_REDIR_HOST; default: return 0; } if (!pskb_may_pull(skb, offset + sizeof(struct iphdr))) return 0; skb2 = skb_clone(skb, GFP_ATOMIC); if (!skb2) return 0; skb_dst_drop(skb2); skb_pull(skb2, offset); skb_reset_network_header(skb2); eiph = ip_hdr(skb2); /* Try to guess incoming interface */ rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, eiph->saddr, 0, 0, 0, IPPROTO_IPIP, RT_TOS(eiph->tos), 0); if (IS_ERR(rt)) goto out; skb2->dev = rt->dst.dev; /* route "incoming" packet */ if (rt->rt_flags & RTCF_LOCAL) { ip_rt_put(rt); rt = NULL; rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, eiph->daddr, eiph->saddr, 0, 0, IPPROTO_IPIP, RT_TOS(eiph->tos), 0); if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL) { if (!IS_ERR(rt)) ip_rt_put(rt); goto out; } skb_dst_set(skb2, &rt->dst); } else { ip_rt_put(rt); if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, skb2->dev) || skb_dst(skb2)->dev->type != ARPHRD_TUNNEL) goto out; } /* change mtu on this route */ if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) { if (rel_info > dst_mtu(skb_dst(skb2))) goto out; skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), NULL, skb2, rel_info); } if (rel_type == ICMP_REDIRECT) skb_dst(skb2)->ops->redirect(skb_dst(skb2), NULL, skb2); icmp_send(skb2, rel_type, 
rel_code, htonl(rel_info)); out: kfree_skb(skb2); return 0; } static int ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { int rel_msg = 0; u8 rel_type = type; u8 rel_code = code; __u32 rel_info = ntohl(info); int err; err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code, &rel_msg, &rel_info, offset); if (err < 0) return err; if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) { struct rt6_info *rt; struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); if (!skb2) return 0; skb_dst_drop(skb2); skb_pull(skb2, offset); skb_reset_network_header(skb2); /* Try to guess incoming interface */ rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0, 0); if (rt && rt->dst.dev) skb2->dev = rt->dst.dev; icmpv6_send(skb2, rel_type, rel_code, rel_info); ip6_rt_put(rt); kfree_skb(skb2); } return 0; } static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t, const struct ipv6hdr *ipv6h, struct sk_buff *skb) { __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK; if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY) ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield); return IP6_ECN_decapsulate(ipv6h, skb); } static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t, const struct ipv6hdr *ipv6h, struct sk_buff *skb) { if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY) ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb)); return IP6_ECN_decapsulate(ipv6h, skb); } __u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr, const struct in6_addr *raddr) { struct __ip6_tnl_parm *p = &t->parms; int ltype = ipv6_addr_type(laddr); int rtype = ipv6_addr_type(raddr); __u32 flags = 0; if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) { flags = IP6_TNL_F_CAP_PER_PACKET; } else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) && rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) && !((ltype|rtype) & IPV6_ADDR_LOOPBACK) && (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) { if 
(ltype&IPV6_ADDR_UNICAST) flags |= IP6_TNL_F_CAP_XMIT; if (rtype&IPV6_ADDR_UNICAST) flags |= IP6_TNL_F_CAP_RCV; } return flags; } EXPORT_SYMBOL(ip6_tnl_get_cap); /* called with rcu_read_lock() */ int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, const struct in6_addr *raddr) { struct __ip6_tnl_parm *p = &t->parms; int ret = 0; struct net *net = dev_net(t->dev); if ((p->flags & IP6_TNL_F_CAP_RCV) || ((p->flags & IP6_TNL_F_CAP_PER_PACKET) && (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) { struct net_device *ldev = NULL; if (p->link) ldev = dev_get_by_index_rcu(net, p->link); if ((ipv6_addr_is_multicast(laddr) || likely(ipv6_chk_addr(net, laddr, ldev, 0))) && likely(!ipv6_chk_addr(net, raddr, NULL, 0))) ret = 1; } return ret; } EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl); /** * ip6_tnl_rcv - decapsulate IPv6 packet and retransmit it locally * @skb: received socket buffer * @protocol: ethernet protocol ID * @dscp_ecn_decapsulate: the function to decapsulate DSCP code and ECN * * Return: 0 **/ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, __u8 ipproto, int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t, const struct ipv6hdr *ipv6h, struct sk_buff *skb)) { struct ip6_tnl *t; const struct ipv6hdr *ipv6h = ipv6_hdr(skb); int err; rcu_read_lock(); if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr)) != NULL) { struct pcpu_tstats *tstats; if (t->parms.proto != ipproto && t->parms.proto != 0) { rcu_read_unlock(); goto discard; } if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { rcu_read_unlock(); goto discard; } if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) { t->dev->stats.rx_dropped++; rcu_read_unlock(); goto discard; } secpath_reset(skb); skb->mac_header = skb->network_header; skb_reset_network_header(skb); skb->protocol = htons(protocol); skb->pkt_type = PACKET_HOST; memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); __skb_tunnel_rx(skb, t->dev); err = dscp_ecn_decapsulate(t, ipv6h, skb); if 
(unlikely(err)) { if (log_ecn_error) net_info_ratelimited("non-ECT from %pI6 with dsfield=%#x\n", &ipv6h->saddr, ipv6_get_dsfield(ipv6h)); if (err > 1) { ++t->dev->stats.rx_frame_errors; ++t->dev->stats.rx_errors; rcu_read_unlock(); goto discard; } } tstats = this_cpu_ptr(t->dev->tstats); tstats->rx_packets++; tstats->rx_bytes += skb->len; netif_rx(skb); rcu_read_unlock(); return 0; } rcu_read_unlock(); return 1; discard: kfree_skb(skb); return 0; } static int ip4ip6_rcv(struct sk_buff *skb) { return ip6_tnl_rcv(skb, ETH_P_IP, IPPROTO_IPIP, ip4ip6_dscp_ecn_decapsulate); } static int ip6ip6_rcv(struct sk_buff *skb) { return ip6_tnl_rcv(skb, ETH_P_IPV6, IPPROTO_IPV6, ip6ip6_dscp_ecn_decapsulate); } struct ipv6_tel_txoption { struct ipv6_txoptions ops; __u8 dst_opt[8]; }; static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit) { memset(opt, 0, sizeof(struct ipv6_tel_txoption)); opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT; opt->dst_opt[3] = 1; opt->dst_opt[4] = encap_limit; opt->dst_opt[5] = IPV6_TLV_PADN; opt->dst_opt[6] = 1; opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt; opt->ops.opt_nflen = 8; } /** * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own * @t: the outgoing tunnel device * @hdr: IPv6 header from the incoming packet * * Description: * Avoid trivial tunneling loop by checking that tunnel exit-point * doesn't match source of incoming packet. 
* * Return: * 1 if conflict, * 0 else **/ static inline bool ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr) { return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr); } int ip6_tnl_xmit_ctl(struct ip6_tnl *t) { struct __ip6_tnl_parm *p = &t->parms; int ret = 0; struct net *net = dev_net(t->dev); if (p->flags & IP6_TNL_F_CAP_XMIT) { struct net_device *ldev = NULL; rcu_read_lock(); if (p->link) ldev = dev_get_by_index_rcu(net, p->link); if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0))) pr_warn("%s xmit: Local address not yet configured!\n", p->name); else if (!ipv6_addr_is_multicast(&p->raddr) && unlikely(ipv6_chk_addr(net, &p->raddr, NULL, 0))) pr_warn("%s xmit: Routing loop! Remote address found on this node!\n", p->name); else ret = 1; rcu_read_unlock(); } return ret; } EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl); /** * ip6_tnl_xmit2 - encapsulate packet and send * @skb: the outgoing socket buffer * @dev: the outgoing tunnel device * @dsfield: dscp code for outer header * @fl: flow of tunneled packet * @encap_limit: encapsulation limit * @pmtu: Path MTU is stored if packet is too big * * Description: * Build new header and do some sanity checks on the packet before sending * it. * * Return: * 0 on success * -1 fail * %-EMSGSIZE message too big. return mtu in this case. 
**/ static int ip6_tnl_xmit2(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, struct flowi6 *fl6, int encap_limit, __u32 *pmtu) { struct net *net = dev_net(dev); struct ip6_tnl *t = netdev_priv(dev); struct net_device_stats *stats = &t->dev->stats; struct ipv6hdr *ipv6h = ipv6_hdr(skb); struct ipv6_tel_txoption opt; struct dst_entry *dst = NULL, *ndst = NULL; struct net_device *tdev; int mtu; unsigned int max_headroom = sizeof(struct ipv6hdr); u8 proto; int err = -1; if (!fl6->flowi6_mark) dst = ip6_tnl_dst_check(t); if (!dst) { ndst = ip6_route_output(net, NULL, fl6); if (ndst->error) goto tx_err_link_failure; ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0); if (IS_ERR(ndst)) { err = PTR_ERR(ndst); ndst = NULL; goto tx_err_link_failure; } dst = ndst; } tdev = dst->dev; if (tdev == dev) { stats->collisions++; net_warn_ratelimited("%s: Local routing loop detected!\n", t->parms.name); goto tx_err_dst_release; } mtu = dst_mtu(dst) - sizeof (*ipv6h); if (encap_limit >= 0) { max_headroom += 8; mtu -= 8; } if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; if (skb_dst(skb)) skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); if (skb->len > mtu) { *pmtu = mtu; err = -EMSGSIZE; goto tx_err_dst_release; } /* * Okay, now see if we can stuff it in the buffer as-is. 
*/ max_headroom += LL_RESERVED_SPACE(tdev); if (skb_headroom(skb) < max_headroom || skb_shared(skb) || (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { struct sk_buff *new_skb; if (!(new_skb = skb_realloc_headroom(skb, max_headroom))) goto tx_err_dst_release; if (skb->sk) skb_set_owner_w(new_skb, skb->sk); consume_skb(skb); skb = new_skb; } skb_dst_drop(skb); if (fl6->flowi6_mark) { skb_dst_set(skb, dst); ndst = NULL; } else { skb_dst_set_noref(skb, dst); } skb->transport_header = skb->network_header; proto = fl6->flowi6_proto; if (encap_limit >= 0) { init_tel_txopt(&opt, encap_limit); ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); } skb_push(skb, sizeof(struct ipv6hdr)); skb_reset_network_header(skb); ipv6h = ipv6_hdr(skb); ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield), fl6->flowlabel); ipv6h->hop_limit = t->parms.hop_limit; ipv6h->nexthdr = proto; ipv6h->saddr = fl6->saddr; ipv6h->daddr = fl6->daddr; ip6tunnel_xmit(skb, dev); if (ndst) ip6_tnl_dst_store(t, ndst); return 0; tx_err_link_failure: stats->tx_carrier_errors++; dst_link_failure(skb); tx_err_dst_release: dst_release(ndst); return err; } static inline int ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); const struct iphdr *iph = ip_hdr(skb); int encap_limit = -1; struct flowi6 fl6; __u8 dsfield; __u32 mtu; int err; if ((t->parms.proto != IPPROTO_IPIP && t->parms.proto != 0) || !ip6_tnl_xmit_ctl(t)) return -1; if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) encap_limit = t->parms.encap_limit; memcpy(&fl6, &t->fl.u.ip6, sizeof (fl6)); fl6.flowi6_proto = IPPROTO_IPIP; dsfield = ipv4_get_dsfield(iph); if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT) & IPV6_TCLASS_MASK; if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) fl6.flowi6_mark = skb->mark; err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu); if (err != 0) { /* XXX: send ICMP error even if DF is not set. 
*/ if (err == -EMSGSIZE) icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); return -1; } return 0; } static inline int ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); struct ipv6hdr *ipv6h = ipv6_hdr(skb); int encap_limit = -1; __u16 offset; struct flowi6 fl6; __u8 dsfield; __u32 mtu; int err; if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) || !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h)) return -1; offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); if (offset > 0) { struct ipv6_tlv_tnl_enc_lim *tel; tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset]; if (tel->encap_limit == 0) { icmpv6_send(skb, ICMPV6_PARAMPROB, ICMPV6_HDR_FIELD, offset + 2); return -1; } encap_limit = tel->encap_limit - 1; } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) encap_limit = t->parms.encap_limit; memcpy(&fl6, &t->fl.u.ip6, sizeof (fl6)); fl6.flowi6_proto = IPPROTO_IPV6; dsfield = ipv6_get_dsfield(ipv6h); if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK); if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL) fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK); if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) fl6.flowi6_mark = skb->mark; err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu); if (err != 0) { if (err == -EMSGSIZE) icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); return -1; } return 0; } static netdev_tx_t ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); struct net_device_stats *stats = &t->dev->stats; int ret; switch (skb->protocol) { case htons(ETH_P_IP): ret = ip4ip6_tnl_xmit(skb, dev); break; case htons(ETH_P_IPV6): ret = ip6ip6_tnl_xmit(skb, dev); break; default: goto tx_err; } if (ret < 0) goto tx_err; return NETDEV_TX_OK; tx_err: stats->tx_errors++; stats->tx_dropped++; kfree_skb(skb); return NETDEV_TX_OK; } static void 
ip6_tnl_link_config(struct ip6_tnl *t) { struct net_device *dev = t->dev; struct __ip6_tnl_parm *p = &t->parms; struct flowi6 *fl6 = &t->fl.u.ip6; memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr)); memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr)); /* Set up flowi template */ fl6->saddr = p->laddr; fl6->daddr = p->raddr; fl6->flowi6_oif = p->link; fl6->flowlabel = 0; if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS)) fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo; if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL)) fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo; p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET); p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr); if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV) dev->flags |= IFF_POINTOPOINT; else dev->flags &= ~IFF_POINTOPOINT; dev->iflink = p->link; if (p->flags & IP6_TNL_F_CAP_XMIT) { int strict = (ipv6_addr_type(&p->raddr) & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL)); struct rt6_info *rt = rt6_lookup(dev_net(dev), &p->raddr, &p->laddr, p->link, strict); if (rt == NULL) return; if (rt->dst.dev) { dev->hard_header_len = rt->dst.dev->hard_header_len + sizeof (struct ipv6hdr); dev->mtu = rt->dst.dev->mtu - sizeof (struct ipv6hdr); if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) dev->mtu-=8; if (dev->mtu < IPV6_MIN_MTU) dev->mtu = IPV6_MIN_MTU; } ip6_rt_put(rt); } } /** * ip6_tnl_change - update the tunnel parameters * @t: tunnel to be changed * @p: tunnel configuration parameters * * Description: * ip6_tnl_change() updates the tunnel parameters **/ static int ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p) { t->parms.laddr = p->laddr; t->parms.raddr = p->raddr; t->parms.flags = p->flags; t->parms.hop_limit = p->hop_limit; t->parms.encap_limit = p->encap_limit; t->parms.flowinfo = p->flowinfo; t->parms.link = p->link; t->parms.proto = p->proto; ip6_tnl_dst_reset(t); ip6_tnl_link_config(t); return 0; } static int ip6_tnl_update(struct ip6_tnl 
*t, struct __ip6_tnl_parm *p) { struct net *net = dev_net(t->dev); struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); int err; ip6_tnl_unlink(ip6n, t); synchronize_net(); err = ip6_tnl_change(t, p); ip6_tnl_link(ip6n, t); netdev_state_change(t->dev); return err; } static void ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u) { p->laddr = u->laddr; p->raddr = u->raddr; p->flags = u->flags; p->hop_limit = u->hop_limit; p->encap_limit = u->encap_limit; p->flowinfo = u->flowinfo; p->link = u->link; p->proto = u->proto; memcpy(p->name, u->name, sizeof(u->name)); } static void ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p) { u->laddr = p->laddr; u->raddr = p->raddr; u->flags = p->flags; u->hop_limit = p->hop_limit; u->encap_limit = p->encap_limit; u->flowinfo = p->flowinfo; u->link = p->link; u->proto = p->proto; memcpy(u->name, p->name, sizeof(u->name)); } /** * ip6_tnl_ioctl - configure ipv6 tunnels from userspace * @dev: virtual device associated with tunnel * @ifr: parameters passed from userspace * @cmd: command to be performed * * Description: * ip6_tnl_ioctl() is used for managing IPv6 tunnels * from userspace. * * The possible commands are the following: * %SIOCGETTUNNEL: get tunnel parameters for device * %SIOCADDTUNNEL: add tunnel matching given tunnel parameters * %SIOCCHGTUNNEL: change tunnel parameters to those given * %SIOCDELTUNNEL: delete tunnel * * The fallback device "ip6tnl0", created during module * initialization, can be used for creating other tunnel devices. 
* * Return: * 0 on success, * %-EFAULT if unable to copy data to or from userspace, * %-EPERM if current process hasn't %CAP_NET_ADMIN set * %-EINVAL if passed tunnel parameters are invalid, * %-EEXIST if changing a tunnel's parameters would cause a conflict * %-ENODEV if attempting to change or delete a nonexisting device **/ static int ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { int err = 0; struct ip6_tnl_parm p; struct __ip6_tnl_parm p1; struct ip6_tnl *t = NULL; struct net *net = dev_net(dev); struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); switch (cmd) { case SIOCGETTUNNEL: if (dev == ip6n->fb_tnl_dev) { if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) { err = -EFAULT; break; } ip6_tnl_parm_from_user(&p1, &p); t = ip6_tnl_locate(net, &p1, 0); } else { memset(&p, 0, sizeof(p)); } if (t == NULL) t = netdev_priv(dev); ip6_tnl_parm_to_user(&p, &t->parms); if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) { err = -EFAULT; } break; case SIOCADDTUNNEL: case SIOCCHGTUNNEL: err = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) break; err = -EFAULT; if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) break; err = -EINVAL; if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP && p.proto != 0) break; ip6_tnl_parm_from_user(&p1, &p); t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL); if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) { if (t != NULL) { if (t->dev != dev) { err = -EEXIST; break; } } else t = netdev_priv(dev); err = ip6_tnl_update(t, &p1); } if (t) { err = 0; ip6_tnl_parm_to_user(&p, &t->parms); if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) err = -EFAULT; } else err = (cmd == SIOCADDTUNNEL ? 
-ENOBUFS : -ENOENT); break; case SIOCDELTUNNEL: err = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) break; if (dev == ip6n->fb_tnl_dev) { err = -EFAULT; if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) break; err = -ENOENT; ip6_tnl_parm_from_user(&p1, &p); t = ip6_tnl_locate(net, &p1, 0); if (t == NULL) break; err = -EPERM; if (t->dev == ip6n->fb_tnl_dev) break; dev = t->dev; } err = 0; unregister_netdevice(dev); break; default: err = -EINVAL; } return err; } /** * ip6_tnl_change_mtu - change mtu manually for tunnel device * @dev: virtual device associated with tunnel * @new_mtu: the new mtu * * Return: * 0 on success, * %-EINVAL if mtu too small **/ static int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) { if (new_mtu < IPV6_MIN_MTU) { return -EINVAL; } dev->mtu = new_mtu; return 0; } static const struct net_device_ops ip6_tnl_netdev_ops = { .ndo_uninit = ip6_tnl_dev_uninit, .ndo_start_xmit = ip6_tnl_xmit, .ndo_do_ioctl = ip6_tnl_ioctl, .ndo_change_mtu = ip6_tnl_change_mtu, .ndo_get_stats = ip6_get_stats, }; /** * ip6_tnl_dev_setup - setup virtual tunnel device * @dev: virtual device associated with tunnel * * Description: * Initialize function pointers and device parameters **/ static void ip6_tnl_dev_setup(struct net_device *dev) { struct ip6_tnl *t; dev->netdev_ops = &ip6_tnl_netdev_ops; dev->destructor = ip6_dev_free; dev->type = ARPHRD_TUNNEL6; dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr); dev->mtu = ETH_DATA_LEN - sizeof (struct ipv6hdr); t = netdev_priv(dev); if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) dev->mtu-=8; dev->flags |= IFF_NOARP; dev->addr_len = sizeof(struct in6_addr); dev->features |= NETIF_F_NETNS_LOCAL; dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; } /** * ip6_tnl_dev_init_gen - general initializer for all tunnel devices * @dev: virtual device associated with tunnel **/ static inline int ip6_tnl_dev_init_gen(struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); t->dev = dev; 
dev->tstats = alloc_percpu(struct pcpu_tstats); if (!dev->tstats) return -ENOMEM; return 0; } /** * ip6_tnl_dev_init - initializer for all non fallback tunnel devices * @dev: virtual device associated with tunnel **/ static int ip6_tnl_dev_init(struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); int err = ip6_tnl_dev_init_gen(dev); if (err) return err; ip6_tnl_link_config(t); return 0; } /** * ip6_fb_tnl_dev_init - initializer for fallback tunnel device * @dev: fallback device * * Return: 0 **/ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); struct net *net = dev_net(dev); struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); int err = ip6_tnl_dev_init_gen(dev); if (err) return err; t->parms.proto = IPPROTO_IPV6; dev_hold(dev); ip6_tnl_link_config(t); rcu_assign_pointer(ip6n->tnls_wc[0], t); return 0; } static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[]) { u8 proto; if (!data) return 0; proto = nla_get_u8(data[IFLA_IPTUN_PROTO]); if (proto != IPPROTO_IPV6 && proto != IPPROTO_IPIP && proto != 0) return -EINVAL; return 0; } static void ip6_tnl_netlink_parms(struct nlattr *data[], struct __ip6_tnl_parm *parms) { memset(parms, 0, sizeof(*parms)); if (!data) return; if (data[IFLA_IPTUN_LINK]) parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]); if (data[IFLA_IPTUN_LOCAL]) nla_memcpy(&parms->laddr, data[IFLA_IPTUN_LOCAL], sizeof(struct in6_addr)); if (data[IFLA_IPTUN_REMOTE]) nla_memcpy(&parms->raddr, data[IFLA_IPTUN_REMOTE], sizeof(struct in6_addr)); if (data[IFLA_IPTUN_TTL]) parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]); if (data[IFLA_IPTUN_ENCAP_LIMIT]) parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]); if (data[IFLA_IPTUN_FLOWINFO]) parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]); if (data[IFLA_IPTUN_FLAGS]) parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]); if (data[IFLA_IPTUN_PROTO]) parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]); } static 
int ip6_tnl_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct net *net = dev_net(dev); struct ip6_tnl *nt; nt = netdev_priv(dev); ip6_tnl_netlink_parms(data, &nt->parms); if (ip6_tnl_locate(net, &nt->parms, 0)) return -EEXIST; return ip6_tnl_create2(dev); } static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct ip6_tnl *t; struct __ip6_tnl_parm p; struct net *net = dev_net(dev); struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); if (dev == ip6n->fb_tnl_dev) return -EINVAL; ip6_tnl_netlink_parms(data, &p); t = ip6_tnl_locate(net, &p, 0); if (t) { if (t->dev != dev) return -EEXIST; } else t = netdev_priv(dev); return ip6_tnl_update(t, &p); } static size_t ip6_tnl_get_size(const struct net_device *dev) { return /* IFLA_IPTUN_LINK */ nla_total_size(4) + /* IFLA_IPTUN_LOCAL */ nla_total_size(sizeof(struct in6_addr)) + /* IFLA_IPTUN_REMOTE */ nla_total_size(sizeof(struct in6_addr)) + /* IFLA_IPTUN_TTL */ nla_total_size(1) + /* IFLA_IPTUN_ENCAP_LIMIT */ nla_total_size(1) + /* IFLA_IPTUN_FLOWINFO */ nla_total_size(4) + /* IFLA_IPTUN_FLAGS */ nla_total_size(4) + /* IFLA_IPTUN_PROTO */ nla_total_size(1) + 0; } static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct ip6_tnl *tunnel = netdev_priv(dev); struct __ip6_tnl_parm *parm = &tunnel->parms; if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) || nla_put(skb, IFLA_IPTUN_LOCAL, sizeof(struct in6_addr), &parm->raddr) || nla_put(skb, IFLA_IPTUN_REMOTE, sizeof(struct in6_addr), &parm->laddr) || nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) || nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) || nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) || nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) || nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto)) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } static const struct nla_policy 
ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = { [IFLA_IPTUN_LINK] = { .type = NLA_U32 }, [IFLA_IPTUN_LOCAL] = { .len = sizeof(struct in6_addr) }, [IFLA_IPTUN_REMOTE] = { .len = sizeof(struct in6_addr) }, [IFLA_IPTUN_TTL] = { .type = NLA_U8 }, [IFLA_IPTUN_ENCAP_LIMIT] = { .type = NLA_U8 }, [IFLA_IPTUN_FLOWINFO] = { .type = NLA_U32 }, [IFLA_IPTUN_FLAGS] = { .type = NLA_U32 }, [IFLA_IPTUN_PROTO] = { .type = NLA_U8 }, }; static struct rtnl_link_ops ip6_link_ops __read_mostly = { .kind = "ip6tnl", .maxtype = IFLA_IPTUN_MAX, .policy = ip6_tnl_policy, .priv_size = sizeof(struct ip6_tnl), .setup = ip6_tnl_dev_setup, .validate = ip6_tnl_validate, .newlink = ip6_tnl_newlink, .changelink = ip6_tnl_changelink, .get_size = ip6_tnl_get_size, .fill_info = ip6_tnl_fill_info, }; static struct xfrm6_tunnel ip4ip6_handler __read_mostly = { .handler = ip4ip6_rcv, .err_handler = ip4ip6_err, .priority = 1, }; static struct xfrm6_tunnel ip6ip6_handler __read_mostly = { .handler = ip6ip6_rcv, .err_handler = ip6ip6_err, .priority = 1, }; static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n) { int h; struct ip6_tnl *t; LIST_HEAD(list); for (h = 0; h < HASH_SIZE; h++) { t = rtnl_dereference(ip6n->tnls_r_l[h]); while (t != NULL) { unregister_netdevice_queue(t->dev, &list); t = rtnl_dereference(t->next); } } t = rtnl_dereference(ip6n->tnls_wc[0]); unregister_netdevice_queue(t->dev, &list); unregister_netdevice_many(&list); } static int __net_init ip6_tnl_init_net(struct net *net) { struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); struct ip6_tnl *t = NULL; int err; ip6n->tnls[0] = ip6n->tnls_wc; ip6n->tnls[1] = ip6n->tnls_r_l; err = -ENOMEM; ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0", ip6_tnl_dev_setup); if (!ip6n->fb_tnl_dev) goto err_alloc_dev; dev_net_set(ip6n->fb_tnl_dev, net); err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev); if (err < 0) goto err_register; err = register_netdev(ip6n->fb_tnl_dev); if (err < 0) goto err_register; t = 
netdev_priv(ip6n->fb_tnl_dev); strcpy(t->parms.name, ip6n->fb_tnl_dev->name); return 0; err_register: ip6_dev_free(ip6n->fb_tnl_dev); err_alloc_dev: return err; } static void __net_exit ip6_tnl_exit_net(struct net *net) { struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); rtnl_lock(); ip6_tnl_destroy_tunnels(ip6n); rtnl_unlock(); } static struct pernet_operations ip6_tnl_net_ops = { .init = ip6_tnl_init_net, .exit = ip6_tnl_exit_net, .id = &ip6_tnl_net_id, .size = sizeof(struct ip6_tnl_net), }; /** * ip6_tunnel_init - register protocol and reserve needed resources * * Return: 0 on success **/ static int __init ip6_tunnel_init(void) { int err; err = register_pernet_device(&ip6_tnl_net_ops); if (err < 0) goto out_pernet; err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET); if (err < 0) { pr_err("%s: can't register ip4ip6\n", __func__); goto out_ip4ip6; } err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6); if (err < 0) { pr_err("%s: can't register ip6ip6\n", __func__); goto out_ip6ip6; } err = rtnl_link_register(&ip6_link_ops); if (err < 0) goto rtnl_link_failed; return 0; rtnl_link_failed: xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6); out_ip6ip6: xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET); out_ip4ip6: unregister_pernet_device(&ip6_tnl_net_ops); out_pernet: return err; } /** * ip6_tunnel_cleanup - free resources and unregister protocol **/ static void __exit ip6_tunnel_cleanup(void) { rtnl_link_unregister(&ip6_link_ops); if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET)) pr_info("%s: can't deregister ip4ip6\n", __func__); if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6)) pr_info("%s: can't deregister ip6ip6\n", __func__); unregister_pernet_device(&ip6_tnl_net_ops); } module_init(ip6_tunnel_init); module_exit(ip6_tunnel_cleanup);
gpl-2.0
drod2169/Linux-3.13.x
drivers/net/wireless/iwlwifi/iwl-1000.c
242
4544
/****************************************************************************** * * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #include <linux/module.h> #include <linux/stringify.h> #include "iwl-config.h" #include "iwl-csr.h" #include "iwl-agn-hw.h" /* Highest firmware API version supported */ #define IWL1000_UCODE_API_MAX 5 #define IWL100_UCODE_API_MAX 5 /* Oldest version we won't warn about */ #define IWL1000_UCODE_API_OK 5 #define IWL100_UCODE_API_OK 5 /* Lowest firmware API version supported */ #define IWL1000_UCODE_API_MIN 1 #define IWL100_UCODE_API_MIN 5 /* EEPROM version */ #define EEPROM_1000_TX_POWER_VERSION (4) #define EEPROM_1000_EEPROM_VERSION (0x15C) #define IWL1000_FW_PRE "iwlwifi-1000-" #define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE __stringify(api) ".ucode" #define IWL100_FW_PRE "iwlwifi-100-" #define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE __stringify(api) ".ucode" static const struct iwl_base_params iwl1000_base_params = { .num_of_queues = IWLAGN_NUM_QUEUES, 
.eeprom_size = OTP_LOW_IMAGE_SIZE, .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, .max_ll_items = OTP_MAX_LL_ITEMS_1000, .shadow_ram_support = false, .led_compensation = 51, .wd_timeout = IWL_WATCHDOG_DISABLED, .max_event_log_size = 128, }; static const struct iwl_ht_params iwl1000_ht_params = { .ht_greenfield_support = true, .use_rts_for_aggregation = true, /* use rts/cts protection */ .ht40_bands = BIT(IEEE80211_BAND_2GHZ), }; static const struct iwl_eeprom_params iwl1000_eeprom_params = { .regulatory_bands = { EEPROM_REG_BAND_1_CHANNELS, EEPROM_REG_BAND_2_CHANNELS, EEPROM_REG_BAND_3_CHANNELS, EEPROM_REG_BAND_4_CHANNELS, EEPROM_REG_BAND_5_CHANNELS, EEPROM_REG_BAND_24_HT40_CHANNELS, EEPROM_REGULATORY_BAND_NO_HT40, } }; #define IWL_DEVICE_1000 \ .fw_name_pre = IWL1000_FW_PRE, \ .ucode_api_max = IWL1000_UCODE_API_MAX, \ .ucode_api_ok = IWL1000_UCODE_API_OK, \ .ucode_api_min = IWL1000_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_1000, \ .max_inst_size = IWLAGN_RTC_INST_SIZE, \ .max_data_size = IWLAGN_RTC_DATA_SIZE, \ .nvm_ver = EEPROM_1000_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ .base_params = &iwl1000_base_params, \ .eeprom_params = &iwl1000_eeprom_params, \ .led_mode = IWL_LED_BLINK const struct iwl_cfg iwl1000_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN", IWL_DEVICE_1000, .ht_params = &iwl1000_ht_params, }; const struct iwl_cfg iwl1000_bg_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 1000 BG", IWL_DEVICE_1000, }; #define IWL_DEVICE_100 \ .fw_name_pre = IWL100_FW_PRE, \ .ucode_api_max = IWL100_UCODE_API_MAX, \ .ucode_api_ok = IWL100_UCODE_API_OK, \ .ucode_api_min = IWL100_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_100, \ .max_inst_size = IWLAGN_RTC_INST_SIZE, \ .max_data_size = IWLAGN_RTC_DATA_SIZE, \ .nvm_ver = EEPROM_1000_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ .base_params = &iwl1000_base_params, \ .eeprom_params = &iwl1000_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ 
.rx_with_siso_diversity = true const struct iwl_cfg iwl100_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 100 BGN", IWL_DEVICE_100, .ht_params = &iwl1000_ht_params, }; const struct iwl_cfg iwl100_bg_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 100 BG", IWL_DEVICE_100, }; MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_OK)); MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_OK));
gpl-2.0
gallagth/keecker_kernel
net/netlink/af_netlink.c
498
48882
/* * NETLINK Kernel-user communication protocol. * * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk> * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith * added netlink_proto_exit * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br> * use nlk_sk, as sk->protinfo is on a diet 8) * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org> * - inc module use count of module that owns * the kernel socket in case userspace opens * socket of same protocol * - remove all module support, since netlink is * mandatory if CONFIG_NET=y these days */ #include <linux/module.h> #include <linux/capability.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/stat.h> #include <linux/socket.h> #include <linux/un.h> #include <linux/fcntl.h> #include <linux/termios.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/fs.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/rtnetlink.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/notifier.h> #include <linux/security.h> #include <linux/jhash.h> #include <linux/jiffies.h> #include <linux/random.h> #include <linux/bitops.h> #include <linux/mm.h> #include <linux/types.h> #include <linux/audit.h> #include <linux/mutex.h> #include <net/net_namespace.h> #include <net/sock.h> #include <net/scm.h> #include <net/netlink.h> #define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8) #define NLGRPLONGS(x) (NLGRPSZ(x)/sizeof(unsigned long)) struct netlink_sock { /* struct sock has to be the first member 
of netlink_sock */ struct sock sk; u32 pid; u32 dst_pid; u32 dst_group; u32 flags; u32 subscriptions; u32 ngroups; unsigned long *groups; unsigned long state; wait_queue_head_t wait; struct netlink_callback *cb; struct mutex *cb_mutex; struct mutex cb_def_mutex; void (*netlink_rcv)(struct sk_buff *skb); struct module *module; }; struct listeners { struct rcu_head rcu; unsigned long masks[0]; }; #define NETLINK_KERNEL_SOCKET 0x1 #define NETLINK_RECV_PKTINFO 0x2 #define NETLINK_BROADCAST_SEND_ERROR 0x4 #define NETLINK_RECV_NO_ENOBUFS 0x8 static inline struct netlink_sock *nlk_sk(struct sock *sk) { return container_of(sk, struct netlink_sock, sk); } static inline int netlink_is_kernel(struct sock *sk) { return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET; } struct nl_pid_hash { struct hlist_head *table; unsigned long rehash_time; unsigned int mask; unsigned int shift; unsigned int entries; unsigned int max_shift; u32 rnd; }; struct netlink_table { struct nl_pid_hash hash; struct hlist_head mc_list; struct listeners __rcu *listeners; unsigned int nl_nonroot; unsigned int groups; struct mutex *cb_mutex; struct module *module; int registered; }; static struct netlink_table *nl_table; static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); static int netlink_dump(struct sock *sk); static void netlink_destroy_callback(struct netlink_callback *cb); static DEFINE_RWLOCK(nl_table_lock); static atomic_t nl_table_users = ATOMIC_INIT(0); static ATOMIC_NOTIFIER_HEAD(netlink_chain); static u32 netlink_group_mask(u32 group) { return group ? 
1 << (group - 1) : 0; } static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid) { return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask]; } static void netlink_sock_destruct(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); if (nlk->cb) { if (nlk->cb->done) nlk->cb->done(nlk->cb); netlink_destroy_callback(nlk->cb); } skb_queue_purge(&sk->sk_receive_queue); if (!sock_flag(sk, SOCK_DEAD)) { printk(KERN_ERR "Freeing alive netlink socket %p\n", sk); return; } WARN_ON(atomic_read(&sk->sk_rmem_alloc)); WARN_ON(atomic_read(&sk->sk_wmem_alloc)); WARN_ON(nlk_sk(sk)->groups); } /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on * SMP. Look, when several writers sleep and reader wakes them up, all but one * immediately hit write lock and grab all the cpus. Exclusive sleep solves * this, _but_ remember, it adds useless work on UP machines. */ void netlink_table_grab(void) __acquires(nl_table_lock) { might_sleep(); write_lock_irq(&nl_table_lock); if (atomic_read(&nl_table_users)) { DECLARE_WAITQUEUE(wait, current); add_wait_queue_exclusive(&nl_table_wait, &wait); for (;;) { set_current_state(TASK_UNINTERRUPTIBLE); if (atomic_read(&nl_table_users) == 0) break; write_unlock_irq(&nl_table_lock); schedule(); write_lock_irq(&nl_table_lock); } __set_current_state(TASK_RUNNING); remove_wait_queue(&nl_table_wait, &wait); } } void netlink_table_ungrab(void) __releases(nl_table_lock) { write_unlock_irq(&nl_table_lock); wake_up(&nl_table_wait); } static inline void netlink_lock_table(void) { /* read_lock() synchronizes us to netlink_table_grab */ read_lock(&nl_table_lock); atomic_inc(&nl_table_users); read_unlock(&nl_table_lock); } static inline void netlink_unlock_table(void) { if (atomic_dec_and_test(&nl_table_users)) wake_up(&nl_table_wait); } static inline struct sock *netlink_lookup(struct net *net, int protocol, u32 pid) { struct nl_pid_hash *hash = &nl_table[protocol].hash; struct hlist_head *head; struct sock *sk; struct 
hlist_node *node; read_lock(&nl_table_lock); head = nl_pid_hashfn(hash, pid); sk_for_each(sk, node, head) { if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->pid == pid)) { sock_hold(sk); goto found; } } sk = NULL; found: read_unlock(&nl_table_lock); return sk; } static inline struct hlist_head *nl_pid_hash_zalloc(size_t size) { if (size <= PAGE_SIZE) return kzalloc(size, GFP_ATOMIC); else return (struct hlist_head *) __get_free_pages(GFP_ATOMIC | __GFP_ZERO, get_order(size)); } static inline void nl_pid_hash_free(struct hlist_head *table, size_t size) { if (size <= PAGE_SIZE) kfree(table); else free_pages((unsigned long)table, get_order(size)); } static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow) { unsigned int omask, mask, shift; size_t osize, size; struct hlist_head *otable, *table; int i; omask = mask = hash->mask; osize = size = (mask + 1) * sizeof(*table); shift = hash->shift; if (grow) { if (++shift > hash->max_shift) return 0; mask = mask * 2 + 1; size *= 2; } table = nl_pid_hash_zalloc(size); if (!table) return 0; otable = hash->table; hash->table = table; hash->mask = mask; hash->shift = shift; get_random_bytes(&hash->rnd, sizeof(hash->rnd)); for (i = 0; i <= omask; i++) { struct sock *sk; struct hlist_node *node, *tmp; sk_for_each_safe(sk, node, tmp, &otable[i]) __sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid)); } nl_pid_hash_free(otable, osize); hash->rehash_time = jiffies + 10 * 60 * HZ; return 1; } static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len) { int avg = hash->entries >> hash->shift; if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1)) return 1; if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) { nl_pid_hash_rehash(hash, 0); return 1; } return 0; } static const struct proto_ops netlink_ops; static void netlink_update_listeners(struct sock *sk) { struct netlink_table *tbl = &nl_table[sk->sk_protocol]; struct hlist_node *node; unsigned long mask; unsigned int i; for (i = 0; i < 
NLGRPLONGS(tbl->groups); i++) { mask = 0; sk_for_each_bound(sk, node, &tbl->mc_list) { if (i < NLGRPLONGS(nlk_sk(sk)->ngroups)) mask |= nlk_sk(sk)->groups[i]; } tbl->listeners->masks[i] = mask; } /* this function is only called with the netlink table "grabbed", which * makes sure updates are visible before bind or setsockopt return. */ } static int netlink_insert(struct sock *sk, struct net *net, u32 pid) { struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash; struct hlist_head *head; int err = -EADDRINUSE; struct sock *osk; struct hlist_node *node; int len; netlink_table_grab(); head = nl_pid_hashfn(hash, pid); len = 0; sk_for_each(osk, node, head) { if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->pid == pid)) break; len++; } if (node) goto err; err = -EBUSY; if (nlk_sk(sk)->pid) goto err; err = -ENOMEM; if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX)) goto err; if (len && nl_pid_hash_dilute(hash, len)) head = nl_pid_hashfn(hash, pid); hash->entries++; nlk_sk(sk)->pid = pid; sk_add_node(sk, head); err = 0; err: netlink_table_ungrab(); return err; } static void netlink_remove(struct sock *sk) { netlink_table_grab(); if (sk_del_node_init(sk)) nl_table[sk->sk_protocol].hash.entries--; if (nlk_sk(sk)->subscriptions) __sk_del_bind_node(sk); netlink_table_ungrab(); } static struct proto netlink_proto = { .name = "NETLINK", .owner = THIS_MODULE, .obj_size = sizeof(struct netlink_sock), }; static int __netlink_create(struct net *net, struct socket *sock, struct mutex *cb_mutex, int protocol) { struct sock *sk; struct netlink_sock *nlk; sock->ops = &netlink_ops; sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto); if (!sk) return -ENOMEM; sock_init_data(sock, sk); nlk = nlk_sk(sk); if (cb_mutex) nlk->cb_mutex = cb_mutex; else { nlk->cb_mutex = &nlk->cb_def_mutex; mutex_init(nlk->cb_mutex); } init_waitqueue_head(&nlk->wait); sk->sk_destruct = netlink_sock_destruct; sk->sk_protocol = protocol; return 0; } static int netlink_create(struct net *net, 
struct socket *sock, int protocol, int kern) { struct module *module = NULL; struct mutex *cb_mutex; struct netlink_sock *nlk; int err = 0; sock->state = SS_UNCONNECTED; if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM) return -ESOCKTNOSUPPORT; if (protocol < 0 || protocol >= MAX_LINKS) return -EPROTONOSUPPORT; netlink_lock_table(); #ifdef CONFIG_MODULES if (!nl_table[protocol].registered) { netlink_unlock_table(); request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol); netlink_lock_table(); } #endif if (nl_table[protocol].registered && try_module_get(nl_table[protocol].module)) module = nl_table[protocol].module; else err = -EPROTONOSUPPORT; cb_mutex = nl_table[protocol].cb_mutex; netlink_unlock_table(); if (err < 0) goto out; err = __netlink_create(net, sock, cb_mutex, protocol); if (err < 0) goto out_module; local_bh_disable(); sock_prot_inuse_add(net, &netlink_proto, 1); local_bh_enable(); nlk = nlk_sk(sock->sk); nlk->module = module; out: return err; out_module: module_put(module); goto out; } static int netlink_release(struct socket *sock) { struct sock *sk = sock->sk; struct netlink_sock *nlk; if (!sk) return 0; netlink_remove(sk); sock_orphan(sk); nlk = nlk_sk(sk); /* * OK. Socket is unlinked, any packets that arrive now * will be purged. 
*/ sock->sk = NULL; wake_up_interruptible_all(&nlk->wait); skb_queue_purge(&sk->sk_write_queue); if (nlk->pid) { struct netlink_notify n = { .net = sock_net(sk), .protocol = sk->sk_protocol, .pid = nlk->pid, }; atomic_notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n); } module_put(nlk->module); netlink_table_grab(); if (netlink_is_kernel(sk)) { BUG_ON(nl_table[sk->sk_protocol].registered == 0); if (--nl_table[sk->sk_protocol].registered == 0) { kfree(nl_table[sk->sk_protocol].listeners); nl_table[sk->sk_protocol].module = NULL; nl_table[sk->sk_protocol].registered = 0; } } else if (nlk->subscriptions) netlink_update_listeners(sk); netlink_table_ungrab(); kfree(nlk->groups); nlk->groups = NULL; local_bh_disable(); sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1); local_bh_enable(); sock_put(sk); return 0; } static int netlink_autobind(struct socket *sock) { struct sock *sk = sock->sk; struct net *net = sock_net(sk); struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash; struct hlist_head *head; struct sock *osk; struct hlist_node *node; s32 pid = task_tgid_vnr(current); int err; static s32 rover = -4097; retry: cond_resched(); netlink_table_grab(); head = nl_pid_hashfn(hash, pid); sk_for_each(osk, node, head) { if (!net_eq(sock_net(osk), net)) continue; if (nlk_sk(osk)->pid == pid) { /* Bind collision, search negative pid values. */ pid = rover--; if (rover > -4097) rover = -4097; netlink_table_ungrab(); goto retry; } } netlink_table_ungrab(); err = netlink_insert(sk, net, pid); if (err == -EADDRINUSE) goto retry; /* If 2 threads race to autobind, that is fine. 
*/ if (err == -EBUSY) err = 0; return err; } static inline int netlink_capable(struct socket *sock, unsigned int flag) { return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) || capable(CAP_NET_ADMIN); } static void netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions) { struct netlink_sock *nlk = nlk_sk(sk); if (nlk->subscriptions && !subscriptions) __sk_del_bind_node(sk); else if (!nlk->subscriptions && subscriptions) sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list); nlk->subscriptions = subscriptions; } static int netlink_realloc_groups(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); unsigned int groups; unsigned long *new_groups; int err = 0; netlink_table_grab(); groups = nl_table[sk->sk_protocol].groups; if (!nl_table[sk->sk_protocol].registered) { err = -ENOENT; goto out_unlock; } if (nlk->ngroups >= groups) goto out_unlock; new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC); if (new_groups == NULL) { err = -ENOMEM; goto out_unlock; } memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0, NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups)); nlk->groups = new_groups; nlk->ngroups = groups; out_unlock: netlink_table_ungrab(); return err; } static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { struct sock *sk = sock->sk; struct net *net = sock_net(sk); struct netlink_sock *nlk = nlk_sk(sk); struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; int err; if (nladdr->nl_family != AF_NETLINK) return -EINVAL; /* Only superuser is allowed to listen multicasts */ if (nladdr->nl_groups) { if (!netlink_capable(sock, NL_NONROOT_RECV)) return -EPERM; err = netlink_realloc_groups(sk); if (err) return err; } if (nlk->pid) { if (nladdr->nl_pid != nlk->pid) return -EINVAL; } else { err = nladdr->nl_pid ? 
netlink_insert(sk, net, nladdr->nl_pid) : netlink_autobind(sock); if (err) return err; } if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0])) return 0; netlink_table_grab(); netlink_update_subscriptions(sk, nlk->subscriptions + hweight32(nladdr->nl_groups) - hweight32(nlk->groups[0])); nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups; netlink_update_listeners(sk); netlink_table_ungrab(); return 0; } static int netlink_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { int err = 0; struct sock *sk = sock->sk; struct netlink_sock *nlk = nlk_sk(sk); struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; if (alen < sizeof(addr->sa_family)) return -EINVAL; if (addr->sa_family == AF_UNSPEC) { sk->sk_state = NETLINK_UNCONNECTED; nlk->dst_pid = 0; nlk->dst_group = 0; return 0; } if (addr->sa_family != AF_NETLINK) return -EINVAL; /* Only superuser is allowed to send multicasts */ if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND)) return -EPERM; if (!nlk->pid) err = netlink_autobind(sock); if (err == 0) { sk->sk_state = NETLINK_CONNECTED; nlk->dst_pid = nladdr->nl_pid; nlk->dst_group = ffs(nladdr->nl_groups); } return err; } static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer) { struct sock *sk = sock->sk; struct netlink_sock *nlk = nlk_sk(sk); DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr); nladdr->nl_family = AF_NETLINK; nladdr->nl_pad = 0; *addr_len = sizeof(*nladdr); if (peer) { nladdr->nl_pid = nlk->dst_pid; nladdr->nl_groups = netlink_group_mask(nlk->dst_group); } else { nladdr->nl_pid = nlk->pid; nladdr->nl_groups = nlk->groups ? 
nlk->groups[0] : 0; } return 0; } static void netlink_overrun(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) { if (!test_and_set_bit(0, &nlk_sk(sk)->state)) { sk->sk_err = ENOBUFS; sk->sk_error_report(sk); } } atomic_inc(&sk->sk_drops); } static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid) { struct sock *sock; struct netlink_sock *nlk; sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, pid); if (!sock) return ERR_PTR(-ECONNREFUSED); /* Don't bother queuing skb if kernel socket has no input function */ nlk = nlk_sk(sock); if (sock->sk_state == NETLINK_CONNECTED && nlk->dst_pid != nlk_sk(ssk)->pid) { sock_put(sock); return ERR_PTR(-ECONNREFUSED); } return sock; } struct sock *netlink_getsockbyfilp(struct file *filp) { struct inode *inode = filp->f_path.dentry->d_inode; struct sock *sock; if (!S_ISSOCK(inode->i_mode)) return ERR_PTR(-ENOTSOCK); sock = SOCKET_I(inode)->sk; if (sock->sk_family != AF_NETLINK) return ERR_PTR(-EINVAL); sock_hold(sock); return sock; } /* * Attach a skb to a netlink socket. * The caller must hold a reference to the destination socket. On error, the * reference is dropped. The skb is not send to the destination, just all * all error checks are performed and memory in the queue is reserved. * Return values: * < 0: error. skb freed, reference to sock dropped. * 0: continue * 1: repeat lookup - reference dropped while waiting for socket memory. 
*/ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, long *timeo, struct sock *ssk) { struct netlink_sock *nlk; nlk = nlk_sk(sk); if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || test_bit(0, &nlk->state)) { DECLARE_WAITQUEUE(wait, current); if (!*timeo) { if (!ssk || netlink_is_kernel(ssk)) netlink_overrun(sk); sock_put(sk); kfree_skb(skb); return -EAGAIN; } __set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&nlk->wait, &wait); if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || test_bit(0, &nlk->state)) && !sock_flag(sk, SOCK_DEAD)) *timeo = schedule_timeout(*timeo); __set_current_state(TASK_RUNNING); remove_wait_queue(&nlk->wait, &wait); sock_put(sk); if (signal_pending(current)) { kfree_skb(skb); return sock_intr_errno(*timeo); } return 1; } skb_set_owner_r(skb, sk); return 0; } static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb) { int len = skb->len; skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk, len); return len; } int netlink_sendskb(struct sock *sk, struct sk_buff *skb) { int len = __netlink_sendskb(sk, skb); sock_put(sk); return len; } void netlink_detachskb(struct sock *sk, struct sk_buff *skb) { kfree_skb(skb); sock_put(sk); } static inline struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation) { int delta; skb_orphan(skb); delta = skb->end - skb->tail; if (delta * 2 < skb->truesize) return skb; if (skb_shared(skb)) { struct sk_buff *nskb = skb_clone(skb, allocation); if (!nskb) return skb; kfree_skb(skb); skb = nskb; } if (!pskb_expand_head(skb, 0, -delta, allocation)) skb->truesize -= delta; return skb; } static inline void netlink_rcv_wake(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); if (skb_queue_empty(&sk->sk_receive_queue)) clear_bit(0, &nlk->state); if (!test_bit(0, &nlk->state)) wake_up_interruptible(&nlk->wait); } static inline int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb) { int ret; struct netlink_sock *nlk = nlk_sk(sk); ret = -ECONNREFUSED; 
if (nlk->netlink_rcv != NULL) { ret = skb->len; skb_set_owner_r(skb, sk); nlk->netlink_rcv(skb); } kfree_skb(skb); sock_put(sk); return ret; } int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock) { struct sock *sk; int err; long timeo; skb = netlink_trim(skb, gfp_any()); timeo = sock_sndtimeo(ssk, nonblock); retry: sk = netlink_getsockbypid(ssk, pid); if (IS_ERR(sk)) { kfree_skb(skb); return PTR_ERR(sk); } if (netlink_is_kernel(sk)) return netlink_unicast_kernel(sk, skb); if (sk_filter(sk, skb)) { err = skb->len; kfree_skb(skb); sock_put(sk); return err; } err = netlink_attachskb(sk, skb, &timeo, ssk); if (err == 1) goto retry; if (err) return err; return netlink_sendskb(sk, skb); } EXPORT_SYMBOL(netlink_unicast); int netlink_has_listeners(struct sock *sk, unsigned int group) { int res = 0; struct listeners *listeners; BUG_ON(!netlink_is_kernel(sk)); rcu_read_lock(); listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners); if (group - 1 < nl_table[sk->sk_protocol].groups) res = test_bit(group - 1, listeners->masks); rcu_read_unlock(); return res; } EXPORT_SYMBOL_GPL(netlink_has_listeners); static inline int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb) { struct netlink_sock *nlk = nlk_sk(sk); if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && !test_bit(0, &nlk->state)) { skb_set_owner_r(skb, sk); __netlink_sendskb(sk, skb); return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf; } return -1; } struct netlink_broadcast_data { struct sock *exclude_sk; struct net *net; u32 pid; u32 group; int failure; int delivery_failure; int congested; int delivered; gfp_t allocation; struct sk_buff *skb, *skb2; int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data); void *tx_data; }; static inline int do_one_broadcast(struct sock *sk, struct netlink_broadcast_data *p) { struct netlink_sock *nlk = nlk_sk(sk); int val; if (p->exclude_sk == sk) goto out; if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups || 
!test_bit(p->group - 1, nlk->groups)) goto out; if (!net_eq(sock_net(sk), p->net)) goto out; if (p->failure) { netlink_overrun(sk); goto out; } sock_hold(sk); if (p->skb2 == NULL) { if (skb_shared(p->skb)) { p->skb2 = skb_clone(p->skb, p->allocation); } else { p->skb2 = skb_get(p->skb); /* * skb ownership may have been set when * delivered to a previous socket. */ skb_orphan(p->skb2); } } if (p->skb2 == NULL) { netlink_overrun(sk); /* Clone failed. Notify ALL listeners. */ p->failure = 1; if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR) p->delivery_failure = 1; } else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) { kfree_skb(p->skb2); p->skb2 = NULL; } else if (sk_filter(sk, p->skb2)) { kfree_skb(p->skb2); p->skb2 = NULL; } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) { netlink_overrun(sk); if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR) p->delivery_failure = 1; } else { p->congested |= val; p->delivered = 1; p->skb2 = NULL; } sock_put(sk); out: return 0; } int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid, u32 group, gfp_t allocation, int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data), void *filter_data) { struct net *net = sock_net(ssk); struct netlink_broadcast_data info; struct hlist_node *node; struct sock *sk; skb = netlink_trim(skb, allocation); info.exclude_sk = ssk; info.net = net; info.pid = pid; info.group = group; info.failure = 0; info.delivery_failure = 0; info.congested = 0; info.delivered = 0; info.allocation = allocation; info.skb = skb; info.skb2 = NULL; info.tx_filter = filter; info.tx_data = filter_data; /* While we sleep in clone, do not allow to change socket list */ netlink_lock_table(); sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list) do_one_broadcast(sk, &info); consume_skb(skb); netlink_unlock_table(); if (info.delivery_failure) { kfree_skb(info.skb2); return -ENOBUFS; } else consume_skb(info.skb2); if (info.delivered) { if (info.congested && (allocation & 
__GFP_WAIT)) yield(); return 0; } return -ESRCH; } EXPORT_SYMBOL(netlink_broadcast_filtered); int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid, u32 group, gfp_t allocation) { return netlink_broadcast_filtered(ssk, skb, pid, group, allocation, NULL, NULL); } EXPORT_SYMBOL(netlink_broadcast); struct netlink_set_err_data { struct sock *exclude_sk; u32 pid; u32 group; int code; }; static inline int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p) { struct netlink_sock *nlk = nlk_sk(sk); int ret = 0; if (sk == p->exclude_sk) goto out; if (!net_eq(sock_net(sk), sock_net(p->exclude_sk))) goto out; if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups || !test_bit(p->group - 1, nlk->groups)) goto out; if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) { ret = 1; goto out; } sk->sk_err = p->code; sk->sk_error_report(sk); out: return ret; } /** * netlink_set_err - report error to broadcast listeners * @ssk: the kernel netlink socket, as returned by netlink_kernel_create() * @pid: the PID of a process that we want to skip (if any) * @groups: the broadcast group that will notice the error * @code: error code, must be negative (as usual in kernelspace) * * This function returns the number of broadcast listeners that have set the * NETLINK_RECV_NO_ENOBUFS socket option. 
*/ int netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code) { struct netlink_set_err_data info; struct hlist_node *node; struct sock *sk; int ret = 0; info.exclude_sk = ssk; info.pid = pid; info.group = group; /* sk->sk_err wants a positive error value */ info.code = -code; read_lock(&nl_table_lock); sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list) ret += do_one_set_err(sk, &info); read_unlock(&nl_table_lock); return ret; } EXPORT_SYMBOL(netlink_set_err); /* must be called with netlink table grabbed */ static void netlink_update_socket_mc(struct netlink_sock *nlk, unsigned int group, int is_new) { int old, new = !!is_new, subscriptions; old = test_bit(group - 1, nlk->groups); subscriptions = nlk->subscriptions - old + new; if (new) __set_bit(group - 1, nlk->groups); else __clear_bit(group - 1, nlk->groups); netlink_update_subscriptions(&nlk->sk, subscriptions); netlink_update_listeners(&nlk->sk); } static int netlink_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct netlink_sock *nlk = nlk_sk(sk); unsigned int val = 0; int err; if (level != SOL_NETLINK) return -ENOPROTOOPT; if (optlen >= sizeof(int) && get_user(val, (unsigned int __user *)optval)) return -EFAULT; switch (optname) { case NETLINK_PKTINFO: if (val) nlk->flags |= NETLINK_RECV_PKTINFO; else nlk->flags &= ~NETLINK_RECV_PKTINFO; err = 0; break; case NETLINK_ADD_MEMBERSHIP: case NETLINK_DROP_MEMBERSHIP: { if (!netlink_capable(sock, NL_NONROOT_RECV)) return -EPERM; err = netlink_realloc_groups(sk); if (err) return err; if (!val || val - 1 >= nlk->ngroups) return -EINVAL; netlink_table_grab(); netlink_update_socket_mc(nlk, val, optname == NETLINK_ADD_MEMBERSHIP); netlink_table_ungrab(); err = 0; break; } case NETLINK_BROADCAST_ERROR: if (val) nlk->flags |= NETLINK_BROADCAST_SEND_ERROR; else nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR; err = 0; break; case NETLINK_NO_ENOBUFS: if (val) { nlk->flags 
|= NETLINK_RECV_NO_ENOBUFS; clear_bit(0, &nlk->state); wake_up_interruptible(&nlk->wait); } else nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS; err = 0; break; default: err = -ENOPROTOOPT; } return err; } static int netlink_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct netlink_sock *nlk = nlk_sk(sk); int len, val, err; if (level != SOL_NETLINK) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; switch (optname) { case NETLINK_PKTINFO: if (len < sizeof(int)) return -EINVAL; len = sizeof(int); val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0; if (put_user(len, optlen) || put_user(val, optval)) return -EFAULT; err = 0; break; case NETLINK_BROADCAST_ERROR: if (len < sizeof(int)) return -EINVAL; len = sizeof(int); val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0; if (put_user(len, optlen) || put_user(val, optval)) return -EFAULT; err = 0; break; case NETLINK_NO_ENOBUFS: if (len < sizeof(int)) return -EINVAL; len = sizeof(int); val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 
1 : 0; if (put_user(len, optlen) || put_user(val, optval)) return -EFAULT; err = 0; break; default: err = -ENOPROTOOPT; } return err; } static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb) { struct nl_pktinfo info; info.group = NETLINK_CB(skb).dst_group; put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info); } static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock_iocb *siocb = kiocb_to_siocb(kiocb); struct sock *sk = sock->sk; struct netlink_sock *nlk = nlk_sk(sk); struct sockaddr_nl *addr = msg->msg_name; u32 dst_pid; u32 dst_group; struct sk_buff *skb; int err; struct scm_cookie scm; if (msg->msg_flags&MSG_OOB) return -EOPNOTSUPP; if (NULL == siocb->scm) { siocb->scm = &scm; memset(&scm, 0, sizeof(scm)); } err = scm_send(sock, msg, siocb->scm); if (err < 0) return err; if (msg->msg_namelen) { err = -EINVAL; if (addr->nl_family != AF_NETLINK) goto out; dst_pid = addr->nl_pid; dst_group = ffs(addr->nl_groups); err = -EPERM; if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND)) goto out; } else { dst_pid = nlk->dst_pid; dst_group = nlk->dst_group; } if (!nlk->pid) { err = netlink_autobind(sock); if (err) goto out; } err = -EMSGSIZE; if (len > sk->sk_sndbuf - 32) goto out; err = -ENOBUFS; skb = alloc_skb(len, GFP_KERNEL); if (skb == NULL) goto out; NETLINK_CB(skb).pid = nlk->pid; NETLINK_CB(skb).dst_group = dst_group; memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); err = -EFAULT; if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { kfree_skb(skb); goto out; } err = security_netlink_send(sk, skb); if (err) { kfree_skb(skb); goto out; } if (dst_group) { atomic_inc(&skb->users); netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL); } err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT); out: scm_destroy(siocb->scm); return err; } static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, 
size_t len, int flags) { struct sock_iocb *siocb = kiocb_to_siocb(kiocb); struct scm_cookie scm; struct sock *sk = sock->sk; struct netlink_sock *nlk = nlk_sk(sk); int noblock = flags&MSG_DONTWAIT; size_t copied; struct sk_buff *skb, *data_skb; int err, ret; if (flags&MSG_OOB) return -EOPNOTSUPP; copied = 0; skb = skb_recv_datagram(sk, flags, noblock, &err); if (skb == NULL) goto out; data_skb = skb; #ifdef CONFIG_COMPAT_NETLINK_MESSAGES if (unlikely(skb_shinfo(skb)->frag_list)) { /* * If this skb has a frag_list, then here that means that we * will have to use the frag_list skb's data for compat tasks * and the regular skb's data for normal (non-compat) tasks. * * If we need to send the compat skb, assign it to the * 'data_skb' variable so that it will be used below for data * copying. We keep 'skb' for everything else, including * freeing both later. */ if (flags & MSG_CMSG_COMPAT) data_skb = skb_shinfo(skb)->frag_list; } #endif msg->msg_namelen = 0; copied = data_skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } skb_reset_transport_header(data_skb); err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied); if (msg->msg_name) { struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name; addr->nl_family = AF_NETLINK; addr->nl_pad = 0; addr->nl_pid = NETLINK_CB(skb).pid; addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group); msg->msg_namelen = sizeof(*addr); } if (nlk->flags & NETLINK_RECV_PKTINFO) netlink_cmsg_recv_pktinfo(msg, skb); if (NULL == siocb->scm) { memset(&scm, 0, sizeof(scm)); siocb->scm = &scm; } siocb->scm->creds = *NETLINK_CREDS(skb); if (flags & MSG_TRUNC) copied = data_skb->len; skb_free_datagram(sk, skb); if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) { ret = netlink_dump(sk); if (ret) { sk->sk_err = ret; sk->sk_error_report(sk); } } scm_recv(sock, msg, siocb->scm, flags); out: netlink_rcv_wake(sk); return err ? 
: copied; } static void netlink_data_ready(struct sock *sk, int len) { BUG(); } /* * We export these functions to other modules. They provide a * complete set of kernel non-blocking support for message * queueing. */ struct sock * netlink_kernel_create(struct net *net, int unit, unsigned int groups, void (*input)(struct sk_buff *skb), struct mutex *cb_mutex, struct module *module) { struct socket *sock; struct sock *sk; struct netlink_sock *nlk; struct listeners *listeners = NULL; BUG_ON(!nl_table); if (unit < 0 || unit >= MAX_LINKS) return NULL; if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock)) return NULL; /* * We have to just have a reference on the net from sk, but don't * get_net it. Besides, we cannot get and then put the net here. * So we create one inside init_net and the move it to net. */ if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0) goto out_sock_release_nosk; sk = sock->sk; sk_change_net(sk, net); if (groups < 32) groups = 32; listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL); if (!listeners) goto out_sock_release; sk->sk_data_ready = netlink_data_ready; if (input) nlk_sk(sk)->netlink_rcv = input; if (netlink_insert(sk, net, 0)) goto out_sock_release; nlk = nlk_sk(sk); nlk->flags |= NETLINK_KERNEL_SOCKET; netlink_table_grab(); if (!nl_table[unit].registered) { nl_table[unit].groups = groups; rcu_assign_pointer(nl_table[unit].listeners, listeners); nl_table[unit].cb_mutex = cb_mutex; nl_table[unit].module = module; nl_table[unit].registered = 1; } else { kfree(listeners); nl_table[unit].registered++; } netlink_table_ungrab(); return sk; out_sock_release: kfree(listeners); netlink_kernel_release(sk); return NULL; out_sock_release_nosk: sock_release(sock); return NULL; } EXPORT_SYMBOL(netlink_kernel_create); void netlink_kernel_release(struct sock *sk) { sk_release_kernel(sk); } EXPORT_SYMBOL(netlink_kernel_release); int __netlink_change_ngroups(struct sock *sk, unsigned int groups) { struct listeners *new, *old; 
struct netlink_table *tbl = &nl_table[sk->sk_protocol]; if (groups < 32) groups = 32; if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) { new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC); if (!new) return -ENOMEM; old = rcu_dereference_raw(tbl->listeners); memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups)); rcu_assign_pointer(tbl->listeners, new); kfree_rcu(old, rcu); } tbl->groups = groups; return 0; } /** * netlink_change_ngroups - change number of multicast groups * * This changes the number of multicast groups that are available * on a certain netlink family. Note that it is not possible to * change the number of groups to below 32. Also note that it does * not implicitly call netlink_clear_multicast_users() when the * number of groups is reduced. * * @sk: The kernel netlink socket, as returned by netlink_kernel_create(). * @groups: The new number of groups. */ int netlink_change_ngroups(struct sock *sk, unsigned int groups) { int err; netlink_table_grab(); err = __netlink_change_ngroups(sk, groups); netlink_table_ungrab(); return err; } void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group) { struct sock *sk; struct hlist_node *node; struct netlink_table *tbl = &nl_table[ksk->sk_protocol]; sk_for_each_bound(sk, node, &tbl->mc_list) netlink_update_socket_mc(nlk_sk(sk), group, 0); } /** * netlink_clear_multicast_users - kick off multicast listeners * * This function removes all listeners from the given group. * @ksk: The kernel netlink socket, as returned by * netlink_kernel_create(). * @group: The multicast group to clear. 
*/ void netlink_clear_multicast_users(struct sock *ksk, unsigned int group) { netlink_table_grab(); __netlink_clear_multicast_users(ksk, group); netlink_table_ungrab(); } void netlink_set_nonroot(int protocol, unsigned int flags) { if ((unsigned int)protocol < MAX_LINKS) nl_table[protocol].nl_nonroot = flags; } EXPORT_SYMBOL(netlink_set_nonroot); static void netlink_destroy_callback(struct netlink_callback *cb) { kfree_skb(cb->skb); kfree(cb); } /* * It looks a bit ugly. * It would be better to create kernel thread. */ static int netlink_dump(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); struct netlink_callback *cb; struct sk_buff *skb; struct nlmsghdr *nlh; int len, err = -ENOBUFS; skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL); if (!skb) goto errout; mutex_lock(nlk->cb_mutex); cb = nlk->cb; if (cb == NULL) { err = -EINVAL; goto errout_skb; } len = cb->dump(skb, cb); if (len > 0) { mutex_unlock(nlk->cb_mutex); if (sk_filter(sk, skb)) kfree_skb(skb); else __netlink_sendskb(sk, skb); return 0; } nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI); if (!nlh) goto errout_skb; memcpy(nlmsg_data(nlh), &len, sizeof(len)); if (sk_filter(sk, skb)) kfree_skb(skb); else __netlink_sendskb(sk, skb); if (cb->done) cb->done(cb); nlk->cb = NULL; mutex_unlock(nlk->cb_mutex); netlink_destroy_callback(cb); return 0; errout_skb: mutex_unlock(nlk->cb_mutex); kfree_skb(skb); errout: return err; } int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, const struct nlmsghdr *nlh, int (*dump)(struct sk_buff *skb, struct netlink_callback *), int (*done)(struct netlink_callback *)) { struct netlink_callback *cb; struct sock *sk; struct netlink_sock *nlk; int ret; cb = kzalloc(sizeof(*cb), GFP_KERNEL); if (cb == NULL) return -ENOBUFS; cb->dump = dump; cb->done = done; cb->nlh = nlh; atomic_inc(&skb->users); cb->skb = skb; sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).pid); if (sk == NULL) { netlink_destroy_callback(cb); 
return -ECONNREFUSED; } nlk = nlk_sk(sk); /* A dump is in progress... */ mutex_lock(nlk->cb_mutex); if (nlk->cb) { mutex_unlock(nlk->cb_mutex); netlink_destroy_callback(cb); sock_put(sk); return -EBUSY; } nlk->cb = cb; mutex_unlock(nlk->cb_mutex); ret = netlink_dump(sk); sock_put(sk); if (ret) return ret; /* We successfully started a dump, by returning -EINTR we * signal not to send ACK even if it was requested. */ return -EINTR; } EXPORT_SYMBOL(netlink_dump_start); void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err) { struct sk_buff *skb; struct nlmsghdr *rep; struct nlmsgerr *errmsg; size_t payload = sizeof(*errmsg); /* error messages get the original request appened */ if (err) payload += nlmsg_len(nlh); skb = nlmsg_new(payload, GFP_KERNEL); if (!skb) { struct sock *sk; sk = netlink_lookup(sock_net(in_skb->sk), in_skb->sk->sk_protocol, NETLINK_CB(in_skb).pid); if (sk) { sk->sk_err = ENOBUFS; sk->sk_error_report(sk); sock_put(sk); } return; } rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, NLMSG_ERROR, payload, 0); errmsg = nlmsg_data(rep); errmsg->error = err; memcpy(&errmsg->msg, nlh, err ? 
nlh->nlmsg_len : sizeof(*nlh)); netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT); } EXPORT_SYMBOL(netlink_ack); int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *, struct nlmsghdr *)) { struct nlmsghdr *nlh; int err; while (skb->len >= nlmsg_total_size(0)) { int msglen; nlh = nlmsg_hdr(skb); err = 0; if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len) return 0; /* Only requests are handled by the kernel */ if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) goto ack; /* Skip control messages */ if (nlh->nlmsg_type < NLMSG_MIN_TYPE) goto ack; err = cb(skb, nlh); if (err == -EINTR) goto skip; ack: if (nlh->nlmsg_flags & NLM_F_ACK || err) netlink_ack(skb, nlh, err); skip: msglen = NLMSG_ALIGN(nlh->nlmsg_len); if (msglen > skb->len) msglen = skb->len; skb_pull(skb, msglen); } return 0; } EXPORT_SYMBOL(netlink_rcv_skb); /** * nlmsg_notify - send a notification netlink message * @sk: netlink socket to use * @skb: notification message * @pid: destination netlink pid for reports or 0 * @group: destination multicast group or 0 * @report: 1 to report back, 0 to disable * @flags: allocation flags */ int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 pid, unsigned int group, int report, gfp_t flags) { int err = 0; if (group) { int exclude_pid = 0; if (report) { atomic_inc(&skb->users); exclude_pid = pid; } /* errors reported via destination sk->sk_err, but propagate * delivery errors if NETLINK_BROADCAST_ERROR flag is set */ err = nlmsg_multicast(sk, skb, exclude_pid, group, flags); } if (report) { int err2; err2 = nlmsg_unicast(sk, skb, pid); if (!err || err == -ESRCH) err = err2; } return err; } EXPORT_SYMBOL(nlmsg_notify); #ifdef CONFIG_PROC_FS struct nl_seq_iter { struct seq_net_private p; int link; int hash_idx; }; static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos) { struct nl_seq_iter *iter = seq->private; int i, j; struct sock *s; struct hlist_node *node; loff_t off = 0; for (i = 0; i < MAX_LINKS; 
i++) { struct nl_pid_hash *hash = &nl_table[i].hash; for (j = 0; j <= hash->mask; j++) { sk_for_each(s, node, &hash->table[j]) { if (sock_net(s) != seq_file_net(seq)) continue; if (off == pos) { iter->link = i; iter->hash_idx = j; return s; } ++off; } } } return NULL; } static void *netlink_seq_start(struct seq_file *seq, loff_t *pos) __acquires(nl_table_lock) { read_lock(&nl_table_lock); return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN; } static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct sock *s; struct nl_seq_iter *iter; int i, j; ++*pos; if (v == SEQ_START_TOKEN) return netlink_seq_socket_idx(seq, 0); iter = seq->private; s = v; do { s = sk_next(s); } while (s && sock_net(s) != seq_file_net(seq)); if (s) return s; i = iter->link; j = iter->hash_idx + 1; do { struct nl_pid_hash *hash = &nl_table[i].hash; for (; j <= hash->mask; j++) { s = sk_head(&hash->table[j]); while (s && sock_net(s) != seq_file_net(seq)) s = sk_next(s); if (s) { iter->link = i; iter->hash_idx = j; return s; } } j = 0; } while (++i < MAX_LINKS); return NULL; } static void netlink_seq_stop(struct seq_file *seq, void *v) __releases(nl_table_lock) { read_unlock(&nl_table_lock); } static int netlink_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_puts(seq, "sk Eth Pid Groups " "Rmem Wmem Dump Locks Drops Inode\n"); else { struct sock *s = v; struct netlink_sock *nlk = nlk_sk(s); seq_printf(seq, "%pK %-3d %-6d %08x %-8d %-8d %pK %-8d %-8d %-8lu\n", s, s->sk_protocol, nlk->pid, nlk->groups ? 
(u32)nlk->groups[0] : 0, sk_rmem_alloc_get(s), sk_wmem_alloc_get(s), nlk->cb, atomic_read(&s->sk_refcnt), atomic_read(&s->sk_drops), sock_i_ino(s) ); } return 0; } static const struct seq_operations netlink_seq_ops = { .start = netlink_seq_start, .next = netlink_seq_next, .stop = netlink_seq_stop, .show = netlink_seq_show, }; static int netlink_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &netlink_seq_ops, sizeof(struct nl_seq_iter)); } static const struct file_operations netlink_seq_fops = { .owner = THIS_MODULE, .open = netlink_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; #endif int netlink_register_notifier(struct notifier_block *nb) { return atomic_notifier_chain_register(&netlink_chain, nb); } EXPORT_SYMBOL(netlink_register_notifier); int netlink_unregister_notifier(struct notifier_block *nb) { return atomic_notifier_chain_unregister(&netlink_chain, nb); } EXPORT_SYMBOL(netlink_unregister_notifier); static const struct proto_ops netlink_ops = { .family = PF_NETLINK, .owner = THIS_MODULE, .release = netlink_release, .bind = netlink_bind, .connect = netlink_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = netlink_getname, .poll = datagram_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = netlink_setsockopt, .getsockopt = netlink_getsockopt, .sendmsg = netlink_sendmsg, .recvmsg = netlink_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static const struct net_proto_family netlink_family_ops = { .family = PF_NETLINK, .create = netlink_create, .owner = THIS_MODULE, /* for consistency 8) */ }; static int __net_init netlink_net_init(struct net *net) { #ifdef CONFIG_PROC_FS if (!proc_net_fops_create(net, "netlink", 0, &netlink_seq_fops)) return -ENOMEM; #endif return 0; } static void __net_exit netlink_net_exit(struct net *net) { #ifdef CONFIG_PROC_FS proc_net_remove(net, "netlink"); #endif } static 
void __init netlink_add_usersock_entry(void) { struct listeners *listeners; int groups = 32; listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL); if (!listeners) panic("netlink_add_usersock_entry: Cannot allocate listeners\n"); netlink_table_grab(); nl_table[NETLINK_USERSOCK].groups = groups; rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners); nl_table[NETLINK_USERSOCK].module = THIS_MODULE; nl_table[NETLINK_USERSOCK].registered = 1; netlink_table_ungrab(); } static struct pernet_operations __net_initdata netlink_net_ops = { .init = netlink_net_init, .exit = netlink_net_exit, }; static int __init netlink_proto_init(void) { struct sk_buff *dummy_skb; int i; unsigned long limit; unsigned int order; int err = proto_register(&netlink_proto, 0); if (err != 0) goto out; BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb)); nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL); if (!nl_table) goto panic; if (totalram_pages >= (128 * 1024)) limit = totalram_pages >> (21 - PAGE_SHIFT); else limit = totalram_pages >> (23 - PAGE_SHIFT); order = get_bitmask_order(limit) - 1 + PAGE_SHIFT; limit = (1UL << order) / sizeof(struct hlist_head); order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1; for (i = 0; i < MAX_LINKS; i++) { struct nl_pid_hash *hash = &nl_table[i].hash; hash->table = nl_pid_hash_zalloc(1 * sizeof(*hash->table)); if (!hash->table) { while (i-- > 0) nl_pid_hash_free(nl_table[i].hash.table, 1 * sizeof(*hash->table)); kfree(nl_table); goto panic; } hash->max_shift = order; hash->shift = 0; hash->mask = 0; hash->rehash_time = jiffies; } netlink_add_usersock_entry(); sock_register(&netlink_family_ops); register_pernet_subsys(&netlink_net_ops); /* The netlink device handler may be needed early. */ rtnetlink_init(); out: return err; panic: panic("netlink_init: Cannot allocate nl_table\n"); } core_initcall(netlink_proto_init);
gpl-2.0
tyler6389/android_kernel_samsung_SHV-E270K
drivers/phone_svn/ipc_spi/spi_test.c
498
7185
#include "spi_main.h" #include "spi_test.h" #include "spi_os.h" #include "spi_dev.h" #include "spi_app.h" enum SPI_TEST_SCENARIO_T spi_test_scenario_mode = SPI_TEST_SCENARIO_NONE; static void _scenario_physical(void); static void _scenario_sending(int param); static void _scenario_physical(void) { int count = 0; int failcount = 0; int i; char txbuf[SPI_DEV_MAX_PACKET_SIZE]; char rxbuf[SPI_DEV_MAX_PACKET_SIZE]; SPI_OS_TRACE_MID(("spi_scenario_physical\n")); spi_os_memset(txbuf, 0x00, sizeof(txbuf)); for (i = 0 ; i < SPI_DEV_MAX_PACKET_SIZE ; i++) txbuf[i] = i%100; while (count < 100) { SPI_OS_TRACE_MID(("spi _scenario_physical %d\n", count)); SPI_OS_TRACE_MID(("%s mrdy %d srdy %d submrdy %d subsrdy %d\n", "spi_scenario_physical test start", spi_dev_get_gpio(spi_dev_gpio_mrdy), spi_dev_get_gpio(spi_dev_gpio_srdy), spi_dev_get_gpio(spi_dev_gpio_submrdy), spi_dev_get_gpio(spi_dev_gpio_subsrdy))); txbuf[0] = count; spi_os_memset(rxbuf, 0x00, sizeof(rxbuf)); #ifdef SPI_FEATURE_MASTER spi_dev_set_gpio(spi_dev_gpio_mrdy, SPI_DEV_GPIOLEVEL_LOW); spi_os_sleep(SPI_FEATURE_TEST_DURATION); spi_dev_set_gpio(spi_dev_gpio_submrdy, SPI_DEV_GPIOLEVEL_LOW); spi_os_sleep(SPI_FEATURE_TEST_DURATION); SPI_OS_TRACE_MID(("%s, subsrdy high\n", "spi_scenario_physical test wait srdy")); spi_dev_set_gpio(spi_dev_gpio_mrdy, SPI_DEV_GPIOLEVEL_HIGH); while (spi_dev_get_gpio(spi_dev_gpio_srdy) == SPI_DEV_GPIOLEVEL_LOW) ; /* spreadtrum recommend. */ /* master should send/receive after that slave is ready. 
*/ spi_os_sleep(20); if (count % 2 == 0) { spi_dev_send(txbuf, SPI_DEV_MAX_PACKET_SIZE); } else { spi_dev_receive(rxbuf, SPI_DEV_MAX_PACKET_SIZE); for (i = 1 ; i < SPI_DEV_MAX_PACKET_SIZE ; i++) { if (rxbuf[i] != txbuf[i]) { failcount++; spi_os_trace_dump("spi_scenario_physical receiving fail", &rxbuf[i-8], 16); SPI_OS_TRACE_MID(("%s %d count %d/%d\n", "spi_scenario_physical test receiving fail", i, failcount, count)); i = sizeof(rxbuf); break; } } } spi_os_sleep(20); spi_dev_set_gpio(spi_dev_gpio_submrdy, SPI_DEV_GPIOLEVEL_HIGH); spi_os_sleep(SPI_FEATURE_TEST_DURATION); #elif defined SPI_FEATURE_SLAVE spi_dev_set_gpio(spi_dev_gpio_srdy, SPI_DEV_GPIOLEVEL_LOW); spi_os_sleep(SPI_FEATURE_TEST_DURATION); spi_dev_set_gpio(spi_dev_gpio_subsrdy, SPI_DEV_GPIOLEVEL_LOW); spi_os_sleep(SPI_FEATURE_TEST_DURATION); while (spi_dev_get_gpio(spi_dev_gpio_mrdy) == SPI_DEV_GPIOLEVEL_LOW) ; spi_dev_set_gpio(spi_dev_gpio_srdy, SPI_DEV_GPIOLEVEL_HIGH); if (count % 2 == 0) { spi_dev_receive(rxbuf, SPI_DEV_MAX_PACKET_SIZE); for (i = 1 ; i < SPI_DEV_MAX_PACKET_SIZE ; i++) { if (rxbuf[i] != txbuf[i]) { failcount++; spi_os_trace_dump("spi_scenario_phy rx fail", &rxbuf[i-8], 16); SPI_OS_TRACE_MID(("%s %d count %d/%d\n", "spi_scenario_physical test receiving fail", i, failcount, count)); i = sizeof(rxbuf); break; } } } else spi_dev_send(txbuf, SPI_DEV_MAX_PACKET_SIZE); spi_os_sleep(SPI_FEATURE_TEST_DURATION); #endif count++; SPI_OS_TRACE_MID(("%s %d/%d\n", "spi_scenario_physical test receiving result count", failcount, count)); } } SPI_OS_TIMER_CALLBACK(spi_test_timer_callback) { SPI_OS_TRACE_MID(("spi_test_timer_callback\n")); _scenario_sending(0); } static char tempdata1[135]; /* temp code. becuase can not allocate */ static char tempdata2[367]; /* temp code. becuase can not allocate */ static char tempdata3[1057]; /* temp code. becuase can not allocate */ static char tempdata4[35]; /* temp code. becuase can not allocate */ static char tempdata5[2079]; /* temp code. 
becuase can not allocate */ static char tempdata6[200]; /* temp code. becuase can not allocate */ static char tempdata7[2052]; /* temp code. becuase can not allocate */ static void _scenario_sending(int param) { #define NB_COUNT 50 #define NB_STEP 7 static int step; static int duration; static int count; static void *timer_id; char *data = NULL; int i, value; struct DATABYSTEP_T { SPI_MAIN_MSG_T type; unsigned int size; char *buf; } databystep[NB_STEP] = { {SPI_MAIN_MSG_IPC_SEND, 135, tempdata1}, {SPI_MAIN_MSG_IPC_SEND, 187, tempdata2}, {SPI_MAIN_MSG_RAW_SEND, 1057, tempdata3}, {SPI_MAIN_MSG_IPC_SEND, 35, tempdata4}, {SPI_MAIN_MSG_RAW_SEND, 2079, tempdata5}, {SPI_MAIN_MSG_IPC_SEND, 100, tempdata6}, {SPI_MAIN_MSG_RAW_SEND, 2052, tempdata7} }; if (spi_test_scenario_mode == SPI_TEST_SCENARIO_SLAVE_SENDING) data = databystep[step].buf; /* param is 0 to fix duration. */ /* call this function with param to change timer duration. */ if (param != 0) duration = param; if (spi_test_scenario_mode != SPI_TEST_SCENARIO_SLAVE_SENDING) data = (char *)spi_os_malloc(databystep[step].size); spi_os_memset(data, 0x00, databystep[step].size); SPI_OS_TRACE_MID(("spi _scenario_sending step %d\n", step)); /* generate data to send, It is serial number from 0 to 99 */ for (i = 0 ; i < databystep[step].size ; i++) data[i] = i%100; do { #ifdef SPI_FEATURE_OMAP4430 if (spi_is_ready() != 0) { struct spi_os_msg *msg; SPI_MAIN_MSG_T signal_code; signal_code = databystep[step].type; msg = (struct spi_os_msg *) spi_os_malloc(sizeof(struct spi_os_msg)); msg->signal_code = signal_code; msg->data_length = databystep[step].size; msg->data = data; spi_receive_msg_from_app(msg); spi_main_send_signal(signal_code); break; } else spi_os_sleep(50); #elif defined SPI_FEATURE_SC8800G if (app_send_data_to_spi(databystep[step].type, data, databystep[step].size) == 0) /* send fail */ spi_os_sleep(50); else /* send success */ break; #endif } while (1); if (spi_test_scenario_mode == 
SPI_TEST_SCENARIO_MASTER_SENDING) return; step++; count++; step %= NB_STEP; if (timer_id == 0) timer_id = spi_os_create_timer("spi_test_timer", spi_test_timer_callback, step, duration); if (timer_id == 0) { SPI_OS_TRACE_MID(("spi _scenario_sending invalid timer id\n")); return; } SPI_OS_TRACE_MID(("spi _scenario_sending timer %x\n", timer_id)); SPI_OS_TRACE_MID(("spi _scenario_sending start timer count %d\n", count)); if (count == NB_COUNT) { spi_os_stop_timer(timer_id); return; } value = spi_os_start_timer(timer_id, spi_test_timer_callback, step, duration); SPI_OS_TRACE_MID(("spi _scenario_sending start timer%d\n", value)); #undef NB_STEP } void spi_test_run(enum SPI_TEST_SCENARIO_T scenario, int param) { SPI_OS_TRACE_MID(("spi_test_run %d\n", (int) scenario)); if (scenario == SPI_TEST_SCENARIO_NONE) return; spi_test_scenario_mode = scenario; switch ((int)scenario) { case SPI_TEST_SCENARIO_PHYSICAL: _scenario_physical(); break; case SPI_TEST_SCENARIO_MASTER_SENDING: #ifdef SPI_FEATURE_MASTER _scenario_sending(param); #endif break; case SPI_TEST_SCENARIO_SLAVE_SENDING: #ifdef SPI_FEATURE_SLAVE _scenario_sending(param); #endif break; case SPI_TEST_SCENARIO_COMPLEX_SENDING: _scenario_sending(param); break; case SPI_TEST_SCENARIO_NONE: default: break; } }
gpl-2.0
sndnvaps/linux-1
drivers/input/keyboard/nspire-keypad.c
1266
7008
/* * Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, as * published by the Free Software Foundation. */ #include <linux/input/matrix_keypad.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/input.h> #include <linux/slab.h> #include <linux/clk.h> #include <linux/module.h> #include <linux/of.h> #define KEYPAD_SCAN_MODE 0x00 #define KEYPAD_CNTL 0x04 #define KEYPAD_INT 0x08 #define KEYPAD_INTMSK 0x0C #define KEYPAD_DATA 0x10 #define KEYPAD_GPIO 0x30 #define KEYPAD_UNKNOWN_INT 0x40 #define KEYPAD_UNKNOWN_INT_STS 0x44 #define KEYPAD_BITMASK_COLS 11 #define KEYPAD_BITMASK_ROWS 8 struct nspire_keypad { void __iomem *reg_base; u32 int_mask; struct input_dev *input; struct clk *clk; struct matrix_keymap_data *keymap; int row_shift; /* Maximum delay estimated assuming 33MHz APB */ u32 scan_interval; /* In microseconds (~2000us max) */ u32 row_delay; /* In microseconds (~500us max) */ u16 state[KEYPAD_BITMASK_ROWS]; bool active_low; }; static irqreturn_t nspire_keypad_irq(int irq, void *dev_id) { struct nspire_keypad *keypad = dev_id; struct input_dev *input = keypad->input; unsigned short *keymap = input->keycode; unsigned int code; int row, col; u32 int_sts; u16 state[8]; u16 bits, changed; int_sts = readl(keypad->reg_base + KEYPAD_INT) & keypad->int_mask; if (!int_sts) return IRQ_NONE; memcpy_fromio(state, keypad->reg_base + KEYPAD_DATA, sizeof(state)); for (row = 0; row < KEYPAD_BITMASK_ROWS; row++) { bits = state[row]; if (keypad->active_low) bits = ~bits; changed = bits ^ keypad->state[row]; if (!changed) continue; keypad->state[row] = bits; for (col = 0; col < KEYPAD_BITMASK_COLS; col++) { if (!(changed & (1U << col))) continue; code = MATRIX_SCAN_CODE(row, col, keypad->row_shift); input_event(input, EV_MSC, MSC_SCAN, code); input_report_key(input, 
keymap[code], bits & (1U << col)); } } input_sync(input); writel(0x3, keypad->reg_base + KEYPAD_INT); return IRQ_HANDLED; } static int nspire_keypad_chip_init(struct nspire_keypad *keypad) { unsigned long val = 0, cycles_per_us, delay_cycles, row_delay_cycles; cycles_per_us = (clk_get_rate(keypad->clk) / 1000000); if (cycles_per_us == 0) cycles_per_us = 1; delay_cycles = cycles_per_us * keypad->scan_interval; WARN_ON(delay_cycles >= (1 << 16)); /* Overflow */ delay_cycles &= 0xffff; row_delay_cycles = cycles_per_us * keypad->row_delay; WARN_ON(row_delay_cycles >= (1 << 14)); /* Overflow */ row_delay_cycles &= 0x3fff; val |= 3 << 0; /* Set scan mode to 3 (continuous scan) */ val |= row_delay_cycles << 2; /* Delay between scanning each row */ val |= delay_cycles << 16; /* Delay between scans */ writel(val, keypad->reg_base + KEYPAD_SCAN_MODE); val = (KEYPAD_BITMASK_ROWS & 0xff) | (KEYPAD_BITMASK_COLS & 0xff)<<8; writel(val, keypad->reg_base + KEYPAD_CNTL); /* Enable interrupts */ keypad->int_mask = 1 << 1; writel(keypad->int_mask, keypad->reg_base + KEYPAD_INTMSK); /* Disable GPIO interrupts to prevent hanging on touchpad */ /* Possibly used to detect touchpad events */ writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT); /* Acknowledge existing interrupts */ writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS); return 0; } static int nspire_keypad_open(struct input_dev *input) { struct nspire_keypad *keypad = input_get_drvdata(input); int error; error = clk_prepare_enable(keypad->clk); if (error) return error; error = nspire_keypad_chip_init(keypad); if (error) { clk_disable_unprepare(keypad->clk); return error; } return 0; } static void nspire_keypad_close(struct input_dev *input) { struct nspire_keypad *keypad = input_get_drvdata(input); clk_disable_unprepare(keypad->clk); } static int nspire_keypad_probe(struct platform_device *pdev) { const struct device_node *of_node = pdev->dev.of_node; struct nspire_keypad *keypad; struct input_dev *input; struct resource *res; 
int irq; int error; irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "failed to get keypad irq\n"); return -EINVAL; } keypad = devm_kzalloc(&pdev->dev, sizeof(struct nspire_keypad), GFP_KERNEL); if (!keypad) { dev_err(&pdev->dev, "failed to allocate keypad memory\n"); return -ENOMEM; } keypad->row_shift = get_count_order(KEYPAD_BITMASK_COLS); error = of_property_read_u32(of_node, "scan-interval", &keypad->scan_interval); if (error) { dev_err(&pdev->dev, "failed to get scan-interval\n"); return error; } error = of_property_read_u32(of_node, "row-delay", &keypad->row_delay); if (error) { dev_err(&pdev->dev, "failed to get row-delay\n"); return error; } keypad->active_low = of_property_read_bool(of_node, "active-low"); keypad->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(keypad->clk)) { dev_err(&pdev->dev, "unable to get clock\n"); return PTR_ERR(keypad->clk); } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); keypad->reg_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(keypad->reg_base)) return PTR_ERR(keypad->reg_base); keypad->input = input = devm_input_allocate_device(&pdev->dev); if (!input) { dev_err(&pdev->dev, "failed to allocate input device\n"); return -ENOMEM; } input_set_drvdata(input, keypad); input->id.bustype = BUS_HOST; input->name = "nspire-keypad"; input->open = nspire_keypad_open; input->close = nspire_keypad_close; __set_bit(EV_KEY, input->evbit); __set_bit(EV_REP, input->evbit); input_set_capability(input, EV_MSC, MSC_SCAN); error = matrix_keypad_build_keymap(NULL, NULL, KEYPAD_BITMASK_ROWS, KEYPAD_BITMASK_COLS, NULL, input); if (error) { dev_err(&pdev->dev, "building keymap failed\n"); return error; } error = devm_request_irq(&pdev->dev, irq, nspire_keypad_irq, 0, "nspire_keypad", keypad); if (error) { dev_err(&pdev->dev, "allocate irq %d failed\n", irq); return error; } error = input_register_device(input); if (error) { dev_err(&pdev->dev, "unable to register input device: %d\n", error); return error; } 
platform_set_drvdata(pdev, keypad); dev_dbg(&pdev->dev, "TI-NSPIRE keypad at %pR (scan_interval=%uus, row_delay=%uus%s)\n", res, keypad->row_delay, keypad->scan_interval, keypad->active_low ? ", active_low" : ""); return 0; } static const struct of_device_id nspire_keypad_dt_match[] = { { .compatible = "ti,nspire-keypad" }, { }, }; MODULE_DEVICE_TABLE(of, nspire_keypad_dt_match); static struct platform_driver nspire_keypad_driver = { .driver = { .name = "nspire-keypad", .of_match_table = nspire_keypad_dt_match, }, .probe = nspire_keypad_probe, }; module_platform_driver(nspire_keypad_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("TI-NSPIRE Keypad Driver");
gpl-2.0
RazerRom/kernel_oneplus_msm8974
drivers/gpu/drm/i915/i915_drv.c
1522
29585
/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*- */ /* * * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* */ #include <linux/device.h> #include "drmP.h" #include "drm.h" #include "i915_drm.h" #include "i915_drv.h" #include "intel_drv.h" #include <linux/console.h> #include <linux/module.h> #include "drm_crtc_helper.h" static int i915_modeset __read_mostly = -1; module_param_named(modeset, i915_modeset, int, 0400); MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, " "1=on, -1=force vga console preference [default])"); unsigned int i915_fbpercrtc __always_unused = 0; module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); int i915_panel_ignore_lid __read_mostly = 0; module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600); MODULE_PARM_DESC(panel_ignore_lid, "Override lid status (0=autodetect [default], 1=lid open, " "-1=lid closed)"); unsigned int i915_powersave __read_mostly = 1; module_param_named(powersave, i915_powersave, int, 0600); MODULE_PARM_DESC(powersave, "Enable powersavings, fbc, downclocking, etc. (default: true)"); int i915_semaphores __read_mostly = -1; module_param_named(semaphores, i915_semaphores, int, 0600); MODULE_PARM_DESC(semaphores, "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))"); int i915_enable_rc6 __read_mostly = -1; module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400); MODULE_PARM_DESC(i915_enable_rc6, "Enable power-saving render C-state 6. " "Different stages can be selected via bitmask values " "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). " "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. 
" "default: -1 (use per-chip default)"); int i915_enable_fbc __read_mostly = -1; module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); MODULE_PARM_DESC(i915_enable_fbc, "Enable frame buffer compression for power savings " "(default: -1 (use per-chip default))"); unsigned int i915_lvds_downclock __read_mostly = 0; module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); MODULE_PARM_DESC(lvds_downclock, "Use panel (LVDS/eDP) downclocking for power savings " "(default: false)"); int i915_panel_use_ssc __read_mostly = -1; module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); MODULE_PARM_DESC(lvds_use_ssc, "Use Spread Spectrum Clock with panels [LVDS/eDP] " "(default: auto from VBT)"); int i915_vbt_sdvo_panel_type __read_mostly = -1; module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600); MODULE_PARM_DESC(vbt_sdvo_panel_type, "Override selection of SDVO panel mode in the VBT " "(default: auto)"); static bool i915_try_reset __read_mostly = true; module_param_named(reset, i915_try_reset, bool, 0600); MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)"); bool i915_enable_hangcheck __read_mostly = true; module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644); MODULE_PARM_DESC(enable_hangcheck, "Periodically check GPU activity for detecting hangs. " "WARNING: Disabling this can cause system wide hangs. 
" "(default: true)"); int i915_enable_ppgtt __read_mostly = -1; module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600); MODULE_PARM_DESC(i915_enable_ppgtt, "Enable PPGTT (default: true)"); static struct drm_driver driver; extern int intel_agp_enabled; #define INTEL_VGA_DEVICE(id, info) { \ .class = PCI_BASE_CLASS_DISPLAY << 16, \ .class_mask = 0xff0000, \ .vendor = 0x8086, \ .device = id, \ .subvendor = PCI_ANY_ID, \ .subdevice = PCI_ANY_ID, \ .driver_data = (unsigned long) info } static const struct intel_device_info intel_i830_info = { .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_845g_info = { .gen = 2, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i85x_info = { .gen = 2, .is_i85x = 1, .is_mobile = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i865g_info = { .gen = 2, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i915g_info = { .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i915gm_info = { .gen = 3, .is_mobile = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, .supports_tv = 1, }; static const struct intel_device_info intel_i945g_info = { .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i945gm_info = { .gen = 3, .is_i945gm = 1, .is_mobile = 1, .has_hotplug = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, .supports_tv = 1, }; static const struct intel_device_info intel_i965g_info = { .gen = 4, .is_broadwater = 1, .has_hotplug = 1, .has_overlay = 1, }; static const struct intel_device_info intel_i965gm_info = { .gen = 4, 
.is_crestline = 1, .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1, .has_overlay = 1, .supports_tv = 1, }; static const struct intel_device_info intel_g33_info = { .gen = 3, .is_g33 = 1, .need_gfx_hws = 1, .has_hotplug = 1, .has_overlay = 1, }; static const struct intel_device_info intel_g45_info = { .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, .has_bsd_ring = 1, }; static const struct intel_device_info intel_gm45_info = { .gen = 4, .is_g4x = 1, .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, .supports_tv = 1, .has_bsd_ring = 1, }; static const struct intel_device_info intel_pineview_info = { .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .need_gfx_hws = 1, .has_hotplug = 1, .has_overlay = 1, }; static const struct intel_device_info intel_ironlake_d_info = { .gen = 5, .need_gfx_hws = 1, .has_hotplug = 1, .has_bsd_ring = 1, }; static const struct intel_device_info intel_ironlake_m_info = { .gen = 5, .is_mobile = 1, .need_gfx_hws = 1, .has_hotplug = 1, .has_fbc = 1, .has_bsd_ring = 1, }; static const struct intel_device_info intel_sandybridge_d_info = { .gen = 6, .need_gfx_hws = 1, .has_hotplug = 1, .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, .has_force_wake = 1, }; static const struct intel_device_info intel_sandybridge_m_info = { .gen = 6, .is_mobile = 1, .need_gfx_hws = 1, .has_hotplug = 1, .has_fbc = 1, .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, .has_force_wake = 1, }; static const struct intel_device_info intel_ivybridge_d_info = { .is_ivybridge = 1, .gen = 7, .need_gfx_hws = 1, .has_hotplug = 1, .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, .has_force_wake = 1, }; static const struct intel_device_info intel_ivybridge_m_info = { .is_ivybridge = 1, .gen = 7, .is_mobile = 1, .need_gfx_hws = 1, .has_hotplug = 1, .has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */ .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, .has_force_wake = 1, }; static const 
struct pci_device_id pciidlist[] = { /* aka */ INTEL_VGA_DEVICE(0x3577, &intel_i830_info), /* I830_M */ INTEL_VGA_DEVICE(0x2562, &intel_845g_info), /* 845_G */ INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), /* I855_GM */ INTEL_VGA_DEVICE(0x358e, &intel_i85x_info), INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), /* I865_G */ INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), /* I915_G */ INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), /* E7221_G */ INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), /* I915_GM */ INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), /* I945_G */ INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), /* I945_GM */ INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), /* I945_GME */ INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), /* I946_GZ */ INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), /* G35_G */ INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), /* I965_Q */ INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), /* I965_G */ INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), /* Q35_G */ INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), /* G33_G */ INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), /* Q33_G */ INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), /* I965_GM */ INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), /* I965_GME */ INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), /* GM45_G */ INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), /* IGD_E_G */ INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), /* Q45_G */ INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */ INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */ INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */ INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */ INTEL_VGA_DEVICE(0xa001, &intel_pineview_info), INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info), INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info), INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info), INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info), 
INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info), INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info), INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info), INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */ INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */ INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */ INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */ INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */ INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */ {0, 0, 0} }; #if defined(CONFIG_DRM_I915_KMS) MODULE_DEVICE_TABLE(pci, pciidlist); #endif #define INTEL_PCH_DEVICE_ID_MASK 0xff00 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 void intel_detect_pch(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct pci_dev *pch; /* * The reason to probe ISA bridge instead of Dev31:Fun0 is to * make graphics device passthrough work easy for VMM, that only * need to expose ISA bridge to let driver know the real hardware * underneath. This is a requirement from virtualization team. 
*/ pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); if (pch) { if (pch->vendor == PCI_VENDOR_ID_INTEL) { int id; id = pch->device & INTEL_PCH_DEVICE_ID_MASK; if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_IBX; DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_CPT; DRM_DEBUG_KMS("Found CougarPoint PCH\n"); } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { /* PantherPoint is CPT compatible */ dev_priv->pch_type = PCH_CPT; DRM_DEBUG_KMS("Found PatherPoint PCH\n"); } } pci_dev_put(pch); } } void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) { int count; count = 0; while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) udelay(10); I915_WRITE_NOTRACE(FORCEWAKE, 1); POSTING_READ(FORCEWAKE); count = 0; while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0) udelay(10); } void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) { int count; count = 0; while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1)) udelay(10); I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1); POSTING_READ(FORCEWAKE_MT); count = 0; while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0) udelay(10); } /* * Generally this is called implicitly by the register read function. However, * if some sequence requires the GT to not power down then this function should * be called at the beginning of the sequence followed by a call to * gen6_gt_force_wake_put() at the end of the sequence. 
*/ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) { unsigned long irqflags; spin_lock_irqsave(&dev_priv->gt_lock, irqflags); if (dev_priv->forcewake_count++ == 0) dev_priv->display.force_wake_get(dev_priv); spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); } static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) { u32 gtfifodbg; gtfifodbg = I915_READ_NOTRACE(GTFIFODBG); if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK, "MMIO read or write has been dropped %x\n", gtfifodbg)) I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK); } void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE, 0); /* The below doubles as a POSTING_READ */ gen6_gt_check_fifodbg(dev_priv); } void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0); /* The below doubles as a POSTING_READ */ gen6_gt_check_fifodbg(dev_priv); } /* * see gen6_gt_force_wake_get() */ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) { unsigned long irqflags; spin_lock_irqsave(&dev_priv->gt_lock, irqflags); if (--dev_priv->forcewake_count == 0) dev_priv->display.force_wake_put(dev_priv); spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); } int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) { int ret = 0; if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { int loop = 500; u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { udelay(10); fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); } if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES)) ++ret; dev_priv->gt_fifo_count = fifo; } dev_priv->gt_fifo_count--; return ret; } static int i915_drm_freeze(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; drm_kms_helper_poll_disable(dev); pci_save_state(dev->pdev); /* If KMS is active, we do the leavevt stuff here */ if (drm_core_check_feature(dev, DRIVER_MODESET)) { int 
error = i915_gem_idle(dev); if (error) { dev_err(&dev->pdev->dev, "GEM idle failed, resume might fail\n"); return error; } drm_irq_uninstall(dev); } i915_save_state(dev); intel_opregion_fini(dev); /* Modeset on resume, not lid events */ dev_priv->modeset_on_lid = 0; console_lock(); intel_fbdev_set_suspend(dev, 1); console_unlock(); return 0; } int i915_suspend(struct drm_device *dev, pm_message_t state) { int error; if (!dev || !dev->dev_private) { DRM_ERROR("dev: %p\n", dev); DRM_ERROR("DRM not initialized, aborting suspend.\n"); return -ENODEV; } if (state.event == PM_EVENT_PRETHAW) return 0; if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; error = i915_drm_freeze(dev); if (error) return error; if (state.event == PM_EVENT_SUSPEND) { /* Shut down the device */ pci_disable_device(dev->pdev); pci_set_power_state(dev->pdev, PCI_D3hot); } return 0; } static int i915_drm_thaw(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int error = 0; if (drm_core_check_feature(dev, DRIVER_MODESET)) { mutex_lock(&dev->struct_mutex); i915_gem_restore_gtt_mappings(dev); mutex_unlock(&dev->struct_mutex); } i915_restore_state(dev); intel_opregion_setup(dev); /* KMS EnterVT equivalent */ if (drm_core_check_feature(dev, DRIVER_MODESET)) { mutex_lock(&dev->struct_mutex); dev_priv->mm.suspended = 0; error = i915_gem_init_hw(dev); mutex_unlock(&dev->struct_mutex); if (HAS_PCH_SPLIT(dev)) ironlake_init_pch_refclk(dev); drm_mode_config_reset(dev); drm_irq_install(dev); /* Resume the modeset for every activated CRTC */ mutex_lock(&dev->mode_config.mutex); drm_helper_resume_force_mode(dev); mutex_unlock(&dev->mode_config.mutex); if (IS_IRONLAKE_M(dev)) ironlake_enable_rc6(dev); } intel_opregion_init(dev); dev_priv->modeset_on_lid = 0; console_lock(); intel_fbdev_set_suspend(dev, 0); console_unlock(); return error; } int i915_resume(struct drm_device *dev) { int ret; if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; if 
(pci_enable_device(dev->pdev)) return -EIO; pci_set_master(dev->pdev); ret = i915_drm_thaw(dev); if (ret) return ret; drm_kms_helper_poll_enable(dev); return 0; } static int i8xx_do_reset(struct drm_device *dev, u8 flags) { struct drm_i915_private *dev_priv = dev->dev_private; if (IS_I85X(dev)) return -ENODEV; I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830); POSTING_READ(D_STATE); if (IS_I830(dev) || IS_845G(dev)) { I915_WRITE(DEBUG_RESET_I830, DEBUG_RESET_DISPLAY | DEBUG_RESET_RENDER | DEBUG_RESET_FULL); POSTING_READ(DEBUG_RESET_I830); msleep(1); I915_WRITE(DEBUG_RESET_I830, 0); POSTING_READ(DEBUG_RESET_I830); } msleep(1); I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830); POSTING_READ(D_STATE); return 0; } static int i965_reset_complete(struct drm_device *dev) { u8 gdrst; pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); return gdrst & 0x1; } static int i965_do_reset(struct drm_device *dev, u8 flags) { u8 gdrst; /* * Set the domains we want to reset (GRDOM/bits 2 and 3) as * well as the reset bit (GR/bit 0). Setting the GR bit * triggers the reset; when done, the hardware will clear it. 
*/ pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1); return wait_for(i965_reset_complete(dev), 500); } static int ironlake_do_reset(struct drm_device *dev, u8 flags) { struct drm_i915_private *dev_priv = dev->dev_private; u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1); return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); } static int gen6_do_reset(struct drm_device *dev, u8 flags) { struct drm_i915_private *dev_priv = dev->dev_private; int ret; unsigned long irqflags; /* Hold gt_lock across reset to prevent any register access * with forcewake not set correctly */ spin_lock_irqsave(&dev_priv->gt_lock, irqflags); /* Reset the chip */ /* GEN6_GDRST is not in the gt power well, no need to check * for fifo space for the write or forcewake the chip for * the read */ I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL); /* Spin waiting for the device to ack the reset request */ ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); /* If reset with a user forcewake, try to restore, otherwise turn it off */ if (dev_priv->forcewake_count) dev_priv->display.force_wake_get(dev_priv); else dev_priv->display.force_wake_put(dev_priv); /* Restore fifo count */ dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); return ret; } /** * i915_reset - reset chip after a hang * @dev: drm device to reset * @flags: reset domains * * Reset the chip. Useful if a hang is detected. Returns zero on successful * reset or otherwise an error code. 
* * Procedure is fairly simple: * - reset the chip using the reset reg * - re-init context state * - re-init hardware status page * - re-init ring buffer * - re-init interrupt state * - re-init display */ int i915_reset(struct drm_device *dev, u8 flags) { drm_i915_private_t *dev_priv = dev->dev_private; /* * We really should only reset the display subsystem if we actually * need to */ bool need_display = true; int ret; if (!i915_try_reset) return 0; if (!mutex_trylock(&dev->struct_mutex)) return -EBUSY; i915_gem_reset(dev); ret = -ENODEV; if (get_seconds() - dev_priv->last_gpu_reset < 5) { DRM_ERROR("GPU hanging too fast, declaring wedged!\n"); } else switch (INTEL_INFO(dev)->gen) { case 7: case 6: ret = gen6_do_reset(dev, flags); break; case 5: ret = ironlake_do_reset(dev, flags); break; case 4: ret = i965_do_reset(dev, flags); break; case 2: ret = i8xx_do_reset(dev, flags); break; } dev_priv->last_gpu_reset = get_seconds(); if (ret) { DRM_ERROR("Failed to reset chip.\n"); mutex_unlock(&dev->struct_mutex); return ret; } /* Ok, now get things going again... */ /* * Everything depends on having the GTT running, so we need to start * there. Fortunately we don't need to do this unless we reset the * chip at a PCI level. * * Next we need to restore the context, but we don't use those * yet either... * * Ring buffer needs to be re-initialized in the KMS case, or if X * was running at the time of the reset (i.e. we weren't VT * switched away). 
*/ if (drm_core_check_feature(dev, DRIVER_MODESET) || !dev_priv->mm.suspended) { dev_priv->mm.suspended = 0; i915_gem_init_swizzling(dev); dev_priv->ring[RCS].init(&dev_priv->ring[RCS]); if (HAS_BSD(dev)) dev_priv->ring[VCS].init(&dev_priv->ring[VCS]); if (HAS_BLT(dev)) dev_priv->ring[BCS].init(&dev_priv->ring[BCS]); i915_gem_init_ppgtt(dev); mutex_unlock(&dev->struct_mutex); drm_irq_uninstall(dev); drm_mode_config_reset(dev); drm_irq_install(dev); mutex_lock(&dev->struct_mutex); } mutex_unlock(&dev->struct_mutex); /* * Perform a full modeset as on later generations, e.g. Ironlake, we may * need to retrain the display link and cannot just restore the register * values. */ if (need_display) { mutex_lock(&dev->mode_config.mutex); drm_helper_resume_force_mode(dev); mutex_unlock(&dev->mode_config.mutex); } return 0; } static int __devinit i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { /* Only bind to function 0 of the device. Early generations * used function 1 as a placeholder for multi-head. This causes * us confusion instead, especially on the systems where both * functions have the same PCI-ID! 
*/ if (PCI_FUNC(pdev->devfn)) return -ENODEV; return drm_get_pci_dev(pdev, ent, &driver); } static void i915_pci_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); drm_put_dev(dev); } static int i915_pm_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); int error; if (!drm_dev || !drm_dev->dev_private) { dev_err(dev, "DRM not initialized, aborting suspend.\n"); return -ENODEV; } if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; error = i915_drm_freeze(drm_dev); if (error) return error; pci_disable_device(pdev); pci_set_power_state(pdev, PCI_D3hot); return 0; } static int i915_pm_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); return i915_resume(drm_dev); } static int i915_pm_freeze(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); if (!drm_dev || !drm_dev->dev_private) { dev_err(dev, "DRM not initialized, aborting suspend.\n"); return -ENODEV; } return i915_drm_freeze(drm_dev); } static int i915_pm_thaw(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); return i915_drm_thaw(drm_dev); } static int i915_pm_poweroff(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); return i915_drm_freeze(drm_dev); } static const struct dev_pm_ops i915_pm_ops = { .suspend = i915_pm_suspend, .resume = i915_pm_resume, .freeze = i915_pm_freeze, .thaw = i915_pm_thaw, .poweroff = i915_pm_poweroff, .restore = i915_pm_resume, }; static struct vm_operations_struct i915_gem_vm_ops = { .fault = i915_gem_fault, .open = drm_gem_vm_open, .close = drm_gem_vm_close, }; static const struct file_operations i915_driver_fops = { .owner = THIS_MODULE, .open = drm_open, .release = drm_release, .unlocked_ioctl = drm_ioctl, .mmap = 
drm_gem_mmap, .poll = drm_poll, .fasync = drm_fasync, .read = drm_read, #ifdef CONFIG_COMPAT .compat_ioctl = i915_compat_ioctl, #endif .llseek = noop_llseek, }; static struct drm_driver driver = { /* Don't use MTRRs here; the Xserver or userspace app should * deal with them for Intel hardware. */ .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM, .load = i915_driver_load, .unload = i915_driver_unload, .open = i915_driver_open, .lastclose = i915_driver_lastclose, .preclose = i915_driver_preclose, .postclose = i915_driver_postclose, /* Used in place of i915_pm_ops for non-DRIVER_MODESET */ .suspend = i915_suspend, .resume = i915_resume, .device_is_agp = i915_driver_device_is_agp, .reclaim_buffers = drm_core_reclaim_buffers, .master_create = i915_master_create, .master_destroy = i915_master_destroy, #if defined(CONFIG_DEBUG_FS) .debugfs_init = i915_debugfs_init, .debugfs_cleanup = i915_debugfs_cleanup, #endif .gem_init_object = i915_gem_init_object, .gem_free_object = i915_gem_free_object, .gem_vm_ops = &i915_gem_vm_ops, .dumb_create = i915_gem_dumb_create, .dumb_map_offset = i915_gem_mmap_gtt, .dumb_destroy = i915_gem_dumb_destroy, .ioctls = i915_ioctls, .fops = &i915_driver_fops, .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, .major = DRIVER_MAJOR, .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, }; static struct pci_driver i915_pci_driver = { .name = DRIVER_NAME, .id_table = pciidlist, .probe = i915_pci_probe, .remove = i915_pci_remove, .driver.pm = &i915_pm_ops, }; static int __init i915_init(void) { if (!intel_agp_enabled) { DRM_ERROR("drm/i915 can't work without intel_agp module!\n"); return -ENODEV; } driver.num_ioctls = i915_max_ioctl; /* * If CONFIG_DRM_I915_KMS is set, default to KMS unless * explicitly disabled with the module pararmeter. * * Otherwise, just follow the parameter (defaulting to off). 
* * Allow optional vga_text_mode_force boot option to override * the default behavior. */ #if defined(CONFIG_DRM_I915_KMS) if (i915_modeset != 0) driver.driver_features |= DRIVER_MODESET; #endif if (i915_modeset == 1) driver.driver_features |= DRIVER_MODESET; #ifdef CONFIG_VGA_CONSOLE if (vgacon_text_force() && i915_modeset == -1) driver.driver_features &= ~DRIVER_MODESET; #endif if (!(driver.driver_features & DRIVER_MODESET)) driver.get_vblank_timestamp = NULL; return drm_pci_init(&driver, &i915_pci_driver); } static void __exit i915_exit(void) { drm_pci_exit(&driver, &i915_pci_driver); } module_init(i915_init); module_exit(i915_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL and additional rights"); /* We give fast paths for the really cool registers */ #define NEEDS_FORCE_WAKE(dev_priv, reg) \ ((HAS_FORCE_WAKE((dev_priv)->dev)) && \ ((reg) < 0x40000) && \ ((reg) != FORCEWAKE)) #define __i915_read(x, y) \ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ u##x val = 0; \ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ unsigned long irqflags; \ spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ if (dev_priv->forcewake_count == 0) \ dev_priv->display.force_wake_get(dev_priv); \ val = read##y(dev_priv->regs + reg); \ if (dev_priv->forcewake_count == 0) \ dev_priv->display.force_wake_put(dev_priv); \ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ } else { \ val = read##y(dev_priv->regs + reg); \ } \ trace_i915_reg_rw(false, reg, val, sizeof(val)); \ return val; \ } __i915_read(8, b) __i915_read(16, w) __i915_read(32, l) __i915_read(64, q) #undef __i915_read #define __i915_write(x, y) \ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ u32 __fifo_ret = 0; \ trace_i915_reg_rw(true, reg, val, sizeof(val)); \ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ } \ write##y(val, dev_priv->regs + reg); \ if (unlikely(__fifo_ret)) { \ 
gen6_gt_check_fifodbg(dev_priv); \ } \ } __i915_write(8, b) __i915_write(16, w) __i915_write(32, l) __i915_write(64, q) #undef __i915_write
gpl-2.0
lowtraxx/kernel
sound/soc/codecs/wm5102.c
1522
53389
/* * wm5102.c -- WM5102 ALSA SoC Audio driver * * Copyright 2012 Wolfson Microelectronics plc * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/jack.h> #include <sound/initval.h> #include <sound/tlv.h> #include <linux/mfd/arizona/core.h> #include <linux/mfd/arizona/registers.h> #include "arizona.h" #include "wm5102.h" #include "wm_adsp.h" struct wm5102_priv { struct arizona_priv core; struct arizona_fll fll[2]; }; static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0); static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0); static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0); static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0); static DECLARE_TLV_DB_SCALE(ng_tlv, -10200, 600, 0); static const struct wm_adsp_region wm5102_dsp1_regions[] = { { .type = WMFW_ADSP2_PM, .base = 0x100000 }, { .type = WMFW_ADSP2_ZM, .base = 0x180000 }, { .type = WMFW_ADSP2_XM, .base = 0x190000 }, { .type = WMFW_ADSP2_YM, .base = 0x1a8000 }, }; static const struct reg_default wm5102_sysclk_reva_patch[] = { { 0x3000, 0x2225 }, { 0x3001, 0x3a03 }, { 0x3002, 0x0225 }, { 0x3003, 0x0801 }, { 0x3004, 0x6249 }, { 0x3005, 0x0c04 }, { 0x3006, 0x0225 }, { 0x3007, 0x5901 }, { 0x3008, 0xe249 }, { 0x3009, 0x030d }, { 0x300a, 0x0249 }, { 0x300b, 0x2c01 }, { 0x300c, 0xe249 }, { 0x300d, 0x4342 }, { 0x300e, 0xe249 }, { 0x300f, 0x73c0 }, { 0x3010, 0x4249 }, { 0x3011, 0x0c00 }, { 0x3012, 0x0225 }, { 0x3013, 0x1f01 }, { 0x3014, 0x0225 }, { 0x3015, 0x1e01 }, { 0x3016, 0x0225 }, { 0x3017, 0xfa00 }, { 0x3018, 
0x0000 }, { 0x3019, 0xf000 }, { 0x301a, 0x0000 }, { 0x301b, 0xf000 }, { 0x301c, 0x0000 }, { 0x301d, 0xf000 }, { 0x301e, 0x0000 }, { 0x301f, 0xf000 }, { 0x3020, 0x0000 }, { 0x3021, 0xf000 }, { 0x3022, 0x0000 }, { 0x3023, 0xf000 }, { 0x3024, 0x0000 }, { 0x3025, 0xf000 }, { 0x3026, 0x0000 }, { 0x3027, 0xf000 }, { 0x3028, 0x0000 }, { 0x3029, 0xf000 }, { 0x302a, 0x0000 }, { 0x302b, 0xf000 }, { 0x302c, 0x0000 }, { 0x302d, 0xf000 }, { 0x302e, 0x0000 }, { 0x302f, 0xf000 }, { 0x3030, 0x0225 }, { 0x3031, 0x1a01 }, { 0x3032, 0x0225 }, { 0x3033, 0x1e00 }, { 0x3034, 0x0225 }, { 0x3035, 0x1f00 }, { 0x3036, 0x6225 }, { 0x3037, 0xf800 }, { 0x3038, 0x0000 }, { 0x3039, 0xf000 }, { 0x303a, 0x0000 }, { 0x303b, 0xf000 }, { 0x303c, 0x0000 }, { 0x303d, 0xf000 }, { 0x303e, 0x0000 }, { 0x303f, 0xf000 }, { 0x3040, 0x2226 }, { 0x3041, 0x3a03 }, { 0x3042, 0x0226 }, { 0x3043, 0x0801 }, { 0x3044, 0x6249 }, { 0x3045, 0x0c06 }, { 0x3046, 0x0226 }, { 0x3047, 0x5901 }, { 0x3048, 0xe249 }, { 0x3049, 0x030d }, { 0x304a, 0x0249 }, { 0x304b, 0x2c01 }, { 0x304c, 0xe249 }, { 0x304d, 0x4342 }, { 0x304e, 0xe249 }, { 0x304f, 0x73c0 }, { 0x3050, 0x4249 }, { 0x3051, 0x0c00 }, { 0x3052, 0x0226 }, { 0x3053, 0x1f01 }, { 0x3054, 0x0226 }, { 0x3055, 0x1e01 }, { 0x3056, 0x0226 }, { 0x3057, 0xfa00 }, { 0x3058, 0x0000 }, { 0x3059, 0xf000 }, { 0x305a, 0x0000 }, { 0x305b, 0xf000 }, { 0x305c, 0x0000 }, { 0x305d, 0xf000 }, { 0x305e, 0x0000 }, { 0x305f, 0xf000 }, { 0x3060, 0x0000 }, { 0x3061, 0xf000 }, { 0x3062, 0x0000 }, { 0x3063, 0xf000 }, { 0x3064, 0x0000 }, { 0x3065, 0xf000 }, { 0x3066, 0x0000 }, { 0x3067, 0xf000 }, { 0x3068, 0x0000 }, { 0x3069, 0xf000 }, { 0x306a, 0x0000 }, { 0x306b, 0xf000 }, { 0x306c, 0x0000 }, { 0x306d, 0xf000 }, { 0x306e, 0x0000 }, { 0x306f, 0xf000 }, { 0x3070, 0x0226 }, { 0x3071, 0x1a01 }, { 0x3072, 0x0226 }, { 0x3073, 0x1e00 }, { 0x3074, 0x0226 }, { 0x3075, 0x1f00 }, { 0x3076, 0x6226 }, { 0x3077, 0xf800 }, { 0x3078, 0x0000 }, { 0x3079, 0xf000 }, { 0x307a, 0x0000 }, { 0x307b, 0xf000 }, { 0x307c, 
0x0000 }, { 0x307d, 0xf000 }, { 0x307e, 0x0000 }, { 0x307f, 0xf000 }, { 0x3080, 0x2227 }, { 0x3081, 0x3a03 }, { 0x3082, 0x0227 }, { 0x3083, 0x0801 }, { 0x3084, 0x6255 }, { 0x3085, 0x0c04 }, { 0x3086, 0x0227 }, { 0x3087, 0x5901 }, { 0x3088, 0xe255 }, { 0x3089, 0x030d }, { 0x308a, 0x0255 }, { 0x308b, 0x2c01 }, { 0x308c, 0xe255 }, { 0x308d, 0x4342 }, { 0x308e, 0xe255 }, { 0x308f, 0x73c0 }, { 0x3090, 0x4255 }, { 0x3091, 0x0c00 }, { 0x3092, 0x0227 }, { 0x3093, 0x1f01 }, { 0x3094, 0x0227 }, { 0x3095, 0x1e01 }, { 0x3096, 0x0227 }, { 0x3097, 0xfa00 }, { 0x3098, 0x0000 }, { 0x3099, 0xf000 }, { 0x309a, 0x0000 }, { 0x309b, 0xf000 }, { 0x309c, 0x0000 }, { 0x309d, 0xf000 }, { 0x309e, 0x0000 }, { 0x309f, 0xf000 }, { 0x30a0, 0x0000 }, { 0x30a1, 0xf000 }, { 0x30a2, 0x0000 }, { 0x30a3, 0xf000 }, { 0x30a4, 0x0000 }, { 0x30a5, 0xf000 }, { 0x30a6, 0x0000 }, { 0x30a7, 0xf000 }, { 0x30a8, 0x0000 }, { 0x30a9, 0xf000 }, { 0x30aa, 0x0000 }, { 0x30ab, 0xf000 }, { 0x30ac, 0x0000 }, { 0x30ad, 0xf000 }, { 0x30ae, 0x0000 }, { 0x30af, 0xf000 }, { 0x30b0, 0x0227 }, { 0x30b1, 0x1a01 }, { 0x30b2, 0x0227 }, { 0x30b3, 0x1e00 }, { 0x30b4, 0x0227 }, { 0x30b5, 0x1f00 }, { 0x30b6, 0x6227 }, { 0x30b7, 0xf800 }, { 0x30b8, 0x0000 }, { 0x30b9, 0xf000 }, { 0x30ba, 0x0000 }, { 0x30bb, 0xf000 }, { 0x30bc, 0x0000 }, { 0x30bd, 0xf000 }, { 0x30be, 0x0000 }, { 0x30bf, 0xf000 }, { 0x30c0, 0x2228 }, { 0x30c1, 0x3a03 }, { 0x30c2, 0x0228 }, { 0x30c3, 0x0801 }, { 0x30c4, 0x6255 }, { 0x30c5, 0x0c06 }, { 0x30c6, 0x0228 }, { 0x30c7, 0x5901 }, { 0x30c8, 0xe255 }, { 0x30c9, 0x030d }, { 0x30ca, 0x0255 }, { 0x30cb, 0x2c01 }, { 0x30cc, 0xe255 }, { 0x30cd, 0x4342 }, { 0x30ce, 0xe255 }, { 0x30cf, 0x73c0 }, { 0x30d0, 0x4255 }, { 0x30d1, 0x0c00 }, { 0x30d2, 0x0228 }, { 0x30d3, 0x1f01 }, { 0x30d4, 0x0228 }, { 0x30d5, 0x1e01 }, { 0x30d6, 0x0228 }, { 0x30d7, 0xfa00 }, { 0x30d8, 0x0000 }, { 0x30d9, 0xf000 }, { 0x30da, 0x0000 }, { 0x30db, 0xf000 }, { 0x30dc, 0x0000 }, { 0x30dd, 0xf000 }, { 0x30de, 0x0000 }, { 0x30df, 0xf000 }, { 0x30e0, 
0x0000 }, { 0x30e1, 0xf000 }, { 0x30e2, 0x0000 }, { 0x30e3, 0xf000 }, { 0x30e4, 0x0000 }, { 0x30e5, 0xf000 }, { 0x30e6, 0x0000 }, { 0x30e7, 0xf000 }, { 0x30e8, 0x0000 }, { 0x30e9, 0xf000 }, { 0x30ea, 0x0000 }, { 0x30eb, 0xf000 }, { 0x30ec, 0x0000 }, { 0x30ed, 0xf000 }, { 0x30ee, 0x0000 }, { 0x30ef, 0xf000 }, { 0x30f0, 0x0228 }, { 0x30f1, 0x1a01 }, { 0x30f2, 0x0228 }, { 0x30f3, 0x1e00 }, { 0x30f4, 0x0228 }, { 0x30f5, 0x1f00 }, { 0x30f6, 0x6228 }, { 0x30f7, 0xf800 }, { 0x30f8, 0x0000 }, { 0x30f9, 0xf000 }, { 0x30fa, 0x0000 }, { 0x30fb, 0xf000 }, { 0x30fc, 0x0000 }, { 0x30fd, 0xf000 }, { 0x30fe, 0x0000 }, { 0x30ff, 0xf000 }, { 0x3100, 0x222b }, { 0x3101, 0x3a03 }, { 0x3102, 0x222b }, { 0x3103, 0x5803 }, { 0x3104, 0xe26f }, { 0x3105, 0x030d }, { 0x3106, 0x626f }, { 0x3107, 0x2c01 }, { 0x3108, 0xe26f }, { 0x3109, 0x4342 }, { 0x310a, 0xe26f }, { 0x310b, 0x73c0 }, { 0x310c, 0x026f }, { 0x310d, 0x0c00 }, { 0x310e, 0x022b }, { 0x310f, 0x1f01 }, { 0x3110, 0x022b }, { 0x3111, 0x1e01 }, { 0x3112, 0x022b }, { 0x3113, 0xfa00 }, { 0x3114, 0x0000 }, { 0x3115, 0xf000 }, { 0x3116, 0x0000 }, { 0x3117, 0xf000 }, { 0x3118, 0x0000 }, { 0x3119, 0xf000 }, { 0x311a, 0x0000 }, { 0x311b, 0xf000 }, { 0x311c, 0x0000 }, { 0x311d, 0xf000 }, { 0x311e, 0x0000 }, { 0x311f, 0xf000 }, { 0x3120, 0x022b }, { 0x3121, 0x0a01 }, { 0x3122, 0x022b }, { 0x3123, 0x1e00 }, { 0x3124, 0x022b }, { 0x3125, 0x1f00 }, { 0x3126, 0x622b }, { 0x3127, 0xf800 }, { 0x3128, 0x0000 }, { 0x3129, 0xf000 }, { 0x312a, 0x0000 }, { 0x312b, 0xf000 }, { 0x312c, 0x0000 }, { 0x312d, 0xf000 }, { 0x312e, 0x0000 }, { 0x312f, 0xf000 }, { 0x3130, 0x0000 }, { 0x3131, 0xf000 }, { 0x3132, 0x0000 }, { 0x3133, 0xf000 }, { 0x3134, 0x0000 }, { 0x3135, 0xf000 }, { 0x3136, 0x0000 }, { 0x3137, 0xf000 }, { 0x3138, 0x0000 }, { 0x3139, 0xf000 }, { 0x313a, 0x0000 }, { 0x313b, 0xf000 }, { 0x313c, 0x0000 }, { 0x313d, 0xf000 }, { 0x313e, 0x0000 }, { 0x313f, 0xf000 }, { 0x3140, 0x0000 }, { 0x3141, 0xf000 }, { 0x3142, 0x0000 }, { 0x3143, 0xf000 }, { 0x3144, 
0x0000 }, { 0x3145, 0xf000 }, { 0x3146, 0x0000 }, { 0x3147, 0xf000 }, { 0x3148, 0x0000 }, { 0x3149, 0xf000 }, { 0x314a, 0x0000 }, { 0x314b, 0xf000 }, { 0x314c, 0x0000 }, { 0x314d, 0xf000 }, { 0x314e, 0x0000 }, { 0x314f, 0xf000 }, { 0x3150, 0x0000 }, { 0x3151, 0xf000 }, { 0x3152, 0x0000 }, { 0x3153, 0xf000 }, { 0x3154, 0x0000 }, { 0x3155, 0xf000 }, { 0x3156, 0x0000 }, { 0x3157, 0xf000 }, { 0x3158, 0x0000 }, { 0x3159, 0xf000 }, { 0x315a, 0x0000 }, { 0x315b, 0xf000 }, { 0x315c, 0x0000 }, { 0x315d, 0xf000 }, { 0x315e, 0x0000 }, { 0x315f, 0xf000 }, { 0x3160, 0x0000 }, { 0x3161, 0xf000 }, { 0x3162, 0x0000 }, { 0x3163, 0xf000 }, { 0x3164, 0x0000 }, { 0x3165, 0xf000 }, { 0x3166, 0x0000 }, { 0x3167, 0xf000 }, { 0x3168, 0x0000 }, { 0x3169, 0xf000 }, { 0x316a, 0x0000 }, { 0x316b, 0xf000 }, { 0x316c, 0x0000 }, { 0x316d, 0xf000 }, { 0x316e, 0x0000 }, { 0x316f, 0xf000 }, { 0x3170, 0x0000 }, { 0x3171, 0xf000 }, { 0x3172, 0x0000 }, { 0x3173, 0xf000 }, { 0x3174, 0x0000 }, { 0x3175, 0xf000 }, { 0x3176, 0x0000 }, { 0x3177, 0xf000 }, { 0x3178, 0x0000 }, { 0x3179, 0xf000 }, { 0x317a, 0x0000 }, { 0x317b, 0xf000 }, { 0x317c, 0x0000 }, { 0x317d, 0xf000 }, { 0x317e, 0x0000 }, { 0x317f, 0xf000 }, { 0x3180, 0x2001 }, { 0x3181, 0xf101 }, { 0x3182, 0x0000 }, { 0x3183, 0xf000 }, { 0x3184, 0x0000 }, { 0x3185, 0xf000 }, { 0x3186, 0x0000 }, { 0x3187, 0xf000 }, { 0x3188, 0x0000 }, { 0x3189, 0xf000 }, { 0x318a, 0x0000 }, { 0x318b, 0xf000 }, { 0x318c, 0x0000 }, { 0x318d, 0xf000 }, { 0x318e, 0x0000 }, { 0x318f, 0xf000 }, { 0x3190, 0x0000 }, { 0x3191, 0xf000 }, { 0x3192, 0x0000 }, { 0x3193, 0xf000 }, { 0x3194, 0x0000 }, { 0x3195, 0xf000 }, { 0x3196, 0x0000 }, { 0x3197, 0xf000 }, { 0x3198, 0x0000 }, { 0x3199, 0xf000 }, { 0x319a, 0x0000 }, { 0x319b, 0xf000 }, { 0x319c, 0x0000 }, { 0x319d, 0xf000 }, { 0x319e, 0x0000 }, { 0x319f, 0xf000 }, { 0x31a0, 0x0000 }, { 0x31a1, 0xf000 }, { 0x31a2, 0x0000 }, { 0x31a3, 0xf000 }, { 0x31a4, 0x0000 }, { 0x31a5, 0xf000 }, { 0x31a6, 0x0000 }, { 0x31a7, 0xf000 }, { 0x31a8, 
0x0000 },
	{ 0x31a9, 0xf000 }, { 0x31aa, 0x0000 }, { 0x31ab, 0xf000 },
	{ 0x31ac, 0x0000 }, { 0x31ad, 0xf000 }, { 0x31ae, 0x0000 }, { 0x31af, 0xf000 },
	{ 0x31b0, 0x0000 }, { 0x31b1, 0xf000 }, { 0x31b2, 0x0000 }, { 0x31b3, 0xf000 },
	{ 0x31b4, 0x0000 }, { 0x31b5, 0xf000 }, { 0x31b6, 0x0000 }, { 0x31b7, 0xf000 },
	{ 0x31b8, 0x0000 }, { 0x31b9, 0xf000 }, { 0x31ba, 0x0000 }, { 0x31bb, 0xf000 },
	{ 0x31bc, 0x0000 }, { 0x31bd, 0xf000 }, { 0x31be, 0x0000 }, { 0x31bf, 0xf000 },
	{ 0x31c0, 0x0000 }, { 0x31c1, 0xf000 }, { 0x31c2, 0x0000 }, { 0x31c3, 0xf000 },
	{ 0x31c4, 0x0000 }, { 0x31c5, 0xf000 }, { 0x31c6, 0x0000 }, { 0x31c7, 0xf000 },
	{ 0x31c8, 0x0000 }, { 0x31c9, 0xf000 }, { 0x31ca, 0x0000 }, { 0x31cb, 0xf000 },
	{ 0x31cc, 0x0000 }, { 0x31cd, 0xf000 }, { 0x31ce, 0x0000 }, { 0x31cf, 0xf000 },
	{ 0x31d0, 0x0000 }, { 0x31d1, 0xf000 }, { 0x31d2, 0x0000 }, { 0x31d3, 0xf000 },
	{ 0x31d4, 0x0000 }, { 0x31d5, 0xf000 }, { 0x31d6, 0x0000 }, { 0x31d7, 0xf000 },
	{ 0x31d8, 0x0000 }, { 0x31d9, 0xf000 }, { 0x31da, 0x0000 }, { 0x31db, 0xf000 },
	{ 0x31dc, 0x0000 }, { 0x31dd, 0xf000 }, { 0x31de, 0x0000 }, { 0x31df, 0xf000 },
	{ 0x31e0, 0x0000 }, { 0x31e1, 0xf000 }, { 0x31e2, 0x0000 }, { 0x31e3, 0xf000 },
	{ 0x31e4, 0x0000 }, { 0x31e5, 0xf000 }, { 0x31e6, 0x0000 }, { 0x31e7, 0xf000 },
	{ 0x31e8, 0x0000 }, { 0x31e9, 0xf000 }, { 0x31ea, 0x0000 }, { 0x31eb, 0xf000 },
	{ 0x31ec, 0x0000 }, { 0x31ed, 0xf000 }, { 0x31ee, 0x0000 }, { 0x31ef, 0xf000 },
	{ 0x31f0, 0x0000 }, { 0x31f1, 0xf000 }, { 0x31f2, 0x0000 }, { 0x31f3, 0xf000 },
	{ 0x31f4, 0x0000 }, { 0x31f5, 0xf000 }, { 0x31f6, 0x0000 }, { 0x31f7, 0xf000 },
	{ 0x31f8, 0x0000 }, { 0x31f9, 0xf000 }, { 0x31fa, 0x0000 }, { 0x31fb, 0xf000 },
	{ 0x31fc, 0x0000 }, { 0x31fd, 0xf000 }, { 0x31fe, 0x0000 }, { 0x31ff, 0xf000 },
	{ 0x024d, 0xff50 }, { 0x0252, 0xff50 },
	{ 0x0259, 0x0112 }, { 0x025e, 0x0112 },
};

/* Register patch written out when SYSCLK comes up on rev B and later. */
static const struct reg_default wm5102_sysclk_revb_patch[] = {
	{ 0x3081, 0x08FE },
	{ 0x3083, 0x00ED },
	{ 0x30C1, 0x08FE },
	{ 0x30C3, 0x00ED },
};

/*
 * wm5102_sysclk_ev() - DAPM event handler for the SYSCLK supply widget.
 * @w:        the SYSCLK supply widget
 * @kcontrol: unused
 * @event:    DAPM event; only SND_SOC_DAPM_POST_PMU is acted upon
 *
 * After SYSCLK has been powered up, write the silicon-revision specific
 * register patch (rev A gets the long sequence above, everything else the
 * short rev B sequence) directly through the codec regmap.
 *
 * Always returns 0; regmap_write() failures are deliberately ignored as
 * there is no meaningful recovery at this point.
 */
static int wm5102_sysclk_ev(struct snd_soc_dapm_widget *w,
			    struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
	struct regmap *regmap = codec->control_data;
	const struct reg_default *patch = NULL;
	int i, patch_size;

	switch (arizona->rev) {
	case 0:
		patch = wm5102_sysclk_reva_patch;
		patch_size = ARRAY_SIZE(wm5102_sysclk_reva_patch);
		break;
	default:
		patch = wm5102_sysclk_revb_patch;
		patch_size = ARRAY_SIZE(wm5102_sysclk_revb_patch);
		break;
	}

	switch (event) {
	case SND_SOC_DAPM_POST_PMU:
		if (patch)
			for (i = 0; i < patch_size; i++)
				regmap_write(regmap, patch[i].reg,
					     patch[i].def);
		break;
	default:
		break;
	}

	return 0;
}

/* Oversampling rate options for the headphone/earpiece output paths. */
static const char *wm5102_osr_text[] = {
	"Low power", "Normal", "High performance",
};

static const unsigned int wm5102_osr_val[] = {
	0x0, 0x3, 0x5,
};

static const struct soc_enum wm5102_hpout_osr[] = {
	SOC_VALUE_ENUM_SINGLE(ARIZONA_OUTPUT_PATH_CONFIG_1L,
			      ARIZONA_OUT1_OSR_SHIFT, 0x7, 3,
			      wm5102_osr_text, wm5102_osr_val),
	SOC_VALUE_ENUM_SINGLE(ARIZONA_OUTPUT_PATH_CONFIG_2L,
			      ARIZONA_OUT2_OSR_SHIFT, 0x7, 3,
			      wm5102_osr_text, wm5102_osr_val),
	SOC_VALUE_ENUM_SINGLE(ARIZONA_OUTPUT_PATH_CONFIG_3L,
			      ARIZONA_OUT3_OSR_SHIFT, 0x7, 3,
			      wm5102_osr_text, wm5102_osr_val),
};

/*
 * Per-output noise gate source switches; note bit 5 is skipped as the
 * register layout leaves it unused between EPOUT and SPKOUTL.
 */
#define WM5102_NG_SRC(name, base) \
	SOC_SINGLE(name " NG HPOUT1L Switch",  base, 0, 1, 0), \
	SOC_SINGLE(name " NG HPOUT1R Switch",  base, 1, 1, 0), \
	SOC_SINGLE(name " NG HPOUT2L Switch",  base, 2, 1, 0), \
	SOC_SINGLE(name " NG HPOUT2R Switch",  base, 3, 1, 0), \
	SOC_SINGLE(name " NG EPOUT Switch",    base, 4, 1, 0), \
	SOC_SINGLE(name " NG SPKOUTL Switch",  base, 6, 1, 0), \
	SOC_SINGLE(name " NG SPKOUTR Switch",  base, 7, 1, 0), \
	SOC_SINGLE(name " NG SPKDAT1L Switch", base, 8, 1, 0), \
	SOC_SINGLE(name " NG SPKDAT1R Switch", base, 9, 1, 0)

static const struct snd_kcontrol_new wm5102_snd_controls[] = {
SOC_SINGLE("IN1 High Performance Switch", ARIZONA_IN1L_CONTROL,
	   ARIZONA_IN1_OSR_SHIFT, 1, 0),
SOC_SINGLE("IN2 High Performance Switch", ARIZONA_IN2L_CONTROL,
	   ARIZONA_IN2_OSR_SHIFT, 1, 0),
SOC_SINGLE("IN3 High Performance Switch", ARIZONA_IN3L_CONTROL,
	   ARIZONA_IN3_OSR_SHIFT, 1, 0),

SOC_SINGLE_RANGE_TLV("IN1L Volume", ARIZONA_IN1L_CONTROL,
		     ARIZONA_IN1L_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv),
SOC_SINGLE_RANGE_TLV("IN1R Volume", ARIZONA_IN1R_CONTROL,
		     ARIZONA_IN1R_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv),
SOC_SINGLE_RANGE_TLV("IN2L Volume", ARIZONA_IN2L_CONTROL,
		     ARIZONA_IN2L_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv),
SOC_SINGLE_RANGE_TLV("IN2R Volume", ARIZONA_IN2R_CONTROL,
		     ARIZONA_IN2R_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv),
SOC_SINGLE_RANGE_TLV("IN3L Volume", ARIZONA_IN3L_CONTROL,
		     ARIZONA_IN3L_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv),
SOC_SINGLE_RANGE_TLV("IN3R Volume", ARIZONA_IN3R_CONTROL,
		     ARIZONA_IN3R_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv),

SOC_SINGLE_TLV("IN1L Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_1L,
	       ARIZONA_IN1L_DIG_VOL_SHIFT, 0xbf, 0, digital_tlv),
SOC_SINGLE_TLV("IN1R Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_1R,
	       ARIZONA_IN1R_DIG_VOL_SHIFT, 0xbf, 0, digital_tlv),
SOC_SINGLE_TLV("IN2L Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_2L,
	       ARIZONA_IN2L_DIG_VOL_SHIFT, 0xbf, 0, digital_tlv),
SOC_SINGLE_TLV("IN2R Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_2R,
	       ARIZONA_IN2R_DIG_VOL_SHIFT, 0xbf, 0, digital_tlv),
SOC_SINGLE_TLV("IN3L Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_3L,
	       ARIZONA_IN3L_DIG_VOL_SHIFT, 0xbf, 0, digital_tlv),
SOC_SINGLE_TLV("IN3R Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_3R,
	       ARIZONA_IN3R_DIG_VOL_SHIFT, 0xbf, 0, digital_tlv),

SOC_ENUM("Input Ramp Up", arizona_in_vi_ramp),
SOC_ENUM("Input Ramp Down", arizona_in_vd_ramp),

ARIZONA_MIXER_CONTROLS("EQ1", ARIZONA_EQ1MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("EQ2", ARIZONA_EQ2MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("EQ3", ARIZONA_EQ3MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("EQ4", ARIZONA_EQ4MIX_INPUT_1_SOURCE),

/* Fixed misspelling "Coefficeints" -> "Coefficients" in the control names */
SND_SOC_BYTES_MASK("EQ1 Coefficients", ARIZONA_EQ1_1, 21,
		   ARIZONA_EQ1_ENA_MASK),
SND_SOC_BYTES_MASK("EQ2 Coefficients", ARIZONA_EQ2_1, 21,
		   ARIZONA_EQ2_ENA_MASK),
SND_SOC_BYTES_MASK("EQ3 Coefficients", ARIZONA_EQ3_1, 21,
		   ARIZONA_EQ3_ENA_MASK),
SND_SOC_BYTES_MASK("EQ4 Coefficients", ARIZONA_EQ4_1, 21,
		   ARIZONA_EQ4_ENA_MASK),

SOC_SINGLE_TLV("EQ1 B1 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B1_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ1 B2 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B2_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ1 B3 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B3_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ1 B4 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B4_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ1 B5 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B5_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ2 B1 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B1_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ2 B2 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B2_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ2 B3 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B3_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ2 B4 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B4_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ2 B5 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B5_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ3 B1 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B1_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ3 B2 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B2_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ3 B3 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B3_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ3 B4 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B4_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ3 B5 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B5_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ4 B1 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B1_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ4 B2 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B2_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ4 B3 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B3_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ4 B4 Volume", ARIZONA_EQ4_2, ARIZONA_EQ4_B4_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ4 B5 Volume", ARIZONA_EQ4_2, ARIZONA_EQ4_B5_GAIN_SHIFT,
	       24, 0, eq_tlv),

ARIZONA_MIXER_CONTROLS("DRC1L", ARIZONA_DRC1LMIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("DRC1R", ARIZONA_DRC1RMIX_INPUT_1_SOURCE),

SND_SOC_BYTES_MASK("DRC1", ARIZONA_DRC1_CTRL1, 5,
		   ARIZONA_DRC1R_ENA | ARIZONA_DRC1L_ENA),

ARIZONA_MIXER_CONTROLS("LHPF1", ARIZONA_HPLP1MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("LHPF2", ARIZONA_HPLP2MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("LHPF3", ARIZONA_HPLP3MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("LHPF4", ARIZONA_HPLP4MIX_INPUT_1_SOURCE),

SND_SOC_BYTES("LHPF1 Coefficients", ARIZONA_HPLPF1_2, 1),
SND_SOC_BYTES("LHPF2 Coefficients", ARIZONA_HPLPF2_2, 1),
SND_SOC_BYTES("LHPF3 Coefficients", ARIZONA_HPLPF3_2, 1),
SND_SOC_BYTES("LHPF4 Coefficients", ARIZONA_HPLPF4_2, 1),

ARIZONA_MIXER_CONTROLS("DSP1L", ARIZONA_DSP1LMIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("DSP1R", ARIZONA_DSP1RMIX_INPUT_1_SOURCE),

SOC_ENUM("LHPF1 Mode", arizona_lhpf1_mode),
SOC_ENUM("LHPF2 Mode", arizona_lhpf2_mode),
SOC_ENUM("LHPF3 Mode", arizona_lhpf3_mode),
SOC_ENUM("LHPF4 Mode", arizona_lhpf4_mode),

SOC_VALUE_ENUM("ISRC1 FSL", arizona_isrc_fsl[0]),
SOC_VALUE_ENUM("ISRC2 FSL", arizona_isrc_fsl[1]),

ARIZONA_MIXER_CONTROLS("Mic", ARIZONA_MICMIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("Noise", ARIZONA_NOISEMIX_INPUT_1_SOURCE),

SOC_SINGLE_TLV("Noise Generator Volume", ARIZONA_COMFORT_NOISE_GENERATOR,
	       ARIZONA_NOISE_GEN_GAIN_SHIFT, 0x16, 0, noise_tlv),

ARIZONA_MIXER_CONTROLS("HPOUT1L", ARIZONA_OUT1LMIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("HPOUT1R", ARIZONA_OUT1RMIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("HPOUT2L", ARIZONA_OUT2LMIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("HPOUT2R", ARIZONA_OUT2RMIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("EPOUT", ARIZONA_OUT3LMIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("SPKOUTL", ARIZONA_OUT4LMIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("SPKOUTR", ARIZONA_OUT4RMIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("SPKDAT1L", ARIZONA_OUT5LMIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("SPKDAT1R", ARIZONA_OUT5RMIX_INPUT_1_SOURCE),

SOC_SINGLE("Speaker High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_4L,
	   ARIZONA_OUT4_OSR_SHIFT, 1, 0),
SOC_SINGLE("SPKDAT1 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_5L,
	   ARIZONA_OUT5_OSR_SHIFT, 1, 0),

SOC_DOUBLE_R("HPOUT1 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_1L,
	     ARIZONA_DAC_DIGITAL_VOLUME_1R, ARIZONA_OUT1L_MUTE_SHIFT, 1, 1),
SOC_DOUBLE_R("HPOUT2 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_2L,
	     ARIZONA_DAC_DIGITAL_VOLUME_2R, ARIZONA_OUT2L_MUTE_SHIFT, 1, 1),
SOC_SINGLE("EPOUT Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_3L,
	   ARIZONA_OUT3L_MUTE_SHIFT, 1, 1),
SOC_DOUBLE_R("Speaker Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_4L,
	     ARIZONA_DAC_DIGITAL_VOLUME_4R, ARIZONA_OUT4L_MUTE_SHIFT, 1, 1),
SOC_DOUBLE_R("SPKDAT1 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_5L,
	     ARIZONA_DAC_DIGITAL_VOLUME_5R, ARIZONA_OUT5L_MUTE_SHIFT, 1, 1),

SOC_DOUBLE_R_TLV("HPOUT1 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_1L,
		 ARIZONA_DAC_DIGITAL_VOLUME_1R, ARIZONA_OUT1L_VOL_SHIFT,
		 0xbf, 0, digital_tlv),
SOC_DOUBLE_R_TLV("HPOUT2 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_2L,
		 ARIZONA_DAC_DIGITAL_VOLUME_2R, ARIZONA_OUT2L_VOL_SHIFT,
		 0xbf, 0, digital_tlv),
SOC_SINGLE_TLV("EPOUT Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_3L,
	       ARIZONA_OUT3L_VOL_SHIFT, 0xbf, 0, digital_tlv),
SOC_DOUBLE_R_TLV("Speaker Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_4L,
		 ARIZONA_DAC_DIGITAL_VOLUME_4R, ARIZONA_OUT4L_VOL_SHIFT,
		 0xbf, 0, digital_tlv),
SOC_DOUBLE_R_TLV("SPKDAT1 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_5L,
		 ARIZONA_DAC_DIGITAL_VOLUME_5R, ARIZONA_OUT5L_VOL_SHIFT,
		 0xbf, 0, digital_tlv),

SOC_VALUE_ENUM("HPOUT1 OSR", wm5102_hpout_osr[0]),
SOC_VALUE_ENUM("HPOUT2 OSR", wm5102_hpout_osr[1]),
SOC_VALUE_ENUM("HPOUT3 OSR", wm5102_hpout_osr[2]),

SOC_ENUM("Output Ramp Up", arizona_out_vi_ramp),
SOC_ENUM("Output Ramp Down", arizona_out_vd_ramp),

SOC_DOUBLE("SPKDAT1 Switch", ARIZONA_PDM_SPK1_CTRL_1, ARIZONA_SPK1L_MUTE_SHIFT,
	   ARIZONA_SPK1R_MUTE_SHIFT, 1, 1),

SOC_SINGLE("Noise Gate Switch", ARIZONA_NOISE_GATE_CONTROL,
	   ARIZONA_NGATE_ENA_SHIFT, 1, 0),
SOC_SINGLE_TLV("Noise Gate Threshold Volume", ARIZONA_NOISE_GATE_CONTROL,
	       ARIZONA_NGATE_THR_SHIFT, 7, 1, ng_tlv),
SOC_ENUM("Noise Gate Hold", arizona_ng_hold),

WM5102_NG_SRC("HPOUT1L", ARIZONA_NOISE_GATE_SELECT_1L),
WM5102_NG_SRC("HPOUT1R", ARIZONA_NOISE_GATE_SELECT_1R),
WM5102_NG_SRC("HPOUT2L", ARIZONA_NOISE_GATE_SELECT_2L),
WM5102_NG_SRC("HPOUT2R", ARIZONA_NOISE_GATE_SELECT_2R),
WM5102_NG_SRC("EPOUT", ARIZONA_NOISE_GATE_SELECT_3L),
WM5102_NG_SRC("SPKOUTL", ARIZONA_NOISE_GATE_SELECT_4L),
WM5102_NG_SRC("SPKOUTR", ARIZONA_NOISE_GATE_SELECT_4R),
WM5102_NG_SRC("SPKDAT1L", ARIZONA_NOISE_GATE_SELECT_5L),
WM5102_NG_SRC("SPKDAT1R", ARIZONA_NOISE_GATE_SELECT_5R),

ARIZONA_MIXER_CONTROLS("AIF1TX1", ARIZONA_AIF1TX1MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("AIF1TX2", ARIZONA_AIF1TX2MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("AIF1TX3", ARIZONA_AIF1TX3MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("AIF1TX4", ARIZONA_AIF1TX4MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("AIF1TX5", ARIZONA_AIF1TX5MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("AIF1TX6", ARIZONA_AIF1TX6MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("AIF1TX7", ARIZONA_AIF1TX7MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("AIF1TX8", ARIZONA_AIF1TX8MIX_INPUT_1_SOURCE),

ARIZONA_MIXER_CONTROLS("AIF2TX1", ARIZONA_AIF2TX1MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("AIF2TX2", ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE),

ARIZONA_MIXER_CONTROLS("AIF3TX1", ARIZONA_AIF3TX1MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("AIF3TX2", ARIZONA_AIF3TX2MIX_INPUT_1_SOURCE),
};

ARIZONA_MIXER_ENUMS(EQ1, ARIZONA_EQ1MIX_INPUT_1_SOURCE);
ARIZONA_MIXER_ENUMS(EQ2, ARIZONA_EQ2MIX_INPUT_1_SOURCE);
ARIZONA_MIXER_ENUMS(EQ3, ARIZONA_EQ3MIX_INPUT_1_SOURCE);
ARIZONA_MIXER_ENUMS(EQ4, ARIZONA_EQ4MIX_INPUT_1_SOURCE);

ARIZONA_MIXER_ENUMS(DRC1L, ARIZONA_DRC1LMIX_INPUT_1_SOURCE);
ARIZONA_MIXER_ENUMS(DRC1R, ARIZONA_DRC1RMIX_INPUT_1_SOURCE);

ARIZONA_MIXER_ENUMS(LHPF1, ARIZONA_HPLP1MIX_INPUT_1_SOURCE);
ARIZONA_MIXER_ENUMS(LHPF2, ARIZONA_HPLP2MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(LHPF3, ARIZONA_HPLP3MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(LHPF4, ARIZONA_HPLP4MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(Mic, ARIZONA_MICMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(Noise, ARIZONA_NOISEMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(PWM1, ARIZONA_PWM1MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(PWM2, ARIZONA_PWM2MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(OUT1L, ARIZONA_OUT1LMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(OUT1R, ARIZONA_OUT1RMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(OUT2L, ARIZONA_OUT2LMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(OUT2R, ARIZONA_OUT2RMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(OUT3, ARIZONA_OUT3LMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(SPKOUTL, ARIZONA_OUT4LMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(SPKOUTR, ARIZONA_OUT4RMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(SPKDAT1L, ARIZONA_OUT5LMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(SPKDAT1R, ARIZONA_OUT5RMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF1TX1, ARIZONA_AIF1TX1MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF1TX2, ARIZONA_AIF1TX2MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF1TX3, ARIZONA_AIF1TX3MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF1TX4, ARIZONA_AIF1TX4MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF1TX5, ARIZONA_AIF1TX5MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF1TX6, ARIZONA_AIF1TX6MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF1TX7, ARIZONA_AIF1TX7MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF1TX8, ARIZONA_AIF1TX8MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF2TX1, ARIZONA_AIF2TX1MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF2TX2, ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF3TX1, ARIZONA_AIF3TX1MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF3TX2, ARIZONA_AIF3TX2MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ASRC1L, ARIZONA_ASRC1LMIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ASRC1R, ARIZONA_ASRC1RMIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ASRC2L, ARIZONA_ASRC2LMIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ASRC2R, 
ARIZONA_ASRC2RMIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC1INT1, ARIZONA_ISRC1INT1MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC1INT2, ARIZONA_ISRC1INT2MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC1DEC1, ARIZONA_ISRC1DEC1MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC1DEC2, ARIZONA_ISRC1DEC2MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC2INT1, ARIZONA_ISRC2INT1MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC2INT2, ARIZONA_ISRC2INT2MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC2DEC1, ARIZONA_ISRC2DEC1MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC2DEC2, ARIZONA_ISRC2DEC2MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(DSP1L, ARIZONA_DSP1LMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(DSP1R, ARIZONA_DSP1RMIX_INPUT_1_SOURCE); ARIZONA_DSP_AUX_ENUMS(DSP1, ARIZONA_DSP1AUX1MIX_INPUT_1_SOURCE); static const char *wm5102_aec_loopback_texts[] = { "HPOUT1L", "HPOUT1R", "HPOUT2L", "HPOUT2R", "EPOUT", "SPKOUTL", "SPKOUTR", "SPKDAT1L", "SPKDAT1R", }; static const unsigned int wm5102_aec_loopback_values[] = { 0, 1, 2, 3, 4, 6, 7, 8, 9, }; static const struct soc_enum wm5102_aec_loopback = SOC_VALUE_ENUM_SINGLE(ARIZONA_DAC_AEC_CONTROL_1, ARIZONA_AEC_LOOPBACK_SRC_SHIFT, 0xf, ARRAY_SIZE(wm5102_aec_loopback_texts), wm5102_aec_loopback_texts, wm5102_aec_loopback_values); static const struct snd_kcontrol_new wm5102_aec_loopback_mux = SOC_DAPM_VALUE_ENUM("AEC Loopback", wm5102_aec_loopback); static const struct snd_soc_dapm_widget wm5102_dapm_widgets[] = { SND_SOC_DAPM_SUPPLY("SYSCLK", ARIZONA_SYSTEM_CLOCK_1, ARIZONA_SYSCLK_ENA_SHIFT, 0, wm5102_sysclk_ev, SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_SUPPLY("ASYNCCLK", ARIZONA_ASYNC_CLOCK_1, ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("OPCLK", ARIZONA_OUTPUT_SYSTEM_CLOCK, ARIZONA_OPCLK_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("ASYNCOPCLK", ARIZONA_OUTPUT_ASYNC_CLOCK, ARIZONA_OPCLK_ASYNC_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_REGULATOR_SUPPLY("DBVDD2", 0, 0), SND_SOC_DAPM_REGULATOR_SUPPLY("DBVDD3", 0, 0), SND_SOC_DAPM_REGULATOR_SUPPLY("CPVDD", 20, 0), 
SND_SOC_DAPM_REGULATOR_SUPPLY("MICVDD", 0, SND_SOC_DAPM_REGULATOR_BYPASS), SND_SOC_DAPM_REGULATOR_SUPPLY("SPKVDDL", 0, 0), SND_SOC_DAPM_REGULATOR_SUPPLY("SPKVDDR", 0, 0), SND_SOC_DAPM_SIGGEN("TONE"), SND_SOC_DAPM_SIGGEN("NOISE"), SND_SOC_DAPM_SIGGEN("HAPTICS"), SND_SOC_DAPM_INPUT("IN1L"), SND_SOC_DAPM_INPUT("IN1R"), SND_SOC_DAPM_INPUT("IN2L"), SND_SOC_DAPM_INPUT("IN2R"), SND_SOC_DAPM_INPUT("IN3L"), SND_SOC_DAPM_INPUT("IN3R"), SND_SOC_DAPM_PGA_E("IN1L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN1L_ENA_SHIFT, 0, NULL, 0, arizona_in_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_E("IN1R PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN1R_ENA_SHIFT, 0, NULL, 0, arizona_in_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_E("IN2L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN2L_ENA_SHIFT, 0, NULL, 0, arizona_in_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_E("IN2R PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN2R_ENA_SHIFT, 0, NULL, 0, arizona_in_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_E("IN3L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN3L_ENA_SHIFT, 0, NULL, 0, arizona_in_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_E("IN3R PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN3R_ENA_SHIFT, 0, NULL, 0, arizona_in_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_SUPPLY("MICBIAS1", ARIZONA_MIC_BIAS_CTRL_1, ARIZONA_MICB1_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("MICBIAS2", ARIZONA_MIC_BIAS_CTRL_2, ARIZONA_MICB2_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("MICBIAS3", ARIZONA_MIC_BIAS_CTRL_3, ARIZONA_MICB3_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("Noise Generator", ARIZONA_COMFORT_NOISE_GENERATOR, 
ARIZONA_NOISE_GEN_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("Tone Generator 1", ARIZONA_TONE_GENERATOR_1, ARIZONA_TONE1_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("Tone Generator 2", ARIZONA_TONE_GENERATOR_1, ARIZONA_TONE2_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("Mic Mute Mixer", ARIZONA_MIC_NOISE_MIX_CONTROL_1, ARIZONA_MICMUTE_MIX_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("EQ1", ARIZONA_EQ1_1, ARIZONA_EQ1_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("EQ2", ARIZONA_EQ2_1, ARIZONA_EQ2_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("EQ3", ARIZONA_EQ3_1, ARIZONA_EQ3_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("EQ4", ARIZONA_EQ4_1, ARIZONA_EQ4_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("DRC1L", ARIZONA_DRC1_CTRL1, ARIZONA_DRC1L_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("DRC1R", ARIZONA_DRC1_CTRL1, ARIZONA_DRC1R_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("LHPF1", ARIZONA_HPLPF1_1, ARIZONA_LHPF1_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("LHPF2", ARIZONA_HPLPF2_1, ARIZONA_LHPF2_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("LHPF3", ARIZONA_HPLPF3_1, ARIZONA_LHPF3_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("LHPF4", ARIZONA_HPLPF4_1, ARIZONA_LHPF4_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("PWM1 Driver", ARIZONA_PWM_DRIVE_1, ARIZONA_PWM1_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("PWM2 Driver", ARIZONA_PWM_DRIVE_1, ARIZONA_PWM2_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ASRC1L", ARIZONA_ASRC_ENABLE, ARIZONA_ASRC1L_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ASRC1R", ARIZONA_ASRC_ENABLE, ARIZONA_ASRC1R_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ASRC2L", ARIZONA_ASRC_ENABLE, ARIZONA_ASRC2L_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ASRC2R", ARIZONA_ASRC_ENABLE, ARIZONA_ASRC2R_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC1INT1", ARIZONA_ISRC_1_CTRL_3, ARIZONA_ISRC1_INT0_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC1INT2", ARIZONA_ISRC_1_CTRL_3, ARIZONA_ISRC1_INT1_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC1DEC1", ARIZONA_ISRC_1_CTRL_3, ARIZONA_ISRC1_DEC0_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC1DEC2", 
ARIZONA_ISRC_1_CTRL_3, ARIZONA_ISRC1_DEC1_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC2INT1", ARIZONA_ISRC_2_CTRL_3, ARIZONA_ISRC2_INT0_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC2INT2", ARIZONA_ISRC_2_CTRL_3, ARIZONA_ISRC2_INT1_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC2DEC1", ARIZONA_ISRC_2_CTRL_3, ARIZONA_ISRC2_DEC0_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC2DEC2", ARIZONA_ISRC_2_CTRL_3, ARIZONA_ISRC2_DEC1_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_AIF_OUT("AIF1TX1", NULL, 0, ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX1_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("AIF1TX2", NULL, 0, ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX2_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("AIF1TX3", NULL, 0, ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX3_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("AIF1TX4", NULL, 0, ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX4_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("AIF1TX5", NULL, 0, ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX5_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("AIF1TX6", NULL, 0, ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX6_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("AIF1TX7", NULL, 0, ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX7_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("AIF1TX8", NULL, 0, ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX8_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF1RX1", NULL, 0, ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX1_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF1RX2", NULL, 0, ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX2_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF1RX3", NULL, 0, ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX3_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF1RX4", NULL, 0, ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX4_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF1RX5", NULL, 0, ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX5_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF1RX6", NULL, 0, ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX6_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF1RX7", NULL, 0, ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX7_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF1RX8", NULL, 0, ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX8_ENA_SHIFT, 0), 
SND_SOC_DAPM_AIF_OUT("AIF2TX1", NULL, 0, ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX1_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("AIF2TX2", NULL, 0, ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX2_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF2RX1", NULL, 0, ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX1_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF2RX2", NULL, 0, ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX2_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("AIF3TX1", NULL, 0, ARIZONA_AIF3_TX_ENABLES, ARIZONA_AIF3TX1_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("AIF3TX2", NULL, 0, ARIZONA_AIF3_TX_ENABLES, ARIZONA_AIF3TX2_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF3RX1", NULL, 0, ARIZONA_AIF3_RX_ENABLES, ARIZONA_AIF3RX1_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF3RX2", NULL, 0, ARIZONA_AIF3_RX_ENABLES, ARIZONA_AIF3RX2_ENA_SHIFT, 0), ARIZONA_DSP_WIDGETS(DSP1, "DSP1"), SND_SOC_DAPM_VALUE_MUX("AEC Loopback", ARIZONA_DAC_AEC_CONTROL_1, ARIZONA_AEC_LOOPBACK_ENA_SHIFT, 0, &wm5102_aec_loopback_mux), SND_SOC_DAPM_PGA_E("OUT1L", SND_SOC_NOPM, ARIZONA_OUT1L_ENA_SHIFT, 0, NULL, 0, arizona_hp_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_E("OUT1R", SND_SOC_NOPM, ARIZONA_OUT1R_ENA_SHIFT, 0, NULL, 0, arizona_hp_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_E("OUT2L", ARIZONA_OUTPUT_ENABLES_1, ARIZONA_OUT2L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_E("OUT2R", ARIZONA_OUTPUT_ENABLES_1, ARIZONA_OUT2R_ENA_SHIFT, 0, NULL, 0, arizona_out_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_E("OUT3L", ARIZONA_OUTPUT_ENABLES_1, ARIZONA_OUT3L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_E("OUT5L", ARIZONA_OUTPUT_ENABLES_1, ARIZONA_OUT5L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_E("OUT5R", ARIZONA_OUTPUT_ENABLES_1, ARIZONA_OUT5R_ENA_SHIFT, 0, NULL, 0, arizona_out_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU), 
ARIZONA_MIXER_WIDGETS(EQ1, "EQ1"), ARIZONA_MIXER_WIDGETS(EQ2, "EQ2"), ARIZONA_MIXER_WIDGETS(EQ3, "EQ3"), ARIZONA_MIXER_WIDGETS(EQ4, "EQ4"), ARIZONA_MIXER_WIDGETS(DRC1L, "DRC1L"), ARIZONA_MIXER_WIDGETS(DRC1R, "DRC1R"), ARIZONA_MIXER_WIDGETS(LHPF1, "LHPF1"), ARIZONA_MIXER_WIDGETS(LHPF2, "LHPF2"), ARIZONA_MIXER_WIDGETS(LHPF3, "LHPF3"), ARIZONA_MIXER_WIDGETS(LHPF4, "LHPF4"), ARIZONA_MIXER_WIDGETS(Mic, "Mic"), ARIZONA_MIXER_WIDGETS(Noise, "Noise"), ARIZONA_MIXER_WIDGETS(PWM1, "PWM1"), ARIZONA_MIXER_WIDGETS(PWM2, "PWM2"), ARIZONA_MIXER_WIDGETS(OUT1L, "HPOUT1L"), ARIZONA_MIXER_WIDGETS(OUT1R, "HPOUT1R"), ARIZONA_MIXER_WIDGETS(OUT2L, "HPOUT2L"), ARIZONA_MIXER_WIDGETS(OUT2R, "HPOUT2R"), ARIZONA_MIXER_WIDGETS(OUT3, "EPOUT"), ARIZONA_MIXER_WIDGETS(SPKOUTL, "SPKOUTL"), ARIZONA_MIXER_WIDGETS(SPKOUTR, "SPKOUTR"), ARIZONA_MIXER_WIDGETS(SPKDAT1L, "SPKDAT1L"), ARIZONA_MIXER_WIDGETS(SPKDAT1R, "SPKDAT1R"), ARIZONA_MIXER_WIDGETS(AIF1TX1, "AIF1TX1"), ARIZONA_MIXER_WIDGETS(AIF1TX2, "AIF1TX2"), ARIZONA_MIXER_WIDGETS(AIF1TX3, "AIF1TX3"), ARIZONA_MIXER_WIDGETS(AIF1TX4, "AIF1TX4"), ARIZONA_MIXER_WIDGETS(AIF1TX5, "AIF1TX5"), ARIZONA_MIXER_WIDGETS(AIF1TX6, "AIF1TX6"), ARIZONA_MIXER_WIDGETS(AIF1TX7, "AIF1TX7"), ARIZONA_MIXER_WIDGETS(AIF1TX8, "AIF1TX8"), ARIZONA_MIXER_WIDGETS(AIF2TX1, "AIF2TX1"), ARIZONA_MIXER_WIDGETS(AIF2TX2, "AIF2TX2"), ARIZONA_MIXER_WIDGETS(AIF3TX1, "AIF3TX1"), ARIZONA_MIXER_WIDGETS(AIF3TX2, "AIF3TX2"), ARIZONA_MUX_WIDGETS(ASRC1L, "ASRC1L"), ARIZONA_MUX_WIDGETS(ASRC1R, "ASRC1R"), ARIZONA_MUX_WIDGETS(ASRC2L, "ASRC2L"), ARIZONA_MUX_WIDGETS(ASRC2R, "ASRC2R"), ARIZONA_MUX_WIDGETS(ISRC1DEC1, "ISRC1DEC1"), ARIZONA_MUX_WIDGETS(ISRC1DEC2, "ISRC1DEC2"), ARIZONA_MUX_WIDGETS(ISRC1INT1, "ISRC1INT1"), ARIZONA_MUX_WIDGETS(ISRC1INT2, "ISRC1INT2"), ARIZONA_MUX_WIDGETS(ISRC2DEC1, "ISRC2DEC1"), ARIZONA_MUX_WIDGETS(ISRC2DEC2, "ISRC2DEC2"), ARIZONA_MUX_WIDGETS(ISRC2INT1, "ISRC2INT1"), ARIZONA_MUX_WIDGETS(ISRC2INT2, "ISRC2INT2"), WM_ADSP2("DSP1", 0), SND_SOC_DAPM_OUTPUT("HPOUT1L"), 
SND_SOC_DAPM_OUTPUT("HPOUT1R"), SND_SOC_DAPM_OUTPUT("HPOUT2L"), SND_SOC_DAPM_OUTPUT("HPOUT2R"), SND_SOC_DAPM_OUTPUT("EPOUTN"), SND_SOC_DAPM_OUTPUT("EPOUTP"), SND_SOC_DAPM_OUTPUT("SPKOUTLN"), SND_SOC_DAPM_OUTPUT("SPKOUTLP"), SND_SOC_DAPM_OUTPUT("SPKOUTRN"), SND_SOC_DAPM_OUTPUT("SPKOUTRP"), SND_SOC_DAPM_OUTPUT("SPKDAT1L"), SND_SOC_DAPM_OUTPUT("SPKDAT1R"), SND_SOC_DAPM_OUTPUT("MICSUPP"), }; #define ARIZONA_MIXER_INPUT_ROUTES(name) \ { name, "Noise Generator", "Noise Generator" }, \ { name, "Tone Generator 1", "Tone Generator 1" }, \ { name, "Tone Generator 2", "Tone Generator 2" }, \ { name, "Haptics", "HAPTICS" }, \ { name, "AEC", "AEC Loopback" }, \ { name, "IN1L", "IN1L PGA" }, \ { name, "IN1R", "IN1R PGA" }, \ { name, "IN2L", "IN2L PGA" }, \ { name, "IN2R", "IN2R PGA" }, \ { name, "IN3L", "IN3L PGA" }, \ { name, "IN3R", "IN3R PGA" }, \ { name, "Mic Mute Mixer", "Mic Mute Mixer" }, \ { name, "AIF1RX1", "AIF1RX1" }, \ { name, "AIF1RX2", "AIF1RX2" }, \ { name, "AIF1RX3", "AIF1RX3" }, \ { name, "AIF1RX4", "AIF1RX4" }, \ { name, "AIF1RX5", "AIF1RX5" }, \ { name, "AIF1RX6", "AIF1RX6" }, \ { name, "AIF1RX7", "AIF1RX7" }, \ { name, "AIF1RX8", "AIF1RX8" }, \ { name, "AIF2RX1", "AIF2RX1" }, \ { name, "AIF2RX2", "AIF2RX2" }, \ { name, "AIF3RX1", "AIF3RX1" }, \ { name, "AIF3RX2", "AIF3RX2" }, \ { name, "EQ1", "EQ1" }, \ { name, "EQ2", "EQ2" }, \ { name, "EQ3", "EQ3" }, \ { name, "EQ4", "EQ4" }, \ { name, "DRC1L", "DRC1L" }, \ { name, "DRC1R", "DRC1R" }, \ { name, "LHPF1", "LHPF1" }, \ { name, "LHPF2", "LHPF2" }, \ { name, "LHPF3", "LHPF3" }, \ { name, "LHPF4", "LHPF4" }, \ { name, "ASRC1L", "ASRC1L" }, \ { name, "ASRC1R", "ASRC1R" }, \ { name, "ASRC2L", "ASRC2L" }, \ { name, "ASRC2R", "ASRC2R" }, \ { name, "ISRC1DEC1", "ISRC1DEC1" }, \ { name, "ISRC1DEC2", "ISRC1DEC2" }, \ { name, "ISRC1INT1", "ISRC1INT1" }, \ { name, "ISRC1INT2", "ISRC1INT2" }, \ { name, "ISRC2DEC1", "ISRC2DEC1" }, \ { name, "ISRC2DEC2", "ISRC2DEC2" }, \ { name, "ISRC2INT1", "ISRC2INT1" }, \ { name, 
"ISRC2INT2", "ISRC2INT2" }, \ { name, "DSP1.1", "DSP1" }, \ { name, "DSP1.2", "DSP1" }, \ { name, "DSP1.3", "DSP1" }, \ { name, "DSP1.4", "DSP1" }, \ { name, "DSP1.5", "DSP1" }, \ { name, "DSP1.6", "DSP1" } static const struct snd_soc_dapm_route wm5102_dapm_routes[] = { { "AIF2 Capture", NULL, "DBVDD2" }, { "AIF2 Playback", NULL, "DBVDD2" }, { "AIF3 Capture", NULL, "DBVDD3" }, { "AIF3 Playback", NULL, "DBVDD3" }, { "OUT1L", NULL, "CPVDD" }, { "OUT1R", NULL, "CPVDD" }, { "OUT2L", NULL, "CPVDD" }, { "OUT2R", NULL, "CPVDD" }, { "OUT3L", NULL, "CPVDD" }, { "OUT4L", NULL, "SPKVDDL" }, { "OUT4R", NULL, "SPKVDDR" }, { "OUT1L", NULL, "SYSCLK" }, { "OUT1R", NULL, "SYSCLK" }, { "OUT2L", NULL, "SYSCLK" }, { "OUT2R", NULL, "SYSCLK" }, { "OUT3L", NULL, "SYSCLK" }, { "OUT4L", NULL, "SYSCLK" }, { "OUT4R", NULL, "SYSCLK" }, { "OUT5L", NULL, "SYSCLK" }, { "OUT5R", NULL, "SYSCLK" }, { "MICBIAS1", NULL, "MICVDD" }, { "MICBIAS2", NULL, "MICVDD" }, { "MICBIAS3", NULL, "MICVDD" }, { "Noise Generator", NULL, "NOISE" }, { "Tone Generator 1", NULL, "TONE" }, { "Tone Generator 2", NULL, "TONE" }, { "Mic Mute Mixer", NULL, "Noise Mixer" }, { "Mic Mute Mixer", NULL, "Mic Mixer" }, { "AIF1 Capture", NULL, "AIF1TX1" }, { "AIF1 Capture", NULL, "AIF1TX2" }, { "AIF1 Capture", NULL, "AIF1TX3" }, { "AIF1 Capture", NULL, "AIF1TX4" }, { "AIF1 Capture", NULL, "AIF1TX5" }, { "AIF1 Capture", NULL, "AIF1TX6" }, { "AIF1 Capture", NULL, "AIF1TX7" }, { "AIF1 Capture", NULL, "AIF1TX8" }, { "AIF1RX1", NULL, "AIF1 Playback" }, { "AIF1RX2", NULL, "AIF1 Playback" }, { "AIF1RX3", NULL, "AIF1 Playback" }, { "AIF1RX4", NULL, "AIF1 Playback" }, { "AIF1RX5", NULL, "AIF1 Playback" }, { "AIF1RX6", NULL, "AIF1 Playback" }, { "AIF1RX7", NULL, "AIF1 Playback" }, { "AIF1RX8", NULL, "AIF1 Playback" }, { "AIF2 Capture", NULL, "AIF2TX1" }, { "AIF2 Capture", NULL, "AIF2TX2" }, { "AIF2RX1", NULL, "AIF2 Playback" }, { "AIF2RX2", NULL, "AIF2 Playback" }, { "AIF3 Capture", NULL, "AIF3TX1" }, { "AIF3 Capture", NULL, "AIF3TX2" }, { 
"AIF3RX1", NULL, "AIF3 Playback" }, { "AIF3RX2", NULL, "AIF3 Playback" }, { "AIF1 Playback", NULL, "SYSCLK" }, { "AIF2 Playback", NULL, "SYSCLK" }, { "AIF3 Playback", NULL, "SYSCLK" }, { "AIF1 Capture", NULL, "SYSCLK" }, { "AIF2 Capture", NULL, "SYSCLK" }, { "AIF3 Capture", NULL, "SYSCLK" }, { "IN1L PGA", NULL, "IN1L" }, { "IN1R PGA", NULL, "IN1R" }, { "IN2L PGA", NULL, "IN2L" }, { "IN2R PGA", NULL, "IN2R" }, { "IN3L PGA", NULL, "IN3L" }, { "IN3R PGA", NULL, "IN3R" }, { "ASRC1L", NULL, "ASRC1L Input" }, { "ASRC1R", NULL, "ASRC1R Input" }, { "ASRC2L", NULL, "ASRC2L Input" }, { "ASRC2R", NULL, "ASRC2R Input" }, { "ISRC1DEC1", NULL, "ISRC1DEC1 Input" }, { "ISRC1DEC2", NULL, "ISRC1DEC2 Input" }, { "ISRC1INT1", NULL, "ISRC1INT1 Input" }, { "ISRC1INT2", NULL, "ISRC1INT2 Input" }, { "ISRC2DEC1", NULL, "ISRC2DEC1 Input" }, { "ISRC2DEC2", NULL, "ISRC2DEC2 Input" }, { "ISRC2INT1", NULL, "ISRC2INT1 Input" }, { "ISRC2INT2", NULL, "ISRC2INT2 Input" }, ARIZONA_MIXER_ROUTES("OUT1L", "HPOUT1L"), ARIZONA_MIXER_ROUTES("OUT1R", "HPOUT1R"), ARIZONA_MIXER_ROUTES("OUT2L", "HPOUT2L"), ARIZONA_MIXER_ROUTES("OUT2R", "HPOUT2R"), ARIZONA_MIXER_ROUTES("OUT3L", "EPOUT"), ARIZONA_MIXER_ROUTES("OUT4L", "SPKOUTL"), ARIZONA_MIXER_ROUTES("OUT4R", "SPKOUTR"), ARIZONA_MIXER_ROUTES("OUT5L", "SPKDAT1L"), ARIZONA_MIXER_ROUTES("OUT5R", "SPKDAT1R"), ARIZONA_MIXER_ROUTES("PWM1 Driver", "PWM1"), ARIZONA_MIXER_ROUTES("PWM2 Driver", "PWM2"), ARIZONA_MIXER_ROUTES("AIF1TX1", "AIF1TX1"), ARIZONA_MIXER_ROUTES("AIF1TX2", "AIF1TX2"), ARIZONA_MIXER_ROUTES("AIF1TX3", "AIF1TX3"), ARIZONA_MIXER_ROUTES("AIF1TX4", "AIF1TX4"), ARIZONA_MIXER_ROUTES("AIF1TX5", "AIF1TX5"), ARIZONA_MIXER_ROUTES("AIF1TX6", "AIF1TX6"), ARIZONA_MIXER_ROUTES("AIF1TX7", "AIF1TX7"), ARIZONA_MIXER_ROUTES("AIF1TX8", "AIF1TX8"), ARIZONA_MIXER_ROUTES("AIF2TX1", "AIF2TX1"), ARIZONA_MIXER_ROUTES("AIF2TX2", "AIF2TX2"), ARIZONA_MIXER_ROUTES("AIF3TX1", "AIF3TX1"), ARIZONA_MIXER_ROUTES("AIF3TX2", "AIF3TX2"), ARIZONA_MIXER_ROUTES("EQ1", "EQ1"), 
ARIZONA_MIXER_ROUTES("EQ2", "EQ2"), ARIZONA_MIXER_ROUTES("EQ3", "EQ3"), ARIZONA_MIXER_ROUTES("EQ4", "EQ4"), ARIZONA_MIXER_ROUTES("DRC1L", "DRC1L"), ARIZONA_MIXER_ROUTES("DRC1R", "DRC1R"), ARIZONA_MIXER_ROUTES("LHPF1", "LHPF1"), ARIZONA_MIXER_ROUTES("LHPF2", "LHPF2"), ARIZONA_MIXER_ROUTES("LHPF3", "LHPF3"), ARIZONA_MIXER_ROUTES("LHPF4", "LHPF4"), ARIZONA_MUX_ROUTES("ASRC1L"), ARIZONA_MUX_ROUTES("ASRC1R"), ARIZONA_MUX_ROUTES("ASRC2L"), ARIZONA_MUX_ROUTES("ASRC2R"), ARIZONA_MUX_ROUTES("ISRC1INT1"), ARIZONA_MUX_ROUTES("ISRC1INT2"), ARIZONA_MUX_ROUTES("ISRC1DEC1"), ARIZONA_MUX_ROUTES("ISRC1DEC2"), ARIZONA_MUX_ROUTES("ISRC2INT1"), ARIZONA_MUX_ROUTES("ISRC2INT2"), ARIZONA_MUX_ROUTES("ISRC2DEC1"), ARIZONA_MUX_ROUTES("ISRC2DEC2"), ARIZONA_DSP_ROUTES("DSP1"), { "AEC Loopback", "HPOUT1L", "OUT1L" }, { "AEC Loopback", "HPOUT1R", "OUT1R" }, { "HPOUT1L", NULL, "OUT1L" }, { "HPOUT1R", NULL, "OUT1R" }, { "AEC Loopback", "HPOUT2L", "OUT2L" }, { "AEC Loopback", "HPOUT2R", "OUT2R" }, { "HPOUT2L", NULL, "OUT2L" }, { "HPOUT2R", NULL, "OUT2R" }, { "AEC Loopback", "EPOUT", "OUT3L" }, { "EPOUTN", NULL, "OUT3L" }, { "EPOUTP", NULL, "OUT3L" }, { "AEC Loopback", "SPKOUTL", "OUT4L" }, { "SPKOUTLN", NULL, "OUT4L" }, { "SPKOUTLP", NULL, "OUT4L" }, { "AEC Loopback", "SPKOUTR", "OUT4R" }, { "SPKOUTRN", NULL, "OUT4R" }, { "SPKOUTRP", NULL, "OUT4R" }, { "AEC Loopback", "SPKDAT1L", "OUT5L" }, { "AEC Loopback", "SPKDAT1R", "OUT5R" }, { "SPKDAT1L", NULL, "OUT5L" }, { "SPKDAT1R", NULL, "OUT5R" }, { "MICSUPP", NULL, "SYSCLK" }, }; static int wm5102_set_fll(struct snd_soc_codec *codec, int fll_id, int source, unsigned int Fref, unsigned int Fout) { struct wm5102_priv *wm5102 = snd_soc_codec_get_drvdata(codec); switch (fll_id) { case WM5102_FLL1: return arizona_set_fll(&wm5102->fll[0], source, Fref, Fout); case WM5102_FLL2: return arizona_set_fll(&wm5102->fll[1], source, Fref, Fout); case WM5102_FLL1_REFCLK: return arizona_set_fll_refclk(&wm5102->fll[0], source, Fref, Fout); case WM5102_FLL2_REFCLK: 
return arizona_set_fll_refclk(&wm5102->fll[1], source, Fref, Fout); default: return -EINVAL; } } #define WM5102_RATES SNDRV_PCM_RATE_8000_192000 #define WM5102_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) static struct snd_soc_dai_driver wm5102_dai[] = { { .name = "wm5102-aif1", .id = 1, .base = ARIZONA_AIF1_BCLK_CTRL, .playback = { .stream_name = "AIF1 Playback", .channels_min = 1, .channels_max = 8, .rates = WM5102_RATES, .formats = WM5102_FORMATS, }, .capture = { .stream_name = "AIF1 Capture", .channels_min = 1, .channels_max = 8, .rates = WM5102_RATES, .formats = WM5102_FORMATS, }, .ops = &arizona_dai_ops, .symmetric_rates = 1, }, { .name = "wm5102-aif2", .id = 2, .base = ARIZONA_AIF2_BCLK_CTRL, .playback = { .stream_name = "AIF2 Playback", .channels_min = 1, .channels_max = 2, .rates = WM5102_RATES, .formats = WM5102_FORMATS, }, .capture = { .stream_name = "AIF2 Capture", .channels_min = 1, .channels_max = 2, .rates = WM5102_RATES, .formats = WM5102_FORMATS, }, .ops = &arizona_dai_ops, .symmetric_rates = 1, }, { .name = "wm5102-aif3", .id = 3, .base = ARIZONA_AIF3_BCLK_CTRL, .playback = { .stream_name = "AIF3 Playback", .channels_min = 1, .channels_max = 2, .rates = WM5102_RATES, .formats = WM5102_FORMATS, }, .capture = { .stream_name = "AIF3 Capture", .channels_min = 1, .channels_max = 2, .rates = WM5102_RATES, .formats = WM5102_FORMATS, }, .ops = &arizona_dai_ops, .symmetric_rates = 1, }, }; static int wm5102_codec_probe(struct snd_soc_codec *codec) { struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec); int ret; codec->control_data = priv->core.arizona->regmap; ret = snd_soc_codec_set_cache_io(codec, 32, 16, SND_SOC_REGMAP); if (ret != 0) return ret; ret = snd_soc_add_codec_controls(codec, wm_adsp2_fw_controls, 2); if (ret != 0) return ret; arizona_init_spk(codec); snd_soc_dapm_disable_pin(&codec->dapm, "HAPTICS"); priv->core.arizona->dapm = &codec->dapm; return 0; } static int 
wm5102_codec_remove(struct snd_soc_codec *codec) { struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec); priv->core.arizona->dapm = NULL; return 0; } #define WM5102_DIG_VU 0x0200 static unsigned int wm5102_digital_vu[] = { ARIZONA_DAC_DIGITAL_VOLUME_1L, ARIZONA_DAC_DIGITAL_VOLUME_1R, ARIZONA_DAC_DIGITAL_VOLUME_2L, ARIZONA_DAC_DIGITAL_VOLUME_2R, ARIZONA_DAC_DIGITAL_VOLUME_3L, ARIZONA_DAC_DIGITAL_VOLUME_3R, ARIZONA_DAC_DIGITAL_VOLUME_4L, ARIZONA_DAC_DIGITAL_VOLUME_4R, ARIZONA_DAC_DIGITAL_VOLUME_5L, ARIZONA_DAC_DIGITAL_VOLUME_5R, }; static struct snd_soc_codec_driver soc_codec_dev_wm5102 = { .probe = wm5102_codec_probe, .remove = wm5102_codec_remove, .idle_bias_off = true, .set_sysclk = arizona_set_sysclk, .set_pll = wm5102_set_fll, .controls = wm5102_snd_controls, .num_controls = ARRAY_SIZE(wm5102_snd_controls), .dapm_widgets = wm5102_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(wm5102_dapm_widgets), .dapm_routes = wm5102_dapm_routes, .num_dapm_routes = ARRAY_SIZE(wm5102_dapm_routes), }; static int wm5102_probe(struct platform_device *pdev) { struct arizona *arizona = dev_get_drvdata(pdev->dev.parent); struct wm5102_priv *wm5102; int i, ret; wm5102 = devm_kzalloc(&pdev->dev, sizeof(struct wm5102_priv), GFP_KERNEL); if (wm5102 == NULL) return -ENOMEM; platform_set_drvdata(pdev, wm5102); wm5102->core.arizona = arizona; wm5102->core.num_inputs = 6; wm5102->core.adsp[0].part = "wm5102"; wm5102->core.adsp[0].num = 1; wm5102->core.adsp[0].type = WMFW_ADSP2; wm5102->core.adsp[0].base = ARIZONA_DSP1_CONTROL_1; wm5102->core.adsp[0].dev = arizona->dev; wm5102->core.adsp[0].regmap = arizona->regmap; wm5102->core.adsp[0].mem = wm5102_dsp1_regions; wm5102->core.adsp[0].num_mems = ARRAY_SIZE(wm5102_dsp1_regions); ret = wm_adsp2_init(&wm5102->core.adsp[0], true); if (ret != 0) return ret; for (i = 0; i < ARRAY_SIZE(wm5102->fll); i++) wm5102->fll[i].vco_mult = 1; arizona_init_fll(arizona, 1, ARIZONA_FLL1_CONTROL_1 - 1, ARIZONA_IRQ_FLL1_LOCK, ARIZONA_IRQ_FLL1_CLOCK_OK, 
&wm5102->fll[0]); arizona_init_fll(arizona, 2, ARIZONA_FLL2_CONTROL_1 - 1, ARIZONA_IRQ_FLL2_LOCK, ARIZONA_IRQ_FLL2_CLOCK_OK, &wm5102->fll[1]); /* SR2 fixed at 8kHz, SR3 fixed at 16kHz */ regmap_update_bits(arizona->regmap, ARIZONA_SAMPLE_RATE_2, ARIZONA_SAMPLE_RATE_2_MASK, 0x11); regmap_update_bits(arizona->regmap, ARIZONA_SAMPLE_RATE_3, ARIZONA_SAMPLE_RATE_3_MASK, 0x12); for (i = 0; i < ARRAY_SIZE(wm5102_dai); i++) arizona_init_dai(&wm5102->core, i); /* Latch volume update bits */ for (i = 0; i < ARRAY_SIZE(wm5102_digital_vu); i++) regmap_update_bits(arizona->regmap, wm5102_digital_vu[i], WM5102_DIG_VU, WM5102_DIG_VU); pm_runtime_enable(&pdev->dev); pm_runtime_idle(&pdev->dev); return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm5102, wm5102_dai, ARRAY_SIZE(wm5102_dai)); } static int wm5102_remove(struct platform_device *pdev) { snd_soc_unregister_codec(&pdev->dev); pm_runtime_disable(&pdev->dev); return 0; } static struct platform_driver wm5102_codec_driver = { .driver = { .name = "wm5102-codec", .owner = THIS_MODULE, }, .probe = wm5102_probe, .remove = wm5102_remove, }; module_platform_driver(wm5102_codec_driver); MODULE_DESCRIPTION("ASoC WM5102 driver"); MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:wm5102-codec");
gpl-2.0
wuxianlin/android_kernel_zte_pluto
drivers/atm/solos-pci.c
1522
35220
/* * Driver for the Solos PCI ADSL2+ card, designed to support Linux by * Traverse Technologies -- http://www.traverse.com.au/ * Xrio Limited -- http://www.xrio.com/ * * * Copyright © 2008 Traverse Technologies * Copyright © 2008 Intel Corporation * * Authors: Nathan Williams <nathan@traverse.com.au> * David Woodhouse <dwmw2@infradead.org> * Treker Chen <treker@xrio.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define DEBUG #define VERBOSE_DEBUG #include <linux/interrupt.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/atm.h> #include <linux/atmdev.h> #include <linux/skbuff.h> #include <linux/sysfs.h> #include <linux/device.h> #include <linux/kobject.h> #include <linux/firmware.h> #include <linux/ctype.h> #include <linux/swab.h> #include <linux/slab.h> #define VERSION "0.07" #define PTAG "solos-pci" #define CONFIG_RAM_SIZE 128 #define FLAGS_ADDR 0x7C #define IRQ_EN_ADDR 0x78 #define FPGA_VER 0x74 #define IRQ_CLEAR 0x70 #define WRITE_FLASH 0x6C #define PORTS 0x68 #define FLASH_BLOCK 0x64 #define FLASH_BUSY 0x60 #define FPGA_MODE 0x5C #define FLASH_MODE 0x58 #define TX_DMA_ADDR(port) (0x40 + (4 * (port))) #define RX_DMA_ADDR(port) (0x30 + (4 * (port))) #define DATA_RAM_SIZE 32768 #define BUF_SIZE 2048 #define OLD_BUF_SIZE 4096 /* For FPGA versions <= 2*/ #define FPGA_PAGE 528 /* FPGA flash page size*/ #define SOLOS_PAGE 512 /* Solos flash page size*/ #define FPGA_BLOCK (FPGA_PAGE * 8) /* FPGA flash block size*/ #define SOLOS_BLOCK (SOLOS_PAGE * 8) /* Solos flash 
block size*/ #define RX_BUF(card, nr) ((card->buffers) + (nr)*(card->buffer_size)*2) #define TX_BUF(card, nr) ((card->buffers) + (nr)*(card->buffer_size)*2 + (card->buffer_size)) #define FLASH_BUF ((card->buffers) + 4*(card->buffer_size)*2) #define RX_DMA_SIZE 2048 #define FPGA_VERSION(a,b) (((a) << 8) + (b)) #define LEGACY_BUFFERS 2 #define DMA_SUPPORTED 4 static int reset = 0; static int atmdebug = 0; static int firmware_upgrade = 0; static int fpga_upgrade = 0; static int db_firmware_upgrade = 0; static int db_fpga_upgrade = 0; struct pkt_hdr { __le16 size; __le16 vpi; __le16 vci; __le16 type; }; struct solos_skb_cb { struct atm_vcc *vcc; uint32_t dma_addr; }; #define SKB_CB(skb) ((struct solos_skb_cb *)skb->cb) #define PKT_DATA 0 #define PKT_COMMAND 1 #define PKT_POPEN 3 #define PKT_PCLOSE 4 #define PKT_STATUS 5 struct solos_card { void __iomem *config_regs; void __iomem *buffers; int nr_ports; int tx_mask; struct pci_dev *dev; struct atm_dev *atmdev[4]; struct tasklet_struct tlet; spinlock_t tx_lock; spinlock_t tx_queue_lock; spinlock_t cli_queue_lock; spinlock_t param_queue_lock; struct list_head param_queue; struct sk_buff_head tx_queue[4]; struct sk_buff_head cli_queue[4]; struct sk_buff *tx_skb[4]; struct sk_buff *rx_skb[4]; wait_queue_head_t param_wq; wait_queue_head_t fw_wq; int using_dma; int fpga_version; int buffer_size; }; struct solos_param { struct list_head list; pid_t pid; int port; struct sk_buff *response; }; #define SOLOS_CHAN(atmdev) ((int)(unsigned long)(atmdev)->phy_data) MODULE_AUTHOR("Traverse Technologies <support@traverse.com.au>"); MODULE_DESCRIPTION("Solos PCI driver"); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("solos-FPGA.bin"); MODULE_FIRMWARE("solos-Firmware.bin"); MODULE_FIRMWARE("solos-db-FPGA.bin"); MODULE_PARM_DESC(reset, "Reset Solos chips on startup"); MODULE_PARM_DESC(atmdebug, "Print ATM data"); MODULE_PARM_DESC(firmware_upgrade, "Initiate Solos firmware upgrade"); MODULE_PARM_DESC(fpga_upgrade, 
"Initiate FPGA upgrade"); MODULE_PARM_DESC(db_firmware_upgrade, "Initiate daughter board Solos firmware upgrade"); MODULE_PARM_DESC(db_fpga_upgrade, "Initiate daughter board FPGA upgrade"); module_param(reset, int, 0444); module_param(atmdebug, int, 0644); module_param(firmware_upgrade, int, 0444); module_param(fpga_upgrade, int, 0444); module_param(db_firmware_upgrade, int, 0444); module_param(db_fpga_upgrade, int, 0444); static void fpga_queue(struct solos_card *card, int port, struct sk_buff *skb, struct atm_vcc *vcc); static uint32_t fpga_tx(struct solos_card *); static irqreturn_t solos_irq(int irq, void *dev_id); static struct atm_vcc* find_vcc(struct atm_dev *dev, short vpi, int vci); static int list_vccs(int vci); static int atm_init(struct solos_card *, struct device *); static void atm_remove(struct solos_card *); static int send_command(struct solos_card *card, int dev, const char *buf, size_t size); static void solos_bh(unsigned long); static int print_buffer(struct sk_buff *buf); static inline void solos_pop(struct atm_vcc *vcc, struct sk_buff *skb) { if (vcc->pop) vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); } static ssize_t solos_param_show(struct device *dev, struct device_attribute *attr, char *buf) { struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev); struct solos_card *card = atmdev->dev_data; struct solos_param prm; struct sk_buff *skb; struct pkt_hdr *header; int buflen; buflen = strlen(attr->attr.name) + 10; skb = alloc_skb(sizeof(*header) + buflen, GFP_KERNEL); if (!skb) { dev_warn(&card->dev->dev, "Failed to allocate sk_buff in solos_param_show()\n"); return -ENOMEM; } header = (void *)skb_put(skb, sizeof(*header)); buflen = snprintf((void *)&header[1], buflen - 1, "L%05d\n%s\n", current->pid, attr->attr.name); skb_put(skb, buflen); header->size = cpu_to_le16(buflen); header->vpi = cpu_to_le16(0); header->vci = cpu_to_le16(0); header->type = cpu_to_le16(PKT_COMMAND); prm.pid = current->pid; prm.response = NULL; 
prm.port = SOLOS_CHAN(atmdev); spin_lock_irq(&card->param_queue_lock); list_add(&prm.list, &card->param_queue); spin_unlock_irq(&card->param_queue_lock); fpga_queue(card, prm.port, skb, NULL); wait_event_timeout(card->param_wq, prm.response, 5 * HZ); spin_lock_irq(&card->param_queue_lock); list_del(&prm.list); spin_unlock_irq(&card->param_queue_lock); if (!prm.response) return -EIO; buflen = prm.response->len; memcpy(buf, prm.response->data, buflen); kfree_skb(prm.response); return buflen; } static ssize_t solos_param_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev); struct solos_card *card = atmdev->dev_data; struct solos_param prm; struct sk_buff *skb; struct pkt_hdr *header; int buflen; ssize_t ret; buflen = strlen(attr->attr.name) + 11 + count; skb = alloc_skb(sizeof(*header) + buflen, GFP_KERNEL); if (!skb) { dev_warn(&card->dev->dev, "Failed to allocate sk_buff in solos_param_store()\n"); return -ENOMEM; } header = (void *)skb_put(skb, sizeof(*header)); buflen = snprintf((void *)&header[1], buflen - 1, "L%05d\n%s\n%s\n", current->pid, attr->attr.name, buf); skb_put(skb, buflen); header->size = cpu_to_le16(buflen); header->vpi = cpu_to_le16(0); header->vci = cpu_to_le16(0); header->type = cpu_to_le16(PKT_COMMAND); prm.pid = current->pid; prm.response = NULL; prm.port = SOLOS_CHAN(atmdev); spin_lock_irq(&card->param_queue_lock); list_add(&prm.list, &card->param_queue); spin_unlock_irq(&card->param_queue_lock); fpga_queue(card, prm.port, skb, NULL); wait_event_timeout(card->param_wq, prm.response, 5 * HZ); spin_lock_irq(&card->param_queue_lock); list_del(&prm.list); spin_unlock_irq(&card->param_queue_lock); skb = prm.response; if (!skb) return -EIO; buflen = skb->len; /* Sometimes it has a newline, sometimes it doesn't. 
*/ if (skb->data[buflen - 1] == '\n') buflen--; if (buflen == 2 && !strncmp(skb->data, "OK", 2)) ret = count; else if (buflen == 5 && !strncmp(skb->data, "ERROR", 5)) ret = -EIO; else { /* We know we have enough space allocated for this; we allocated it ourselves */ skb->data[buflen] = 0; dev_warn(&card->dev->dev, "Unexpected parameter response: '%s'\n", skb->data); ret = -EIO; } kfree_skb(skb); return ret; } static char *next_string(struct sk_buff *skb) { int i = 0; char *this = skb->data; for (i = 0; i < skb->len; i++) { if (this[i] == '\n') { this[i] = 0; skb_pull(skb, i + 1); return this; } if (!isprint(this[i])) return NULL; } return NULL; } /* * Status packet has fields separated by \n, starting with a version number * for the information therein. Fields are.... * * packet version * RxBitRate (version >= 1) * TxBitRate (version >= 1) * State (version >= 1) * LocalSNRMargin (version >= 1) * LocalLineAttn (version >= 1) */ static int process_status(struct solos_card *card, int port, struct sk_buff *skb) { char *str, *end, *state_str, *snr, *attn; int ver, rate_up, rate_down; if (!card->atmdev[port]) return -ENODEV; str = next_string(skb); if (!str) return -EIO; ver = simple_strtol(str, NULL, 10); if (ver < 1) { dev_warn(&card->dev->dev, "Unexpected status interrupt version %d\n", ver); return -EIO; } str = next_string(skb); if (!str) return -EIO; if (!strcmp(str, "ERROR")) { dev_dbg(&card->dev->dev, "Status packet indicated Solos error on port %d (starting up?)\n", port); return 0; } rate_down = simple_strtol(str, &end, 10); if (*end) return -EIO; str = next_string(skb); if (!str) return -EIO; rate_up = simple_strtol(str, &end, 10); if (*end) return -EIO; state_str = next_string(skb); if (!state_str) return -EIO; /* Anything but 'Showtime' is down */ if (strcmp(state_str, "Showtime")) { atm_dev_signal_change(card->atmdev[port], ATM_PHY_SIG_LOST); dev_info(&card->dev->dev, "Port %d: %s\n", port, state_str); return 0; } snr = next_string(skb); if (!snr) return 
-EIO; attn = next_string(skb); if (!attn) return -EIO; dev_info(&card->dev->dev, "Port %d: %s @%d/%d kb/s%s%s%s%s\n", port, state_str, rate_down/1000, rate_up/1000, snr[0]?", SNR ":"", snr, attn[0]?", Attn ":"", attn); card->atmdev[port]->link_rate = rate_down / 424; atm_dev_signal_change(card->atmdev[port], ATM_PHY_SIG_FOUND); return 0; } static int process_command(struct solos_card *card, int port, struct sk_buff *skb) { struct solos_param *prm; unsigned long flags; int cmdpid; int found = 0; if (skb->len < 7) return 0; if (skb->data[0] != 'L' || !isdigit(skb->data[1]) || !isdigit(skb->data[2]) || !isdigit(skb->data[3]) || !isdigit(skb->data[4]) || !isdigit(skb->data[5]) || skb->data[6] != '\n') return 0; cmdpid = simple_strtol(&skb->data[1], NULL, 10); spin_lock_irqsave(&card->param_queue_lock, flags); list_for_each_entry(prm, &card->param_queue, list) { if (prm->port == port && prm->pid == cmdpid) { prm->response = skb; skb_pull(skb, 7); wake_up(&card->param_wq); found = 1; break; } } spin_unlock_irqrestore(&card->param_queue_lock, flags); return found; } static ssize_t console_show(struct device *dev, struct device_attribute *attr, char *buf) { struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev); struct solos_card *card = atmdev->dev_data; struct sk_buff *skb; unsigned int len; spin_lock(&card->cli_queue_lock); skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]); spin_unlock(&card->cli_queue_lock); if(skb == NULL) return sprintf(buf, "No data.\n"); len = skb->len; memcpy(buf, skb->data, len); dev_dbg(&card->dev->dev, "len: %d\n", len); kfree_skb(skb); return len; } static int send_command(struct solos_card *card, int dev, const char *buf, size_t size) { struct sk_buff *skb; struct pkt_hdr *header; if (size > (BUF_SIZE - sizeof(*header))) { dev_dbg(&card->dev->dev, "Command is too big. 
Dropping request\n"); return 0; } skb = alloc_skb(size + sizeof(*header), GFP_ATOMIC); if (!skb) { dev_warn(&card->dev->dev, "Failed to allocate sk_buff in send_command()\n"); return 0; } header = (void *)skb_put(skb, sizeof(*header)); header->size = cpu_to_le16(size); header->vpi = cpu_to_le16(0); header->vci = cpu_to_le16(0); header->type = cpu_to_le16(PKT_COMMAND); memcpy(skb_put(skb, size), buf, size); fpga_queue(card, dev, skb, NULL); return 0; } static ssize_t console_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev); struct solos_card *card = atmdev->dev_data; int err; err = send_command(card, SOLOS_CHAN(atmdev), buf, count); return err?:count; } static DEVICE_ATTR(console, 0644, console_show, console_store); #define SOLOS_ATTR_RO(x) static DEVICE_ATTR(x, 0444, solos_param_show, NULL); #define SOLOS_ATTR_RW(x) static DEVICE_ATTR(x, 0644, solos_param_show, solos_param_store); #include "solos-attrlist.c" #undef SOLOS_ATTR_RO #undef SOLOS_ATTR_RW #define SOLOS_ATTR_RO(x) &dev_attr_##x.attr, #define SOLOS_ATTR_RW(x) &dev_attr_##x.attr, static struct attribute *solos_attrs[] = { #include "solos-attrlist.c" NULL }; static struct attribute_group solos_attr_group = { .attrs = solos_attrs, .name = "parameters", }; static int flash_upgrade(struct solos_card *card, int chip) { const struct firmware *fw; const char *fw_name; int blocksize = 0; int numblocks = 0; int offset; switch (chip) { case 0: fw_name = "solos-FPGA.bin"; blocksize = FPGA_BLOCK; break; case 1: fw_name = "solos-Firmware.bin"; blocksize = SOLOS_BLOCK; break; case 2: if (card->fpga_version > LEGACY_BUFFERS){ fw_name = "solos-db-FPGA.bin"; blocksize = FPGA_BLOCK; } else { dev_info(&card->dev->dev, "FPGA version doesn't support" " daughter board upgrades\n"); return -EPERM; } break; case 3: if (card->fpga_version > LEGACY_BUFFERS){ fw_name = "solos-Firmware.bin"; blocksize = SOLOS_BLOCK; } else { 
dev_info(&card->dev->dev, "FPGA version doesn't support" " daughter board upgrades\n"); return -EPERM; } break; default: return -ENODEV; } if (request_firmware(&fw, fw_name, &card->dev->dev)) return -ENOENT; dev_info(&card->dev->dev, "Flash upgrade starting\n"); numblocks = fw->size / blocksize; dev_info(&card->dev->dev, "Firmware size: %zd\n", fw->size); dev_info(&card->dev->dev, "Number of blocks: %d\n", numblocks); dev_info(&card->dev->dev, "Changing FPGA to Update mode\n"); iowrite32(1, card->config_regs + FPGA_MODE); (void) ioread32(card->config_regs + FPGA_MODE); /* Set mode to Chip Erase */ if(chip == 0 || chip == 2) dev_info(&card->dev->dev, "Set FPGA Flash mode to FPGA Chip Erase\n"); if(chip == 1 || chip == 3) dev_info(&card->dev->dev, "Set FPGA Flash mode to Solos Chip Erase\n"); iowrite32((chip * 2), card->config_regs + FLASH_MODE); iowrite32(1, card->config_regs + WRITE_FLASH); wait_event(card->fw_wq, !ioread32(card->config_regs + FLASH_BUSY)); for (offset = 0; offset < fw->size; offset += blocksize) { int i; /* Clear write flag */ iowrite32(0, card->config_regs + WRITE_FLASH); /* Set mode to Block Write */ /* dev_info(&card->dev->dev, "Set FPGA Flash mode to Block Write\n"); */ iowrite32(((chip * 2) + 1), card->config_regs + FLASH_MODE); /* Copy block to buffer, swapping each 16 bits */ for(i = 0; i < blocksize; i += 4) { uint32_t word = swahb32p((uint32_t *)(fw->data + offset + i)); if(card->fpga_version > LEGACY_BUFFERS) iowrite32(word, FLASH_BUF + i); else iowrite32(word, RX_BUF(card, 3) + i); } /* Specify block number and then trigger flash write */ iowrite32(offset / blocksize, card->config_regs + FLASH_BLOCK); iowrite32(1, card->config_regs + WRITE_FLASH); wait_event(card->fw_wq, !ioread32(card->config_regs + FLASH_BUSY)); } release_firmware(fw); iowrite32(0, card->config_regs + WRITE_FLASH); iowrite32(0, card->config_regs + FPGA_MODE); iowrite32(0, card->config_regs + FLASH_MODE); dev_info(&card->dev->dev, "Returning FPGA to Data mode\n"); 
return 0; } static irqreturn_t solos_irq(int irq, void *dev_id) { struct solos_card *card = dev_id; int handled = 1; iowrite32(0, card->config_regs + IRQ_CLEAR); /* If we're up and running, just kick the tasklet to process TX/RX */ if (card->atmdev[0]) tasklet_schedule(&card->tlet); else wake_up(&card->fw_wq); return IRQ_RETVAL(handled); } void solos_bh(unsigned long card_arg) { struct solos_card *card = (void *)card_arg; uint32_t card_flags; uint32_t rx_done = 0; int port; /* * Since fpga_tx() is going to need to read the flags under its lock, * it can return them to us so that we don't have to hit PCI MMIO * again for the same information */ card_flags = fpga_tx(card); for (port = 0; port < card->nr_ports; port++) { if (card_flags & (0x10 << port)) { struct pkt_hdr _hdr, *header; struct sk_buff *skb; struct atm_vcc *vcc; int size; if (card->using_dma) { skb = card->rx_skb[port]; card->rx_skb[port] = NULL; pci_unmap_single(card->dev, SKB_CB(skb)->dma_addr, RX_DMA_SIZE, PCI_DMA_FROMDEVICE); header = (void *)skb->data; size = le16_to_cpu(header->size); skb_put(skb, size + sizeof(*header)); skb_pull(skb, sizeof(*header)); } else { header = &_hdr; rx_done |= 0x10 << port; memcpy_fromio(header, RX_BUF(card, port), sizeof(*header)); size = le16_to_cpu(header->size); if (size > (card->buffer_size - sizeof(*header))){ dev_warn(&card->dev->dev, "Invalid buffer size\n"); continue; } skb = alloc_skb(size + 1, GFP_ATOMIC); if (!skb) { if (net_ratelimit()) dev_warn(&card->dev->dev, "Failed to allocate sk_buff for RX\n"); continue; } memcpy_fromio(skb_put(skb, size), RX_BUF(card, port) + sizeof(*header), size); } if (atmdebug) { dev_info(&card->dev->dev, "Received: port %d\n", port); dev_info(&card->dev->dev, "size: %d VPI: %d VCI: %d\n", size, le16_to_cpu(header->vpi), le16_to_cpu(header->vci)); print_buffer(skb); } switch (le16_to_cpu(header->type)) { case PKT_DATA: vcc = find_vcc(card->atmdev[port], le16_to_cpu(header->vpi), le16_to_cpu(header->vci)); if (!vcc) { if 
(net_ratelimit()) dev_warn(&card->dev->dev, "Received packet for unknown VPI.VCI %d.%d on port %d\n", le16_to_cpu(header->vpi), le16_to_cpu(header->vci), port); continue; } atm_charge(vcc, skb->truesize); vcc->push(vcc, skb); atomic_inc(&vcc->stats->rx); break; case PKT_STATUS: if (process_status(card, port, skb) && net_ratelimit()) { dev_warn(&card->dev->dev, "Bad status packet of %d bytes on port %d:\n", skb->len, port); print_buffer(skb); } dev_kfree_skb_any(skb); break; case PKT_COMMAND: default: /* FIXME: Not really, surely? */ if (process_command(card, port, skb)) break; spin_lock(&card->cli_queue_lock); if (skb_queue_len(&card->cli_queue[port]) > 10) { if (net_ratelimit()) dev_warn(&card->dev->dev, "Dropping console response on port %d\n", port); dev_kfree_skb_any(skb); } else skb_queue_tail(&card->cli_queue[port], skb); spin_unlock(&card->cli_queue_lock); break; } } /* Allocate RX skbs for any ports which need them */ if (card->using_dma && card->atmdev[port] && !card->rx_skb[port]) { struct sk_buff *skb = alloc_skb(RX_DMA_SIZE, GFP_ATOMIC); if (skb) { SKB_CB(skb)->dma_addr = pci_map_single(card->dev, skb->data, RX_DMA_SIZE, PCI_DMA_FROMDEVICE); iowrite32(SKB_CB(skb)->dma_addr, card->config_regs + RX_DMA_ADDR(port)); card->rx_skb[port] = skb; } else { if (net_ratelimit()) dev_warn(&card->dev->dev, "Failed to allocate RX skb"); /* We'll have to try again later */ tasklet_schedule(&card->tlet); } } } if (rx_done) iowrite32(rx_done, card->config_regs + FLAGS_ADDR); return; } static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci) { struct hlist_head *head; struct atm_vcc *vcc = NULL; struct hlist_node *node; struct sock *s; read_lock(&vcc_sklist_lock); head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)]; sk_for_each(s, node, head) { vcc = atm_sk(s); if (vcc->dev == dev && vcc->vci == vci && vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE && test_bit(ATM_VF_READY, &vcc->flags)) goto out; } vcc = NULL; out: read_unlock(&vcc_sklist_lock); 
return vcc; } static int list_vccs(int vci) { struct hlist_head *head; struct atm_vcc *vcc; struct hlist_node *node; struct sock *s; int num_found = 0; int i; read_lock(&vcc_sklist_lock); if (vci != 0){ head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)]; sk_for_each(s, node, head) { num_found ++; vcc = atm_sk(s); printk(KERN_DEBUG "Device: %d Vpi: %d Vci: %d\n", vcc->dev->number, vcc->vpi, vcc->vci); } } else { for(i = 0; i < VCC_HTABLE_SIZE; i++){ head = &vcc_hash[i]; sk_for_each(s, node, head) { num_found ++; vcc = atm_sk(s); printk(KERN_DEBUG "Device: %d Vpi: %d Vci: %d\n", vcc->dev->number, vcc->vpi, vcc->vci); } } } read_unlock(&vcc_sklist_lock); return num_found; } static int popen(struct atm_vcc *vcc) { struct solos_card *card = vcc->dev->dev_data; struct sk_buff *skb; struct pkt_hdr *header; if (vcc->qos.aal != ATM_AAL5) { dev_warn(&card->dev->dev, "Unsupported ATM type %d\n", vcc->qos.aal); return -EINVAL; } skb = alloc_skb(sizeof(*header), GFP_ATOMIC); if (!skb) { if (net_ratelimit()) dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n"); return -ENOMEM; } header = (void *)skb_put(skb, sizeof(*header)); header->size = cpu_to_le16(0); header->vpi = cpu_to_le16(vcc->vpi); header->vci = cpu_to_le16(vcc->vci); header->type = cpu_to_le16(PKT_POPEN); fpga_queue(card, SOLOS_CHAN(vcc->dev), skb, NULL); set_bit(ATM_VF_ADDR, &vcc->flags); set_bit(ATM_VF_READY, &vcc->flags); list_vccs(0); return 0; } static void pclose(struct atm_vcc *vcc) { struct solos_card *card = vcc->dev->dev_data; struct sk_buff *skb; struct pkt_hdr *header; skb = alloc_skb(sizeof(*header), GFP_ATOMIC); if (!skb) { dev_warn(&card->dev->dev, "Failed to allocate sk_buff in pclose()\n"); return; } header = (void *)skb_put(skb, sizeof(*header)); header->size = cpu_to_le16(0); header->vpi = cpu_to_le16(vcc->vpi); header->vci = cpu_to_le16(vcc->vci); header->type = cpu_to_le16(PKT_PCLOSE); fpga_queue(card, SOLOS_CHAN(vcc->dev), skb, NULL); clear_bit(ATM_VF_ADDR, &vcc->flags); 
clear_bit(ATM_VF_READY, &vcc->flags); /* Hold up vcc_destroy_socket() (our caller) until solos_bh() in the tasklet has finished processing any incoming packets (and, more to the point, using the vcc pointer). */ tasklet_unlock_wait(&card->tlet); return; } static int print_buffer(struct sk_buff *buf) { int len,i; char msg[500]; char item[10]; len = buf->len; for (i = 0; i < len; i++){ if(i % 8 == 0) sprintf(msg, "%02X: ", i); sprintf(item,"%02X ",*(buf->data + i)); strcat(msg, item); if(i % 8 == 7) { sprintf(item, "\n"); strcat(msg, item); printk(KERN_DEBUG "%s", msg); } } if (i % 8 != 0) { sprintf(item, "\n"); strcat(msg, item); printk(KERN_DEBUG "%s", msg); } printk(KERN_DEBUG "\n"); return 0; } static void fpga_queue(struct solos_card *card, int port, struct sk_buff *skb, struct atm_vcc *vcc) { int old_len; unsigned long flags; SKB_CB(skb)->vcc = vcc; spin_lock_irqsave(&card->tx_queue_lock, flags); old_len = skb_queue_len(&card->tx_queue[port]); skb_queue_tail(&card->tx_queue[port], skb); if (!old_len) card->tx_mask |= (1 << port); spin_unlock_irqrestore(&card->tx_queue_lock, flags); /* Theoretically we could just schedule the tasklet here, but that introduces latency we don't want -- it's noticeable */ if (!old_len) fpga_tx(card); } static uint32_t fpga_tx(struct solos_card *card) { uint32_t tx_pending, card_flags; uint32_t tx_started = 0; struct sk_buff *skb; struct atm_vcc *vcc; unsigned char port; unsigned long flags; spin_lock_irqsave(&card->tx_lock, flags); card_flags = ioread32(card->config_regs + FLAGS_ADDR); /* * The queue lock is required for _writing_ to tx_mask, but we're * OK to read it here without locking. The only potential update * that we could race with is in fpga_queue() where it sets a bit * for a new port... but it's going to call this function again if * it's doing that, anyway. 
*/ tx_pending = card->tx_mask & ~card_flags; for (port = 0; tx_pending; tx_pending >>= 1, port++) { if (tx_pending & 1) { struct sk_buff *oldskb = card->tx_skb[port]; if (oldskb) { pci_unmap_single(card->dev, SKB_CB(oldskb)->dma_addr, oldskb->len, PCI_DMA_TODEVICE); card->tx_skb[port] = NULL; } spin_lock(&card->tx_queue_lock); skb = skb_dequeue(&card->tx_queue[port]); if (!skb) card->tx_mask &= ~(1 << port); spin_unlock(&card->tx_queue_lock); if (skb && !card->using_dma) { memcpy_toio(TX_BUF(card, port), skb->data, skb->len); tx_started |= 1 << port; oldskb = skb; /* We're done with this skb already */ } else if (skb && card->using_dma) { SKB_CB(skb)->dma_addr = pci_map_single(card->dev, skb->data, skb->len, PCI_DMA_TODEVICE); card->tx_skb[port] = skb; iowrite32(SKB_CB(skb)->dma_addr, card->config_regs + TX_DMA_ADDR(port)); } if (!oldskb) continue; /* Clean up and free oldskb now it's gone */ if (atmdebug) { struct pkt_hdr *header = (void *)oldskb->data; int size = le16_to_cpu(header->size); skb_pull(oldskb, sizeof(*header)); dev_info(&card->dev->dev, "Transmitted: port %d\n", port); dev_info(&card->dev->dev, "size: %d VPI: %d VCI: %d\n", size, le16_to_cpu(header->vpi), le16_to_cpu(header->vci)); print_buffer(oldskb); } vcc = SKB_CB(oldskb)->vcc; if (vcc) { atomic_inc(&vcc->stats->tx); solos_pop(vcc, oldskb); } else dev_kfree_skb_irq(oldskb); } } /* For non-DMA TX, write the 'TX start' bit for all four ports simultaneously */ if (tx_started) iowrite32(tx_started, card->config_regs + FLAGS_ADDR); spin_unlock_irqrestore(&card->tx_lock, flags); return card_flags; } static int psend(struct atm_vcc *vcc, struct sk_buff *skb) { struct solos_card *card = vcc->dev->dev_data; struct pkt_hdr *header; int pktlen; pktlen = skb->len; if (pktlen > (BUF_SIZE - sizeof(*header))) { dev_warn(&card->dev->dev, "Length of PDU is too large. 
Dropping PDU.\n"); solos_pop(vcc, skb); return 0; } if (!skb_clone_writable(skb, sizeof(*header))) { int expand_by = 0; int ret; if (skb_headroom(skb) < sizeof(*header)) expand_by = sizeof(*header) - skb_headroom(skb); ret = pskb_expand_head(skb, expand_by, 0, GFP_ATOMIC); if (ret) { dev_warn(&card->dev->dev, "pskb_expand_head failed.\n"); solos_pop(vcc, skb); return ret; } } header = (void *)skb_push(skb, sizeof(*header)); /* This does _not_ include the size of the header */ header->size = cpu_to_le16(pktlen); header->vpi = cpu_to_le16(vcc->vpi); header->vci = cpu_to_le16(vcc->vci); header->type = cpu_to_le16(PKT_DATA); fpga_queue(card, SOLOS_CHAN(vcc->dev), skb, vcc); return 0; } static struct atmdev_ops fpga_ops = { .open = popen, .close = pclose, .ioctl = NULL, .getsockopt = NULL, .setsockopt = NULL, .send = psend, .send_oam = NULL, .phy_put = NULL, .phy_get = NULL, .change_qos = NULL, .proc_read = NULL, .owner = THIS_MODULE }; static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id) { int err; uint16_t fpga_ver; uint8_t major_ver, minor_ver; uint32_t data32; struct solos_card *card; card = kzalloc(sizeof(*card), GFP_KERNEL); if (!card) return -ENOMEM; card->dev = dev; init_waitqueue_head(&card->fw_wq); init_waitqueue_head(&card->param_wq); err = pci_enable_device(dev); if (err) { dev_warn(&dev->dev, "Failed to enable PCI device\n"); goto out; } err = pci_set_dma_mask(dev, DMA_BIT_MASK(32)); if (err) { dev_warn(&dev->dev, "Failed to set 32-bit DMA mask\n"); goto out; } err = pci_request_regions(dev, "solos"); if (err) { dev_warn(&dev->dev, "Failed to request regions\n"); goto out; } card->config_regs = pci_iomap(dev, 0, CONFIG_RAM_SIZE); if (!card->config_regs) { dev_warn(&dev->dev, "Failed to ioremap config registers\n"); goto out_release_regions; } card->buffers = pci_iomap(dev, 1, DATA_RAM_SIZE); if (!card->buffers) { dev_warn(&dev->dev, "Failed to ioremap data buffers\n"); goto out_unmap_config; } if (reset) { iowrite32(1, 
card->config_regs + FPGA_MODE); data32 = ioread32(card->config_regs + FPGA_MODE); iowrite32(0, card->config_regs + FPGA_MODE); data32 = ioread32(card->config_regs + FPGA_MODE); } data32 = ioread32(card->config_regs + FPGA_VER); fpga_ver = (data32 & 0x0000FFFF); major_ver = ((data32 & 0xFF000000) >> 24); minor_ver = ((data32 & 0x00FF0000) >> 16); card->fpga_version = FPGA_VERSION(major_ver,minor_ver); if (card->fpga_version > LEGACY_BUFFERS) card->buffer_size = BUF_SIZE; else card->buffer_size = OLD_BUF_SIZE; dev_info(&dev->dev, "Solos FPGA Version %d.%02d svn-%d\n", major_ver, minor_ver, fpga_ver); if (fpga_ver < 37 && (fpga_upgrade || firmware_upgrade || db_fpga_upgrade || db_firmware_upgrade)) { dev_warn(&dev->dev, "FPGA too old; cannot upgrade flash. Use JTAG.\n"); fpga_upgrade = firmware_upgrade = 0; db_fpga_upgrade = db_firmware_upgrade = 0; } if (card->fpga_version >= DMA_SUPPORTED) { pci_set_master(dev); card->using_dma = 1; } else { card->using_dma = 0; /* Set RX empty flag for all ports */ iowrite32(0xF0, card->config_regs + FLAGS_ADDR); } data32 = ioread32(card->config_regs + PORTS); card->nr_ports = (data32 & 0x000000FF); pci_set_drvdata(dev, card); tasklet_init(&card->tlet, solos_bh, (unsigned long)card); spin_lock_init(&card->tx_lock); spin_lock_init(&card->tx_queue_lock); spin_lock_init(&card->cli_queue_lock); spin_lock_init(&card->param_queue_lock); INIT_LIST_HEAD(&card->param_queue); err = request_irq(dev->irq, solos_irq, IRQF_SHARED, "solos-pci", card); if (err) { dev_dbg(&card->dev->dev, "Failed to request interrupt IRQ: %d\n", dev->irq); goto out_unmap_both; } iowrite32(1, card->config_regs + IRQ_EN_ADDR); if (fpga_upgrade) flash_upgrade(card, 0); if (firmware_upgrade) flash_upgrade(card, 1); if (db_fpga_upgrade) flash_upgrade(card, 2); if (db_firmware_upgrade) flash_upgrade(card, 3); err = atm_init(card, &dev->dev); if (err) goto out_free_irq; return 0; out_free_irq: iowrite32(0, card->config_regs + IRQ_EN_ADDR); free_irq(dev->irq, card); 
tasklet_kill(&card->tlet); out_unmap_both: pci_set_drvdata(dev, NULL); pci_iounmap(dev, card->buffers); out_unmap_config: pci_iounmap(dev, card->config_regs); out_release_regions: pci_release_regions(dev); out: kfree(card); return err; } static int atm_init(struct solos_card *card, struct device *parent) { int i; for (i = 0; i < card->nr_ports; i++) { struct sk_buff *skb; struct pkt_hdr *header; skb_queue_head_init(&card->tx_queue[i]); skb_queue_head_init(&card->cli_queue[i]); card->atmdev[i] = atm_dev_register("solos-pci", parent, &fpga_ops, -1, NULL); if (!card->atmdev[i]) { dev_err(&card->dev->dev, "Could not register ATM device %d\n", i); atm_remove(card); return -ENODEV; } if (device_create_file(&card->atmdev[i]->class_dev, &dev_attr_console)) dev_err(&card->dev->dev, "Could not register console for ATM device %d\n", i); if (sysfs_create_group(&card->atmdev[i]->class_dev.kobj, &solos_attr_group)) dev_err(&card->dev->dev, "Could not register parameter group for ATM device %d\n", i); dev_info(&card->dev->dev, "Registered ATM device %d\n", card->atmdev[i]->number); card->atmdev[i]->ci_range.vpi_bits = 8; card->atmdev[i]->ci_range.vci_bits = 16; card->atmdev[i]->dev_data = card; card->atmdev[i]->phy_data = (void *)(unsigned long)i; atm_dev_signal_change(card->atmdev[i], ATM_PHY_SIG_FOUND); skb = alloc_skb(sizeof(*header), GFP_ATOMIC); if (!skb) { dev_warn(&card->dev->dev, "Failed to allocate sk_buff in atm_init()\n"); continue; } header = (void *)skb_put(skb, sizeof(*header)); header->size = cpu_to_le16(0); header->vpi = cpu_to_le16(0); header->vci = cpu_to_le16(0); header->type = cpu_to_le16(PKT_STATUS); fpga_queue(card, i, skb, NULL); } return 0; } static void atm_remove(struct solos_card *card) { int i; for (i = 0; i < card->nr_ports; i++) { if (card->atmdev[i]) { struct sk_buff *skb; dev_info(&card->dev->dev, "Unregistering ATM device %d\n", card->atmdev[i]->number); sysfs_remove_group(&card->atmdev[i]->class_dev.kobj, &solos_attr_group); 
atm_dev_deregister(card->atmdev[i]); skb = card->rx_skb[i]; if (skb) { pci_unmap_single(card->dev, SKB_CB(skb)->dma_addr, RX_DMA_SIZE, PCI_DMA_FROMDEVICE); dev_kfree_skb(skb); } skb = card->tx_skb[i]; if (skb) { pci_unmap_single(card->dev, SKB_CB(skb)->dma_addr, skb->len, PCI_DMA_TODEVICE); dev_kfree_skb(skb); } while ((skb = skb_dequeue(&card->tx_queue[i]))) dev_kfree_skb(skb); } } } static void fpga_remove(struct pci_dev *dev) { struct solos_card *card = pci_get_drvdata(dev); /* Disable IRQs */ iowrite32(0, card->config_regs + IRQ_EN_ADDR); /* Reset FPGA */ iowrite32(1, card->config_regs + FPGA_MODE); (void)ioread32(card->config_regs + FPGA_MODE); atm_remove(card); free_irq(dev->irq, card); tasklet_kill(&card->tlet); /* Release device from reset */ iowrite32(0, card->config_regs + FPGA_MODE); (void)ioread32(card->config_regs + FPGA_MODE); pci_iounmap(dev, card->buffers); pci_iounmap(dev, card->config_regs); pci_release_regions(dev); pci_disable_device(dev); pci_set_drvdata(dev, NULL); kfree(card); } static struct pci_device_id fpga_pci_tbl[] __devinitdata = { { 0x10ee, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0, } }; MODULE_DEVICE_TABLE(pci,fpga_pci_tbl); static struct pci_driver fpga_driver = { .name = "solos", .id_table = fpga_pci_tbl, .probe = fpga_probe, .remove = fpga_remove, }; static int __init solos_pci_init(void) { printk(KERN_INFO "Solos PCI Driver Version %s\n", VERSION); return pci_register_driver(&fpga_driver); } static void __exit solos_pci_exit(void) { pci_unregister_driver(&fpga_driver); printk(KERN_INFO "Solos PCI Driver %s Unloaded\n", VERSION); } module_init(solos_pci_init); module_exit(solos_pci_exit);
gpl-2.0
htc-mirror/shooteru-ics-crc-3.0.16-e733189
tools/perf/builtin-probe.c
2546
11159
/* * builtin-probe.c * * Builtin probe command: Set up probe events by C expression * * Written by Masami Hiramatsu <mhiramat@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * */ #define _GNU_SOURCE #include <sys/utsname.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <errno.h> #include <stdio.h> #include <unistd.h> #include <stdlib.h> #include <string.h> #undef _GNU_SOURCE #include "perf.h" #include "builtin.h" #include "util/util.h" #include "util/strlist.h" #include "util/strfilter.h" #include "util/symbol.h" #include "util/debug.h" #include "util/debugfs.h" #include "util/parse-options.h" #include "util/probe-finder.h" #include "util/probe-event.h" #define DEFAULT_VAR_FILTER "!__k???tab_* & !__crc_*" #define DEFAULT_FUNC_FILTER "!_*" #define MAX_PATH_LEN 256 /* Session management structure */ static struct { bool list_events; bool force_add; bool show_lines; bool show_vars; bool show_ext_vars; bool show_funcs; bool mod_events; int nevents; struct perf_probe_event events[MAX_PROBES]; struct strlist *dellist; struct line_range line_range; const char *target_module; int max_probe_points; struct strfilter *filter; } params; /* Parse an event definition. Note that any error must die. 
*/ static int parse_probe_event(const char *str) { struct perf_probe_event *pev = &params.events[params.nevents]; int ret; pr_debug("probe-definition(%d): %s\n", params.nevents, str); if (++params.nevents == MAX_PROBES) { pr_err("Too many probes (> %d) were specified.", MAX_PROBES); return -1; } /* Parse a perf-probe command into event */ ret = parse_perf_probe_command(str, pev); pr_debug("%d arguments\n", pev->nargs); return ret; } static int parse_probe_event_argv(int argc, const char **argv) { int i, len, ret; char *buf; /* Bind up rest arguments */ len = 0; for (i = 0; i < argc; i++) len += strlen(argv[i]) + 1; buf = zalloc(len + 1); if (buf == NULL) return -ENOMEM; len = 0; for (i = 0; i < argc; i++) len += sprintf(&buf[len], "%s ", argv[i]); params.mod_events = true; ret = parse_probe_event(buf); free(buf); return ret; } static int opt_add_probe_event(const struct option *opt __used, const char *str, int unset __used) { if (str) { params.mod_events = true; return parse_probe_event(str); } else return 0; } static int opt_del_probe_event(const struct option *opt __used, const char *str, int unset __used) { if (str) { params.mod_events = true; if (!params.dellist) params.dellist = strlist__new(true, NULL); strlist__add(params.dellist, str); } return 0; } #ifdef DWARF_SUPPORT static int opt_show_lines(const struct option *opt __used, const char *str, int unset __used) { int ret = 0; if (str) ret = parse_line_range_desc(str, &params.line_range); INIT_LIST_HEAD(&params.line_range.line_list); params.show_lines = true; return ret; } static int opt_show_vars(const struct option *opt __used, const char *str, int unset __used) { struct perf_probe_event *pev = &params.events[params.nevents]; int ret; if (!str) return 0; ret = parse_probe_event(str); if (!ret && pev->nargs != 0) { pr_err(" Error: '--vars' doesn't accept arguments.\n"); return -EINVAL; } params.show_vars = true; return ret; } #endif static int opt_set_filter(const struct option *opt __used, const char 
*str, int unset __used) { const char *err; if (str) { pr_debug2("Set filter: %s\n", str); if (params.filter) strfilter__delete(params.filter); params.filter = strfilter__new(str, &err); if (!params.filter) { pr_err("Filter parse error at %td.\n", err - str + 1); pr_err("Source: \"%s\"\n", str); pr_err(" %*c\n", (int)(err - str + 1), '^'); return -EINVAL; } } return 0; } static const char * const probe_usage[] = { "perf probe [<options>] 'PROBEDEF' ['PROBEDEF' ...]", "perf probe [<options>] --add 'PROBEDEF' [--add 'PROBEDEF' ...]", "perf probe [<options>] --del '[GROUP:]EVENT' ...", "perf probe --list", #ifdef DWARF_SUPPORT "perf probe [<options>] --line 'LINEDESC'", "perf probe [<options>] --vars 'PROBEPOINT'", #endif NULL }; static const struct option options[] = { OPT_INCR('v', "verbose", &verbose, "be more verbose (show parsed arguments, etc)"), OPT_BOOLEAN('l', "list", &params.list_events, "list up current probe events"), OPT_CALLBACK('d', "del", NULL, "[GROUP:]EVENT", "delete a probe event.", opt_del_probe_event), OPT_CALLBACK('a', "add", NULL, #ifdef DWARF_SUPPORT "[EVENT=]FUNC[@SRC][+OFF|%return|:RL|;PT]|SRC:AL|SRC;PT" " [[NAME=]ARG ...]", #else "[EVENT=]FUNC[+OFF|%return] [[NAME=]ARG ...]", #endif "probe point definition, where\n" "\t\tGROUP:\tGroup name (optional)\n" "\t\tEVENT:\tEvent name\n" "\t\tFUNC:\tFunction name\n" "\t\tOFF:\tOffset from function entry (in byte)\n" "\t\t%return:\tPut the probe at function return\n" #ifdef DWARF_SUPPORT "\t\tSRC:\tSource code path\n" "\t\tRL:\tRelative line number from function entry.\n" "\t\tAL:\tAbsolute line number in file.\n" "\t\tPT:\tLazy expression of line code.\n" "\t\tARG:\tProbe argument (local variable name or\n" "\t\t\tkprobe-tracer argument format.)\n", #else "\t\tARG:\tProbe argument (kprobe-tracer argument format.)\n", #endif opt_add_probe_event), OPT_BOOLEAN('f', "force", &params.force_add, "forcibly add events" " with existing name"), #ifdef DWARF_SUPPORT OPT_CALLBACK('L', "line", NULL, 
"FUNC[:RLN[+NUM|-RLN2]]|SRC:ALN[+NUM|-ALN2]", "Show source code lines.", opt_show_lines), OPT_CALLBACK('V', "vars", NULL, "FUNC[@SRC][+OFF|%return|:RL|;PT]|SRC:AL|SRC;PT", "Show accessible variables on PROBEDEF", opt_show_vars), OPT_BOOLEAN('\0', "externs", &params.show_ext_vars, "Show external variables too (with --vars only)"), OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, "file", "vmlinux pathname"), OPT_STRING('s', "source", &symbol_conf.source_prefix, "directory", "path to kernel source"), OPT_STRING('m', "module", &params.target_module, "modname", "target module name"), #endif OPT__DRY_RUN(&probe_event_dry_run), OPT_INTEGER('\0', "max-probes", &params.max_probe_points, "Set how many probe points can be found for a probe."), OPT_BOOLEAN('F', "funcs", &params.show_funcs, "Show potential probe-able functions."), OPT_CALLBACK('\0', "filter", NULL, "[!]FILTER", "Set a filter (with --vars/funcs only)\n" "\t\t\t(default: \"" DEFAULT_VAR_FILTER "\" for --vars,\n" "\t\t\t \"" DEFAULT_FUNC_FILTER "\" for --funcs)", opt_set_filter), OPT_END() }; int cmd_probe(int argc, const char **argv, const char *prefix __used) { int ret; argc = parse_options(argc, argv, options, probe_usage, PARSE_OPT_STOP_AT_NON_OPTION); if (argc > 0) { if (strcmp(argv[0], "-") == 0) { pr_warning(" Error: '-' is not supported.\n"); usage_with_options(probe_usage, options); } ret = parse_probe_event_argv(argc, argv); if (ret < 0) { pr_err(" Error: Parse Error. (%d)\n", ret); return ret; } } if (params.max_probe_points == 0) params.max_probe_points = MAX_PROBES; if ((!params.nevents && !params.dellist && !params.list_events && !params.show_lines && !params.show_funcs)) usage_with_options(probe_usage, options); /* * Only consider the user's kernel image path if given. 
*/ symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL); if (params.list_events) { if (params.mod_events) { pr_err(" Error: Don't use --list with --add/--del.\n"); usage_with_options(probe_usage, options); } if (params.show_lines) { pr_err(" Error: Don't use --list with --line.\n"); usage_with_options(probe_usage, options); } if (params.show_vars) { pr_err(" Error: Don't use --list with --vars.\n"); usage_with_options(probe_usage, options); } if (params.show_funcs) { pr_err(" Error: Don't use --list with --funcs.\n"); usage_with_options(probe_usage, options); } ret = show_perf_probe_events(); if (ret < 0) pr_err(" Error: Failed to show event list. (%d)\n", ret); return ret; } if (params.show_funcs) { if (params.nevents != 0 || params.dellist) { pr_err(" Error: Don't use --funcs with" " --add/--del.\n"); usage_with_options(probe_usage, options); } if (params.show_lines) { pr_err(" Error: Don't use --funcs with --line.\n"); usage_with_options(probe_usage, options); } if (params.show_vars) { pr_err(" Error: Don't use --funcs with --vars.\n"); usage_with_options(probe_usage, options); } if (!params.filter) params.filter = strfilter__new(DEFAULT_FUNC_FILTER, NULL); ret = show_available_funcs(params.target_module, params.filter); strfilter__delete(params.filter); if (ret < 0) pr_err(" Error: Failed to show functions." " (%d)\n", ret); return ret; } #ifdef DWARF_SUPPORT if (params.show_lines) { if (params.mod_events) { pr_err(" Error: Don't use --line with" " --add/--del.\n"); usage_with_options(probe_usage, options); } if (params.show_vars) { pr_err(" Error: Don't use --line with --vars.\n"); usage_with_options(probe_usage, options); } ret = show_line_range(&params.line_range, params.target_module); if (ret < 0) pr_err(" Error: Failed to show lines. 
(%d)\n", ret); return ret; } if (params.show_vars) { if (params.mod_events) { pr_err(" Error: Don't use --vars with" " --add/--del.\n"); usage_with_options(probe_usage, options); } if (!params.filter) params.filter = strfilter__new(DEFAULT_VAR_FILTER, NULL); ret = show_available_vars(params.events, params.nevents, params.max_probe_points, params.target_module, params.filter, params.show_ext_vars); strfilter__delete(params.filter); if (ret < 0) pr_err(" Error: Failed to show vars. (%d)\n", ret); return ret; } #endif if (params.dellist) { ret = del_perf_probe_events(params.dellist); strlist__delete(params.dellist); if (ret < 0) { pr_err(" Error: Failed to delete events. (%d)\n", ret); return ret; } } if (params.nevents) { ret = add_perf_probe_events(params.events, params.nevents, params.max_probe_points, params.target_module, params.force_add); if (ret < 0) { pr_err(" Error: Failed to add events. (%d)\n", ret); return ret; } } return 0; }
gpl-2.0
high1/android_kernel_htc_golfu_reboot_old
tools/perf/builtin-probe.c
2546
11159
/* * builtin-probe.c * * Builtin probe command: Set up probe events by C expression * * Written by Masami Hiramatsu <mhiramat@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * */ #define _GNU_SOURCE #include <sys/utsname.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <errno.h> #include <stdio.h> #include <unistd.h> #include <stdlib.h> #include <string.h> #undef _GNU_SOURCE #include "perf.h" #include "builtin.h" #include "util/util.h" #include "util/strlist.h" #include "util/strfilter.h" #include "util/symbol.h" #include "util/debug.h" #include "util/debugfs.h" #include "util/parse-options.h" #include "util/probe-finder.h" #include "util/probe-event.h" #define DEFAULT_VAR_FILTER "!__k???tab_* & !__crc_*" #define DEFAULT_FUNC_FILTER "!_*" #define MAX_PATH_LEN 256 /* Session management structure */ static struct { bool list_events; bool force_add; bool show_lines; bool show_vars; bool show_ext_vars; bool show_funcs; bool mod_events; int nevents; struct perf_probe_event events[MAX_PROBES]; struct strlist *dellist; struct line_range line_range; const char *target_module; int max_probe_points; struct strfilter *filter; } params; /* Parse an event definition. Note that any error must die. 
*/ static int parse_probe_event(const char *str) { struct perf_probe_event *pev = &params.events[params.nevents]; int ret; pr_debug("probe-definition(%d): %s\n", params.nevents, str); if (++params.nevents == MAX_PROBES) { pr_err("Too many probes (> %d) were specified.", MAX_PROBES); return -1; } /* Parse a perf-probe command into event */ ret = parse_perf_probe_command(str, pev); pr_debug("%d arguments\n", pev->nargs); return ret; } static int parse_probe_event_argv(int argc, const char **argv) { int i, len, ret; char *buf; /* Bind up rest arguments */ len = 0; for (i = 0; i < argc; i++) len += strlen(argv[i]) + 1; buf = zalloc(len + 1); if (buf == NULL) return -ENOMEM; len = 0; for (i = 0; i < argc; i++) len += sprintf(&buf[len], "%s ", argv[i]); params.mod_events = true; ret = parse_probe_event(buf); free(buf); return ret; } static int opt_add_probe_event(const struct option *opt __used, const char *str, int unset __used) { if (str) { params.mod_events = true; return parse_probe_event(str); } else return 0; } static int opt_del_probe_event(const struct option *opt __used, const char *str, int unset __used) { if (str) { params.mod_events = true; if (!params.dellist) params.dellist = strlist__new(true, NULL); strlist__add(params.dellist, str); } return 0; } #ifdef DWARF_SUPPORT static int opt_show_lines(const struct option *opt __used, const char *str, int unset __used) { int ret = 0; if (str) ret = parse_line_range_desc(str, &params.line_range); INIT_LIST_HEAD(&params.line_range.line_list); params.show_lines = true; return ret; } static int opt_show_vars(const struct option *opt __used, const char *str, int unset __used) { struct perf_probe_event *pev = &params.events[params.nevents]; int ret; if (!str) return 0; ret = parse_probe_event(str); if (!ret && pev->nargs != 0) { pr_err(" Error: '--vars' doesn't accept arguments.\n"); return -EINVAL; } params.show_vars = true; return ret; } #endif static int opt_set_filter(const struct option *opt __used, const char 
*str, int unset __used) { const char *err; if (str) { pr_debug2("Set filter: %s\n", str); if (params.filter) strfilter__delete(params.filter); params.filter = strfilter__new(str, &err); if (!params.filter) { pr_err("Filter parse error at %td.\n", err - str + 1); pr_err("Source: \"%s\"\n", str); pr_err(" %*c\n", (int)(err - str + 1), '^'); return -EINVAL; } } return 0; } static const char * const probe_usage[] = { "perf probe [<options>] 'PROBEDEF' ['PROBEDEF' ...]", "perf probe [<options>] --add 'PROBEDEF' [--add 'PROBEDEF' ...]", "perf probe [<options>] --del '[GROUP:]EVENT' ...", "perf probe --list", #ifdef DWARF_SUPPORT "perf probe [<options>] --line 'LINEDESC'", "perf probe [<options>] --vars 'PROBEPOINT'", #endif NULL }; static const struct option options[] = { OPT_INCR('v', "verbose", &verbose, "be more verbose (show parsed arguments, etc)"), OPT_BOOLEAN('l', "list", &params.list_events, "list up current probe events"), OPT_CALLBACK('d', "del", NULL, "[GROUP:]EVENT", "delete a probe event.", opt_del_probe_event), OPT_CALLBACK('a', "add", NULL, #ifdef DWARF_SUPPORT "[EVENT=]FUNC[@SRC][+OFF|%return|:RL|;PT]|SRC:AL|SRC;PT" " [[NAME=]ARG ...]", #else "[EVENT=]FUNC[+OFF|%return] [[NAME=]ARG ...]", #endif "probe point definition, where\n" "\t\tGROUP:\tGroup name (optional)\n" "\t\tEVENT:\tEvent name\n" "\t\tFUNC:\tFunction name\n" "\t\tOFF:\tOffset from function entry (in byte)\n" "\t\t%return:\tPut the probe at function return\n" #ifdef DWARF_SUPPORT "\t\tSRC:\tSource code path\n" "\t\tRL:\tRelative line number from function entry.\n" "\t\tAL:\tAbsolute line number in file.\n" "\t\tPT:\tLazy expression of line code.\n" "\t\tARG:\tProbe argument (local variable name or\n" "\t\t\tkprobe-tracer argument format.)\n", #else "\t\tARG:\tProbe argument (kprobe-tracer argument format.)\n", #endif opt_add_probe_event), OPT_BOOLEAN('f', "force", &params.force_add, "forcibly add events" " with existing name"), #ifdef DWARF_SUPPORT OPT_CALLBACK('L', "line", NULL, 
"FUNC[:RLN[+NUM|-RLN2]]|SRC:ALN[+NUM|-ALN2]", "Show source code lines.", opt_show_lines), OPT_CALLBACK('V', "vars", NULL, "FUNC[@SRC][+OFF|%return|:RL|;PT]|SRC:AL|SRC;PT", "Show accessible variables on PROBEDEF", opt_show_vars), OPT_BOOLEAN('\0', "externs", &params.show_ext_vars, "Show external variables too (with --vars only)"), OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, "file", "vmlinux pathname"), OPT_STRING('s', "source", &symbol_conf.source_prefix, "directory", "path to kernel source"), OPT_STRING('m', "module", &params.target_module, "modname", "target module name"), #endif OPT__DRY_RUN(&probe_event_dry_run), OPT_INTEGER('\0', "max-probes", &params.max_probe_points, "Set how many probe points can be found for a probe."), OPT_BOOLEAN('F', "funcs", &params.show_funcs, "Show potential probe-able functions."), OPT_CALLBACK('\0', "filter", NULL, "[!]FILTER", "Set a filter (with --vars/funcs only)\n" "\t\t\t(default: \"" DEFAULT_VAR_FILTER "\" for --vars,\n" "\t\t\t \"" DEFAULT_FUNC_FILTER "\" for --funcs)", opt_set_filter), OPT_END() }; int cmd_probe(int argc, const char **argv, const char *prefix __used) { int ret; argc = parse_options(argc, argv, options, probe_usage, PARSE_OPT_STOP_AT_NON_OPTION); if (argc > 0) { if (strcmp(argv[0], "-") == 0) { pr_warning(" Error: '-' is not supported.\n"); usage_with_options(probe_usage, options); } ret = parse_probe_event_argv(argc, argv); if (ret < 0) { pr_err(" Error: Parse Error. (%d)\n", ret); return ret; } } if (params.max_probe_points == 0) params.max_probe_points = MAX_PROBES; if ((!params.nevents && !params.dellist && !params.list_events && !params.show_lines && !params.show_funcs)) usage_with_options(probe_usage, options); /* * Only consider the user's kernel image path if given. 
*/ symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL); if (params.list_events) { if (params.mod_events) { pr_err(" Error: Don't use --list with --add/--del.\n"); usage_with_options(probe_usage, options); } if (params.show_lines) { pr_err(" Error: Don't use --list with --line.\n"); usage_with_options(probe_usage, options); } if (params.show_vars) { pr_err(" Error: Don't use --list with --vars.\n"); usage_with_options(probe_usage, options); } if (params.show_funcs) { pr_err(" Error: Don't use --list with --funcs.\n"); usage_with_options(probe_usage, options); } ret = show_perf_probe_events(); if (ret < 0) pr_err(" Error: Failed to show event list. (%d)\n", ret); return ret; } if (params.show_funcs) { if (params.nevents != 0 || params.dellist) { pr_err(" Error: Don't use --funcs with" " --add/--del.\n"); usage_with_options(probe_usage, options); } if (params.show_lines) { pr_err(" Error: Don't use --funcs with --line.\n"); usage_with_options(probe_usage, options); } if (params.show_vars) { pr_err(" Error: Don't use --funcs with --vars.\n"); usage_with_options(probe_usage, options); } if (!params.filter) params.filter = strfilter__new(DEFAULT_FUNC_FILTER, NULL); ret = show_available_funcs(params.target_module, params.filter); strfilter__delete(params.filter); if (ret < 0) pr_err(" Error: Failed to show functions." " (%d)\n", ret); return ret; } #ifdef DWARF_SUPPORT if (params.show_lines) { if (params.mod_events) { pr_err(" Error: Don't use --line with" " --add/--del.\n"); usage_with_options(probe_usage, options); } if (params.show_vars) { pr_err(" Error: Don't use --line with --vars.\n"); usage_with_options(probe_usage, options); } ret = show_line_range(&params.line_range, params.target_module); if (ret < 0) pr_err(" Error: Failed to show lines. 
(%d)\n", ret); return ret; } if (params.show_vars) { if (params.mod_events) { pr_err(" Error: Don't use --vars with" " --add/--del.\n"); usage_with_options(probe_usage, options); } if (!params.filter) params.filter = strfilter__new(DEFAULT_VAR_FILTER, NULL); ret = show_available_vars(params.events, params.nevents, params.max_probe_points, params.target_module, params.filter, params.show_ext_vars); strfilter__delete(params.filter); if (ret < 0) pr_err(" Error: Failed to show vars. (%d)\n", ret); return ret; } #endif if (params.dellist) { ret = del_perf_probe_events(params.dellist); strlist__delete(params.dellist); if (ret < 0) { pr_err(" Error: Failed to delete events. (%d)\n", ret); return ret; } } if (params.nevents) { ret = add_perf_probe_events(params.events, params.nevents, params.max_probe_points, params.target_module, params.force_add); if (ret < 0) { pr_err(" Error: Failed to add events. (%d)\n", ret); return ret; } } return 0; }
gpl-2.0
SebastianFM/HTC-Rezound-overclocked-kernel
drivers/net/wireless/ath/ath5k/rfkill.c
4082
4100
/*
 * RFKILL support for ath5k
 *
 * Copyright (c) 2009 Tobias Doerffel <tobias.doerffel@gmail.com>
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include "base.h"

/*
 * Throughout this file sc->rf_kill.polarity is treated as the GPIO level
 * that means "radio killed": ath5k_rfkill_enable() drives that level,
 * ath5k_rfkill_disable() drives the opposite one, and
 * ath5k_is_rfkill_set() reports true when the pin reads that level.
 */

/*
 * Drive the rfkill GPIO to the "not killed" level (the inverse of the
 * EEPROM-provided polarity), switching the pin to output mode first.
 */
static inline void ath5k_rfkill_disable(struct ath5k_softc *sc)
{
	ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "rfkill disable (gpio:%d polarity:%d)\n",
		sc->rf_kill.gpio, sc->rf_kill.polarity);
	ath5k_hw_set_gpio_output(sc->ah, sc->rf_kill.gpio);
	ath5k_hw_set_gpio(sc->ah, sc->rf_kill.gpio, !sc->rf_kill.polarity);
}

/*
 * Drive the rfkill GPIO to the "killed" level (the EEPROM-provided
 * polarity), switching the pin to output mode first.
 */
static inline void ath5k_rfkill_enable(struct ath5k_softc *sc)
{
	ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "rfkill enable (gpio:%d polarity:%d)\n",
		sc->rf_kill.gpio, sc->rf_kill.polarity);
	ath5k_hw_set_gpio_output(sc->ah, sc->rf_kill.gpio);
	ath5k_hw_set_gpio(sc->ah, sc->rf_kill.gpio, sc->rf_kill.polarity);
}

/*
 * Arm (or re-arm) the GPIO interrupt for the rfkill switch.
 *
 * The pin is switched to input mode, its current level is sampled, and the
 * interrupt is programmed for the opposite level when @enable is true (so it
 * fires on the next toggle) or for the current level when @enable is false.
 */
static inline void ath5k_rfkill_set_intr(struct ath5k_softc *sc, bool enable)
{
	struct ath5k_hw *ah = sc->ah;
	u32 curval;

	ath5k_hw_set_gpio_input(ah, sc->rf_kill.gpio);
	curval = ath5k_hw_get_gpio(ah, sc->rf_kill.gpio);
	ath5k_hw_set_gpio_intr(ah, sc->rf_kill.gpio, enable ?
					!!curval : !curval);
}

/*
 * Return true when the hardware rfkill switch is in the "killed" position,
 * i.e. the GPIO currently reads the active polarity.
 */
static bool
ath5k_is_rfkill_set(struct ath5k_softc *sc)
{
	/* configuring GPIO for input for some reason disables rfkill */
	/*ath5k_hw_set_gpio_input(sc->ah, sc->rf_kill.gpio);*/
	return ath5k_hw_get_gpio(sc->ah, sc->rf_kill.gpio) ==
							sc->rf_kill.polarity;
}

/*
 * Tasklet run after an rfkill GPIO interrupt: sample the switch and report
 * the new hard-blocked state to mac80211.  @data is the ath5k_softc pointer
 * passed to tasklet_init() in ath5k_rfkill_hw_start().
 */
static void
ath5k_tasklet_rfkill_toggle(unsigned long data)
{
	struct ath5k_softc *sc = (void *)data;
	bool blocked;

	blocked = ath5k_is_rfkill_set(sc);
	wiphy_rfkill_set_hw_state(sc->hw->wiphy, blocked);
}

/*
 * Bring up rfkill support when the hardware starts: fetch the GPIO pin and
 * polarity from the EEPROM, set up the toggle tasklet, un-kill the radio,
 * and (if the EEPROM says an rfkill switch is fitted) arm its interrupt.
 */
void
ath5k_rfkill_hw_start(struct ath5k_hw *ah)
{
	struct ath5k_softc *sc = ah->ah_sc;

	/* read rfkill GPIO configuration from EEPROM header */
	sc->rf_kill.gpio = ah->ah_capabilities.cap_eeprom.ee_rfkill_pin;
	sc->rf_kill.polarity = ah->ah_capabilities.cap_eeprom.ee_rfkill_pol;

	tasklet_init(&sc->rf_kill.toggleq, ath5k_tasklet_rfkill_toggle,
		(unsigned long)sc);

	ath5k_rfkill_disable(sc);

	/* enable interrupt for rfkill switch */
	if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header))
		ath5k_rfkill_set_intr(sc, true);
}

/*
 * Tear down rfkill support when the hardware stops: disarm the interrupt,
 * kill the tasklet, then assert rfkill so the Wifi LED goes off.
 */
void
ath5k_rfkill_hw_stop(struct ath5k_hw *ah)
{
	struct ath5k_softc *sc = ah->ah_sc;

	/* disable interrupt for rfkill switch */
	if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header))
		ath5k_rfkill_set_intr(sc, false);

	tasklet_kill(&sc->rf_kill.toggleq);

	/* enable RFKILL when stopping HW so Wifi LED is turned off */
	ath5k_rfkill_enable(sc);
}
gpl-2.0
dhiru1602/android_kernel_samsung_jf
drivers/hwmon/max1668.c
4082
14381
/* * Copyright (c) 2011 David George <david.george@ska.ac.za> * * based on adm1021.c * some credit to Christoph Scheurer, but largely a rewrite * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> /* Addresses to scan */ static unsigned short max1668_addr_list[] = { 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END }; /* max1668 registers */ #define MAX1668_REG_TEMP(nr) (nr) #define MAX1668_REG_STAT1 0x05 #define MAX1668_REG_STAT2 0x06 #define MAX1668_REG_MAN_ID 0xfe #define MAX1668_REG_DEV_ID 0xff /* limits */ /* write high limits */ #define MAX1668_REG_LIMH_WR(nr) (0x13 + 2 * (nr)) /* write low limits */ #define MAX1668_REG_LIML_WR(nr) (0x14 + 2 * (nr)) /* read high limits */ #define MAX1668_REG_LIMH_RD(nr) (0x08 + 2 * (nr)) /* read low limits */ #define MAX1668_REG_LIML_RD(nr) (0x09 + 2 * (nr)) /* manufacturer and device ID Constants */ #define MAN_ID_MAXIM 0x4d #define DEV_ID_MAX1668 0x3 #define DEV_ID_MAX1805 0x5 #define DEV_ID_MAX1989 0xb /* read only mode module parameter */ static bool read_only; module_param(read_only, bool, 0); MODULE_PARM_DESC(read_only, "Don't set any values, read only 
mode"); enum chips { max1668, max1805, max1989 }; struct max1668_data { struct device *hwmon_dev; enum chips type; struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ /* 1x local and 4x remote */ s8 temp_max[5]; s8 temp_min[5]; s8 temp[5]; u16 alarms; }; static struct max1668_data *max1668_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct max1668_data *data = i2c_get_clientdata(client); struct max1668_data *ret = data; s32 val; int i; mutex_lock(&data->update_lock); if (data->valid && !time_after(jiffies, data->last_updated + HZ + HZ / 2)) goto abort; for (i = 0; i < 5; i++) { val = i2c_smbus_read_byte_data(client, MAX1668_REG_TEMP(i)); if (unlikely(val < 0)) { ret = ERR_PTR(val); goto abort; } data->temp[i] = (s8) val; val = i2c_smbus_read_byte_data(client, MAX1668_REG_LIMH_RD(i)); if (unlikely(val < 0)) { ret = ERR_PTR(val); goto abort; } data->temp_max[i] = (s8) val; val = i2c_smbus_read_byte_data(client, MAX1668_REG_LIML_RD(i)); if (unlikely(val < 0)) { ret = ERR_PTR(val); goto abort; } data->temp_min[i] = (s8) val; } val = i2c_smbus_read_byte_data(client, MAX1668_REG_STAT1); if (unlikely(val < 0)) { ret = ERR_PTR(val); goto abort; } data->alarms = val << 8; val = i2c_smbus_read_byte_data(client, MAX1668_REG_STAT2); if (unlikely(val < 0)) { ret = ERR_PTR(val); goto abort; } data->alarms |= val; data->last_updated = jiffies; data->valid = 1; abort: mutex_unlock(&data->update_lock); return ret; } static ssize_t show_temp(struct device *dev, struct device_attribute *devattr, char *buf) { int index = to_sensor_dev_attr(devattr)->index; struct max1668_data *data = max1668_update_device(dev); if (IS_ERR(data)) return PTR_ERR(data); return sprintf(buf, "%d\n", data->temp[index] * 1000); } static ssize_t show_temp_max(struct device *dev, struct device_attribute *devattr, char *buf) { int index = to_sensor_dev_attr(devattr)->index; struct max1668_data *data = 
max1668_update_device(dev); if (IS_ERR(data)) return PTR_ERR(data); return sprintf(buf, "%d\n", data->temp_max[index] * 1000); } static ssize_t show_temp_min(struct device *dev, struct device_attribute *devattr, char *buf) { int index = to_sensor_dev_attr(devattr)->index; struct max1668_data *data = max1668_update_device(dev); if (IS_ERR(data)) return PTR_ERR(data); return sprintf(buf, "%d\n", data->temp_min[index] * 1000); } static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, char *buf) { int index = to_sensor_dev_attr(attr)->index; struct max1668_data *data = max1668_update_device(dev); if (IS_ERR(data)) return PTR_ERR(data); return sprintf(buf, "%u\n", (data->alarms >> index) & 0x1); } static ssize_t show_fault(struct device *dev, struct device_attribute *devattr, char *buf) { int index = to_sensor_dev_attr(devattr)->index; struct max1668_data *data = max1668_update_device(dev); if (IS_ERR(data)) return PTR_ERR(data); return sprintf(buf, "%u\n", (data->alarms & (1 << 12)) && data->temp[index] == 127); } static ssize_t set_temp_max(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { int index = to_sensor_dev_attr(devattr)->index; struct i2c_client *client = to_i2c_client(dev); struct max1668_data *data = i2c_get_clientdata(client); long temp; int ret; ret = kstrtol(buf, 10, &temp); if (ret < 0) return ret; mutex_lock(&data->update_lock); data->temp_max[index] = SENSORS_LIMIT(temp/1000, -128, 127); if (i2c_smbus_write_byte_data(client, MAX1668_REG_LIMH_WR(index), data->temp_max[index])) count = -EIO; mutex_unlock(&data->update_lock); return count; } static ssize_t set_temp_min(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { int index = to_sensor_dev_attr(devattr)->index; struct i2c_client *client = to_i2c_client(dev); struct max1668_data *data = i2c_get_clientdata(client); long temp; int ret; ret = kstrtol(buf, 10, &temp); if (ret < 0) return ret; 
mutex_lock(&data->update_lock); data->temp_min[index] = SENSORS_LIMIT(temp/1000, -128, 127); if (i2c_smbus_write_byte_data(client, MAX1668_REG_LIML_WR(index), data->temp_max[index])) count = -EIO; mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0); static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp_max, set_temp_max, 0); static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO, show_temp_min, set_temp_min, 0); static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1); static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO, show_temp_max, set_temp_max, 1); static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO, show_temp_min, set_temp_min, 1); static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2); static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO, show_temp_max, set_temp_max, 2); static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO, show_temp_min, set_temp_min, 2); static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3); static SENSOR_DEVICE_ATTR(temp4_max, S_IRUGO, show_temp_max, set_temp_max, 3); static SENSOR_DEVICE_ATTR(temp4_min, S_IRUGO, show_temp_min, set_temp_min, 3); static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_temp, NULL, 4); static SENSOR_DEVICE_ATTR(temp5_max, S_IRUGO, show_temp_max, set_temp_max, 4); static SENSOR_DEVICE_ATTR(temp5_min, S_IRUGO, show_temp_min, set_temp_min, 4); static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 14); static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 13); static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 7); static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 6); static SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO, show_alarm, NULL, 5); static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_alarm, NULL, 4); static SENSOR_DEVICE_ATTR(temp4_min_alarm, S_IRUGO, show_alarm, NULL, 3); static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_alarm, NULL, 2); static 
SENSOR_DEVICE_ATTR(temp5_min_alarm, S_IRUGO, show_alarm, NULL, 1); static SENSOR_DEVICE_ATTR(temp5_max_alarm, S_IRUGO, show_alarm, NULL, 0); static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_fault, NULL, 1); static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_fault, NULL, 2); static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_fault, NULL, 3); static SENSOR_DEVICE_ATTR(temp5_fault, S_IRUGO, show_fault, NULL, 4); /* Attributes common to MAX1668, MAX1989 and MAX1805 */ static struct attribute *max1668_attribute_common[] = { &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp1_min.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp2_max.dev_attr.attr, &sensor_dev_attr_temp2_min.dev_attr.attr, &sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp3_max.dev_attr.attr, &sensor_dev_attr_temp3_min.dev_attr.attr, &sensor_dev_attr_temp3_input.dev_attr.attr, &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, &sensor_dev_attr_temp1_min_alarm.dev_attr.attr, &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, &sensor_dev_attr_temp2_min_alarm.dev_attr.attr, &sensor_dev_attr_temp3_max_alarm.dev_attr.attr, &sensor_dev_attr_temp3_min_alarm.dev_attr.attr, &sensor_dev_attr_temp2_fault.dev_attr.attr, &sensor_dev_attr_temp3_fault.dev_attr.attr, NULL }; /* Attributes not present on MAX1805 */ static struct attribute *max1668_attribute_unique[] = { &sensor_dev_attr_temp4_max.dev_attr.attr, &sensor_dev_attr_temp4_min.dev_attr.attr, &sensor_dev_attr_temp4_input.dev_attr.attr, &sensor_dev_attr_temp5_max.dev_attr.attr, &sensor_dev_attr_temp5_min.dev_attr.attr, &sensor_dev_attr_temp5_input.dev_attr.attr, &sensor_dev_attr_temp4_max_alarm.dev_attr.attr, &sensor_dev_attr_temp4_min_alarm.dev_attr.attr, &sensor_dev_attr_temp5_max_alarm.dev_attr.attr, &sensor_dev_attr_temp5_min_alarm.dev_attr.attr, &sensor_dev_attr_temp4_fault.dev_attr.attr, &sensor_dev_attr_temp5_fault.dev_attr.attr, NULL }; static umode_t max1668_attribute_mode(struct kobject 
*kobj, struct attribute *attr, int index) { umode_t ret = S_IRUGO; if (read_only) return ret; if (attr == &sensor_dev_attr_temp1_max.dev_attr.attr || attr == &sensor_dev_attr_temp2_max.dev_attr.attr || attr == &sensor_dev_attr_temp3_max.dev_attr.attr || attr == &sensor_dev_attr_temp4_max.dev_attr.attr || attr == &sensor_dev_attr_temp5_max.dev_attr.attr || attr == &sensor_dev_attr_temp1_min.dev_attr.attr || attr == &sensor_dev_attr_temp2_min.dev_attr.attr || attr == &sensor_dev_attr_temp3_min.dev_attr.attr || attr == &sensor_dev_attr_temp4_min.dev_attr.attr || attr == &sensor_dev_attr_temp5_min.dev_attr.attr) ret |= S_IWUSR; return ret; } static const struct attribute_group max1668_group_common = { .attrs = max1668_attribute_common, .is_visible = max1668_attribute_mode }; static const struct attribute_group max1668_group_unique = { .attrs = max1668_attribute_unique, .is_visible = max1668_attribute_mode }; /* Return 0 if detection is successful, -ENODEV otherwise */ static int max1668_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; const char *type_name; int man_id, dev_id; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; /* Check for unsupported part */ man_id = i2c_smbus_read_byte_data(client, MAX1668_REG_MAN_ID); if (man_id != MAN_ID_MAXIM) return -ENODEV; dev_id = i2c_smbus_read_byte_data(client, MAX1668_REG_DEV_ID); if (dev_id < 0) return -ENODEV; type_name = NULL; if (dev_id == DEV_ID_MAX1668) type_name = "max1668"; else if (dev_id == DEV_ID_MAX1805) type_name = "max1805"; else if (dev_id == DEV_ID_MAX1989) type_name = "max1989"; if (!type_name) return -ENODEV; strlcpy(info->type, type_name, I2C_NAME_SIZE); return 0; } static int max1668_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct i2c_adapter *adapter = client->adapter; struct max1668_data *data; int err; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; 
data = kzalloc(sizeof(struct max1668_data), GFP_KERNEL); if (!data) return -ENOMEM; i2c_set_clientdata(client, data); data->type = id->driver_data; mutex_init(&data->update_lock); /* Register sysfs hooks */ err = sysfs_create_group(&client->dev.kobj, &max1668_group_common); if (err) goto error_free; if (data->type == max1668 || data->type == max1989) { err = sysfs_create_group(&client->dev.kobj, &max1668_group_unique); if (err) goto error_sysrem0; } data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto error_sysrem1; } return 0; error_sysrem1: if (data->type == max1668 || data->type == max1989) sysfs_remove_group(&client->dev.kobj, &max1668_group_unique); error_sysrem0: sysfs_remove_group(&client->dev.kobj, &max1668_group_common); error_free: kfree(data); return err; } static int max1668_remove(struct i2c_client *client) { struct max1668_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); if (data->type == max1668 || data->type == max1989) sysfs_remove_group(&client->dev.kobj, &max1668_group_unique); sysfs_remove_group(&client->dev.kobj, &max1668_group_common); kfree(data); return 0; } static const struct i2c_device_id max1668_id[] = { { "max1668", max1668 }, { "max1805", max1805 }, { "max1989", max1989 }, { } }; MODULE_DEVICE_TABLE(i2c, max1668_id); /* This is the driver that will be inserted */ static struct i2c_driver max1668_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "max1668", }, .probe = max1668_probe, .remove = max1668_remove, .id_table = max1668_id, .detect = max1668_detect, .address_list = max1668_addr_list, }; module_i2c_driver(max1668_driver); MODULE_AUTHOR("David George <david.george@ska.ac.za>"); MODULE_DESCRIPTION("MAX1668 remote temperature sensor driver"); MODULE_LICENSE("GPL");
gpl-2.0
zombi-x/android_kernel_htc_m7
arch/arm/mach-imx/mach-pcm037_eet.c
7666
4339
/*
 * Copyright (C) 2009
 * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * Board support for the EET variant of the phyCORE pcm037: registers the
 * SPI DAC, the wheel/nav-pad GPIO keys and the LED pads.  Only runs when
 * the machine is a pcm037 of the PCM037_EET variant.
 */

#include <linux/gpio.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

#include <mach/common.h>
#include <mach/iomux-mx3.h>

#include <asm/mach-types.h>

#include "pcm037.h"
#include "devices-imx31.h"

/*
 * Pads switched to GPIO mode for this variant.  The trailing numeric
 * comments appear to be the resulting GPIO ids used by the key table
 * below — verify against iomux-mx3.h when changing them.
 */
static unsigned int pcm037_eet_pins[] = {
	/* Reserve and hardwire GPIO 57 high - S6E63D6 chipselect */
	IOMUX_MODE(MX31_PIN_KEY_COL7, IOMUX_CONFIG_GPIO),
	/* GPIO keys */
	IOMUX_MODE(MX31_PIN_GPIO1_0, IOMUX_CONFIG_GPIO), /* 0 */
	IOMUX_MODE(MX31_PIN_GPIO1_1, IOMUX_CONFIG_GPIO), /* 1 */
	IOMUX_MODE(MX31_PIN_GPIO1_2, IOMUX_CONFIG_GPIO), /* 2 */
	IOMUX_MODE(MX31_PIN_GPIO1_3, IOMUX_CONFIG_GPIO), /* 3 */
	IOMUX_MODE(MX31_PIN_SVEN0, IOMUX_CONFIG_GPIO), /* 32 */
	IOMUX_MODE(MX31_PIN_STX0, IOMUX_CONFIG_GPIO), /* 33 */
	IOMUX_MODE(MX31_PIN_SRX0, IOMUX_CONFIG_GPIO), /* 34 */
	IOMUX_MODE(MX31_PIN_SIMPD0, IOMUX_CONFIG_GPIO), /* 35 */
	IOMUX_MODE(MX31_PIN_RTS1, IOMUX_CONFIG_GPIO), /* 38 */
	IOMUX_MODE(MX31_PIN_CTS1, IOMUX_CONFIG_GPIO), /* 39 */
	IOMUX_MODE(MX31_PIN_KEY_ROW4, IOMUX_CONFIG_GPIO), /* 50 */
	IOMUX_MODE(MX31_PIN_KEY_ROW5, IOMUX_CONFIG_GPIO), /* 51 */
	IOMUX_MODE(MX31_PIN_KEY_ROW6, IOMUX_CONFIG_GPIO), /* 52 */
	IOMUX_MODE(MX31_PIN_KEY_ROW7, IOMUX_CONFIG_GPIO), /* 53 */

	/* LEDs */
	IOMUX_MODE(MX31_PIN_DTR_DTE1, IOMUX_CONFIG_GPIO), /* 44 */
	IOMUX_MODE(MX31_PIN_DSR_DTE1, IOMUX_CONFIG_GPIO), /* 45 */
	IOMUX_MODE(MX31_PIN_KEY_COL5, IOMUX_CONFIG_GPIO), /* 55 */
	IOMUX_MODE(MX31_PIN_KEY_COL6, IOMUX_CONFIG_GPIO), /* 56 */
};

/* SPI */
static struct spi_board_info pcm037_spi_dev[] = {
	{
		.modalias	= "dac124s085",
		.max_speed_hz	= 400000,
		.bus_num	= 0,
		.chip_select	= 0,		/* Index in pcm037_spi1_cs[] */
		.mode		= SPI_CPHA,
	},
};

/* Platform Data for MXC CSPI */
static int pcm037_spi1_cs[] = {MXC_SPI_CS(1), IOMUX_TO_GPIO(MX31_PIN_KEY_COL7)};

static const struct spi_imx_master pcm037_spi1_pdata __initconst = {
	.chipselect = pcm037_spi1_cs,
	.num_chipselect = ARRAY_SIZE(pcm037_spi1_cs),
};

/* GPIO-keys input device */
static struct gpio_keys_button pcm037_gpio_keys[] = {
	{
		.type	= EV_KEY,
		.code	= KEY_L,
		.gpio	= 0,
		.desc	= "Wheel Manual",
		.wakeup	= 0,
	}, {
		.type	= EV_KEY,
		.code	= KEY_A,
		.gpio	= 1,
		.desc	= "Wheel AF",
		.wakeup	= 0,
	}, {
		.type	= EV_KEY,
		.code	= KEY_V,
		.gpio	= 2,
		.desc	= "Wheel View",
		.wakeup	= 0,
	}, {
		.type	= EV_KEY,
		.code	= KEY_M,
		.gpio	= 3,
		.desc	= "Wheel Menu",
		.wakeup	= 0,
	}, {
		.type	= EV_KEY,
		.code	= KEY_UP,
		.gpio	= 32,
		.desc	= "Nav Pad Up",
		.wakeup	= 0,
	}, {
		.type	= EV_KEY,
		.code	= KEY_RIGHT,
		.gpio	= 33,
		.desc	= "Nav Pad Right",
		.wakeup	= 0,
	}, {
		.type	= EV_KEY,
		.code	= KEY_DOWN,
		.gpio	= 34,
		.desc	= "Nav Pad Down",
		.wakeup	= 0,
	}, {
		.type	= EV_KEY,
		.code	= KEY_LEFT,
		.gpio	= 35,
		.desc	= "Nav Pad Left",
		.wakeup	= 0,
	}, {
		.type	= EV_KEY,
		.code	= KEY_ENTER,
		.gpio	= 38,
		.desc	= "Nav Pad Ok",
		.wakeup	= 0,
	}, {
		.type	= EV_KEY,
		.code	= KEY_O,
		.gpio	= 39,
		.desc	= "Wheel Off",
		.wakeup	= 0,
	}, {
		.type	= EV_KEY,
		.code	= BTN_FORWARD,
		.gpio	= 50,
		.desc	= "Focus Forward",
		.wakeup	= 0,
	}, {
		.type	= EV_KEY,
		.code	= BTN_BACK,
		.gpio	= 51,
		.desc	= "Focus Backward",
		.wakeup	= 0,
	}, {
		.type	= EV_KEY,
		.code	= BTN_MIDDLE,
		.gpio	= 52,
		.desc	= "Release Half",
		.wakeup	= 0,
	}, {
		.type	= EV_KEY,
		.code	= BTN_EXTRA,
		.gpio	= 53,
		.desc	= "Release Full",
		.wakeup	= 0,
	},
};

static const struct gpio_keys_platform_data
		pcm037_gpio_keys_platform_data __initconst = {
	.buttons	= pcm037_gpio_keys,
	.nbuttons	= ARRAY_SIZE(pcm037_gpio_keys),
	.rep		= 0, /* No auto-repeat */
};

/*
 * Late initcall: bail out silently on non-EET boards, otherwise configure
 * the IOMUX pads and register the SPI and gpio-keys devices.
 */
static int __init eet_init_devices(void)
{
	if (!machine_is_pcm037() || pcm037_variant() != PCM037_EET)
		return 0;

	mxc_iomux_setup_multiple_pins(pcm037_eet_pins,
				ARRAY_SIZE(pcm037_eet_pins), "pcm037_eet");

	/* SPI */
	spi_register_board_info(pcm037_spi_dev, ARRAY_SIZE(pcm037_spi_dev));
	imx31_add_spi_imx0(&pcm037_spi1_pdata);

	imx_add_gpio_keys(&pcm037_gpio_keys_platform_data);

	return 0;
}

late_initcall(eet_init_devices);
gpl-2.0
alcobar/asuswrt-merlin
release/src-rt-6.x.4708/linux/linux-2.6.36/crypto/compress.c
13810
1378
/* * Cryptographic API. * * Compression operations. * * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <linux/types.h> #include <linux/crypto.h> #include <linux/errno.h> #include <linux/string.h> #include "internal.h" static int crypto_compress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { return tfm->__crt_alg->cra_compress.coa_compress(tfm, src, slen, dst, dlen); } static int crypto_decompress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { return tfm->__crt_alg->cra_compress.coa_decompress(tfm, src, slen, dst, dlen); } int crypto_init_compress_ops(struct crypto_tfm *tfm) { struct compress_tfm *ops = &tfm->crt_compress; ops->cot_compress = crypto_compress; ops->cot_decompress = crypto_decompress; return 0; } void crypto_exit_compress_ops(struct crypto_tfm *tfm) { }
gpl-2.0
ambikadash/linux-fqt
drivers/mfd/wm5110-tables.c
243
113292
/* * wm5110-tables.c -- WM5110 data tables * * Copyright 2012 Wolfson Microelectronics plc * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/mfd/arizona/core.h> #include <linux/mfd/arizona/registers.h> #include <linux/device.h> #include "arizona.h" #define WM5110_NUM_AOD_ISR 2 #define WM5110_NUM_ISR 5 static const struct reg_default wm5110_reva_patch[] = { { 0x80, 0x3 }, { 0x44, 0x20 }, { 0x45, 0x40 }, { 0x46, 0x60 }, { 0x47, 0x80 }, { 0x48, 0xa0 }, { 0x51, 0x13 }, { 0x52, 0x33 }, { 0x53, 0x53 }, { 0x54, 0x73 }, { 0x55, 0x75 }, { 0x56, 0xb3 }, { 0x2ef, 0x124 }, { 0x2ef, 0x124 }, { 0x2f0, 0x124 }, { 0x2f0, 0x124 }, { 0x2f1, 0x124 }, { 0x2f1, 0x124 }, { 0x2f2, 0x124 }, { 0x2f2, 0x124 }, { 0x2f3, 0x124 }, { 0x2f3, 0x124 }, { 0x2f4, 0x124 }, { 0x2f4, 0x124 }, { 0x2eb, 0x60 }, { 0x2ec, 0x60 }, { 0x2ed, 0x60 }, { 0xc30, 0x3e3e }, { 0xc30, 0x3e3e }, { 0xc31, 0x3e }, { 0xc32, 0x3e3e }, { 0xc32, 0x3e3e }, { 0xc33, 0x3e3e }, { 0xc33, 0x3e3e }, { 0xc34, 0x3e3e }, { 0xc34, 0x3e3e }, { 0xc35, 0x3e3e }, { 0xc35, 0x3e3e }, { 0xc36, 0x3e3e }, { 0xc36, 0x3e3e }, { 0xc37, 0x3e3e }, { 0xc37, 0x3e3e }, { 0xc38, 0x3e3e }, { 0xc38, 0x3e3e }, { 0xc30, 0x3e3e }, { 0xc30, 0x3e3e }, { 0xc39, 0x3e3e }, { 0xc39, 0x3e3e }, { 0xc3a, 0x3e3e }, { 0xc3a, 0x3e3e }, { 0xc3b, 0x3e3e }, { 0xc3b, 0x3e3e }, { 0xc3c, 0x3e }, { 0x201, 0x18a5 }, { 0x201, 0x18a5 }, { 0x201, 0x18a5 }, { 0x202, 0x4100 }, { 0x460, 0xc00 }, { 0x461, 0x8000 }, { 0x462, 0xc01 }, { 0x463, 0x50f0 }, { 0x464, 0xc01 }, { 0x465, 0x4820 }, { 0x466, 0xc01 }, { 0x466, 0xc01 }, { 0x467, 0x4040 }, { 0x468, 0xc01 }, { 0x468, 0xc01 }, { 0x469, 0x3940 }, { 0x46a, 0xc01 }, { 0x46a, 0xc01 }, { 0x46a, 0xc01 }, { 0x46b, 0x3310 }, { 0x46c, 0x801 }, { 0x46c, 0x801 }, { 0x46d, 0x2d80 }, { 0x46e, 0x801 }, { 
0x46e, 0x801 }, { 0x46f, 0x2890 }, { 0x470, 0x801 }, { 0x470, 0x801 }, { 0x471, 0x1990 }, { 0x472, 0x801 }, { 0x472, 0x801 }, { 0x473, 0x1450 }, { 0x474, 0x801 }, { 0x474, 0x801 }, { 0x474, 0x801 }, { 0x475, 0x1020 }, { 0x476, 0x801 }, { 0x476, 0x801 }, { 0x476, 0x801 }, { 0x477, 0xcd0 }, { 0x478, 0x806 }, { 0x478, 0x806 }, { 0x479, 0xa30 }, { 0x47a, 0x806 }, { 0x47a, 0x806 }, { 0x47b, 0x810 }, { 0x47c, 0x80e }, { 0x47c, 0x80e }, { 0x47d, 0x510 }, { 0x47e, 0x81f }, { 0x47e, 0x81f }, { 0x2DB, 0x0A00 }, { 0x2DD, 0x0023 }, { 0x2DF, 0x0102 }, { 0x80, 0x0 }, { 0xC20, 0x0002 }, { 0x209, 0x002A }, }; static const struct reg_default wm5110_revb_patch[] = { { 0x80, 0x3 }, { 0x36e, 0x0210 }, { 0x370, 0x0210 }, { 0x372, 0x0210 }, { 0x374, 0x0210 }, { 0x376, 0x0210 }, { 0x378, 0x0210 }, { 0x36d, 0x0028 }, { 0x36f, 0x0028 }, { 0x371, 0x0028 }, { 0x373, 0x0028 }, { 0x375, 0x0028 }, { 0x377, 0x0028 }, { 0x280, 0x2002 }, { 0x44, 0x20 }, { 0x45, 0x40 }, { 0x46, 0x60 }, { 0x47, 0x80 }, { 0x48, 0xa0 }, { 0x51, 0x13 }, { 0x52, 0x33 }, { 0x53, 0x53 }, { 0x54, 0x73 }, { 0x55, 0x93 }, { 0x56, 0xb3 }, { 0xc30, 0x3e3e }, { 0xc31, 0x3e }, { 0xc32, 0x3e3e }, { 0xc33, 0x3e3e }, { 0xc34, 0x3e3e }, { 0xc35, 0x3e3e }, { 0xc36, 0x3e3e }, { 0xc37, 0x3e3e }, { 0xc38, 0x3e3e }, { 0xc39, 0x3e3e }, { 0xc3a, 0x3e3e }, { 0xc3b, 0x3e3e }, { 0xc3c, 0x3e }, { 0x201, 0x18a5 }, { 0x202, 0x4100 }, { 0x460, 0x0c40 }, { 0x461, 0x8000 }, { 0x462, 0x0c41 }, { 0x463, 0x4820 }, { 0x464, 0x0c41 }, { 0x465, 0x4040 }, { 0x466, 0x0841 }, { 0x467, 0x3940 }, { 0x468, 0x0841 }, { 0x469, 0x2030 }, { 0x46a, 0x0842 }, { 0x46b, 0x1990 }, { 0x46c, 0x08c2 }, { 0x46d, 0x1450 }, { 0x46e, 0x08c6 }, { 0x46f, 0x1020 }, { 0x470, 0x08c6 }, { 0x471, 0x0cd0 }, { 0x472, 0x08c6 }, { 0x473, 0x0a30 }, { 0x474, 0x0442 }, { 0x475, 0x0660 }, { 0x476, 0x0446 }, { 0x477, 0x0510 }, { 0x478, 0x04c6 }, { 0x479, 0x0400 }, { 0x47a, 0x04ce }, { 0x47b, 0x0330 }, { 0x47c, 0x05df }, { 0x47d, 0x0001 }, { 0x47e, 0x07ff }, { 0x2db, 0x0a00 }, { 0x2dd, 0x0023 
}, { 0x2df, 0x0102 }, { 0x2ef, 0x924 }, { 0x2f0, 0x924 }, { 0x2f1, 0x924 }, { 0x2f2, 0x924 }, { 0x2f3, 0x924 }, { 0x2f4, 0x924 }, { 0x2eb, 0x60 }, { 0x2ec, 0x60 }, { 0x2ed, 0x60 }, { 0x4f2, 0x33e }, { 0x458, 0x0000 }, { 0x15a, 0x0003 }, { 0x80, 0x0 }, }; static const struct reg_default wm5110_revd_patch[] = { { 0x80, 0x3 }, { 0x80, 0x3 }, { 0x393, 0x27 }, { 0x394, 0x27 }, { 0x395, 0x27 }, { 0x396, 0x27 }, { 0x397, 0x27 }, { 0x398, 0x26 }, { 0x221, 0x90 }, { 0x211, 0x8 }, { 0x36c, 0x1fb }, { 0x26e, 0x64 }, { 0x26f, 0xea }, { 0x270, 0x1f16 }, { 0x51b, 0x1 }, { 0x55b, 0x1 }, { 0x59b, 0x1 }, { 0x4f0, 0x633 }, { 0x441, 0xc059 }, { 0x209, 0x27 }, { 0x80, 0x0 }, { 0x80, 0x0 }, }; /* We use a function so we can use ARRAY_SIZE() */ int wm5110_patch(struct arizona *arizona) { switch (arizona->rev) { case 0: return regmap_register_patch(arizona->regmap, wm5110_reva_patch, ARRAY_SIZE(wm5110_reva_patch)); case 1: return regmap_register_patch(arizona->regmap, wm5110_revb_patch, ARRAY_SIZE(wm5110_revb_patch)); case 3: return regmap_register_patch(arizona->regmap, wm5110_revd_patch, ARRAY_SIZE(wm5110_revd_patch)); default: return 0; } } EXPORT_SYMBOL_GPL(wm5110_patch); static const struct regmap_irq wm5110_aod_irqs[ARIZONA_NUM_IRQ] = { [ARIZONA_IRQ_MICD_CLAMP_FALL] = { .mask = ARIZONA_MICD_CLAMP_FALL_EINT1 }, [ARIZONA_IRQ_MICD_CLAMP_RISE] = { .mask = ARIZONA_MICD_CLAMP_RISE_EINT1 }, [ARIZONA_IRQ_GP5_FALL] = { .mask = ARIZONA_GP5_FALL_EINT1 }, [ARIZONA_IRQ_GP5_RISE] = { .mask = ARIZONA_GP5_RISE_EINT1 }, [ARIZONA_IRQ_JD_FALL] = { .mask = ARIZONA_JD1_FALL_EINT1 }, [ARIZONA_IRQ_JD_RISE] = { .mask = ARIZONA_JD1_RISE_EINT1 }, }; const struct regmap_irq_chip wm5110_aod = { .name = "wm5110 AOD", .status_base = ARIZONA_AOD_IRQ1, .mask_base = ARIZONA_AOD_IRQ_MASK_IRQ1, .ack_base = ARIZONA_AOD_IRQ1, .wake_base = ARIZONA_WAKE_CONTROL, .wake_invert = 1, .num_regs = 1, .irqs = wm5110_aod_irqs, .num_irqs = ARRAY_SIZE(wm5110_aod_irqs), }; EXPORT_SYMBOL_GPL(wm5110_aod); static const struct 
/*
 * Main interrupt domain sources.  .reg_offset selects which of the five
 * consecutive status/mask registers (see wm5110_irq below) carries the bit;
 * .mask is the bit within that register.
 */
regmap_irq wm5110_irqs[ARIZONA_NUM_IRQ] = {
	/* Register offset 0: GPIO edge interrupts */
	[ARIZONA_IRQ_GP4] = { .reg_offset = 0, .mask = ARIZONA_GP4_EINT1 },
	[ARIZONA_IRQ_GP3] = { .reg_offset = 0, .mask = ARIZONA_GP3_EINT1 },
	[ARIZONA_IRQ_GP2] = { .reg_offset = 0, .mask = ARIZONA_GP2_EINT1 },
	[ARIZONA_IRQ_GP1] = { .reg_offset = 0, .mask = ARIZONA_GP1_EINT1 },

	/* Register offset 1: DSP core status */
	[ARIZONA_IRQ_DSP4_RAM_RDY] = {
		.reg_offset = 1, .mask = ARIZONA_DSP4_RAM_RDY_EINT1
	},
	[ARIZONA_IRQ_DSP3_RAM_RDY] = {
		.reg_offset = 1, .mask = ARIZONA_DSP3_RAM_RDY_EINT1
	},
	[ARIZONA_IRQ_DSP2_RAM_RDY] = {
		.reg_offset = 1, .mask = ARIZONA_DSP2_RAM_RDY_EINT1
	},
	[ARIZONA_IRQ_DSP1_RAM_RDY] = {
		.reg_offset = 1, .mask = ARIZONA_DSP1_RAM_RDY_EINT1
	},
	[ARIZONA_IRQ_DSP_IRQ8] = {
		.reg_offset = 1, .mask = ARIZONA_DSP_IRQ8_EINT1
	},
	[ARIZONA_IRQ_DSP_IRQ7] = {
		.reg_offset = 1, .mask = ARIZONA_DSP_IRQ7_EINT1
	},
	[ARIZONA_IRQ_DSP_IRQ6] = {
		.reg_offset = 1, .mask = ARIZONA_DSP_IRQ6_EINT1
	},
	[ARIZONA_IRQ_DSP_IRQ5] = {
		.reg_offset = 1, .mask = ARIZONA_DSP_IRQ5_EINT1
	},
	[ARIZONA_IRQ_DSP_IRQ4] = {
		.reg_offset = 1, .mask = ARIZONA_DSP_IRQ4_EINT1
	},
	[ARIZONA_IRQ_DSP_IRQ3] = {
		.reg_offset = 1, .mask = ARIZONA_DSP_IRQ3_EINT1
	},
	[ARIZONA_IRQ_DSP_IRQ2] = {
		.reg_offset = 1, .mask = ARIZONA_DSP_IRQ2_EINT1
	},
	[ARIZONA_IRQ_DSP_IRQ1] = {
		.reg_offset = 1, .mask = ARIZONA_DSP_IRQ1_EINT1
	},

	/* Register offset 2: speaker, detection and clocking status */
	[ARIZONA_IRQ_SPK_SHUTDOWN_WARN] = {
		.reg_offset = 2, .mask = ARIZONA_SPK_SHUTDOWN_WARN_EINT1
	},
	[ARIZONA_IRQ_SPK_SHUTDOWN] = {
		.reg_offset = 2, .mask = ARIZONA_SPK_SHUTDOWN_EINT1
	},
	[ARIZONA_IRQ_HPDET] = {
		.reg_offset = 2, .mask = ARIZONA_HPDET_EINT1
	},
	[ARIZONA_IRQ_MICDET] = {
		.reg_offset = 2, .mask = ARIZONA_MICDET_EINT1
	},
	[ARIZONA_IRQ_WSEQ_DONE] = {
		.reg_offset = 2, .mask = ARIZONA_WSEQ_DONE_EINT1
	},
	[ARIZONA_IRQ_DRC2_SIG_DET] = {
		.reg_offset = 2, .mask = ARIZONA_DRC2_SIG_DET_EINT1
	},
	[ARIZONA_IRQ_DRC1_SIG_DET] = {
		.reg_offset = 2, .mask = ARIZONA_DRC1_SIG_DET_EINT1
	},
	[ARIZONA_IRQ_ASRC2_LOCK] = {
		.reg_offset = 2, .mask = ARIZONA_ASRC2_LOCK_EINT1
	},
	[ARIZONA_IRQ_ASRC1_LOCK] = {
		.reg_offset = 2, .mask = ARIZONA_ASRC1_LOCK_EINT1
	},
	[ARIZONA_IRQ_UNDERCLOCKED] = {
		.reg_offset = 2, .mask = ARIZONA_UNDERCLOCKED_EINT1
	},
	[ARIZONA_IRQ_OVERCLOCKED] = {
		.reg_offset = 2, .mask = ARIZONA_OVERCLOCKED_EINT1
	},
	[ARIZONA_IRQ_FLL2_LOCK] = {
		.reg_offset = 2, .mask = ARIZONA_FLL2_LOCK_EINT1
	},
	[ARIZONA_IRQ_FLL1_LOCK] = {
		.reg_offset = 2, .mask = ARIZONA_FLL1_LOCK_EINT1
	},
	[ARIZONA_IRQ_CLKGEN_ERR] = {
		.reg_offset = 2, .mask = ARIZONA_CLKGEN_ERR_EINT1
	},
	[ARIZONA_IRQ_CLKGEN_ERR_ASYNC] = {
		.reg_offset = 2, .mask = ARIZONA_CLKGEN_ERR_ASYNC_EINT1
	},

	/* Register offset 3: interface/configuration error conditions */
	[ARIZONA_IRQ_ASRC_CFG_ERR] = {
		.reg_offset = 3, .mask = ARIZONA_ASRC_CFG_ERR_EINT1
	},
	[ARIZONA_IRQ_AIF3_ERR] = {
		.reg_offset = 3, .mask = ARIZONA_AIF3_ERR_EINT1
	},
	[ARIZONA_IRQ_AIF2_ERR] = {
		.reg_offset = 3, .mask = ARIZONA_AIF2_ERR_EINT1
	},
	[ARIZONA_IRQ_AIF1_ERR] = {
		.reg_offset = 3, .mask = ARIZONA_AIF1_ERR_EINT1
	},
	[ARIZONA_IRQ_CTRLIF_ERR] = {
		.reg_offset = 3, .mask = ARIZONA_CTRLIF_ERR_EINT1
	},
	/* NB: IRQ name is plural, hardware bit name is singular */
	[ARIZONA_IRQ_MIXER_DROPPED_SAMPLES] = {
		.reg_offset = 3, .mask = ARIZONA_MIXER_DROPPED_SAMPLE_EINT1
	},
	[ARIZONA_IRQ_ASYNC_CLK_ENA_LOW] = {
		.reg_offset = 3, .mask = ARIZONA_ASYNC_CLK_ENA_LOW_EINT1
	},
	[ARIZONA_IRQ_SYSCLK_ENA_LOW] = {
		.reg_offset = 3, .mask = ARIZONA_SYSCLK_ENA_LOW_EINT1
	},
	[ARIZONA_IRQ_ISRC1_CFG_ERR] = {
		.reg_offset = 3, .mask = ARIZONA_ISRC1_CFG_ERR_EINT1
	},
	[ARIZONA_IRQ_ISRC2_CFG_ERR] = {
		.reg_offset = 3, .mask = ARIZONA_ISRC2_CFG_ERR_EINT1
	},

	/* Register offset 4: boot and "operation complete" status */
	[ARIZONA_IRQ_BOOT_DONE] = {
		.reg_offset = 4, .mask = ARIZONA_BOOT_DONE_EINT1
	},
	[ARIZONA_IRQ_DCS_DAC_DONE] = {
		.reg_offset = 4, .mask = ARIZONA_DCS_DAC_DONE_EINT1
	},
	[ARIZONA_IRQ_DCS_HP_DONE] = {
		.reg_offset = 4, .mask = ARIZONA_DCS_HP_DONE_EINT1
	},
	[ARIZONA_IRQ_FLL2_CLOCK_OK] = {
		.reg_offset = 4, .mask = ARIZONA_FLL2_CLOCK_OK_EINT1
	},
	[ARIZONA_IRQ_FLL1_CLOCK_OK] = {
		.reg_offset = 4, .mask = ARIZONA_FLL1_CLOCK_OK_EINT1
	},
};

/*
 * Main interrupt controller: five consecutive status/mask register pairs
 * starting at ARIZONA_INTERRUPT_STATUS_1 (initializer continues below with
 * .num_regs = 5; interrupts are acked at the status address).
 */
const struct regmap_irq_chip wm5110_irq = {
	.name = "wm5110 IRQ",
	.status_base = ARIZONA_INTERRUPT_STATUS_1,
	.mask_base = ARIZONA_INTERRUPT_STATUS_1_MASK,
	.ack_base =
ARIZONA_INTERRUPT_STATUS_1, .num_regs = 5, .irqs = wm5110_irqs, .num_irqs = ARRAY_SIZE(wm5110_irqs), }; EXPORT_SYMBOL_GPL(wm5110_irq); static const struct reg_default wm5110_reg_default[] = { { 0x00000008, 0x0019 }, /* R8 - Ctrl IF SPI CFG 1 */ { 0x00000009, 0x0001 }, /* R9 - Ctrl IF I2C1 CFG 1 */ { 0x0000000A, 0x0001 }, /* R10 - Ctrl IF I2C2 CFG 1 */ { 0x0000000B, 0x0036 }, /* R11 - Ctrl IF I2C1 CFG 2 */ { 0x0000000C, 0x0036 }, /* R12 - Ctrl IF I2C2 CFG 2 */ { 0x00000016, 0x0000 }, /* R22 - Write Sequencer Ctrl 0 */ { 0x00000017, 0x0000 }, /* R23 - Write Sequencer Ctrl 1 */ { 0x00000018, 0x0000 }, /* R24 - Write Sequencer Ctrl 2 */ { 0x00000020, 0x0000 }, /* R32 - Tone Generator 1 */ { 0x00000021, 0x1000 }, /* R33 - Tone Generator 2 */ { 0x00000022, 0x0000 }, /* R34 - Tone Generator 3 */ { 0x00000023, 0x1000 }, /* R35 - Tone Generator 4 */ { 0x00000024, 0x0000 }, /* R36 - Tone Generator 5 */ { 0x00000030, 0x0000 }, /* R48 - PWM Drive 1 */ { 0x00000031, 0x0100 }, /* R49 - PWM Drive 2 */ { 0x00000032, 0x0100 }, /* R50 - PWM Drive 3 */ { 0x00000040, 0x0000 }, /* R64 - Wake control */ { 0x00000041, 0x0000 }, /* R65 - Sequence control */ { 0x00000061, 0x01FF }, /* R97 - Sample Rate Sequence Select 1 */ { 0x00000062, 0x01FF }, /* R98 - Sample Rate Sequence Select 2 */ { 0x00000063, 0x01FF }, /* R99 - Sample Rate Sequence Select 3 */ { 0x00000064, 0x01FF }, /* R100 - Sample Rate Sequence Select 4 */ { 0x00000068, 0x01FF }, /* R104 - Always On Triggers Sequence Select 1 */ { 0x00000069, 0x01FF }, /* R105 - Always On Triggers Sequence Select 2 */ { 0x0000006A, 0x01FF }, /* R106 - Always On Triggers Sequence Select 3 */ { 0x0000006B, 0x01FF }, /* R107 - Always On Triggers Sequence Select 4 */ { 0x00000070, 0x0000 }, /* R112 - Comfort Noise Generator */ { 0x00000090, 0x0000 }, /* R144 - Haptics Control 1 */ { 0x00000091, 0x7FFF }, /* R145 - Haptics Control 2 */ { 0x00000092, 0x0000 }, /* R146 - Haptics phase 1 intensity */ { 0x00000093, 0x0000 }, /* R147 - Haptics phase 1 
duration */ { 0x00000094, 0x0000 }, /* R148 - Haptics phase 2 intensity */ { 0x00000095, 0x0000 }, /* R149 - Haptics phase 2 duration */ { 0x00000096, 0x0000 }, /* R150 - Haptics phase 3 intensity */ { 0x00000097, 0x0000 }, /* R151 - Haptics phase 3 duration */ { 0x00000100, 0x0001 }, /* R256 - Clock 32k 1 */ { 0x00000101, 0x0504 }, /* R257 - System Clock 1 */ { 0x00000102, 0x0011 }, /* R258 - Sample rate 1 */ { 0x00000103, 0x0011 }, /* R259 - Sample rate 2 */ { 0x00000104, 0x0011 }, /* R260 - Sample rate 3 */ { 0x00000112, 0x0305 }, /* R274 - Async clock 1 */ { 0x00000113, 0x0011 }, /* R275 - Async sample rate 1 */ { 0x00000149, 0x0000 }, /* R329 - Output system clock */ { 0x0000014A, 0x0000 }, /* R330 - Output async clock */ { 0x00000152, 0x0000 }, /* R338 - Rate Estimator 1 */ { 0x00000153, 0x0000 }, /* R339 - Rate Estimator 2 */ { 0x00000154, 0x0000 }, /* R340 - Rate Estimator 3 */ { 0x00000155, 0x0000 }, /* R341 - Rate Estimator 4 */ { 0x00000156, 0x0000 }, /* R342 - Rate Estimator 5 */ { 0x00000171, 0x0000 }, /* R369 - FLL1 Control 1 */ { 0x00000172, 0x0008 }, /* R370 - FLL1 Control 2 */ { 0x00000173, 0x0018 }, /* R371 - FLL1 Control 3 */ { 0x00000174, 0x007D }, /* R372 - FLL1 Control 4 */ { 0x00000175, 0x0006 }, /* R373 - FLL1 Control 5 */ { 0x00000176, 0x0000 }, /* R374 - FLL1 Control 6 */ { 0x00000177, 0x0281 }, /* R375 - FLL1 Loop Filter Test 1 */ { 0x00000178, 0x0000 }, /* R376 - FLL1 NCO Test 0 */ { 0x00000179, 0x0000 }, /* R376 - FLL1 Control 7 */ { 0x00000181, 0x0000 }, /* R385 - FLL1 Synchroniser 1 */ { 0x00000182, 0x0000 }, /* R386 - FLL1 Synchroniser 2 */ { 0x00000183, 0x0000 }, /* R387 - FLL1 Synchroniser 3 */ { 0x00000184, 0x0000 }, /* R388 - FLL1 Synchroniser 4 */ { 0x00000185, 0x0000 }, /* R389 - FLL1 Synchroniser 5 */ { 0x00000186, 0x0000 }, /* R390 - FLL1 Synchroniser 6 */ { 0x00000187, 0x0001 }, /* R390 - FLL1 Synchroniser 7 */ { 0x00000189, 0x0000 }, /* R393 - FLL1 Spread Spectrum */ { 0x0000018A, 0x0004 }, /* R394 - FLL1 GPIO Clock */ { 
0x00000191, 0x0000 }, /* R401 - FLL2 Control 1 */ { 0x00000192, 0x0008 }, /* R402 - FLL2 Control 2 */ { 0x00000193, 0x0018 }, /* R403 - FLL2 Control 3 */ { 0x00000194, 0x007D }, /* R404 - FLL2 Control 4 */ { 0x00000195, 0x000C }, /* R405 - FLL2 Control 5 */ { 0x00000196, 0x0000 }, /* R406 - FLL2 Control 6 */ { 0x00000197, 0x0000 }, /* R407 - FLL2 Loop Filter Test 1 */ { 0x00000198, 0x0000 }, /* R408 - FLL2 NCO Test 0 */ { 0x00000199, 0x0000 }, /* R408 - FLL2 Control 7 */ { 0x000001A1, 0x0000 }, /* R417 - FLL2 Synchroniser 1 */ { 0x000001A2, 0x0000 }, /* R418 - FLL2 Synchroniser 2 */ { 0x000001A3, 0x0000 }, /* R419 - FLL2 Synchroniser 3 */ { 0x000001A4, 0x0000 }, /* R420 - FLL2 Synchroniser 4 */ { 0x000001A5, 0x0000 }, /* R421 - FLL2 Synchroniser 5 */ { 0x000001A6, 0x0000 }, /* R422 - FLL2 Synchroniser 6 */ { 0x000001A7, 0x0001 }, /* R422 - FLL2 Synchroniser 7 */ { 0x000001A9, 0x0000 }, /* R425 - FLL2 Spread Spectrum */ { 0x000001AA, 0x0004 }, /* R426 - FLL2 GPIO Clock */ { 0x00000200, 0x0006 }, /* R512 - Mic Charge Pump 1 */ { 0x00000210, 0x0184 }, /* R528 - LDO1 Control 1 */ { 0x00000213, 0x03E4 }, /* R531 - LDO2 Control 1 */ { 0x00000218, 0x01A6 }, /* R536 - Mic Bias Ctrl 1 */ { 0x00000219, 0x01A6 }, /* R537 - Mic Bias Ctrl 2 */ { 0x0000021A, 0x01A6 }, /* R538 - Mic Bias Ctrl 3 */ { 0x00000293, 0x0000 }, /* R659 - Accessory Detect Mode 1 */ { 0x0000029B, 0x0020 }, /* R667 - Headphone Detect 1 */ { 0x0000029C, 0x0000 }, /* R668 - Headphone Detect 2 */ { 0x000002A2, 0x0000 }, /* R674 - Micd clamp control */ { 0x000002A3, 0x1102 }, /* R675 - Mic Detect 1 */ { 0x000002A4, 0x009F }, /* R676 - Mic Detect 2 */ { 0x000002A5, 0x0000 }, /* R677 - Mic Detect 3 */ { 0x000002A6, 0x3737 }, /* R678 - Mic Detect Level 1 */ { 0x000002A7, 0x372C }, /* R679 - Mic Detect Level 2 */ { 0x000002A8, 0x1422 }, /* R680 - Mic Detect Level 3 */ { 0x000002A9, 0x300A }, /* R681 - Mic Detect Level 4 */ { 0x000002C3, 0x0000 }, /* R707 - Mic noise mix control 1 */ { 0x000002D3, 0x0000 }, /* R723 
- Jack detect analogue */ { 0x00000300, 0x0000 }, /* R768 - Input Enables */ { 0x00000308, 0x0000 }, /* R776 - Input Rate */ { 0x00000309, 0x0022 }, /* R777 - Input Volume Ramp */ { 0x0000030C, 0x0002 }, /* R780 - HPF Control */ { 0x00000310, 0x2080 }, /* R784 - IN1L Control */ { 0x00000311, 0x0180 }, /* R785 - ADC Digital Volume 1L */ { 0x00000312, 0x0000 }, /* R786 - DMIC1L Control */ { 0x00000314, 0x0080 }, /* R788 - IN1R Control */ { 0x00000315, 0x0180 }, /* R789 - ADC Digital Volume 1R */ { 0x00000316, 0x0000 }, /* R790 - DMIC1R Control */ { 0x00000318, 0x2080 }, /* R792 - IN2L Control */ { 0x00000319, 0x0180 }, /* R793 - ADC Digital Volume 2L */ { 0x0000031A, 0x0000 }, /* R794 - DMIC2L Control */ { 0x0000031C, 0x0080 }, /* R796 - IN2R Control */ { 0x0000031D, 0x0180 }, /* R797 - ADC Digital Volume 2R */ { 0x0000031E, 0x0000 }, /* R798 - DMIC2R Control */ { 0x00000320, 0x2080 }, /* R800 - IN3L Control */ { 0x00000321, 0x0180 }, /* R801 - ADC Digital Volume 3L */ { 0x00000322, 0x0000 }, /* R802 - DMIC3L Control */ { 0x00000324, 0x0080 }, /* R804 - IN3R Control */ { 0x00000325, 0x0180 }, /* R805 - ADC Digital Volume 3R */ { 0x00000326, 0x0000 }, /* R806 - DMIC3R Control */ { 0x00000328, 0x2000 }, /* R808 - IN4L Control */ { 0x00000329, 0x0180 }, /* R809 - ADC Digital Volume 4L */ { 0x0000032A, 0x0000 }, /* R810 - DMIC4L Control */ { 0x0000032C, 0x0000 }, /* R812 - IN4R Control */ { 0x0000032D, 0x0180 }, /* R813 - ADC Digital Volume 4R */ { 0x0000032E, 0x0000 }, /* R814 - DMIC4R Control */ { 0x00000400, 0x0000 }, /* R1024 - Output Enables 1 */ { 0x00000408, 0x0000 }, /* R1032 - Output Rate 1 */ { 0x00000409, 0x0022 }, /* R1033 - Output Volume Ramp */ { 0x00000410, 0x0080 }, /* R1040 - Output Path Config 1L */ { 0x00000411, 0x0180 }, /* R1041 - DAC Digital Volume 1L */ { 0x00000412, 0x0080 }, /* R1042 - DAC Volume Limit 1L */ { 0x00000413, 0x0001 }, /* R1043 - Noise Gate Select 1L */ { 0x00000414, 0x0080 }, /* R1044 - Output Path Config 1R */ { 0x00000415, 0x0180 
}, /* R1045 - DAC Digital Volume 1R */ { 0x00000416, 0x0080 }, /* R1046 - DAC Volume Limit 1R */ { 0x00000417, 0x0002 }, /* R1047 - Noise Gate Select 1R */ { 0x00000418, 0x0080 }, /* R1048 - Output Path Config 2L */ { 0x00000419, 0x0180 }, /* R1049 - DAC Digital Volume 2L */ { 0x0000041A, 0x0080 }, /* R1050 - DAC Volume Limit 2L */ { 0x0000041B, 0x0004 }, /* R1051 - Noise Gate Select 2L */ { 0x0000041C, 0x0080 }, /* R1052 - Output Path Config 2R */ { 0x0000041D, 0x0180 }, /* R1053 - DAC Digital Volume 2R */ { 0x0000041E, 0x0080 }, /* R1054 - DAC Volume Limit 2R */ { 0x0000041F, 0x0008 }, /* R1055 - Noise Gate Select 2R */ { 0x00000420, 0x0080 }, /* R1056 - Output Path Config 3L */ { 0x00000421, 0x0180 }, /* R1057 - DAC Digital Volume 3L */ { 0x00000422, 0x0080 }, /* R1058 - DAC Volume Limit 3L */ { 0x00000423, 0x0010 }, /* R1059 - Noise Gate Select 3L */ { 0x00000424, 0x0080 }, /* R1060 - Output Path Config 3R */ { 0x00000425, 0x0180 }, /* R1061 - DAC Digital Volume 3R */ { 0x00000426, 0x0080 }, /* R1062 - DAC Volume Limit 3R */ { 0x00000427, 0x0020 }, /* R1063 - Noise Gate Select 3R */ { 0x00000428, 0x0000 }, /* R1064 - Output Path Config 4L */ { 0x00000429, 0x0180 }, /* R1065 - DAC Digital Volume 4L */ { 0x0000042A, 0x0080 }, /* R1066 - Out Volume 4L */ { 0x0000042B, 0x0040 }, /* R1067 - Noise Gate Select 4L */ { 0x0000042C, 0x0000 }, /* R1068 - Output Path Config 4R */ { 0x0000042D, 0x0180 }, /* R1069 - DAC Digital Volume 4R */ { 0x0000042E, 0x0080 }, /* R1070 - Out Volume 4R */ { 0x0000042F, 0x0080 }, /* R1071 - Noise Gate Select 4R */ { 0x00000430, 0x0000 }, /* R1072 - Output Path Config 5L */ { 0x00000431, 0x0180 }, /* R1073 - DAC Digital Volume 5L */ { 0x00000432, 0x0080 }, /* R1074 - DAC Volume Limit 5L */ { 0x00000433, 0x0100 }, /* R1075 - Noise Gate Select 5L */ { 0x00000434, 0x0000 }, /* R1076 - Output Path Config 5R */ { 0x00000435, 0x0180 }, /* R1077 - DAC Digital Volume 5R */ { 0x00000436, 0x0080 }, /* R1078 - DAC Volume Limit 5R */ { 0x00000437, 
0x0200 }, /* R1079 - Noise Gate Select 5R */ { 0x00000438, 0x0000 }, /* R1080 - Output Path Config 6L */ { 0x00000439, 0x0180 }, /* R1081 - DAC Digital Volume 6L */ { 0x0000043A, 0x0080 }, /* R1082 - DAC Volume Limit 6L */ { 0x0000043B, 0x0400 }, /* R1083 - Noise Gate Select 6L */ { 0x0000043C, 0x0000 }, /* R1084 - Output Path Config 6R */ { 0x0000043D, 0x0180 }, /* R1085 - DAC Digital Volume 6R */ { 0x0000043E, 0x0080 }, /* R1086 - DAC Volume Limit 6R */ { 0x0000043F, 0x0800 }, /* R1087 - Noise Gate Select 6R */ { 0x00000440, 0x8FFF }, /* R1088 - DRE Enable */ { 0x00000450, 0x0000 }, /* R1104 - DAC AEC Control 1 */ { 0x00000458, 0x0000 }, /* R1112 - Noise Gate Control */ { 0x00000480, 0x0040 }, /* R1152 - Class W ANC Threshold 1 */ { 0x00000481, 0x0040 }, /* R1153 - Class W ANC Threshold 2 */ { 0x00000490, 0x0069 }, /* R1168 - PDM SPK1 CTRL 1 */ { 0x00000491, 0x0000 }, /* R1169 - PDM SPK1 CTRL 2 */ { 0x00000492, 0x0069 }, /* R1170 - PDM SPK2 CTRL 1 */ { 0x00000493, 0x0000 }, /* R1171 - PDM SPK2 CTRL 2 */ { 0x000004A0, 0x3480 }, /* R1184 - HP1 Short Circuit Ctrl */ { 0x000004A1, 0x3480 }, /* R1185 - HP2 Short Circuit Ctrl */ { 0x000004A2, 0x3480 }, /* R1186 - HP3 Short Circuit Ctrl */ { 0x00000500, 0x000C }, /* R1280 - AIF1 BCLK Ctrl */ { 0x00000501, 0x0008 }, /* R1281 - AIF1 Tx Pin Ctrl */ { 0x00000502, 0x0000 }, /* R1282 - AIF1 Rx Pin Ctrl */ { 0x00000503, 0x0000 }, /* R1283 - AIF1 Rate Ctrl */ { 0x00000504, 0x0000 }, /* R1284 - AIF1 Format */ { 0x00000505, 0x0040 }, /* R1285 - AIF1 Tx BCLK Rate */ { 0x00000506, 0x0040 }, /* R1286 - AIF1 Rx BCLK Rate */ { 0x00000507, 0x1818 }, /* R1287 - AIF1 Frame Ctrl 1 */ { 0x00000508, 0x1818 }, /* R1288 - AIF1 Frame Ctrl 2 */ { 0x00000509, 0x0000 }, /* R1289 - AIF1 Frame Ctrl 3 */ { 0x0000050A, 0x0001 }, /* R1290 - AIF1 Frame Ctrl 4 */ { 0x0000050B, 0x0002 }, /* R1291 - AIF1 Frame Ctrl 5 */ { 0x0000050C, 0x0003 }, /* R1292 - AIF1 Frame Ctrl 6 */ { 0x0000050D, 0x0004 }, /* R1293 - AIF1 Frame Ctrl 7 */ { 0x0000050E, 0x0005 }, 
/* R1294 - AIF1 Frame Ctrl 8 */ { 0x0000050F, 0x0006 }, /* R1295 - AIF1 Frame Ctrl 9 */ { 0x00000510, 0x0007 }, /* R1296 - AIF1 Frame Ctrl 10 */ { 0x00000511, 0x0000 }, /* R1297 - AIF1 Frame Ctrl 11 */ { 0x00000512, 0x0001 }, /* R1298 - AIF1 Frame Ctrl 12 */ { 0x00000513, 0x0002 }, /* R1299 - AIF1 Frame Ctrl 13 */ { 0x00000514, 0x0003 }, /* R1300 - AIF1 Frame Ctrl 14 */ { 0x00000515, 0x0004 }, /* R1301 - AIF1 Frame Ctrl 15 */ { 0x00000516, 0x0005 }, /* R1302 - AIF1 Frame Ctrl 16 */ { 0x00000517, 0x0006 }, /* R1303 - AIF1 Frame Ctrl 17 */ { 0x00000518, 0x0007 }, /* R1304 - AIF1 Frame Ctrl 18 */ { 0x00000519, 0x0000 }, /* R1305 - AIF1 Tx Enables */ { 0x0000051A, 0x0000 }, /* R1306 - AIF1 Rx Enables */ { 0x00000540, 0x000C }, /* R1344 - AIF2 BCLK Ctrl */ { 0x00000541, 0x0008 }, /* R1345 - AIF2 Tx Pin Ctrl */ { 0x00000542, 0x0000 }, /* R1346 - AIF2 Rx Pin Ctrl */ { 0x00000543, 0x0000 }, /* R1347 - AIF2 Rate Ctrl */ { 0x00000544, 0x0000 }, /* R1348 - AIF2 Format */ { 0x00000545, 0x0040 }, /* R1349 - AIF2 Tx BCLK Rate */ { 0x00000546, 0x0040 }, /* R1350 - AIF2 Rx BCLK Rate */ { 0x00000547, 0x1818 }, /* R1351 - AIF2 Frame Ctrl 1 */ { 0x00000548, 0x1818 }, /* R1352 - AIF2 Frame Ctrl 2 */ { 0x00000549, 0x0000 }, /* R1353 - AIF2 Frame Ctrl 3 */ { 0x0000054A, 0x0001 }, /* R1354 - AIF2 Frame Ctrl 4 */ { 0x00000551, 0x0000 }, /* R1361 - AIF2 Frame Ctrl 11 */ { 0x00000552, 0x0001 }, /* R1362 - AIF2 Frame Ctrl 12 */ { 0x00000559, 0x0000 }, /* R1369 - AIF2 Tx Enables */ { 0x0000055A, 0x0000 }, /* R1370 - AIF2 Rx Enables */ { 0x00000580, 0x000C }, /* R1408 - AIF3 BCLK Ctrl */ { 0x00000581, 0x0008 }, /* R1409 - AIF3 Tx Pin Ctrl */ { 0x00000582, 0x0000 }, /* R1410 - AIF3 Rx Pin Ctrl */ { 0x00000583, 0x0000 }, /* R1411 - AIF3 Rate Ctrl */ { 0x00000584, 0x0000 }, /* R1412 - AIF3 Format */ { 0x00000585, 0x0040 }, /* R1413 - AIF3 Tx BCLK Rate */ { 0x00000586, 0x0040 }, /* R1414 - AIF3 Rx BCLK Rate */ { 0x00000587, 0x1818 }, /* R1415 - AIF3 Frame Ctrl 1 */ { 0x00000588, 0x1818 }, /* R1416 
- AIF3 Frame Ctrl 2 */ { 0x00000589, 0x0000 }, /* R1417 - AIF3 Frame Ctrl 3 */ { 0x0000058A, 0x0001 }, /* R1418 - AIF3 Frame Ctrl 4 */ { 0x00000591, 0x0000 }, /* R1425 - AIF3 Frame Ctrl 11 */ { 0x00000592, 0x0001 }, /* R1426 - AIF3 Frame Ctrl 12 */ { 0x00000599, 0x0000 }, /* R1433 - AIF3 Tx Enables */ { 0x0000059A, 0x0000 }, /* R1434 - AIF3 Rx Enables */ { 0x000005E3, 0x0004 }, /* R1507 - SLIMbus Framer Ref Gear */ { 0x000005E5, 0x0000 }, /* R1509 - SLIMbus Rates 1 */ { 0x000005E6, 0x0000 }, /* R1510 - SLIMbus Rates 2 */ { 0x000005E7, 0x0000 }, /* R1511 - SLIMbus Rates 3 */ { 0x000005E8, 0x0000 }, /* R1512 - SLIMbus Rates 4 */ { 0x000005E9, 0x0000 }, /* R1513 - SLIMbus Rates 5 */ { 0x000005EA, 0x0000 }, /* R1514 - SLIMbus Rates 6 */ { 0x000005EB, 0x0000 }, /* R1515 - SLIMbus Rates 7 */ { 0x000005EC, 0x0000 }, /* R1516 - SLIMbus Rates 8 */ { 0x000005F5, 0x0000 }, /* R1525 - SLIMbus RX Channel Enable */ { 0x000005F6, 0x0000 }, /* R1526 - SLIMbus TX Channel Enable */ { 0x00000640, 0x0000 }, /* R1600 - PWM1MIX Input 1 Source */ { 0x00000641, 0x0080 }, /* R1601 - PWM1MIX Input 1 Volume */ { 0x00000642, 0x0000 }, /* R1602 - PWM1MIX Input 2 Source */ { 0x00000643, 0x0080 }, /* R1603 - PWM1MIX Input 2 Volume */ { 0x00000644, 0x0000 }, /* R1604 - PWM1MIX Input 3 Source */ { 0x00000645, 0x0080 }, /* R1605 - PWM1MIX Input 3 Volume */ { 0x00000646, 0x0000 }, /* R1606 - PWM1MIX Input 4 Source */ { 0x00000647, 0x0080 }, /* R1607 - PWM1MIX Input 4 Volume */ { 0x00000648, 0x0000 }, /* R1608 - PWM2MIX Input 1 Source */ { 0x00000649, 0x0080 }, /* R1609 - PWM2MIX Input 1 Volume */ { 0x0000064A, 0x0000 }, /* R1610 - PWM2MIX Input 2 Source */ { 0x0000064B, 0x0080 }, /* R1611 - PWM2MIX Input 2 Volume */ { 0x0000064C, 0x0000 }, /* R1612 - PWM2MIX Input 3 Source */ { 0x0000064D, 0x0080 }, /* R1613 - PWM2MIX Input 3 Volume */ { 0x0000064E, 0x0000 }, /* R1614 - PWM2MIX Input 4 Source */ { 0x0000064F, 0x0080 }, /* R1615 - PWM2MIX Input 4 Volume */ { 0x00000660, 0x0000 }, /* R1632 - MICMIX 
Input 1 Source */ { 0x00000661, 0x0080 }, /* R1633 - MICMIX Input 1 Volume */ { 0x00000662, 0x0000 }, /* R1634 - MICMIX Input 2 Source */ { 0x00000663, 0x0080 }, /* R1635 - MICMIX Input 2 Volume */ { 0x00000664, 0x0000 }, /* R1636 - MICMIX Input 3 Source */ { 0x00000665, 0x0080 }, /* R1637 - MICMIX Input 3 Volume */ { 0x00000666, 0x0000 }, /* R1638 - MICMIX Input 4 Source */ { 0x00000667, 0x0080 }, /* R1639 - MICMIX Input 4 Volume */ { 0x00000668, 0x0000 }, /* R1640 - NOISEMIX Input 1 Source */ { 0x00000669, 0x0080 }, /* R1641 - NOISEMIX Input 1 Volume */ { 0x0000066A, 0x0000 }, /* R1642 - NOISEMIX Input 2 Source */ { 0x0000066B, 0x0080 }, /* R1643 - NOISEMIX Input 2 Volume */ { 0x0000066C, 0x0000 }, /* R1644 - NOISEMIX Input 3 Source */ { 0x0000066D, 0x0080 }, /* R1645 - NOISEMIX Input 3 Volume */ { 0x0000066E, 0x0000 }, /* R1646 - NOISEMIX Input 4 Source */ { 0x0000066F, 0x0080 }, /* R1647 - NOISEMIX Input 4 Volume */ { 0x00000680, 0x0000 }, /* R1664 - OUT1LMIX Input 1 Source */ { 0x00000681, 0x0080 }, /* R1665 - OUT1LMIX Input 1 Volume */ { 0x00000682, 0x0000 }, /* R1666 - OUT1LMIX Input 2 Source */ { 0x00000683, 0x0080 }, /* R1667 - OUT1LMIX Input 2 Volume */ { 0x00000684, 0x0000 }, /* R1668 - OUT1LMIX Input 3 Source */ { 0x00000685, 0x0080 }, /* R1669 - OUT1LMIX Input 3 Volume */ { 0x00000686, 0x0000 }, /* R1670 - OUT1LMIX Input 4 Source */ { 0x00000687, 0x0080 }, /* R1671 - OUT1LMIX Input 4 Volume */ { 0x00000688, 0x0000 }, /* R1672 - OUT1RMIX Input 1 Source */ { 0x00000689, 0x0080 }, /* R1673 - OUT1RMIX Input 1 Volume */ { 0x0000068A, 0x0000 }, /* R1674 - OUT1RMIX Input 2 Source */ { 0x0000068B, 0x0080 }, /* R1675 - OUT1RMIX Input 2 Volume */ { 0x0000068C, 0x0000 }, /* R1676 - OUT1RMIX Input 3 Source */ { 0x0000068D, 0x0080 }, /* R1677 - OUT1RMIX Input 3 Volume */ { 0x0000068E, 0x0000 }, /* R1678 - OUT1RMIX Input 4 Source */ { 0x0000068F, 0x0080 }, /* R1679 - OUT1RMIX Input 4 Volume */ { 0x00000690, 0x0000 }, /* R1680 - OUT2LMIX Input 1 Source */ { 
0x00000691, 0x0080 }, /* R1681 - OUT2LMIX Input 1 Volume */ { 0x00000692, 0x0000 }, /* R1682 - OUT2LMIX Input 2 Source */ { 0x00000693, 0x0080 }, /* R1683 - OUT2LMIX Input 2 Volume */ { 0x00000694, 0x0000 }, /* R1684 - OUT2LMIX Input 3 Source */ { 0x00000695, 0x0080 }, /* R1685 - OUT2LMIX Input 3 Volume */ { 0x00000696, 0x0000 }, /* R1686 - OUT2LMIX Input 4 Source */ { 0x00000697, 0x0080 }, /* R1687 - OUT2LMIX Input 4 Volume */ { 0x00000698, 0x0000 }, /* R1688 - OUT2RMIX Input 1 Source */ { 0x00000699, 0x0080 }, /* R1689 - OUT2RMIX Input 1 Volume */ { 0x0000069A, 0x0000 }, /* R1690 - OUT2RMIX Input 2 Source */ { 0x0000069B, 0x0080 }, /* R1691 - OUT2RMIX Input 2 Volume */ { 0x0000069C, 0x0000 }, /* R1692 - OUT2RMIX Input 3 Source */ { 0x0000069D, 0x0080 }, /* R1693 - OUT2RMIX Input 3 Volume */ { 0x0000069E, 0x0000 }, /* R1694 - OUT2RMIX Input 4 Source */ { 0x0000069F, 0x0080 }, /* R1695 - OUT2RMIX Input 4 Volume */ { 0x000006A0, 0x0000 }, /* R1696 - OUT3LMIX Input 1 Source */ { 0x000006A1, 0x0080 }, /* R1697 - OUT3LMIX Input 1 Volume */ { 0x000006A2, 0x0000 }, /* R1698 - OUT3LMIX Input 2 Source */ { 0x000006A3, 0x0080 }, /* R1699 - OUT3LMIX Input 2 Volume */ { 0x000006A4, 0x0000 }, /* R1700 - OUT3LMIX Input 3 Source */ { 0x000006A5, 0x0080 }, /* R1701 - OUT3LMIX Input 3 Volume */ { 0x000006A6, 0x0000 }, /* R1702 - OUT3LMIX Input 4 Source */ { 0x000006A7, 0x0080 }, /* R1703 - OUT3LMIX Input 4 Volume */ { 0x000006A8, 0x0000 }, /* R1704 - OUT3RMIX Input 1 Source */ { 0x000006A9, 0x0080 }, /* R1705 - OUT3RMIX Input 1 Volume */ { 0x000006AA, 0x0000 }, /* R1706 - OUT3RMIX Input 2 Source */ { 0x000006AB, 0x0080 }, /* R1707 - OUT3RMIX Input 2 Volume */ { 0x000006AC, 0x0000 }, /* R1708 - OUT3RMIX Input 3 Source */ { 0x000006AD, 0x0080 }, /* R1709 - OUT3RMIX Input 3 Volume */ { 0x000006AE, 0x0000 }, /* R1710 - OUT3RMIX Input 4 Source */ { 0x000006AF, 0x0080 }, /* R1711 - OUT3RMIX Input 4 Volume */ { 0x000006B0, 0x0000 }, /* R1712 - OUT4LMIX Input 1 Source */ { 0x000006B1, 
0x0080 }, /* R1713 - OUT4LMIX Input 1 Volume */ { 0x000006B2, 0x0000 }, /* R1714 - OUT4LMIX Input 2 Source */ { 0x000006B3, 0x0080 }, /* R1715 - OUT4LMIX Input 2 Volume */ { 0x000006B4, 0x0000 }, /* R1716 - OUT4LMIX Input 3 Source */ { 0x000006B5, 0x0080 }, /* R1717 - OUT4LMIX Input 3 Volume */ { 0x000006B6, 0x0000 }, /* R1718 - OUT4LMIX Input 4 Source */ { 0x000006B7, 0x0080 }, /* R1719 - OUT4LMIX Input 4 Volume */ { 0x000006B8, 0x0000 }, /* R1720 - OUT4RMIX Input 1 Source */ { 0x000006B9, 0x0080 }, /* R1721 - OUT4RMIX Input 1 Volume */ { 0x000006BA, 0x0000 }, /* R1722 - OUT4RMIX Input 2 Source */ { 0x000006BB, 0x0080 }, /* R1723 - OUT4RMIX Input 2 Volume */ { 0x000006BC, 0x0000 }, /* R1724 - OUT4RMIX Input 3 Source */ { 0x000006BD, 0x0080 }, /* R1725 - OUT4RMIX Input 3 Volume */ { 0x000006BE, 0x0000 }, /* R1726 - OUT4RMIX Input 4 Source */ { 0x000006BF, 0x0080 }, /* R1727 - OUT4RMIX Input 4 Volume */ { 0x000006C0, 0x0000 }, /* R1728 - OUT5LMIX Input 1 Source */ { 0x000006C1, 0x0080 }, /* R1729 - OUT5LMIX Input 1 Volume */ { 0x000006C2, 0x0000 }, /* R1730 - OUT5LMIX Input 2 Source */ { 0x000006C3, 0x0080 }, /* R1731 - OUT5LMIX Input 2 Volume */ { 0x000006C4, 0x0000 }, /* R1732 - OUT5LMIX Input 3 Source */ { 0x000006C5, 0x0080 }, /* R1733 - OUT5LMIX Input 3 Volume */ { 0x000006C6, 0x0000 }, /* R1734 - OUT5LMIX Input 4 Source */ { 0x000006C7, 0x0080 }, /* R1735 - OUT5LMIX Input 4 Volume */ { 0x000006C8, 0x0000 }, /* R1736 - OUT5RMIX Input 1 Source */ { 0x000006C9, 0x0080 }, /* R1737 - OUT5RMIX Input 1 Volume */ { 0x000006CA, 0x0000 }, /* R1738 - OUT5RMIX Input 2 Source */ { 0x000006CB, 0x0080 }, /* R1739 - OUT5RMIX Input 2 Volume */ { 0x000006CC, 0x0000 }, /* R1740 - OUT5RMIX Input 3 Source */ { 0x000006CD, 0x0080 }, /* R1741 - OUT5RMIX Input 3 Volume */ { 0x000006CE, 0x0000 }, /* R1742 - OUT5RMIX Input 4 Source */ { 0x000006CF, 0x0080 }, /* R1743 - OUT5RMIX Input 4 Volume */ { 0x000006D0, 0x0000 }, /* R1744 - OUT6LMIX Input 1 Source */ { 0x000006D1, 0x0080 }, /* 
R1745 - OUT6LMIX Input 1 Volume */ { 0x000006D2, 0x0000 }, /* R1746 - OUT6LMIX Input 2 Source */ { 0x000006D3, 0x0080 }, /* R1747 - OUT6LMIX Input 2 Volume */ { 0x000006D4, 0x0000 }, /* R1748 - OUT6LMIX Input 3 Source */ { 0x000006D5, 0x0080 }, /* R1749 - OUT6LMIX Input 3 Volume */ { 0x000006D6, 0x0000 }, /* R1750 - OUT6LMIX Input 4 Source */ { 0x000006D7, 0x0080 }, /* R1751 - OUT6LMIX Input 4 Volume */ { 0x000006D8, 0x0000 }, /* R1752 - OUT6RMIX Input 1 Source */ { 0x000006D9, 0x0080 }, /* R1753 - OUT6RMIX Input 1 Volume */ { 0x000006DA, 0x0000 }, /* R1754 - OUT6RMIX Input 2 Source */ { 0x000006DB, 0x0080 }, /* R1755 - OUT6RMIX Input 2 Volume */ { 0x000006DC, 0x0000 }, /* R1756 - OUT6RMIX Input 3 Source */ { 0x000006DD, 0x0080 }, /* R1757 - OUT6RMIX Input 3 Volume */ { 0x000006DE, 0x0000 }, /* R1758 - OUT6RMIX Input 4 Source */ { 0x000006DF, 0x0080 }, /* R1759 - OUT6RMIX Input 4 Volume */ { 0x00000700, 0x0000 }, /* R1792 - AIF1TX1MIX Input 1 Source */ { 0x00000701, 0x0080 }, /* R1793 - AIF1TX1MIX Input 1 Volume */ { 0x00000702, 0x0000 }, /* R1794 - AIF1TX1MIX Input 2 Source */ { 0x00000703, 0x0080 }, /* R1795 - AIF1TX1MIX Input 2 Volume */ { 0x00000704, 0x0000 }, /* R1796 - AIF1TX1MIX Input 3 Source */ { 0x00000705, 0x0080 }, /* R1797 - AIF1TX1MIX Input 3 Volume */ { 0x00000706, 0x0000 }, /* R1798 - AIF1TX1MIX Input 4 Source */ { 0x00000707, 0x0080 }, /* R1799 - AIF1TX1MIX Input 4 Volume */ { 0x00000708, 0x0000 }, /* R1800 - AIF1TX2MIX Input 1 Source */ { 0x00000709, 0x0080 }, /* R1801 - AIF1TX2MIX Input 1 Volume */ { 0x0000070A, 0x0000 }, /* R1802 - AIF1TX2MIX Input 2 Source */ { 0x0000070B, 0x0080 }, /* R1803 - AIF1TX2MIX Input 2 Volume */ { 0x0000070C, 0x0000 }, /* R1804 - AIF1TX2MIX Input 3 Source */ { 0x0000070D, 0x0080 }, /* R1805 - AIF1TX2MIX Input 3 Volume */ { 0x0000070E, 0x0000 }, /* R1806 - AIF1TX2MIX Input 4 Source */ { 0x0000070F, 0x0080 }, /* R1807 - AIF1TX2MIX Input 4 Volume */ { 0x00000710, 0x0000 }, /* R1808 - AIF1TX3MIX Input 1 Source */ { 
0x00000711, 0x0080 }, /* R1809 - AIF1TX3MIX Input 1 Volume */ { 0x00000712, 0x0000 }, /* R1810 - AIF1TX3MIX Input 2 Source */ { 0x00000713, 0x0080 }, /* R1811 - AIF1TX3MIX Input 2 Volume */ { 0x00000714, 0x0000 }, /* R1812 - AIF1TX3MIX Input 3 Source */ { 0x00000715, 0x0080 }, /* R1813 - AIF1TX3MIX Input 3 Volume */ { 0x00000716, 0x0000 }, /* R1814 - AIF1TX3MIX Input 4 Source */ { 0x00000717, 0x0080 }, /* R1815 - AIF1TX3MIX Input 4 Volume */ { 0x00000718, 0x0000 }, /* R1816 - AIF1TX4MIX Input 1 Source */ { 0x00000719, 0x0080 }, /* R1817 - AIF1TX4MIX Input 1 Volume */ { 0x0000071A, 0x0000 }, /* R1818 - AIF1TX4MIX Input 2 Source */ { 0x0000071B, 0x0080 }, /* R1819 - AIF1TX4MIX Input 2 Volume */ { 0x0000071C, 0x0000 }, /* R1820 - AIF1TX4MIX Input 3 Source */ { 0x0000071D, 0x0080 }, /* R1821 - AIF1TX4MIX Input 3 Volume */ { 0x0000071E, 0x0000 }, /* R1822 - AIF1TX4MIX Input 4 Source */ { 0x0000071F, 0x0080 }, /* R1823 - AIF1TX4MIX Input 4 Volume */ { 0x00000720, 0x0000 }, /* R1824 - AIF1TX5MIX Input 1 Source */ { 0x00000721, 0x0080 }, /* R1825 - AIF1TX5MIX Input 1 Volume */ { 0x00000722, 0x0000 }, /* R1826 - AIF1TX5MIX Input 2 Source */ { 0x00000723, 0x0080 }, /* R1827 - AIF1TX5MIX Input 2 Volume */ { 0x00000724, 0x0000 }, /* R1828 - AIF1TX5MIX Input 3 Source */ { 0x00000725, 0x0080 }, /* R1829 - AIF1TX5MIX Input 3 Volume */ { 0x00000726, 0x0000 }, /* R1830 - AIF1TX5MIX Input 4 Source */ { 0x00000727, 0x0080 }, /* R1831 - AIF1TX5MIX Input 4 Volume */ { 0x00000728, 0x0000 }, /* R1832 - AIF1TX6MIX Input 1 Source */ { 0x00000729, 0x0080 }, /* R1833 - AIF1TX6MIX Input 1 Volume */ { 0x0000072A, 0x0000 }, /* R1834 - AIF1TX6MIX Input 2 Source */ { 0x0000072B, 0x0080 }, /* R1835 - AIF1TX6MIX Input 2 Volume */ { 0x0000072C, 0x0000 }, /* R1836 - AIF1TX6MIX Input 3 Source */ { 0x0000072D, 0x0080 }, /* R1837 - AIF1TX6MIX Input 3 Volume */ { 0x0000072E, 0x0000 }, /* R1838 - AIF1TX6MIX Input 4 Source */ { 0x0000072F, 0x0080 }, /* R1839 - AIF1TX6MIX Input 4 Volume */ { 0x00000730, 
0x0000 }, /* R1840 - AIF1TX7MIX Input 1 Source */ { 0x00000731, 0x0080 }, /* R1841 - AIF1TX7MIX Input 1 Volume */ { 0x00000732, 0x0000 }, /* R1842 - AIF1TX7MIX Input 2 Source */ { 0x00000733, 0x0080 }, /* R1843 - AIF1TX7MIX Input 2 Volume */ { 0x00000734, 0x0000 }, /* R1844 - AIF1TX7MIX Input 3 Source */ { 0x00000735, 0x0080 }, /* R1845 - AIF1TX7MIX Input 3 Volume */ { 0x00000736, 0x0000 }, /* R1846 - AIF1TX7MIX Input 4 Source */ { 0x00000737, 0x0080 }, /* R1847 - AIF1TX7MIX Input 4 Volume */ { 0x00000738, 0x0000 }, /* R1848 - AIF1TX8MIX Input 1 Source */ { 0x00000739, 0x0080 }, /* R1849 - AIF1TX8MIX Input 1 Volume */ { 0x0000073A, 0x0000 }, /* R1850 - AIF1TX8MIX Input 2 Source */ { 0x0000073B, 0x0080 }, /* R1851 - AIF1TX8MIX Input 2 Volume */ { 0x0000073C, 0x0000 }, /* R1852 - AIF1TX8MIX Input 3 Source */ { 0x0000073D, 0x0080 }, /* R1853 - AIF1TX8MIX Input 3 Volume */ { 0x0000073E, 0x0000 }, /* R1854 - AIF1TX8MIX Input 4 Source */ { 0x0000073F, 0x0080 }, /* R1855 - AIF1TX8MIX Input 4 Volume */ { 0x00000740, 0x0000 }, /* R1856 - AIF2TX1MIX Input 1 Source */ { 0x00000741, 0x0080 }, /* R1857 - AIF2TX1MIX Input 1 Volume */ { 0x00000742, 0x0000 }, /* R1858 - AIF2TX1MIX Input 2 Source */ { 0x00000743, 0x0080 }, /* R1859 - AIF2TX1MIX Input 2 Volume */ { 0x00000744, 0x0000 }, /* R1860 - AIF2TX1MIX Input 3 Source */ { 0x00000745, 0x0080 }, /* R1861 - AIF2TX1MIX Input 3 Volume */ { 0x00000746, 0x0000 }, /* R1862 - AIF2TX1MIX Input 4 Source */ { 0x00000747, 0x0080 }, /* R1863 - AIF2TX1MIX Input 4 Volume */ { 0x00000748, 0x0000 }, /* R1864 - AIF2TX2MIX Input 1 Source */ { 0x00000749, 0x0080 }, /* R1865 - AIF2TX2MIX Input 1 Volume */ { 0x0000074A, 0x0000 }, /* R1866 - AIF2TX2MIX Input 2 Source */ { 0x0000074B, 0x0080 }, /* R1867 - AIF2TX2MIX Input 2 Volume */ { 0x0000074C, 0x0000 }, /* R1868 - AIF2TX2MIX Input 3 Source */ { 0x0000074D, 0x0080 }, /* R1869 - AIF2TX2MIX Input 3 Volume */ { 0x0000074E, 0x0000 }, /* R1870 - AIF2TX2MIX Input 4 Source */ { 0x0000074F, 0x0080 }, /* 
R1871 - AIF2TX2MIX Input 4 Volume */ { 0x00000750, 0x0000 }, /* R1872 - AIF2TX3MIX Input 1 Source */ { 0x00000751, 0x0080 }, /* R1873 - AIF2TX3MIX Input 1 Volume */ { 0x00000752, 0x0000 }, /* R1874 - AIF2TX3MIX Input 2 Source */ { 0x00000753, 0x0080 }, /* R1875 - AIF2TX3MIX Input 2 Volume */ { 0x00000754, 0x0000 }, /* R1876 - AIF2TX3MIX Input 3 Source */ { 0x00000755, 0x0080 }, /* R1877 - AIF2TX3MIX Input 3 Volume */ { 0x00000756, 0x0000 }, /* R1878 - AIF2TX3MIX Input 4 Source */ { 0x00000757, 0x0080 }, /* R1879 - AIF2TX3MIX Input 4 Volume */ { 0x00000758, 0x0000 }, /* R1880 - AIF2TX4MIX Input 1 Source */ { 0x00000759, 0x0080 }, /* R1881 - AIF2TX4MIX Input 1 Volume */ { 0x0000075A, 0x0000 }, /* R1882 - AIF2TX4MIX Input 2 Source */ { 0x0000075B, 0x0080 }, /* R1883 - AIF2TX4MIX Input 2 Volume */ { 0x0000075C, 0x0000 }, /* R1884 - AIF2TX4MIX Input 3 Source */ { 0x0000075D, 0x0080 }, /* R1885 - AIF2TX4MIX Input 3 Volume */ { 0x0000075E, 0x0000 }, /* R1886 - AIF2TX4MIX Input 4 Source */ { 0x0000075F, 0x0080 }, /* R1887 - AIF2TX4MIX Input 4 Volume */ { 0x00000760, 0x0000 }, /* R1888 - AIF2TX5MIX Input 1 Source */ { 0x00000761, 0x0080 }, /* R1889 - AIF2TX5MIX Input 1 Volume */ { 0x00000762, 0x0000 }, /* R1890 - AIF2TX5MIX Input 2 Source */ { 0x00000763, 0x0080 }, /* R1891 - AIF2TX5MIX Input 2 Volume */ { 0x00000764, 0x0000 }, /* R1892 - AIF2TX5MIX Input 3 Source */ { 0x00000765, 0x0080 }, /* R1893 - AIF2TX5MIX Input 3 Volume */ { 0x00000766, 0x0000 }, /* R1894 - AIF2TX5MIX Input 4 Source */ { 0x00000767, 0x0080 }, /* R1895 - AIF2TX5MIX Input 4 Volume */ { 0x00000768, 0x0000 }, /* R1896 - AIF2TX6MIX Input 1 Source */ { 0x00000769, 0x0080 }, /* R1897 - AIF2TX6MIX Input 1 Volume */ { 0x0000076A, 0x0000 }, /* R1898 - AIF2TX6MIX Input 2 Source */ { 0x0000076B, 0x0080 }, /* R1899 - AIF2TX6MIX Input 2 Volume */ { 0x0000076C, 0x0000 }, /* R1900 - AIF2TX6MIX Input 3 Source */ { 0x0000076D, 0x0080 }, /* R1901 - AIF2TX6MIX Input 3 Volume */ { 0x0000076E, 0x0000 }, /* R1902 - 
AIF2TX6MIX Input 4 Source */ { 0x0000076F, 0x0080 }, /* R1903 - AIF2TX6MIX Input 4 Volume */ { 0x00000780, 0x0000 }, /* R1920 - AIF3TX1MIX Input 1 Source */ { 0x00000781, 0x0080 }, /* R1921 - AIF3TX1MIX Input 1 Volume */ { 0x00000782, 0x0000 }, /* R1922 - AIF3TX1MIX Input 2 Source */ { 0x00000783, 0x0080 }, /* R1923 - AIF3TX1MIX Input 2 Volume */ { 0x00000784, 0x0000 }, /* R1924 - AIF3TX1MIX Input 3 Source */ { 0x00000785, 0x0080 }, /* R1925 - AIF3TX1MIX Input 3 Volume */ { 0x00000786, 0x0000 }, /* R1926 - AIF3TX1MIX Input 4 Source */ { 0x00000787, 0x0080 }, /* R1927 - AIF3TX1MIX Input 4 Volume */ { 0x00000788, 0x0000 }, /* R1928 - AIF3TX2MIX Input 1 Source */ { 0x00000789, 0x0080 }, /* R1929 - AIF3TX2MIX Input 1 Volume */ { 0x0000078A, 0x0000 }, /* R1930 - AIF3TX2MIX Input 2 Source */ { 0x0000078B, 0x0080 }, /* R1931 - AIF3TX2MIX Input 2 Volume */ { 0x0000078C, 0x0000 }, /* R1932 - AIF3TX2MIX Input 3 Source */ { 0x0000078D, 0x0080 }, /* R1933 - AIF3TX2MIX Input 3 Volume */ { 0x0000078E, 0x0000 }, /* R1934 - AIF3TX2MIX Input 4 Source */ { 0x0000078F, 0x0080 }, /* R1935 - AIF3TX2MIX Input 4 Volume */ { 0x000007C0, 0x0000 }, /* R1984 - SLIMTX1MIX Input 1 Source */ { 0x000007C1, 0x0080 }, /* R1985 - SLIMTX1MIX Input 1 Volume */ { 0x000007C2, 0x0000 }, /* R1986 - SLIMTX1MIX Input 2 Source */ { 0x000007C3, 0x0080 }, /* R1987 - SLIMTX1MIX Input 2 Volume */ { 0x000007C4, 0x0000 }, /* R1988 - SLIMTX1MIX Input 3 Source */ { 0x000007C5, 0x0080 }, /* R1989 - SLIMTX1MIX Input 3 Volume */ { 0x000007C6, 0x0000 }, /* R1990 - SLIMTX1MIX Input 4 Source */ { 0x000007C7, 0x0080 }, /* R1991 - SLIMTX1MIX Input 4 Volume */ { 0x000007C8, 0x0000 }, /* R1992 - SLIMTX2MIX Input 1 Source */ { 0x000007C9, 0x0080 }, /* R1993 - SLIMTX2MIX Input 1 Volume */ { 0x000007CA, 0x0000 }, /* R1994 - SLIMTX2MIX Input 2 Source */ { 0x000007CB, 0x0080 }, /* R1995 - SLIMTX2MIX Input 2 Volume */ { 0x000007CC, 0x0000 }, /* R1996 - SLIMTX2MIX Input 3 Source */ { 0x000007CD, 0x0080 }, /* R1997 - SLIMTX2MIX 
Input 3 Volume */ { 0x000007CE, 0x0000 }, /* R1998 - SLIMTX2MIX Input 4 Source */ { 0x000007CF, 0x0080 }, /* R1999 - SLIMTX2MIX Input 4 Volume */ { 0x000007D0, 0x0000 }, /* R2000 - SLIMTX3MIX Input 1 Source */ { 0x000007D1, 0x0080 }, /* R2001 - SLIMTX3MIX Input 1 Volume */ { 0x000007D2, 0x0000 }, /* R2002 - SLIMTX3MIX Input 2 Source */ { 0x000007D3, 0x0080 }, /* R2003 - SLIMTX3MIX Input 2 Volume */ { 0x000007D4, 0x0000 }, /* R2004 - SLIMTX3MIX Input 3 Source */ { 0x000007D5, 0x0080 }, /* R2005 - SLIMTX3MIX Input 3 Volume */ { 0x000007D6, 0x0000 }, /* R2006 - SLIMTX3MIX Input 4 Source */ { 0x000007D7, 0x0080 }, /* R2007 - SLIMTX3MIX Input 4 Volume */ { 0x000007D8, 0x0000 }, /* R2008 - SLIMTX4MIX Input 1 Source */ { 0x000007D9, 0x0080 }, /* R2009 - SLIMTX4MIX Input 1 Volume */ { 0x000007DA, 0x0000 }, /* R2010 - SLIMTX4MIX Input 2 Source */ { 0x000007DB, 0x0080 }, /* R2011 - SLIMTX4MIX Input 2 Volume */ { 0x000007DC, 0x0000 }, /* R2012 - SLIMTX4MIX Input 3 Source */ { 0x000007DD, 0x0080 }, /* R2013 - SLIMTX4MIX Input 3 Volume */ { 0x000007DE, 0x0000 }, /* R2014 - SLIMTX4MIX Input 4 Source */ { 0x000007DF, 0x0080 }, /* R2015 - SLIMTX4MIX Input 4 Volume */ { 0x000007E0, 0x0000 }, /* R2016 - SLIMTX5MIX Input 1 Source */ { 0x000007E1, 0x0080 }, /* R2017 - SLIMTX5MIX Input 1 Volume */ { 0x000007E2, 0x0000 }, /* R2018 - SLIMTX5MIX Input 2 Source */ { 0x000007E3, 0x0080 }, /* R2019 - SLIMTX5MIX Input 2 Volume */ { 0x000007E4, 0x0000 }, /* R2020 - SLIMTX5MIX Input 3 Source */ { 0x000007E5, 0x0080 }, /* R2021 - SLIMTX5MIX Input 3 Volume */ { 0x000007E6, 0x0000 }, /* R2022 - SLIMTX5MIX Input 4 Source */ { 0x000007E7, 0x0080 }, /* R2023 - SLIMTX5MIX Input 4 Volume */ { 0x000007E8, 0x0000 }, /* R2024 - SLIMTX6MIX Input 1 Source */ { 0x000007E9, 0x0080 }, /* R2025 - SLIMTX6MIX Input 1 Volume */ { 0x000007EA, 0x0000 }, /* R2026 - SLIMTX6MIX Input 2 Source */ { 0x000007EB, 0x0080 }, /* R2027 - SLIMTX6MIX Input 2 Volume */ { 0x000007EC, 0x0000 }, /* R2028 - SLIMTX6MIX Input 3 Source 
*/ { 0x000007ED, 0x0080 }, /* R2029 - SLIMTX6MIX Input 3 Volume */ { 0x000007EE, 0x0000 }, /* R2030 - SLIMTX6MIX Input 4 Source */ { 0x000007EF, 0x0080 }, /* R2031 - SLIMTX6MIX Input 4 Volume */ { 0x000007F0, 0x0000 }, /* R2032 - SLIMTX7MIX Input 1 Source */ { 0x000007F1, 0x0080 }, /* R2033 - SLIMTX7MIX Input 1 Volume */ { 0x000007F2, 0x0000 }, /* R2034 - SLIMTX7MIX Input 2 Source */ { 0x000007F3, 0x0080 }, /* R2035 - SLIMTX7MIX Input 2 Volume */ { 0x000007F4, 0x0000 }, /* R2036 - SLIMTX7MIX Input 3 Source */ { 0x000007F5, 0x0080 }, /* R2037 - SLIMTX7MIX Input 3 Volume */ { 0x000007F6, 0x0000 }, /* R2038 - SLIMTX7MIX Input 4 Source */ { 0x000007F7, 0x0080 }, /* R2039 - SLIMTX7MIX Input 4 Volume */ { 0x000007F8, 0x0000 }, /* R2040 - SLIMTX8MIX Input 1 Source */ { 0x000007F9, 0x0080 }, /* R2041 - SLIMTX8MIX Input 1 Volume */ { 0x000007FA, 0x0000 }, /* R2042 - SLIMTX8MIX Input 2 Source */ { 0x000007FB, 0x0080 }, /* R2043 - SLIMTX8MIX Input 2 Volume */ { 0x000007FC, 0x0000 }, /* R2044 - SLIMTX8MIX Input 3 Source */ { 0x000007FD, 0x0080 }, /* R2045 - SLIMTX8MIX Input 3 Volume */ { 0x000007FE, 0x0000 }, /* R2046 - SLIMTX8MIX Input 4 Source */ { 0x000007FF, 0x0080 }, /* R2047 - SLIMTX8MIX Input 4 Volume */ { 0x00000880, 0x0000 }, /* R2176 - EQ1MIX Input 1 Source */ { 0x00000881, 0x0080 }, /* R2177 - EQ1MIX Input 1 Volume */ { 0x00000882, 0x0000 }, /* R2178 - EQ1MIX Input 2 Source */ { 0x00000883, 0x0080 }, /* R2179 - EQ1MIX Input 2 Volume */ { 0x00000884, 0x0000 }, /* R2180 - EQ1MIX Input 3 Source */ { 0x00000885, 0x0080 }, /* R2181 - EQ1MIX Input 3 Volume */ { 0x00000886, 0x0000 }, /* R2182 - EQ1MIX Input 4 Source */ { 0x00000887, 0x0080 }, /* R2183 - EQ1MIX Input 4 Volume */ { 0x00000888, 0x0000 }, /* R2184 - EQ2MIX Input 1 Source */ { 0x00000889, 0x0080 }, /* R2185 - EQ2MIX Input 1 Volume */ { 0x0000088A, 0x0000 }, /* R2186 - EQ2MIX Input 2 Source */ { 0x0000088B, 0x0080 }, /* R2187 - EQ2MIX Input 2 Volume */ { 0x0000088C, 0x0000 }, /* R2188 - EQ2MIX Input 3 Source */ 
{ 0x0000088D, 0x0080 }, /* R2189 - EQ2MIX Input 3 Volume */ { 0x0000088E, 0x0000 }, /* R2190 - EQ2MIX Input 4 Source */ { 0x0000088F, 0x0080 }, /* R2191 - EQ2MIX Input 4 Volume */ { 0x00000890, 0x0000 }, /* R2192 - EQ3MIX Input 1 Source */ { 0x00000891, 0x0080 }, /* R2193 - EQ3MIX Input 1 Volume */ { 0x00000892, 0x0000 }, /* R2194 - EQ3MIX Input 2 Source */ { 0x00000893, 0x0080 }, /* R2195 - EQ3MIX Input 2 Volume */ { 0x00000894, 0x0000 }, /* R2196 - EQ3MIX Input 3 Source */ { 0x00000895, 0x0080 }, /* R2197 - EQ3MIX Input 3 Volume */ { 0x00000896, 0x0000 }, /* R2198 - EQ3MIX Input 4 Source */ { 0x00000897, 0x0080 }, /* R2199 - EQ3MIX Input 4 Volume */ { 0x00000898, 0x0000 }, /* R2200 - EQ4MIX Input 1 Source */ { 0x00000899, 0x0080 }, /* R2201 - EQ4MIX Input 1 Volume */ { 0x0000089A, 0x0000 }, /* R2202 - EQ4MIX Input 2 Source */ { 0x0000089B, 0x0080 }, /* R2203 - EQ4MIX Input 2 Volume */ { 0x0000089C, 0x0000 }, /* R2204 - EQ4MIX Input 3 Source */ { 0x0000089D, 0x0080 }, /* R2205 - EQ4MIX Input 3 Volume */ { 0x0000089E, 0x0000 }, /* R2206 - EQ4MIX Input 4 Source */ { 0x0000089F, 0x0080 }, /* R2207 - EQ4MIX Input 4 Volume */ { 0x000008C0, 0x0000 }, /* R2240 - DRC1LMIX Input 1 Source */ { 0x000008C1, 0x0080 }, /* R2241 - DRC1LMIX Input 1 Volume */ { 0x000008C2, 0x0000 }, /* R2242 - DRC1LMIX Input 2 Source */ { 0x000008C3, 0x0080 }, /* R2243 - DRC1LMIX Input 2 Volume */ { 0x000008C4, 0x0000 }, /* R2244 - DRC1LMIX Input 3 Source */ { 0x000008C5, 0x0080 }, /* R2245 - DRC1LMIX Input 3 Volume */ { 0x000008C6, 0x0000 }, /* R2246 - DRC1LMIX Input 4 Source */ { 0x000008C7, 0x0080 }, /* R2247 - DRC1LMIX Input 4 Volume */ { 0x000008C8, 0x0000 }, /* R2248 - DRC1RMIX Input 1 Source */ { 0x000008C9, 0x0080 }, /* R2249 - DRC1RMIX Input 1 Volume */ { 0x000008CA, 0x0000 }, /* R2250 - DRC1RMIX Input 2 Source */ { 0x000008CB, 0x0080 }, /* R2251 - DRC1RMIX Input 2 Volume */ { 0x000008CC, 0x0000 }, /* R2252 - DRC1RMIX Input 3 Source */ { 0x000008CD, 0x0080 }, /* R2253 - DRC1RMIX Input 3 
Volume */ { 0x000008CE, 0x0000 }, /* R2254 - DRC1RMIX Input 4 Source */ { 0x000008CF, 0x0080 }, /* R2255 - DRC1RMIX Input 4 Volume */ { 0x000008D0, 0x0000 }, /* R2256 - DRC2LMIX Input 1 Source */ { 0x000008D1, 0x0080 }, /* R2257 - DRC2LMIX Input 1 Volume */ { 0x000008D2, 0x0000 }, /* R2258 - DRC2LMIX Input 2 Source */ { 0x000008D3, 0x0080 }, /* R2259 - DRC2LMIX Input 2 Volume */ { 0x000008D4, 0x0000 }, /* R2260 - DRC2LMIX Input 3 Source */ { 0x000008D5, 0x0080 }, /* R2261 - DRC2LMIX Input 3 Volume */ { 0x000008D6, 0x0000 }, /* R2262 - DRC2LMIX Input 4 Source */ { 0x000008D7, 0x0080 }, /* R2263 - DRC2LMIX Input 4 Volume */ { 0x000008D8, 0x0000 }, /* R2264 - DRC2RMIX Input 1 Source */ { 0x000008D9, 0x0080 }, /* R2265 - DRC2RMIX Input 1 Volume */ { 0x000008DA, 0x0000 }, /* R2266 - DRC2RMIX Input 2 Source */ { 0x000008DB, 0x0080 }, /* R2267 - DRC2RMIX Input 2 Volume */ { 0x000008DC, 0x0000 }, /* R2268 - DRC2RMIX Input 3 Source */ { 0x000008DD, 0x0080 }, /* R2269 - DRC2RMIX Input 3 Volume */ { 0x000008DE, 0x0000 }, /* R2270 - DRC2RMIX Input 4 Source */ { 0x000008DF, 0x0080 }, /* R2271 - DRC2RMIX Input 4 Volume */ { 0x00000900, 0x0000 }, /* R2304 - HPLP1MIX Input 1 Source */ { 0x00000901, 0x0080 }, /* R2305 - HPLP1MIX Input 1 Volume */ { 0x00000902, 0x0000 }, /* R2306 - HPLP1MIX Input 2 Source */ { 0x00000903, 0x0080 }, /* R2307 - HPLP1MIX Input 2 Volume */ { 0x00000904, 0x0000 }, /* R2308 - HPLP1MIX Input 3 Source */ { 0x00000905, 0x0080 }, /* R2309 - HPLP1MIX Input 3 Volume */ { 0x00000906, 0x0000 }, /* R2310 - HPLP1MIX Input 4 Source */ { 0x00000907, 0x0080 }, /* R2311 - HPLP1MIX Input 4 Volume */ { 0x00000908, 0x0000 }, /* R2312 - HPLP2MIX Input 1 Source */ { 0x00000909, 0x0080 }, /* R2313 - HPLP2MIX Input 1 Volume */ { 0x0000090A, 0x0000 }, /* R2314 - HPLP2MIX Input 2 Source */ { 0x0000090B, 0x0080 }, /* R2315 - HPLP2MIX Input 2 Volume */ { 0x0000090C, 0x0000 }, /* R2316 - HPLP2MIX Input 3 Source */ { 0x0000090D, 0x0080 }, /* R2317 - HPLP2MIX Input 3 Volume */ { 
0x0000090E, 0x0000 }, /* R2318 - HPLP2MIX Input 4 Source */ { 0x0000090F, 0x0080 }, /* R2319 - HPLP2MIX Input 4 Volume */ { 0x00000910, 0x0000 }, /* R2320 - HPLP3MIX Input 1 Source */ { 0x00000911, 0x0080 }, /* R2321 - HPLP3MIX Input 1 Volume */ { 0x00000912, 0x0000 }, /* R2322 - HPLP3MIX Input 2 Source */ { 0x00000913, 0x0080 }, /* R2323 - HPLP3MIX Input 2 Volume */ { 0x00000914, 0x0000 }, /* R2324 - HPLP3MIX Input 3 Source */ { 0x00000915, 0x0080 }, /* R2325 - HPLP3MIX Input 3 Volume */ { 0x00000916, 0x0000 }, /* R2326 - HPLP3MIX Input 4 Source */ { 0x00000917, 0x0080 }, /* R2327 - HPLP3MIX Input 4 Volume */ { 0x00000918, 0x0000 }, /* R2328 - HPLP4MIX Input 1 Source */ { 0x00000919, 0x0080 }, /* R2329 - HPLP4MIX Input 1 Volume */ { 0x0000091A, 0x0000 }, /* R2330 - HPLP4MIX Input 2 Source */ { 0x0000091B, 0x0080 }, /* R2331 - HPLP4MIX Input 2 Volume */ { 0x0000091C, 0x0000 }, /* R2332 - HPLP4MIX Input 3 Source */ { 0x0000091D, 0x0080 }, /* R2333 - HPLP4MIX Input 3 Volume */ { 0x0000091E, 0x0000 }, /* R2334 - HPLP4MIX Input 4 Source */ { 0x0000091F, 0x0080 }, /* R2335 - HPLP4MIX Input 4 Volume */ { 0x00000940, 0x0000 }, /* R2368 - DSP1LMIX Input 1 Source */ { 0x00000941, 0x0080 }, /* R2369 - DSP1LMIX Input 1 Volume */ { 0x00000942, 0x0000 }, /* R2370 - DSP1LMIX Input 2 Source */ { 0x00000943, 0x0080 }, /* R2371 - DSP1LMIX Input 2 Volume */ { 0x00000944, 0x0000 }, /* R2372 - DSP1LMIX Input 3 Source */ { 0x00000945, 0x0080 }, /* R2373 - DSP1LMIX Input 3 Volume */ { 0x00000946, 0x0000 }, /* R2374 - DSP1LMIX Input 4 Source */ { 0x00000947, 0x0080 }, /* R2375 - DSP1LMIX Input 4 Volume */ { 0x00000948, 0x0000 }, /* R2376 - DSP1RMIX Input 1 Source */ { 0x00000949, 0x0080 }, /* R2377 - DSP1RMIX Input 1 Volume */ { 0x0000094A, 0x0000 }, /* R2378 - DSP1RMIX Input 2 Source */ { 0x0000094B, 0x0080 }, /* R2379 - DSP1RMIX Input 2 Volume */ { 0x0000094C, 0x0000 }, /* R2380 - DSP1RMIX Input 3 Source */ { 0x0000094D, 0x0080 }, /* R2381 - DSP1RMIX Input 3 Volume */ { 0x0000094E, 
0x0000 }, /* R2382 - DSP1RMIX Input 4 Source */ { 0x0000094F, 0x0080 }, /* R2383 - DSP1RMIX Input 4 Volume */ { 0x00000950, 0x0000 }, /* R2384 - DSP1AUX1MIX Input 1 Source */ { 0x00000958, 0x0000 }, /* R2392 - DSP1AUX2MIX Input 1 Source */ { 0x00000960, 0x0000 }, /* R2400 - DSP1AUX3MIX Input 1 Source */ { 0x00000968, 0x0000 }, /* R2408 - DSP1AUX4MIX Input 1 Source */ { 0x00000970, 0x0000 }, /* R2416 - DSP1AUX5MIX Input 1 Source */ { 0x00000978, 0x0000 }, /* R2424 - DSP1AUX6MIX Input 1 Source */ { 0x00000980, 0x0000 }, /* R2432 - DSP2LMIX Input 1 Source */ { 0x00000981, 0x0080 }, /* R2433 - DSP2LMIX Input 1 Volume */ { 0x00000982, 0x0000 }, /* R2434 - DSP2LMIX Input 2 Source */ { 0x00000983, 0x0080 }, /* R2435 - DSP2LMIX Input 2 Volume */ { 0x00000984, 0x0000 }, /* R2436 - DSP2LMIX Input 3 Source */ { 0x00000985, 0x0080 }, /* R2437 - DSP2LMIX Input 3 Volume */ { 0x00000986, 0x0000 }, /* R2438 - DSP2LMIX Input 4 Source */ { 0x00000987, 0x0080 }, /* R2439 - DSP2LMIX Input 4 Volume */ { 0x00000988, 0x0000 }, /* R2440 - DSP2RMIX Input 1 Source */ { 0x00000989, 0x0080 }, /* R2441 - DSP2RMIX Input 1 Volume */ { 0x0000098A, 0x0000 }, /* R2442 - DSP2RMIX Input 2 Source */ { 0x0000098B, 0x0080 }, /* R2443 - DSP2RMIX Input 2 Volume */ { 0x0000098C, 0x0000 }, /* R2444 - DSP2RMIX Input 3 Source */ { 0x0000098D, 0x0080 }, /* R2445 - DSP2RMIX Input 3 Volume */ { 0x0000098E, 0x0000 }, /* R2446 - DSP2RMIX Input 4 Source */ { 0x0000098F, 0x0080 }, /* R2447 - DSP2RMIX Input 4 Volume */ { 0x00000990, 0x0000 }, /* R2448 - DSP2AUX1MIX Input 1 Source */ { 0x00000998, 0x0000 }, /* R2456 - DSP2AUX2MIX Input 1 Source */ { 0x000009A0, 0x0000 }, /* R2464 - DSP2AUX3MIX Input 1 Source */ { 0x000009A8, 0x0000 }, /* R2472 - DSP2AUX4MIX Input 1 Source */ { 0x000009B0, 0x0000 }, /* R2480 - DSP2AUX5MIX Input 1 Source */ { 0x000009B8, 0x0000 }, /* R2488 - DSP2AUX6MIX Input 1 Source */ { 0x000009C0, 0x0000 }, /* R2496 - DSP3LMIX Input 1 Source */ { 0x000009C1, 0x0080 }, /* R2497 - DSP3LMIX Input 1 
Volume */ { 0x000009C2, 0x0000 }, /* R2498 - DSP3LMIX Input 2 Source */ { 0x000009C3, 0x0080 }, /* R2499 - DSP3LMIX Input 2 Volume */ { 0x000009C4, 0x0000 }, /* R2500 - DSP3LMIX Input 3 Source */ { 0x000009C5, 0x0080 }, /* R2501 - DSP3LMIX Input 3 Volume */ { 0x000009C6, 0x0000 }, /* R2502 - DSP3LMIX Input 4 Source */ { 0x000009C7, 0x0080 }, /* R2503 - DSP3LMIX Input 4 Volume */ { 0x000009C8, 0x0000 }, /* R2504 - DSP3RMIX Input 1 Source */ { 0x000009C9, 0x0080 }, /* R2505 - DSP3RMIX Input 1 Volume */ { 0x000009CA, 0x0000 }, /* R2506 - DSP3RMIX Input 2 Source */ { 0x000009CB, 0x0080 }, /* R2507 - DSP3RMIX Input 2 Volume */ { 0x000009CC, 0x0000 }, /* R2508 - DSP3RMIX Input 3 Source */ { 0x000009CD, 0x0080 }, /* R2509 - DSP3RMIX Input 3 Volume */ { 0x000009CE, 0x0000 }, /* R2510 - DSP3RMIX Input 4 Source */ { 0x000009CF, 0x0080 }, /* R2511 - DSP3RMIX Input 4 Volume */ { 0x000009D0, 0x0000 }, /* R2512 - DSP3AUX1MIX Input 1 Source */ { 0x000009D8, 0x0000 }, /* R2520 - DSP3AUX2MIX Input 1 Source */ { 0x000009E0, 0x0000 }, /* R2528 - DSP3AUX3MIX Input 1 Source */ { 0x000009E8, 0x0000 }, /* R2536 - DSP3AUX4MIX Input 1 Source */ { 0x000009F0, 0x0000 }, /* R2544 - DSP3AUX5MIX Input 1 Source */ { 0x000009F8, 0x0000 }, /* R2552 - DSP3AUX6MIX Input 1 Source */ { 0x00000A00, 0x0000 }, /* R2560 - DSP4LMIX Input 1 Source */ { 0x00000A01, 0x0080 }, /* R2561 - DSP4LMIX Input 1 Volume */ { 0x00000A02, 0x0000 }, /* R2562 - DSP4LMIX Input 2 Source */ { 0x00000A03, 0x0080 }, /* R2563 - DSP4LMIX Input 2 Volume */ { 0x00000A04, 0x0000 }, /* R2564 - DSP4LMIX Input 3 Source */ { 0x00000A05, 0x0080 }, /* R2565 - DSP4LMIX Input 3 Volume */ { 0x00000A06, 0x0000 }, /* R2566 - DSP4LMIX Input 4 Source */ { 0x00000A07, 0x0080 }, /* R2567 - DSP4LMIX Input 4 Volume */ { 0x00000A08, 0x0000 }, /* R2568 - DSP4RMIX Input 1 Source */ { 0x00000A09, 0x0080 }, /* R2569 - DSP4RMIX Input 1 Volume */ { 0x00000A0A, 0x0000 }, /* R2570 - DSP4RMIX Input 2 Source */ { 0x00000A0B, 0x0080 }, /* R2571 - DSP4RMIX Input 
2 Volume */ { 0x00000A0C, 0x0000 }, /* R2572 - DSP4RMIX Input 3 Source */ { 0x00000A0D, 0x0080 }, /* R2573 - DSP4RMIX Input 3 Volume */ { 0x00000A0E, 0x0000 }, /* R2574 - DSP4RMIX Input 4 Source */ { 0x00000A0F, 0x0080 }, /* R2575 - DSP4RMIX Input 4 Volume */ { 0x00000A10, 0x0000 }, /* R2576 - DSP4AUX1MIX Input 1 Source */ { 0x00000A18, 0x0000 }, /* R2584 - DSP4AUX2MIX Input 1 Source */ { 0x00000A20, 0x0000 }, /* R2592 - DSP4AUX3MIX Input 1 Source */ { 0x00000A28, 0x0000 }, /* R2600 - DSP4AUX4MIX Input 1 Source */ { 0x00000A30, 0x0000 }, /* R2608 - DSP4AUX5MIX Input 1 Source */ { 0x00000A38, 0x0000 }, /* R2616 - DSP4AUX6MIX Input 1 Source */ { 0x00000A80, 0x0000 }, /* R2688 - ASRC1LMIX Input 1 Source */ { 0x00000A88, 0x0000 }, /* R2696 - ASRC1RMIX Input 1 Source */ { 0x00000A90, 0x0000 }, /* R2704 - ASRC2LMIX Input 1 Source */ { 0x00000A98, 0x0000 }, /* R2712 - ASRC2RMIX Input 1 Source */ { 0x00000B00, 0x0000 }, /* R2816 - ISRC1DEC1MIX Input 1 Source */ { 0x00000B08, 0x0000 }, /* R2824 - ISRC1DEC2MIX Input 1 Source */ { 0x00000B10, 0x0000 }, /* R2832 - ISRC1DEC3MIX Input 1 Source */ { 0x00000B18, 0x0000 }, /* R2840 - ISRC1DEC4MIX Input 1 Source */ { 0x00000B20, 0x0000 }, /* R2848 - ISRC1INT1MIX Input 1 Source */ { 0x00000B28, 0x0000 }, /* R2856 - ISRC1INT2MIX Input 1 Source */ { 0x00000B30, 0x0000 }, /* R2864 - ISRC1INT3MIX Input 1 Source */ { 0x00000B38, 0x0000 }, /* R2872 - ISRC1INT4MIX Input 1 Source */ { 0x00000B40, 0x0000 }, /* R2880 - ISRC2DEC1MIX Input 1 Source */ { 0x00000B48, 0x0000 }, /* R2888 - ISRC2DEC2MIX Input 1 Source */ { 0x00000B50, 0x0000 }, /* R2896 - ISRC2DEC3MIX Input 1 Source */ { 0x00000B58, 0x0000 }, /* R2904 - ISRC2DEC4MIX Input 1 Source */ { 0x00000B60, 0x0000 }, /* R2912 - ISRC2INT1MIX Input 1 Source */ { 0x00000B68, 0x0000 }, /* R2920 - ISRC2INT2MIX Input 1 Source */ { 0x00000B70, 0x0000 }, /* R2928 - ISRC2INT3MIX Input 1 Source */ { 0x00000B78, 0x0000 }, /* R2936 - ISRC2INT4MIX Input 1 Source */ { 0x00000B80, 0x0000 }, /* R2944 - 
ISRC3DEC1MIX Input 1 Source */ { 0x00000B88, 0x0000 }, /* R2952 - ISRC3DEC2MIX Input 1 Source */ { 0x00000B90, 0x0000 }, /* R2960 - ISRC3DEC3MIX Input 1 Source */ { 0x00000B98, 0x0000 }, /* R2968 - ISRC3DEC4MIX Input 1 Source */ { 0x00000BA0, 0x0000 }, /* R2976 - ISRC3INT1MIX Input 1 Source */ { 0x00000BA8, 0x0000 }, /* R2984 - ISRC3INT2MIX Input 1 Source */ { 0x00000BB0, 0x0000 }, /* R2992 - ISRC3INT3MIX Input 1 Source */ { 0x00000BB8, 0x0000 }, /* R3000 - ISRC3INT4MIX Input 1 Source */ { 0x00000C00, 0xA101 }, /* R3072 - GPIO1 CTRL */ { 0x00000C01, 0xA101 }, /* R3073 - GPIO2 CTRL */ { 0x00000C02, 0xA101 }, /* R3074 - GPIO3 CTRL */ { 0x00000C03, 0xA101 }, /* R3075 - GPIO4 CTRL */ { 0x00000C04, 0xA101 }, /* R3076 - GPIO5 CTRL */ { 0x00000C0F, 0x0400 }, /* R3087 - IRQ CTRL 1 */ { 0x00000C10, 0x1000 }, /* R3088 - GPIO Debounce Config */ { 0x00000C20, 0x8002 }, /* R3104 - Misc Pad Ctrl 1 */ { 0x00000C21, 0x8001 }, /* R3105 - Misc Pad Ctrl 2 */ { 0x00000C22, 0x0000 }, /* R3106 - Misc Pad Ctrl 3 */ { 0x00000C23, 0x0000 }, /* R3107 - Misc Pad Ctrl 4 */ { 0x00000C24, 0x0000 }, /* R3108 - Misc Pad Ctrl 5 */ { 0x00000C25, 0x0000 }, /* R3109 - Misc Pad Ctrl 6 */ { 0x00000C30, 0x8282 }, /* R3120 - Misc Pad Ctrl 7 */ { 0x00000C31, 0x0082 }, /* R3121 - Misc Pad Ctrl 8 */ { 0x00000C32, 0x8282 }, /* R3122 - Misc Pad Ctrl 9 */ { 0x00000C33, 0x8282 }, /* R3123 - Misc Pad Ctrl 10 */ { 0x00000C34, 0x8282 }, /* R3124 - Misc Pad Ctrl 11 */ { 0x00000C35, 0x8282 }, /* R3125 - Misc Pad Ctrl 12 */ { 0x00000C36, 0x8282 }, /* R3126 - Misc Pad Ctrl 13 */ { 0x00000C37, 0x8282 }, /* R3127 - Misc Pad Ctrl 14 */ { 0x00000C38, 0x8282 }, /* R3128 - Misc Pad Ctrl 15 */ { 0x00000C39, 0x8282 }, /* R3129 - Misc Pad Ctrl 16 */ { 0x00000C3A, 0x8282 }, /* R3130 - Misc Pad Ctrl 17 */ { 0x00000C3B, 0x8282 }, /* R3131 - Misc Pad Ctrl 18 */ { 0x00000D08, 0xFFFF }, /* R3336 - Interrupt Status 1 Mask */ { 0x00000D09, 0xFFFF }, /* R3337 - Interrupt Status 2 Mask */ { 0x00000D0A, 0xFFFF }, /* R3338 - Interrupt 
Status 3 Mask */ { 0x00000D0B, 0xFFFF }, /* R3339 - Interrupt Status 4 Mask */ { 0x00000D0C, 0xFEFF }, /* R3340 - Interrupt Status 5 Mask */ { 0x00000D0F, 0x0000 }, /* R3343 - Interrupt Control */ { 0x00000D18, 0xFFFF }, /* R3352 - IRQ2 Status 1 Mask */ { 0x00000D19, 0xFFFF }, /* R3353 - IRQ2 Status 2 Mask */ { 0x00000D1A, 0xFFFF }, /* R3354 - IRQ2 Status 3 Mask */ { 0x00000D1B, 0xFFFF }, /* R3355 - IRQ2 Status 4 Mask */ { 0x00000D1C, 0xFFFF }, /* R3356 - IRQ2 Status 5 Mask */ { 0x00000D1F, 0x0000 }, /* R3359 - IRQ2 Control */ { 0x00000D53, 0xFFFF }, /* R3411 - AOD IRQ Mask IRQ1 */ { 0x00000D54, 0xFFFF }, /* R3412 - AOD IRQ Mask IRQ2 */ { 0x00000D56, 0x0000 }, /* R3414 - Jack detect debounce */ { 0x00000E00, 0x0000 }, /* R3584 - FX_Ctrl1 */ { 0x00000E01, 0x0000 }, /* R3585 - FX_Ctrl2 */ { 0x00000E10, 0x6318 }, /* R3600 - EQ1_1 */ { 0x00000E11, 0x6300 }, /* R3601 - EQ1_2 */ { 0x00000E12, 0x0FC8 }, /* R3602 - EQ1_3 */ { 0x00000E13, 0x03FE }, /* R3603 - EQ1_4 */ { 0x00000E14, 0x00E0 }, /* R3604 - EQ1_5 */ { 0x00000E15, 0x1EC4 }, /* R3605 - EQ1_6 */ { 0x00000E16, 0xF136 }, /* R3606 - EQ1_7 */ { 0x00000E17, 0x0409 }, /* R3607 - EQ1_8 */ { 0x00000E18, 0x04CC }, /* R3608 - EQ1_9 */ { 0x00000E19, 0x1C9B }, /* R3609 - EQ1_10 */ { 0x00000E1A, 0xF337 }, /* R3610 - EQ1_11 */ { 0x00000E1B, 0x040B }, /* R3611 - EQ1_12 */ { 0x00000E1C, 0x0CBB }, /* R3612 - EQ1_13 */ { 0x00000E1D, 0x16F8 }, /* R3613 - EQ1_14 */ { 0x00000E1E, 0xF7D9 }, /* R3614 - EQ1_15 */ { 0x00000E1F, 0x040A }, /* R3615 - EQ1_16 */ { 0x00000E20, 0x1F14 }, /* R3616 - EQ1_17 */ { 0x00000E21, 0x058C }, /* R3617 - EQ1_18 */ { 0x00000E22, 0x0563 }, /* R3618 - EQ1_19 */ { 0x00000E23, 0x4000 }, /* R3619 - EQ1_20 */ { 0x00000E24, 0x0B75 }, /* R3620 - EQ1_21 */ { 0x00000E26, 0x6318 }, /* R3622 - EQ2_1 */ { 0x00000E27, 0x6300 }, /* R3623 - EQ2_2 */ { 0x00000E28, 0x0FC8 }, /* R3624 - EQ2_3 */ { 0x00000E29, 0x03FE }, /* R3625 - EQ2_4 */ { 0x00000E2A, 0x00E0 }, /* R3626 - EQ2_5 */ { 0x00000E2B, 0x1EC4 }, /* R3627 - EQ2_6 */ { 
0x00000E2C, 0xF136 }, /* R3628 - EQ2_7 */ { 0x00000E2D, 0x0409 }, /* R3629 - EQ2_8 */ { 0x00000E2E, 0x04CC }, /* R3630 - EQ2_9 */ { 0x00000E2F, 0x1C9B }, /* R3631 - EQ2_10 */ { 0x00000E30, 0xF337 }, /* R3632 - EQ2_11 */ { 0x00000E31, 0x040B }, /* R3633 - EQ2_12 */ { 0x00000E32, 0x0CBB }, /* R3634 - EQ2_13 */ { 0x00000E33, 0x16F8 }, /* R3635 - EQ2_14 */ { 0x00000E34, 0xF7D9 }, /* R3636 - EQ2_15 */ { 0x00000E35, 0x040A }, /* R3637 - EQ2_16 */ { 0x00000E36, 0x1F14 }, /* R3638 - EQ2_17 */ { 0x00000E37, 0x058C }, /* R3639 - EQ2_18 */ { 0x00000E38, 0x0563 }, /* R3640 - EQ2_19 */ { 0x00000E39, 0x4000 }, /* R3641 - EQ2_20 */ { 0x00000E3A, 0x0B75 }, /* R3642 - EQ2_21 */ { 0x00000E3C, 0x6318 }, /* R3644 - EQ3_1 */ { 0x00000E3D, 0x6300 }, /* R3645 - EQ3_2 */ { 0x00000E3E, 0x0FC8 }, /* R3646 - EQ3_3 */ { 0x00000E3F, 0x03FE }, /* R3647 - EQ3_4 */ { 0x00000E40, 0x00E0 }, /* R3648 - EQ3_5 */ { 0x00000E41, 0x1EC4 }, /* R3649 - EQ3_6 */ { 0x00000E42, 0xF136 }, /* R3650 - EQ3_7 */ { 0x00000E43, 0x0409 }, /* R3651 - EQ3_8 */ { 0x00000E44, 0x04CC }, /* R3652 - EQ3_9 */ { 0x00000E45, 0x1C9B }, /* R3653 - EQ3_10 */ { 0x00000E46, 0xF337 }, /* R3654 - EQ3_11 */ { 0x00000E47, 0x040B }, /* R3655 - EQ3_12 */ { 0x00000E48, 0x0CBB }, /* R3656 - EQ3_13 */ { 0x00000E49, 0x16F8 }, /* R3657 - EQ3_14 */ { 0x00000E4A, 0xF7D9 }, /* R3658 - EQ3_15 */ { 0x00000E4B, 0x040A }, /* R3659 - EQ3_16 */ { 0x00000E4C, 0x1F14 }, /* R3660 - EQ3_17 */ { 0x00000E4D, 0x058C }, /* R3661 - EQ3_18 */ { 0x00000E4E, 0x0563 }, /* R3662 - EQ3_19 */ { 0x00000E4F, 0x4000 }, /* R3663 - EQ3_20 */ { 0x00000E50, 0x0B75 }, /* R3664 - EQ3_21 */ { 0x00000E52, 0x6318 }, /* R3666 - EQ4_1 */ { 0x00000E53, 0x6300 }, /* R3667 - EQ4_2 */ { 0x00000E54, 0x0FC8 }, /* R3668 - EQ4_3 */ { 0x00000E55, 0x03FE }, /* R3669 - EQ4_4 */ { 0x00000E56, 0x00E0 }, /* R3670 - EQ4_5 */ { 0x00000E57, 0x1EC4 }, /* R3671 - EQ4_6 */ { 0x00000E58, 0xF136 }, /* R3672 - EQ4_7 */ { 0x00000E59, 0x0409 }, /* R3673 - EQ4_8 */ { 0x00000E5A, 0x04CC }, /* R3674 - EQ4_9 
*/ { 0x00000E5B, 0x1C9B }, /* R3675 - EQ4_10 */ { 0x00000E5C, 0xF337 }, /* R3676 - EQ4_11 */ { 0x00000E5D, 0x040B }, /* R3677 - EQ4_12 */ { 0x00000E5E, 0x0CBB }, /* R3678 - EQ4_13 */ { 0x00000E5F, 0x16F8 }, /* R3679 - EQ4_14 */ { 0x00000E60, 0xF7D9 }, /* R3680 - EQ4_15 */ { 0x00000E61, 0x040A }, /* R3681 - EQ4_16 */ { 0x00000E62, 0x1F14 }, /* R3682 - EQ4_17 */ { 0x00000E63, 0x058C }, /* R3683 - EQ4_18 */ { 0x00000E64, 0x0563 }, /* R3684 - EQ4_19 */ { 0x00000E65, 0x4000 }, /* R3685 - EQ4_20 */ { 0x00000E66, 0x0B75 }, /* R3686 - EQ4_21 */ { 0x00000E80, 0x0018 }, /* R3712 - DRC1 ctrl1 */ { 0x00000E81, 0x0933 }, /* R3713 - DRC1 ctrl2 */ { 0x00000E82, 0x0018 }, /* R3714 - DRC1 ctrl3 */ { 0x00000E83, 0x0000 }, /* R3715 - DRC1 ctrl4 */ { 0x00000E84, 0x0000 }, /* R3716 - DRC1 ctrl5 */ { 0x00000E89, 0x0018 }, /* R3721 - DRC2 ctrl1 */ { 0x00000E8A, 0x0933 }, /* R3722 - DRC2 ctrl2 */ { 0x00000E8B, 0x0018 }, /* R3723 - DRC2 ctrl3 */ { 0x00000E8C, 0x0000 }, /* R3724 - DRC2 ctrl4 */ { 0x00000E8D, 0x0000 }, /* R3725 - DRC2 ctrl5 */ { 0x00000EC0, 0x0000 }, /* R3776 - HPLPF1_1 */ { 0x00000EC1, 0x0000 }, /* R3777 - HPLPF1_2 */ { 0x00000EC4, 0x0000 }, /* R3780 - HPLPF2_1 */ { 0x00000EC5, 0x0000 }, /* R3781 - HPLPF2_2 */ { 0x00000EC8, 0x0000 }, /* R3784 - HPLPF3_1 */ { 0x00000EC9, 0x0000 }, /* R3785 - HPLPF3_2 */ { 0x00000ECC, 0x0000 }, /* R3788 - HPLPF4_1 */ { 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */ { 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */ { 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */ { 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */ { 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */ { 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */ { 0x00000EF3, 0x0000 }, /* R3827 - ISRC 2 CTRL 1 */ { 0x00000EF4, 0x0000 }, /* R3828 - ISRC 2 CTRL 2 */ { 0x00000EF5, 0x0000 }, /* R3829 - ISRC 2 CTRL 3 */ { 0x00000EF6, 0x0000 }, /* R3830 - ISRC 3 CTRL 1 */ { 0x00000EF7, 0x0000 }, /* R3831 - ISRC 3 CTRL 2 */ { 0x00000EF8, 0x0000 }, /* R3832 - ISRC 3 CTRL 3 */ { 0x00000F00, 0x0000 }, 
/* R3840 - Clock Control */
	{ 0x00000F01, 0x0000 }, /* R3841 - ANC_SRC */
	{ 0x00001100, 0x0010 }, /* R4352 - DSP1 Control 1 */
	{ 0x00001101, 0x0000 }, /* R4353 - DSP1 Clocking 1 */
	{ 0x00001200, 0x0010 }, /* R4608 - DSP2 Control 1 */
	{ 0x00001201, 0x0000 }, /* R4609 - DSP2 Clocking 1 */
	{ 0x00001300, 0x0010 }, /* R4864 - DSP3 Control 1 */
	{ 0x00001301, 0x0000 }, /* R4865 - DSP3 Clocking 1 */
	{ 0x00001400, 0x0010 }, /* R5120 - DSP4 Control 1 */
	{ 0x00001401, 0x0000 }, /* R5121 - DSP4 Clocking 1 */
	{ 0x00001404, 0x0000 }, /* R5124 - DSP4 Status 1 */
};

/*
 * wm5110_is_rev_b_adsp_memory - test whether @reg lies inside ADSP memory
 * for the rev B register layout.
 *
 * @reg: register address to test
 *
 * Each comparison checks one half-open [start, end) address window of DSP
 * memory; returns true when @reg falls inside any of them.  This variant
 * is selected for early silicon revisions (rev 0-2) by
 * wm5110_is_adsp_memory().
 */
static bool wm5110_is_rev_b_adsp_memory(unsigned int reg)
{
	if ((reg >= 0x100000 && reg < 0x103000) ||
	    (reg >= 0x180000 && reg < 0x181000) ||
	    (reg >= 0x190000 && reg < 0x192000) ||
	    (reg >= 0x1a8000 && reg < 0x1a9000) ||
	    (reg >= 0x200000 && reg < 0x209000) ||
	    (reg >= 0x280000 && reg < 0x281000) ||
	    (reg >= 0x290000 && reg < 0x29a000) ||
	    (reg >= 0x2a8000 && reg < 0x2aa000) ||
	    (reg >= 0x300000 && reg < 0x30f000) ||
	    (reg >= 0x380000 && reg < 0x382000) ||
	    (reg >= 0x390000 && reg < 0x39e000) ||
	    (reg >= 0x3a8000 && reg < 0x3b6000) ||
	    (reg >= 0x400000 && reg < 0x403000) ||
	    (reg >= 0x480000 && reg < 0x481000) ||
	    (reg >= 0x490000 && reg < 0x492000) ||
	    (reg >= 0x4a8000 && reg < 0x4a9000))
		return true;
	else
		return false;
}

/*
 * wm5110_is_rev_d_adsp_memory - test whether @reg lies inside ADSP memory
 * for the rev D register layout.
 *
 * @reg: register address to test
 *
 * Same scheme as the rev B variant, but the windows start at the same base
 * addresses and are mostly larger.  Selected for later silicon revisions
 * (rev > 2) by wm5110_is_adsp_memory().
 */
static bool wm5110_is_rev_d_adsp_memory(unsigned int reg)
{
	if ((reg >= 0x100000 && reg < 0x106000) ||
	    (reg >= 0x180000 && reg < 0x182000) ||
	    (reg >= 0x190000 && reg < 0x198000) ||
	    (reg >= 0x1a8000 && reg < 0x1aa000) ||
	    (reg >= 0x200000 && reg < 0x20f000) ||
	    (reg >= 0x280000 && reg < 0x282000) ||
	    (reg >= 0x290000 && reg < 0x29c000) ||
	    (reg >= 0x2a6000 && reg < 0x2b4000) ||
	    (reg >= 0x300000 && reg < 0x30f000) ||
	    (reg >= 0x380000 && reg < 0x382000) ||
	    (reg >= 0x390000 && reg < 0x3a2000) ||
	    (reg >= 0x3a6000 && reg < 0x3b4000) ||
	    (reg >= 0x400000 && reg < 0x406000) ||
	    (reg >= 0x480000 && reg < 0x482000) ||
	    (reg >= 0x490000 && reg < 0x498000) ||
	    (reg >= 0x4a8000 && reg < 0x4aa000))
		return true;
	else
		return false;
}
/*
 * Revision dispatch for the ADSP memory range checks above: silicon
 * revisions 0..2 use the rev A/B memory map, everything later uses
 * the rev D map.  (The "case 0 ... 2" range syntax is a GCC
 * extension, used throughout the kernel.)
 */
static bool wm5110_is_adsp_memory(struct device *dev, unsigned int reg)
{
	struct arizona *arizona = dev_get_drvdata(dev);

	switch (arizona->rev) {
	case 0 ... 2:
		return wm5110_is_rev_b_adsp_memory(reg);
	default:
		return wm5110_is_rev_d_adsp_memory(reg);
	}
}

/*
 * regmap .readable_reg callback: return true for every register that
 * may be read.  Registers not enumerated here are readable only if
 * they fall inside an ADSP memory window (see the default branch).
 */
static bool wm5110_readable_register(struct device *dev, unsigned int reg)
{
	switch (reg) {
	/* Device ID / control interface / write sequencer */
	case ARIZONA_SOFTWARE_RESET: case ARIZONA_DEVICE_REVISION:
	case ARIZONA_CTRL_IF_SPI_CFG_1: case ARIZONA_CTRL_IF_I2C1_CFG_1:
	case ARIZONA_CTRL_IF_I2C2_CFG_1: case ARIZONA_CTRL_IF_I2C1_CFG_2:
	case ARIZONA_CTRL_IF_I2C2_CFG_2:
	case ARIZONA_WRITE_SEQUENCER_CTRL_0: case ARIZONA_WRITE_SEQUENCER_CTRL_1:
	case ARIZONA_WRITE_SEQUENCER_CTRL_2:
	/* Tone generator / PWM / wake + sequencer selects */
	case ARIZONA_TONE_GENERATOR_1: case ARIZONA_TONE_GENERATOR_2:
	case ARIZONA_TONE_GENERATOR_3: case ARIZONA_TONE_GENERATOR_4:
	case ARIZONA_TONE_GENERATOR_5:
	case ARIZONA_PWM_DRIVE_1: case ARIZONA_PWM_DRIVE_2: case ARIZONA_PWM_DRIVE_3:
	case ARIZONA_WAKE_CONTROL: case ARIZONA_SEQUENCE_CONTROL:
	case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_1:
	case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2:
	case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3:
	case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_4:
	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_1:
	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_2:
	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_3:
	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_4:
	case ARIZONA_COMFORT_NOISE_GENERATOR:
	/* Haptics */
	case ARIZONA_HAPTICS_CONTROL_1: case ARIZONA_HAPTICS_CONTROL_2:
	case ARIZONA_HAPTICS_PHASE_1_INTENSITY: case ARIZONA_HAPTICS_PHASE_1_DURATION:
	case ARIZONA_HAPTICS_PHASE_2_INTENSITY: case ARIZONA_HAPTICS_PHASE_2_DURATION:
	case ARIZONA_HAPTICS_PHASE_3_INTENSITY: case ARIZONA_HAPTICS_PHASE_3_DURATION:
	case ARIZONA_HAPTICS_STATUS:
	/* Clocking and sample rates */
	case ARIZONA_CLOCK_32K_1: case ARIZONA_SYSTEM_CLOCK_1:
	case ARIZONA_SAMPLE_RATE_1: case ARIZONA_SAMPLE_RATE_2:
	case ARIZONA_SAMPLE_RATE_3:
	case ARIZONA_SAMPLE_RATE_1_STATUS: case ARIZONA_SAMPLE_RATE_2_STATUS:
	case ARIZONA_SAMPLE_RATE_3_STATUS:
	case ARIZONA_ASYNC_CLOCK_1: case ARIZONA_ASYNC_SAMPLE_RATE_1:
	case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
	case ARIZONA_OUTPUT_SYSTEM_CLOCK: case ARIZONA_OUTPUT_ASYNC_CLOCK:
	case ARIZONA_RATE_ESTIMATOR_1: case ARIZONA_RATE_ESTIMATOR_2:
	case ARIZONA_RATE_ESTIMATOR_3: case ARIZONA_RATE_ESTIMATOR_4:
	case ARIZONA_RATE_ESTIMATOR_5:
	/* FLL1 */
	case ARIZONA_FLL1_CONTROL_1: case ARIZONA_FLL1_CONTROL_2:
	case ARIZONA_FLL1_CONTROL_3: case ARIZONA_FLL1_CONTROL_4:
	case ARIZONA_FLL1_CONTROL_5: case ARIZONA_FLL1_CONTROL_6:
	case ARIZONA_FLL1_CONTROL_7:
	case ARIZONA_FLL1_LOOP_FILTER_TEST_1: case ARIZONA_FLL1_NCO_TEST_0:
	case ARIZONA_FLL1_SYNCHRONISER_1: case ARIZONA_FLL1_SYNCHRONISER_2:
	case ARIZONA_FLL1_SYNCHRONISER_3: case ARIZONA_FLL1_SYNCHRONISER_4:
	case ARIZONA_FLL1_SYNCHRONISER_5: case ARIZONA_FLL1_SYNCHRONISER_6:
	case ARIZONA_FLL1_SYNCHRONISER_7:
	case ARIZONA_FLL1_SPREAD_SPECTRUM: case ARIZONA_FLL1_GPIO_CLOCK:
	/* FLL2 */
	case ARIZONA_FLL2_CONTROL_1: case ARIZONA_FLL2_CONTROL_2:
	case ARIZONA_FLL2_CONTROL_3: case ARIZONA_FLL2_CONTROL_4:
	case ARIZONA_FLL2_CONTROL_5: case ARIZONA_FLL2_CONTROL_6:
	case ARIZONA_FLL2_CONTROL_7:
	case ARIZONA_FLL2_LOOP_FILTER_TEST_1: case ARIZONA_FLL2_NCO_TEST_0:
	case ARIZONA_FLL2_SYNCHRONISER_1: case ARIZONA_FLL2_SYNCHRONISER_2:
	case ARIZONA_FLL2_SYNCHRONISER_3: case ARIZONA_FLL2_SYNCHRONISER_4:
	case ARIZONA_FLL2_SYNCHRONISER_5: case ARIZONA_FLL2_SYNCHRONISER_6:
	case ARIZONA_FLL2_SYNCHRONISER_7:
	case ARIZONA_FLL2_SPREAD_SPECTRUM: case ARIZONA_FLL2_GPIO_CLOCK:
	/* Power management / mic bias / accessory detect */
	case ARIZONA_MIC_CHARGE_PUMP_1:
	case ARIZONA_LDO1_CONTROL_1: case ARIZONA_LDO2_CONTROL_1:
	case ARIZONA_MIC_BIAS_CTRL_1: case ARIZONA_MIC_BIAS_CTRL_2:
	case ARIZONA_MIC_BIAS_CTRL_3:
	case ARIZONA_ACCESSORY_DETECT_MODE_1:
	case ARIZONA_HEADPHONE_DETECT_1: case ARIZONA_HEADPHONE_DETECT_2:
	case ARIZONA_MICD_CLAMP_CONTROL:
	case ARIZONA_MIC_DETECT_1: case ARIZONA_MIC_DETECT_2:
	case ARIZONA_MIC_DETECT_3:
	case ARIZONA_MIC_DETECT_LEVEL_1: case ARIZONA_MIC_DETECT_LEVEL_2:
	case ARIZONA_MIC_DETECT_LEVEL_3: case ARIZONA_MIC_DETECT_LEVEL_4:
	case ARIZONA_MIC_NOISE_MIX_CONTROL_1:
	case ARIZONA_JACK_DETECT_ANALOGUE:
	/* Input path */
	case ARIZONA_INPUT_ENABLES: case ARIZONA_INPUT_ENABLES_STATUS:
	case ARIZONA_INPUT_RATE: case ARIZONA_INPUT_VOLUME_RAMP:
	case ARIZONA_HPF_CONTROL:
	case ARIZONA_IN1L_CONTROL: case ARIZONA_ADC_DIGITAL_VOLUME_1L:
	case ARIZONA_DMIC1L_CONTROL:
	case ARIZONA_IN1R_CONTROL: case ARIZONA_ADC_DIGITAL_VOLUME_1R:
	case ARIZONA_DMIC1R_CONTROL:
	case ARIZONA_IN2L_CONTROL: case ARIZONA_ADC_DIGITAL_VOLUME_2L:
	case ARIZONA_DMIC2L_CONTROL:
	case ARIZONA_IN2R_CONTROL: case ARIZONA_ADC_DIGITAL_VOLUME_2R:
	case ARIZONA_DMIC2R_CONTROL:
	case ARIZONA_IN3L_CONTROL: case ARIZONA_ADC_DIGITAL_VOLUME_3L:
	case ARIZONA_DMIC3L_CONTROL:
	case ARIZONA_IN3R_CONTROL: case ARIZONA_ADC_DIGITAL_VOLUME_3R:
	case ARIZONA_DMIC3R_CONTROL:
	case ARIZONA_IN4L_CONTROL: case ARIZONA_ADC_DIGITAL_VOLUME_4L:
	case ARIZONA_DMIC4L_CONTROL:
	case ARIZONA_IN4R_CONTROL: case ARIZONA_ADC_DIGITAL_VOLUME_4R:
	case ARIZONA_DMIC4R_CONTROL:
	/* Output path (note: OUT4 uses OUT_VOLUME, others DAC_VOLUME_LIMIT) */
	case ARIZONA_OUTPUT_ENABLES_1: case ARIZONA_OUTPUT_STATUS_1:
	case ARIZONA_RAW_OUTPUT_STATUS_1: case ARIZONA_OUTPUT_RATE_1:
	case ARIZONA_OUTPUT_VOLUME_RAMP:
	case ARIZONA_OUTPUT_PATH_CONFIG_1L: case ARIZONA_DAC_DIGITAL_VOLUME_1L:
	case ARIZONA_DAC_VOLUME_LIMIT_1L: case ARIZONA_NOISE_GATE_SELECT_1L:
	case ARIZONA_OUTPUT_PATH_CONFIG_1R: case ARIZONA_DAC_DIGITAL_VOLUME_1R:
	case ARIZONA_DAC_VOLUME_LIMIT_1R: case ARIZONA_NOISE_GATE_SELECT_1R:
	case ARIZONA_OUTPUT_PATH_CONFIG_2L: case ARIZONA_DAC_DIGITAL_VOLUME_2L:
	case ARIZONA_DAC_VOLUME_LIMIT_2L: case ARIZONA_NOISE_GATE_SELECT_2L:
	case ARIZONA_OUTPUT_PATH_CONFIG_2R: case ARIZONA_DAC_DIGITAL_VOLUME_2R:
	case ARIZONA_DAC_VOLUME_LIMIT_2R: case ARIZONA_NOISE_GATE_SELECT_2R:
	case ARIZONA_OUTPUT_PATH_CONFIG_3L: case ARIZONA_DAC_DIGITAL_VOLUME_3L:
	case ARIZONA_DAC_VOLUME_LIMIT_3L: case ARIZONA_NOISE_GATE_SELECT_3L:
	case ARIZONA_OUTPUT_PATH_CONFIG_3R: case ARIZONA_DAC_DIGITAL_VOLUME_3R:
	case ARIZONA_DAC_VOLUME_LIMIT_3R: case ARIZONA_NOISE_GATE_SELECT_3R:
	case ARIZONA_OUTPUT_PATH_CONFIG_4L: case ARIZONA_DAC_DIGITAL_VOLUME_4L:
	case ARIZONA_OUT_VOLUME_4L: case ARIZONA_NOISE_GATE_SELECT_4L:
	case ARIZONA_OUTPUT_PATH_CONFIG_4R: case ARIZONA_DAC_DIGITAL_VOLUME_4R:
	case ARIZONA_OUT_VOLUME_4R: case ARIZONA_NOISE_GATE_SELECT_4R:
	case ARIZONA_OUTPUT_PATH_CONFIG_5L: case ARIZONA_DAC_DIGITAL_VOLUME_5L:
	case ARIZONA_DAC_VOLUME_LIMIT_5L: case ARIZONA_NOISE_GATE_SELECT_5L:
	case ARIZONA_OUTPUT_PATH_CONFIG_5R: case ARIZONA_DAC_DIGITAL_VOLUME_5R:
	case ARIZONA_DAC_VOLUME_LIMIT_5R: case ARIZONA_NOISE_GATE_SELECT_5R:
	case ARIZONA_OUTPUT_PATH_CONFIG_6L: case ARIZONA_DAC_DIGITAL_VOLUME_6L:
	case ARIZONA_DAC_VOLUME_LIMIT_6L: case ARIZONA_NOISE_GATE_SELECT_6L:
	case ARIZONA_OUTPUT_PATH_CONFIG_6R: case ARIZONA_DAC_DIGITAL_VOLUME_6R:
	case ARIZONA_DAC_VOLUME_LIMIT_6R: case ARIZONA_NOISE_GATE_SELECT_6R:
	case ARIZONA_DRE_ENABLE: case ARIZONA_DAC_AEC_CONTROL_1:
	case ARIZONA_NOISE_GATE_CONTROL:
	case ARIZONA_PDM_SPK1_CTRL_1: case ARIZONA_PDM_SPK1_CTRL_2:
	case ARIZONA_PDM_SPK2_CTRL_1: case ARIZONA_PDM_SPK2_CTRL_2:
	case ARIZONA_HP1_SHORT_CIRCUIT_CTRL: case ARIZONA_HP2_SHORT_CIRCUIT_CTRL:
	case ARIZONA_HP3_SHORT_CIRCUIT_CTRL:
	/* AIF1 (only AIF1 has the full 18 frame-control registers) */
	case ARIZONA_AIF1_BCLK_CTRL: case ARIZONA_AIF1_TX_PIN_CTRL:
	case ARIZONA_AIF1_RX_PIN_CTRL: case ARIZONA_AIF1_RATE_CTRL:
	case ARIZONA_AIF1_FORMAT:
	case ARIZONA_AIF1_TX_BCLK_RATE: case ARIZONA_AIF1_RX_BCLK_RATE:
	case ARIZONA_AIF1_FRAME_CTRL_1: case ARIZONA_AIF1_FRAME_CTRL_2:
	case ARIZONA_AIF1_FRAME_CTRL_3: case ARIZONA_AIF1_FRAME_CTRL_4:
	case ARIZONA_AIF1_FRAME_CTRL_5: case ARIZONA_AIF1_FRAME_CTRL_6:
	case ARIZONA_AIF1_FRAME_CTRL_7: case ARIZONA_AIF1_FRAME_CTRL_8:
	case ARIZONA_AIF1_FRAME_CTRL_9: case ARIZONA_AIF1_FRAME_CTRL_10:
	case ARIZONA_AIF1_FRAME_CTRL_11: case ARIZONA_AIF1_FRAME_CTRL_12:
	case ARIZONA_AIF1_FRAME_CTRL_13: case ARIZONA_AIF1_FRAME_CTRL_14:
	case ARIZONA_AIF1_FRAME_CTRL_15: case ARIZONA_AIF1_FRAME_CTRL_16:
	case ARIZONA_AIF1_FRAME_CTRL_17: case ARIZONA_AIF1_FRAME_CTRL_18:
	case ARIZONA_AIF1_TX_ENABLES: case ARIZONA_AIF1_RX_ENABLES:
	/* AIF2 */
	case ARIZONA_AIF2_BCLK_CTRL: case ARIZONA_AIF2_TX_PIN_CTRL:
	case ARIZONA_AIF2_RX_PIN_CTRL: case ARIZONA_AIF2_RATE_CTRL:
	case ARIZONA_AIF2_FORMAT:
	case ARIZONA_AIF2_TX_BCLK_RATE: case ARIZONA_AIF2_RX_BCLK_RATE:
	case ARIZONA_AIF2_FRAME_CTRL_1: case ARIZONA_AIF2_FRAME_CTRL_2:
	case ARIZONA_AIF2_FRAME_CTRL_3: case ARIZONA_AIF2_FRAME_CTRL_4:
	case ARIZONA_AIF2_FRAME_CTRL_11: case ARIZONA_AIF2_FRAME_CTRL_12:
	case ARIZONA_AIF2_TX_ENABLES: case ARIZONA_AIF2_RX_ENABLES:
	/* AIF3 */
	case ARIZONA_AIF3_BCLK_CTRL: case ARIZONA_AIF3_TX_PIN_CTRL:
	case ARIZONA_AIF3_RX_PIN_CTRL: case ARIZONA_AIF3_RATE_CTRL:
	case ARIZONA_AIF3_FORMAT:
	case ARIZONA_AIF3_TX_BCLK_RATE: case ARIZONA_AIF3_RX_BCLK_RATE:
	case ARIZONA_AIF3_FRAME_CTRL_1: case ARIZONA_AIF3_FRAME_CTRL_2:
	case ARIZONA_AIF3_FRAME_CTRL_3: case ARIZONA_AIF3_FRAME_CTRL_4:
	case ARIZONA_AIF3_FRAME_CTRL_11: case ARIZONA_AIF3_FRAME_CTRL_12:
	case ARIZONA_AIF3_TX_ENABLES: case ARIZONA_AIF3_RX_ENABLES:
	/* SLIMbus */
	case ARIZONA_SLIMBUS_FRAMER_REF_GEAR:
	case ARIZONA_SLIMBUS_RATES_1: case ARIZONA_SLIMBUS_RATES_2:
	case ARIZONA_SLIMBUS_RATES_3: case ARIZONA_SLIMBUS_RATES_4:
	case ARIZONA_SLIMBUS_RATES_5: case ARIZONA_SLIMBUS_RATES_6:
	case ARIZONA_SLIMBUS_RATES_7: case ARIZONA_SLIMBUS_RATES_8:
	case ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE:
	case ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE:
	case ARIZONA_SLIMBUS_RX_PORT_STATUS:
	case ARIZONA_SLIMBUS_TX_PORT_STATUS:
	/* Mixer routing: PWM / mic / noise */
	case ARIZONA_PWM1MIX_INPUT_1_SOURCE: case ARIZONA_PWM1MIX_INPUT_1_VOLUME:
	case ARIZONA_PWM1MIX_INPUT_2_SOURCE: case ARIZONA_PWM1MIX_INPUT_2_VOLUME:
	case ARIZONA_PWM1MIX_INPUT_3_SOURCE: case ARIZONA_PWM1MIX_INPUT_3_VOLUME:
	case ARIZONA_PWM1MIX_INPUT_4_SOURCE: case ARIZONA_PWM1MIX_INPUT_4_VOLUME:
	case ARIZONA_PWM2MIX_INPUT_1_SOURCE: case ARIZONA_PWM2MIX_INPUT_1_VOLUME:
	case ARIZONA_PWM2MIX_INPUT_2_SOURCE: case ARIZONA_PWM2MIX_INPUT_2_VOLUME:
	case ARIZONA_PWM2MIX_INPUT_3_SOURCE: case ARIZONA_PWM2MIX_INPUT_3_VOLUME:
	case ARIZONA_PWM2MIX_INPUT_4_SOURCE: case ARIZONA_PWM2MIX_INPUT_4_VOLUME:
	case ARIZONA_MICMIX_INPUT_1_SOURCE: case ARIZONA_MICMIX_INPUT_1_VOLUME:
	case ARIZONA_MICMIX_INPUT_2_SOURCE: case ARIZONA_MICMIX_INPUT_2_VOLUME:
	case ARIZONA_MICMIX_INPUT_3_SOURCE: case ARIZONA_MICMIX_INPUT_3_VOLUME:
	case ARIZONA_MICMIX_INPUT_4_SOURCE: case ARIZONA_MICMIX_INPUT_4_VOLUME:
	case ARIZONA_NOISEMIX_INPUT_1_SOURCE: case ARIZONA_NOISEMIX_INPUT_1_VOLUME:
	case ARIZONA_NOISEMIX_INPUT_2_SOURCE: case ARIZONA_NOISEMIX_INPUT_2_VOLUME:
	case ARIZONA_NOISEMIX_INPUT_3_SOURCE: case ARIZONA_NOISEMIX_INPUT_3_VOLUME:
	case ARIZONA_NOISEMIX_INPUT_4_SOURCE: case ARIZONA_NOISEMIX_INPUT_4_VOLUME:
	/* Mixer routing: output mixers OUT1..OUT6, L and R */
	case ARIZONA_OUT1LMIX_INPUT_1_SOURCE: case ARIZONA_OUT1LMIX_INPUT_1_VOLUME:
	case ARIZONA_OUT1LMIX_INPUT_2_SOURCE: case ARIZONA_OUT1LMIX_INPUT_2_VOLUME:
	case ARIZONA_OUT1LMIX_INPUT_3_SOURCE: case ARIZONA_OUT1LMIX_INPUT_3_VOLUME:
	case ARIZONA_OUT1LMIX_INPUT_4_SOURCE: case ARIZONA_OUT1LMIX_INPUT_4_VOLUME:
	case ARIZONA_OUT1RMIX_INPUT_1_SOURCE: case ARIZONA_OUT1RMIX_INPUT_1_VOLUME:
	case ARIZONA_OUT1RMIX_INPUT_2_SOURCE: case ARIZONA_OUT1RMIX_INPUT_2_VOLUME:
	case ARIZONA_OUT1RMIX_INPUT_3_SOURCE: case ARIZONA_OUT1RMIX_INPUT_3_VOLUME:
	case ARIZONA_OUT1RMIX_INPUT_4_SOURCE: case ARIZONA_OUT1RMIX_INPUT_4_VOLUME:
	case ARIZONA_OUT2LMIX_INPUT_1_SOURCE: case ARIZONA_OUT2LMIX_INPUT_1_VOLUME:
	case ARIZONA_OUT2LMIX_INPUT_2_SOURCE: case ARIZONA_OUT2LMIX_INPUT_2_VOLUME:
	case ARIZONA_OUT2LMIX_INPUT_3_SOURCE: case ARIZONA_OUT2LMIX_INPUT_3_VOLUME:
	case ARIZONA_OUT2LMIX_INPUT_4_SOURCE: case ARIZONA_OUT2LMIX_INPUT_4_VOLUME:
	case ARIZONA_OUT2RMIX_INPUT_1_SOURCE: case ARIZONA_OUT2RMIX_INPUT_1_VOLUME:
	case ARIZONA_OUT2RMIX_INPUT_2_SOURCE: case ARIZONA_OUT2RMIX_INPUT_2_VOLUME:
	case ARIZONA_OUT2RMIX_INPUT_3_SOURCE: case ARIZONA_OUT2RMIX_INPUT_3_VOLUME:
	case ARIZONA_OUT2RMIX_INPUT_4_SOURCE: case ARIZONA_OUT2RMIX_INPUT_4_VOLUME:
	case ARIZONA_OUT3LMIX_INPUT_1_SOURCE: case ARIZONA_OUT3LMIX_INPUT_1_VOLUME:
	case ARIZONA_OUT3LMIX_INPUT_2_SOURCE: case ARIZONA_OUT3LMIX_INPUT_2_VOLUME:
	case ARIZONA_OUT3LMIX_INPUT_3_SOURCE: case ARIZONA_OUT3LMIX_INPUT_3_VOLUME:
	case ARIZONA_OUT3LMIX_INPUT_4_SOURCE: case ARIZONA_OUT3LMIX_INPUT_4_VOLUME:
	case ARIZONA_OUT3RMIX_INPUT_1_SOURCE: case ARIZONA_OUT3RMIX_INPUT_1_VOLUME:
	case ARIZONA_OUT3RMIX_INPUT_2_SOURCE: case ARIZONA_OUT3RMIX_INPUT_2_VOLUME:
	case ARIZONA_OUT3RMIX_INPUT_3_SOURCE: case ARIZONA_OUT3RMIX_INPUT_3_VOLUME:
	case ARIZONA_OUT3RMIX_INPUT_4_SOURCE: case ARIZONA_OUT3RMIX_INPUT_4_VOLUME:
	case ARIZONA_OUT4LMIX_INPUT_1_SOURCE: case ARIZONA_OUT4LMIX_INPUT_1_VOLUME:
	case ARIZONA_OUT4LMIX_INPUT_2_SOURCE: case ARIZONA_OUT4LMIX_INPUT_2_VOLUME:
	case ARIZONA_OUT4LMIX_INPUT_3_SOURCE: case ARIZONA_OUT4LMIX_INPUT_3_VOLUME:
	case ARIZONA_OUT4LMIX_INPUT_4_SOURCE: case ARIZONA_OUT4LMIX_INPUT_4_VOLUME:
	case ARIZONA_OUT4RMIX_INPUT_1_SOURCE: case ARIZONA_OUT4RMIX_INPUT_1_VOLUME:
	case ARIZONA_OUT4RMIX_INPUT_2_SOURCE: case ARIZONA_OUT4RMIX_INPUT_2_VOLUME:
	case ARIZONA_OUT4RMIX_INPUT_3_SOURCE: case ARIZONA_OUT4RMIX_INPUT_3_VOLUME:
	case ARIZONA_OUT4RMIX_INPUT_4_SOURCE: case ARIZONA_OUT4RMIX_INPUT_4_VOLUME:
	case ARIZONA_OUT5LMIX_INPUT_1_SOURCE: case ARIZONA_OUT5LMIX_INPUT_1_VOLUME:
	case ARIZONA_OUT5LMIX_INPUT_2_SOURCE: case ARIZONA_OUT5LMIX_INPUT_2_VOLUME:
	case ARIZONA_OUT5LMIX_INPUT_3_SOURCE: case ARIZONA_OUT5LMIX_INPUT_3_VOLUME:
	case ARIZONA_OUT5LMIX_INPUT_4_SOURCE: case ARIZONA_OUT5LMIX_INPUT_4_VOLUME:
	case ARIZONA_OUT5RMIX_INPUT_1_SOURCE: case ARIZONA_OUT5RMIX_INPUT_1_VOLUME:
	case ARIZONA_OUT5RMIX_INPUT_2_SOURCE: case ARIZONA_OUT5RMIX_INPUT_2_VOLUME:
	case ARIZONA_OUT5RMIX_INPUT_3_SOURCE: case ARIZONA_OUT5RMIX_INPUT_3_VOLUME:
	case ARIZONA_OUT5RMIX_INPUT_4_SOURCE: case ARIZONA_OUT5RMIX_INPUT_4_VOLUME:
	case ARIZONA_OUT6LMIX_INPUT_1_SOURCE: case ARIZONA_OUT6LMIX_INPUT_1_VOLUME:
	case ARIZONA_OUT6LMIX_INPUT_2_SOURCE: case ARIZONA_OUT6LMIX_INPUT_2_VOLUME:
	case ARIZONA_OUT6LMIX_INPUT_3_SOURCE: case ARIZONA_OUT6LMIX_INPUT_3_VOLUME:
	case ARIZONA_OUT6LMIX_INPUT_4_SOURCE: case ARIZONA_OUT6LMIX_INPUT_4_VOLUME:
	case ARIZONA_OUT6RMIX_INPUT_1_SOURCE: case ARIZONA_OUT6RMIX_INPUT_1_VOLUME:
	case ARIZONA_OUT6RMIX_INPUT_2_SOURCE: case ARIZONA_OUT6RMIX_INPUT_2_VOLUME:
	case ARIZONA_OUT6RMIX_INPUT_3_SOURCE: case ARIZONA_OUT6RMIX_INPUT_3_VOLUME:
	case ARIZONA_OUT6RMIX_INPUT_4_SOURCE: case ARIZONA_OUT6RMIX_INPUT_4_VOLUME:
	/* Mixer routing: AIF1 TX1..TX8 */
	case ARIZONA_AIF1TX1MIX_INPUT_1_SOURCE: case ARIZONA_AIF1TX1MIX_INPUT_1_VOLUME:
	case ARIZONA_AIF1TX1MIX_INPUT_2_SOURCE: case ARIZONA_AIF1TX1MIX_INPUT_2_VOLUME:
	case ARIZONA_AIF1TX1MIX_INPUT_3_SOURCE: case ARIZONA_AIF1TX1MIX_INPUT_3_VOLUME:
	case ARIZONA_AIF1TX1MIX_INPUT_4_SOURCE: case ARIZONA_AIF1TX1MIX_INPUT_4_VOLUME:
	case ARIZONA_AIF1TX2MIX_INPUT_1_SOURCE: case ARIZONA_AIF1TX2MIX_INPUT_1_VOLUME:
	case ARIZONA_AIF1TX2MIX_INPUT_2_SOURCE: case ARIZONA_AIF1TX2MIX_INPUT_2_VOLUME:
	case ARIZONA_AIF1TX2MIX_INPUT_3_SOURCE: case ARIZONA_AIF1TX2MIX_INPUT_3_VOLUME:
	case ARIZONA_AIF1TX2MIX_INPUT_4_SOURCE: case ARIZONA_AIF1TX2MIX_INPUT_4_VOLUME:
	case ARIZONA_AIF1TX3MIX_INPUT_1_SOURCE: case ARIZONA_AIF1TX3MIX_INPUT_1_VOLUME:
	case ARIZONA_AIF1TX3MIX_INPUT_2_SOURCE: case ARIZONA_AIF1TX3MIX_INPUT_2_VOLUME:
	case ARIZONA_AIF1TX3MIX_INPUT_3_SOURCE: case ARIZONA_AIF1TX3MIX_INPUT_3_VOLUME:
	case ARIZONA_AIF1TX3MIX_INPUT_4_SOURCE: case ARIZONA_AIF1TX3MIX_INPUT_4_VOLUME:
	case ARIZONA_AIF1TX4MIX_INPUT_1_SOURCE: case ARIZONA_AIF1TX4MIX_INPUT_1_VOLUME:
	case ARIZONA_AIF1TX4MIX_INPUT_2_SOURCE: case ARIZONA_AIF1TX4MIX_INPUT_2_VOLUME:
	case ARIZONA_AIF1TX4MIX_INPUT_3_SOURCE: case ARIZONA_AIF1TX4MIX_INPUT_3_VOLUME:
	case ARIZONA_AIF1TX4MIX_INPUT_4_SOURCE: case ARIZONA_AIF1TX4MIX_INPUT_4_VOLUME:
	case ARIZONA_AIF1TX5MIX_INPUT_1_SOURCE: case ARIZONA_AIF1TX5MIX_INPUT_1_VOLUME:
	case ARIZONA_AIF1TX5MIX_INPUT_2_SOURCE: case ARIZONA_AIF1TX5MIX_INPUT_2_VOLUME:
	case ARIZONA_AIF1TX5MIX_INPUT_3_SOURCE: case ARIZONA_AIF1TX5MIX_INPUT_3_VOLUME:
	case ARIZONA_AIF1TX5MIX_INPUT_4_SOURCE: case ARIZONA_AIF1TX5MIX_INPUT_4_VOLUME:
	case ARIZONA_AIF1TX6MIX_INPUT_1_SOURCE: case ARIZONA_AIF1TX6MIX_INPUT_1_VOLUME:
	case ARIZONA_AIF1TX6MIX_INPUT_2_SOURCE: case ARIZONA_AIF1TX6MIX_INPUT_2_VOLUME:
	case ARIZONA_AIF1TX6MIX_INPUT_3_SOURCE: case ARIZONA_AIF1TX6MIX_INPUT_3_VOLUME:
	case ARIZONA_AIF1TX6MIX_INPUT_4_SOURCE: case ARIZONA_AIF1TX6MIX_INPUT_4_VOLUME:
	case ARIZONA_AIF1TX7MIX_INPUT_1_SOURCE: case ARIZONA_AIF1TX7MIX_INPUT_1_VOLUME:
	case ARIZONA_AIF1TX7MIX_INPUT_2_SOURCE: case ARIZONA_AIF1TX7MIX_INPUT_2_VOLUME:
	case ARIZONA_AIF1TX7MIX_INPUT_3_SOURCE: case ARIZONA_AIF1TX7MIX_INPUT_3_VOLUME:
	case ARIZONA_AIF1TX7MIX_INPUT_4_SOURCE: case ARIZONA_AIF1TX7MIX_INPUT_4_VOLUME:
	case ARIZONA_AIF1TX8MIX_INPUT_1_SOURCE: case ARIZONA_AIF1TX8MIX_INPUT_1_VOLUME:
	case ARIZONA_AIF1TX8MIX_INPUT_2_SOURCE: case ARIZONA_AIF1TX8MIX_INPUT_2_VOLUME:
	case ARIZONA_AIF1TX8MIX_INPUT_3_SOURCE: case ARIZONA_AIF1TX8MIX_INPUT_3_VOLUME:
	case ARIZONA_AIF1TX8MIX_INPUT_4_SOURCE: case ARIZONA_AIF1TX8MIX_INPUT_4_VOLUME:
	/* Mixer routing: AIF2 TX1..TX6 */
	case ARIZONA_AIF2TX1MIX_INPUT_1_SOURCE: case ARIZONA_AIF2TX1MIX_INPUT_1_VOLUME:
	case ARIZONA_AIF2TX1MIX_INPUT_2_SOURCE: case ARIZONA_AIF2TX1MIX_INPUT_2_VOLUME:
	case ARIZONA_AIF2TX1MIX_INPUT_3_SOURCE: case ARIZONA_AIF2TX1MIX_INPUT_3_VOLUME:
	case ARIZONA_AIF2TX1MIX_INPUT_4_SOURCE: case ARIZONA_AIF2TX1MIX_INPUT_4_VOLUME:
	case ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE: case ARIZONA_AIF2TX2MIX_INPUT_1_VOLUME:
	case ARIZONA_AIF2TX2MIX_INPUT_2_SOURCE: case ARIZONA_AIF2TX2MIX_INPUT_2_VOLUME:
	case ARIZONA_AIF2TX2MIX_INPUT_3_SOURCE: case ARIZONA_AIF2TX2MIX_INPUT_3_VOLUME:
	case ARIZONA_AIF2TX2MIX_INPUT_4_SOURCE: case ARIZONA_AIF2TX2MIX_INPUT_4_VOLUME:
	case ARIZONA_AIF2TX3MIX_INPUT_1_SOURCE: case ARIZONA_AIF2TX3MIX_INPUT_1_VOLUME:
	case ARIZONA_AIF2TX3MIX_INPUT_2_SOURCE: case ARIZONA_AIF2TX3MIX_INPUT_2_VOLUME:
	case ARIZONA_AIF2TX3MIX_INPUT_3_SOURCE: case ARIZONA_AIF2TX3MIX_INPUT_3_VOLUME:
	case ARIZONA_AIF2TX3MIX_INPUT_4_SOURCE: case ARIZONA_AIF2TX3MIX_INPUT_4_VOLUME:
	case ARIZONA_AIF2TX4MIX_INPUT_1_SOURCE: case ARIZONA_AIF2TX4MIX_INPUT_1_VOLUME:
	case ARIZONA_AIF2TX4MIX_INPUT_2_SOURCE: case ARIZONA_AIF2TX4MIX_INPUT_2_VOLUME:
	case ARIZONA_AIF2TX4MIX_INPUT_3_SOURCE: case ARIZONA_AIF2TX4MIX_INPUT_3_VOLUME:
	case ARIZONA_AIF2TX4MIX_INPUT_4_SOURCE: case ARIZONA_AIF2TX4MIX_INPUT_4_VOLUME:
	case ARIZONA_AIF2TX5MIX_INPUT_1_SOURCE: case ARIZONA_AIF2TX5MIX_INPUT_1_VOLUME:
	case ARIZONA_AIF2TX5MIX_INPUT_2_SOURCE: case ARIZONA_AIF2TX5MIX_INPUT_2_VOLUME:
	case ARIZONA_AIF2TX5MIX_INPUT_3_SOURCE: case ARIZONA_AIF2TX5MIX_INPUT_3_VOLUME:
	case ARIZONA_AIF2TX5MIX_INPUT_4_SOURCE: case ARIZONA_AIF2TX5MIX_INPUT_4_VOLUME:
	case ARIZONA_AIF2TX6MIX_INPUT_1_SOURCE: case ARIZONA_AIF2TX6MIX_INPUT_1_VOLUME:
	case ARIZONA_AIF2TX6MIX_INPUT_2_SOURCE: case ARIZONA_AIF2TX6MIX_INPUT_2_VOLUME:
	case ARIZONA_AIF2TX6MIX_INPUT_3_SOURCE: case ARIZONA_AIF2TX6MIX_INPUT_3_VOLUME:
	case ARIZONA_AIF2TX6MIX_INPUT_4_SOURCE: case ARIZONA_AIF2TX6MIX_INPUT_4_VOLUME:
	/* Mixer routing: AIF3 TX1..TX2 */
	case ARIZONA_AIF3TX1MIX_INPUT_1_SOURCE: case ARIZONA_AIF3TX1MIX_INPUT_1_VOLUME:
	case ARIZONA_AIF3TX1MIX_INPUT_2_SOURCE: case ARIZONA_AIF3TX1MIX_INPUT_2_VOLUME:
	case ARIZONA_AIF3TX1MIX_INPUT_3_SOURCE: case ARIZONA_AIF3TX1MIX_INPUT_3_VOLUME:
	case ARIZONA_AIF3TX1MIX_INPUT_4_SOURCE: case ARIZONA_AIF3TX1MIX_INPUT_4_VOLUME:
	case ARIZONA_AIF3TX2MIX_INPUT_1_SOURCE: case ARIZONA_AIF3TX2MIX_INPUT_1_VOLUME:
	case ARIZONA_AIF3TX2MIX_INPUT_2_SOURCE: case ARIZONA_AIF3TX2MIX_INPUT_2_VOLUME:
	case ARIZONA_AIF3TX2MIX_INPUT_3_SOURCE: case ARIZONA_AIF3TX2MIX_INPUT_3_VOLUME:
	case ARIZONA_AIF3TX2MIX_INPUT_4_SOURCE: case ARIZONA_AIF3TX2MIX_INPUT_4_VOLUME:
	/* Mixer routing: SLIMbus TX1..TX8 */
	case ARIZONA_SLIMTX1MIX_INPUT_1_SOURCE: case ARIZONA_SLIMTX1MIX_INPUT_1_VOLUME:
	case ARIZONA_SLIMTX1MIX_INPUT_2_SOURCE: case ARIZONA_SLIMTX1MIX_INPUT_2_VOLUME:
	case ARIZONA_SLIMTX1MIX_INPUT_3_SOURCE: case ARIZONA_SLIMTX1MIX_INPUT_3_VOLUME:
	case ARIZONA_SLIMTX1MIX_INPUT_4_SOURCE: case ARIZONA_SLIMTX1MIX_INPUT_4_VOLUME:
	case ARIZONA_SLIMTX2MIX_INPUT_1_SOURCE: case ARIZONA_SLIMTX2MIX_INPUT_1_VOLUME:
	case ARIZONA_SLIMTX2MIX_INPUT_2_SOURCE: case ARIZONA_SLIMTX2MIX_INPUT_2_VOLUME:
	case ARIZONA_SLIMTX2MIX_INPUT_3_SOURCE: case ARIZONA_SLIMTX2MIX_INPUT_3_VOLUME:
	case ARIZONA_SLIMTX2MIX_INPUT_4_SOURCE: case ARIZONA_SLIMTX2MIX_INPUT_4_VOLUME:
	case ARIZONA_SLIMTX3MIX_INPUT_1_SOURCE: case ARIZONA_SLIMTX3MIX_INPUT_1_VOLUME:
	case ARIZONA_SLIMTX3MIX_INPUT_2_SOURCE: case ARIZONA_SLIMTX3MIX_INPUT_2_VOLUME:
	case ARIZONA_SLIMTX3MIX_INPUT_3_SOURCE: case ARIZONA_SLIMTX3MIX_INPUT_3_VOLUME:
	case ARIZONA_SLIMTX3MIX_INPUT_4_SOURCE: case ARIZONA_SLIMTX3MIX_INPUT_4_VOLUME:
	case ARIZONA_SLIMTX4MIX_INPUT_1_SOURCE: case ARIZONA_SLIMTX4MIX_INPUT_1_VOLUME:
	case ARIZONA_SLIMTX4MIX_INPUT_2_SOURCE: case ARIZONA_SLIMTX4MIX_INPUT_2_VOLUME:
	case ARIZONA_SLIMTX4MIX_INPUT_3_SOURCE: case ARIZONA_SLIMTX4MIX_INPUT_3_VOLUME:
	case ARIZONA_SLIMTX4MIX_INPUT_4_SOURCE: case ARIZONA_SLIMTX4MIX_INPUT_4_VOLUME:
	case ARIZONA_SLIMTX5MIX_INPUT_1_SOURCE: case ARIZONA_SLIMTX5MIX_INPUT_1_VOLUME:
	case ARIZONA_SLIMTX5MIX_INPUT_2_SOURCE: case ARIZONA_SLIMTX5MIX_INPUT_2_VOLUME:
	case ARIZONA_SLIMTX5MIX_INPUT_3_SOURCE: case ARIZONA_SLIMTX5MIX_INPUT_3_VOLUME:
	case ARIZONA_SLIMTX5MIX_INPUT_4_SOURCE: case ARIZONA_SLIMTX5MIX_INPUT_4_VOLUME:
	case ARIZONA_SLIMTX6MIX_INPUT_1_SOURCE: case ARIZONA_SLIMTX6MIX_INPUT_1_VOLUME:
	case ARIZONA_SLIMTX6MIX_INPUT_2_SOURCE: case ARIZONA_SLIMTX6MIX_INPUT_2_VOLUME:
	case ARIZONA_SLIMTX6MIX_INPUT_3_SOURCE: case ARIZONA_SLIMTX6MIX_INPUT_3_VOLUME:
	case ARIZONA_SLIMTX6MIX_INPUT_4_SOURCE: case ARIZONA_SLIMTX6MIX_INPUT_4_VOLUME:
	case ARIZONA_SLIMTX7MIX_INPUT_1_SOURCE: case ARIZONA_SLIMTX7MIX_INPUT_1_VOLUME:
	case ARIZONA_SLIMTX7MIX_INPUT_2_SOURCE: case ARIZONA_SLIMTX7MIX_INPUT_2_VOLUME:
	case ARIZONA_SLIMTX7MIX_INPUT_3_SOURCE: case ARIZONA_SLIMTX7MIX_INPUT_3_VOLUME:
	case ARIZONA_SLIMTX7MIX_INPUT_4_SOURCE: case ARIZONA_SLIMTX7MIX_INPUT_4_VOLUME:
	case ARIZONA_SLIMTX8MIX_INPUT_1_SOURCE: case ARIZONA_SLIMTX8MIX_INPUT_1_VOLUME:
	case ARIZONA_SLIMTX8MIX_INPUT_2_SOURCE: case ARIZONA_SLIMTX8MIX_INPUT_2_VOLUME:
	case ARIZONA_SLIMTX8MIX_INPUT_3_SOURCE: case ARIZONA_SLIMTX8MIX_INPUT_3_VOLUME:
	case ARIZONA_SLIMTX8MIX_INPUT_4_SOURCE: case ARIZONA_SLIMTX8MIX_INPUT_4_VOLUME:
	/* Mixer routing: EQ1..EQ4 */
	case ARIZONA_EQ1MIX_INPUT_1_SOURCE: case ARIZONA_EQ1MIX_INPUT_1_VOLUME:
	case ARIZONA_EQ1MIX_INPUT_2_SOURCE: case ARIZONA_EQ1MIX_INPUT_2_VOLUME:
	case ARIZONA_EQ1MIX_INPUT_3_SOURCE: case ARIZONA_EQ1MIX_INPUT_3_VOLUME:
	case ARIZONA_EQ1MIX_INPUT_4_SOURCE: case ARIZONA_EQ1MIX_INPUT_4_VOLUME:
	case ARIZONA_EQ2MIX_INPUT_1_SOURCE: case ARIZONA_EQ2MIX_INPUT_1_VOLUME:
	case ARIZONA_EQ2MIX_INPUT_2_SOURCE: case ARIZONA_EQ2MIX_INPUT_2_VOLUME:
	case ARIZONA_EQ2MIX_INPUT_3_SOURCE: case ARIZONA_EQ2MIX_INPUT_3_VOLUME:
	case ARIZONA_EQ2MIX_INPUT_4_SOURCE: case ARIZONA_EQ2MIX_INPUT_4_VOLUME:
	case ARIZONA_EQ3MIX_INPUT_1_SOURCE: case ARIZONA_EQ3MIX_INPUT_1_VOLUME:
	case ARIZONA_EQ3MIX_INPUT_2_SOURCE: case ARIZONA_EQ3MIX_INPUT_2_VOLUME:
	case ARIZONA_EQ3MIX_INPUT_3_SOURCE: case ARIZONA_EQ3MIX_INPUT_3_VOLUME:
	case ARIZONA_EQ3MIX_INPUT_4_SOURCE: case ARIZONA_EQ3MIX_INPUT_4_VOLUME:
	case ARIZONA_EQ4MIX_INPUT_1_SOURCE: case ARIZONA_EQ4MIX_INPUT_1_VOLUME:
	case ARIZONA_EQ4MIX_INPUT_2_SOURCE: case ARIZONA_EQ4MIX_INPUT_2_VOLUME:
	case ARIZONA_EQ4MIX_INPUT_3_SOURCE: case ARIZONA_EQ4MIX_INPUT_3_VOLUME:
	case ARIZONA_EQ4MIX_INPUT_4_SOURCE: case ARIZONA_EQ4MIX_INPUT_4_VOLUME:
	/* Mixer routing: DRC1/DRC2 L+R */
	case ARIZONA_DRC1LMIX_INPUT_1_SOURCE: case ARIZONA_DRC1LMIX_INPUT_1_VOLUME:
	case ARIZONA_DRC1LMIX_INPUT_2_SOURCE: case ARIZONA_DRC1LMIX_INPUT_2_VOLUME:
	case ARIZONA_DRC1LMIX_INPUT_3_SOURCE: case ARIZONA_DRC1LMIX_INPUT_3_VOLUME:
	case ARIZONA_DRC1LMIX_INPUT_4_SOURCE: case ARIZONA_DRC1LMIX_INPUT_4_VOLUME:
	case ARIZONA_DRC1RMIX_INPUT_1_SOURCE: case ARIZONA_DRC1RMIX_INPUT_1_VOLUME:
	case ARIZONA_DRC1RMIX_INPUT_2_SOURCE: case ARIZONA_DRC1RMIX_INPUT_2_VOLUME:
	case ARIZONA_DRC1RMIX_INPUT_3_SOURCE: case ARIZONA_DRC1RMIX_INPUT_3_VOLUME:
	case ARIZONA_DRC1RMIX_INPUT_4_SOURCE: case ARIZONA_DRC1RMIX_INPUT_4_VOLUME:
	case ARIZONA_DRC2LMIX_INPUT_1_SOURCE: case ARIZONA_DRC2LMIX_INPUT_1_VOLUME:
	case ARIZONA_DRC2LMIX_INPUT_2_SOURCE: case ARIZONA_DRC2LMIX_INPUT_2_VOLUME:
	case ARIZONA_DRC2LMIX_INPUT_3_SOURCE: case ARIZONA_DRC2LMIX_INPUT_3_VOLUME:
	case ARIZONA_DRC2LMIX_INPUT_4_SOURCE: case ARIZONA_DRC2LMIX_INPUT_4_VOLUME:
	case ARIZONA_DRC2RMIX_INPUT_1_SOURCE: case ARIZONA_DRC2RMIX_INPUT_1_VOLUME:
	case ARIZONA_DRC2RMIX_INPUT_2_SOURCE: case ARIZONA_DRC2RMIX_INPUT_2_VOLUME:
	case ARIZONA_DRC2RMIX_INPUT_3_SOURCE: case ARIZONA_DRC2RMIX_INPUT_3_VOLUME:
	case ARIZONA_DRC2RMIX_INPUT_4_SOURCE: case ARIZONA_DRC2RMIX_INPUT_4_VOLUME:
	/* Mixer routing: HPLP1..HPLP4 */
	case ARIZONA_HPLP1MIX_INPUT_1_SOURCE: case ARIZONA_HPLP1MIX_INPUT_1_VOLUME:
	case ARIZONA_HPLP1MIX_INPUT_2_SOURCE: case ARIZONA_HPLP1MIX_INPUT_2_VOLUME:
	case ARIZONA_HPLP1MIX_INPUT_3_SOURCE: case ARIZONA_HPLP1MIX_INPUT_3_VOLUME:
	case ARIZONA_HPLP1MIX_INPUT_4_SOURCE: case ARIZONA_HPLP1MIX_INPUT_4_VOLUME:
	case ARIZONA_HPLP2MIX_INPUT_1_SOURCE: case ARIZONA_HPLP2MIX_INPUT_1_VOLUME:
	case ARIZONA_HPLP2MIX_INPUT_2_SOURCE: case ARIZONA_HPLP2MIX_INPUT_2_VOLUME:
	case ARIZONA_HPLP2MIX_INPUT_3_SOURCE: case ARIZONA_HPLP2MIX_INPUT_3_VOLUME:
	case ARIZONA_HPLP2MIX_INPUT_4_SOURCE: case ARIZONA_HPLP2MIX_INPUT_4_VOLUME:
	case ARIZONA_HPLP3MIX_INPUT_1_SOURCE: case ARIZONA_HPLP3MIX_INPUT_1_VOLUME:
	case ARIZONA_HPLP3MIX_INPUT_2_SOURCE: case ARIZONA_HPLP3MIX_INPUT_2_VOLUME:
	case ARIZONA_HPLP3MIX_INPUT_3_SOURCE: case ARIZONA_HPLP3MIX_INPUT_3_VOLUME:
	case ARIZONA_HPLP3MIX_INPUT_4_SOURCE: case ARIZONA_HPLP3MIX_INPUT_4_VOLUME:
	case ARIZONA_HPLP4MIX_INPUT_1_SOURCE: case ARIZONA_HPLP4MIX_INPUT_1_VOLUME:
	case ARIZONA_HPLP4MIX_INPUT_2_SOURCE: case ARIZONA_HPLP4MIX_INPUT_2_VOLUME:
	case ARIZONA_HPLP4MIX_INPUT_3_SOURCE: case ARIZONA_HPLP4MIX_INPUT_3_VOLUME:
	case ARIZONA_HPLP4MIX_INPUT_4_SOURCE: case ARIZONA_HPLP4MIX_INPUT_4_VOLUME:
	/* Mixer routing: DSP1..DSP4 (L/R mixers + 6 aux inputs each) */
	case ARIZONA_DSP1LMIX_INPUT_1_SOURCE: case ARIZONA_DSP1LMIX_INPUT_1_VOLUME:
	case ARIZONA_DSP1LMIX_INPUT_2_SOURCE: case ARIZONA_DSP1LMIX_INPUT_2_VOLUME:
	case ARIZONA_DSP1LMIX_INPUT_3_SOURCE: case ARIZONA_DSP1LMIX_INPUT_3_VOLUME:
	case ARIZONA_DSP1LMIX_INPUT_4_SOURCE: case ARIZONA_DSP1LMIX_INPUT_4_VOLUME:
	case ARIZONA_DSP1RMIX_INPUT_1_SOURCE: case ARIZONA_DSP1RMIX_INPUT_1_VOLUME:
	case ARIZONA_DSP1RMIX_INPUT_2_SOURCE: case ARIZONA_DSP1RMIX_INPUT_2_VOLUME:
	case ARIZONA_DSP1RMIX_INPUT_3_SOURCE: case ARIZONA_DSP1RMIX_INPUT_3_VOLUME:
	case ARIZONA_DSP1RMIX_INPUT_4_SOURCE: case ARIZONA_DSP1RMIX_INPUT_4_VOLUME:
	case ARIZONA_DSP1AUX1MIX_INPUT_1_SOURCE: case ARIZONA_DSP1AUX2MIX_INPUT_1_SOURCE:
	case ARIZONA_DSP1AUX3MIX_INPUT_1_SOURCE: case ARIZONA_DSP1AUX4MIX_INPUT_1_SOURCE:
	case ARIZONA_DSP1AUX5MIX_INPUT_1_SOURCE: case ARIZONA_DSP1AUX6MIX_INPUT_1_SOURCE:
	case ARIZONA_DSP2LMIX_INPUT_1_SOURCE: case ARIZONA_DSP2LMIX_INPUT_1_VOLUME:
	case ARIZONA_DSP2LMIX_INPUT_2_SOURCE: case ARIZONA_DSP2LMIX_INPUT_2_VOLUME:
	case ARIZONA_DSP2LMIX_INPUT_3_SOURCE: case ARIZONA_DSP2LMIX_INPUT_3_VOLUME:
	case ARIZONA_DSP2LMIX_INPUT_4_SOURCE: case ARIZONA_DSP2LMIX_INPUT_4_VOLUME:
	case ARIZONA_DSP2RMIX_INPUT_1_SOURCE: case ARIZONA_DSP2RMIX_INPUT_1_VOLUME:
	case ARIZONA_DSP2RMIX_INPUT_2_SOURCE: case ARIZONA_DSP2RMIX_INPUT_2_VOLUME:
	case ARIZONA_DSP2RMIX_INPUT_3_SOURCE: case ARIZONA_DSP2RMIX_INPUT_3_VOLUME:
	case ARIZONA_DSP2RMIX_INPUT_4_SOURCE: case ARIZONA_DSP2RMIX_INPUT_4_VOLUME:
	case ARIZONA_DSP2AUX1MIX_INPUT_1_SOURCE: case ARIZONA_DSP2AUX2MIX_INPUT_1_SOURCE:
	case ARIZONA_DSP2AUX3MIX_INPUT_1_SOURCE: case ARIZONA_DSP2AUX4MIX_INPUT_1_SOURCE:
	case ARIZONA_DSP2AUX5MIX_INPUT_1_SOURCE: case ARIZONA_DSP2AUX6MIX_INPUT_1_SOURCE:
	case ARIZONA_DSP3LMIX_INPUT_1_SOURCE: case ARIZONA_DSP3LMIX_INPUT_1_VOLUME:
	case ARIZONA_DSP3LMIX_INPUT_2_SOURCE: case ARIZONA_DSP3LMIX_INPUT_2_VOLUME:
	case ARIZONA_DSP3LMIX_INPUT_3_SOURCE: case ARIZONA_DSP3LMIX_INPUT_3_VOLUME:
	case ARIZONA_DSP3LMIX_INPUT_4_SOURCE: case ARIZONA_DSP3LMIX_INPUT_4_VOLUME:
	case ARIZONA_DSP3RMIX_INPUT_1_SOURCE: case ARIZONA_DSP3RMIX_INPUT_1_VOLUME:
	case ARIZONA_DSP3RMIX_INPUT_2_SOURCE: case ARIZONA_DSP3RMIX_INPUT_2_VOLUME:
	case ARIZONA_DSP3RMIX_INPUT_3_SOURCE: case ARIZONA_DSP3RMIX_INPUT_3_VOLUME:
	case ARIZONA_DSP3RMIX_INPUT_4_SOURCE: case ARIZONA_DSP3RMIX_INPUT_4_VOLUME:
	case ARIZONA_DSP3AUX1MIX_INPUT_1_SOURCE: case ARIZONA_DSP3AUX2MIX_INPUT_1_SOURCE:
	case ARIZONA_DSP3AUX3MIX_INPUT_1_SOURCE: case ARIZONA_DSP3AUX4MIX_INPUT_1_SOURCE:
	case ARIZONA_DSP3AUX5MIX_INPUT_1_SOURCE: case ARIZONA_DSP3AUX6MIX_INPUT_1_SOURCE:
	case ARIZONA_DSP4LMIX_INPUT_1_SOURCE: case ARIZONA_DSP4LMIX_INPUT_1_VOLUME:
	case ARIZONA_DSP4LMIX_INPUT_2_SOURCE: case ARIZONA_DSP4LMIX_INPUT_2_VOLUME:
	case ARIZONA_DSP4LMIX_INPUT_3_SOURCE: case ARIZONA_DSP4LMIX_INPUT_3_VOLUME:
	case ARIZONA_DSP4LMIX_INPUT_4_SOURCE: case ARIZONA_DSP4LMIX_INPUT_4_VOLUME:
	case ARIZONA_DSP4RMIX_INPUT_1_SOURCE: case ARIZONA_DSP4RMIX_INPUT_1_VOLUME:
	case ARIZONA_DSP4RMIX_INPUT_2_SOURCE: case ARIZONA_DSP4RMIX_INPUT_2_VOLUME:
	case ARIZONA_DSP4RMIX_INPUT_3_SOURCE: case ARIZONA_DSP4RMIX_INPUT_3_VOLUME:
	case ARIZONA_DSP4RMIX_INPUT_4_SOURCE: case ARIZONA_DSP4RMIX_INPUT_4_VOLUME:
	case ARIZONA_DSP4AUX1MIX_INPUT_1_SOURCE: case ARIZONA_DSP4AUX2MIX_INPUT_1_SOURCE:
	case ARIZONA_DSP4AUX3MIX_INPUT_1_SOURCE: case ARIZONA_DSP4AUX4MIX_INPUT_1_SOURCE:
	case ARIZONA_DSP4AUX5MIX_INPUT_1_SOURCE: case ARIZONA_DSP4AUX6MIX_INPUT_1_SOURCE:
	/* Mixer routing: ASRC / ISRC */
	case ARIZONA_ASRC1LMIX_INPUT_1_SOURCE: case ARIZONA_ASRC1RMIX_INPUT_1_SOURCE:
	case ARIZONA_ASRC2LMIX_INPUT_1_SOURCE: case ARIZONA_ASRC2RMIX_INPUT_1_SOURCE:
	case ARIZONA_ISRC1DEC1MIX_INPUT_1_SOURCE: case ARIZONA_ISRC1DEC2MIX_INPUT_1_SOURCE:
	case ARIZONA_ISRC1DEC3MIX_INPUT_1_SOURCE: case ARIZONA_ISRC1DEC4MIX_INPUT_1_SOURCE:
	case ARIZONA_ISRC1INT1MIX_INPUT_1_SOURCE: case ARIZONA_ISRC1INT2MIX_INPUT_1_SOURCE:
	case ARIZONA_ISRC1INT3MIX_INPUT_1_SOURCE: case ARIZONA_ISRC1INT4MIX_INPUT_1_SOURCE:
	case ARIZONA_ISRC2DEC1MIX_INPUT_1_SOURCE: case ARIZONA_ISRC2DEC2MIX_INPUT_1_SOURCE:
	case ARIZONA_ISRC2DEC3MIX_INPUT_1_SOURCE: case ARIZONA_ISRC2DEC4MIX_INPUT_1_SOURCE:
	case ARIZONA_ISRC2INT1MIX_INPUT_1_SOURCE: case ARIZONA_ISRC2INT2MIX_INPUT_1_SOURCE:
	case ARIZONA_ISRC2INT3MIX_INPUT_1_SOURCE: case ARIZONA_ISRC2INT4MIX_INPUT_1_SOURCE:
	case ARIZONA_ISRC3DEC1MIX_INPUT_1_SOURCE: case ARIZONA_ISRC3DEC2MIX_INPUT_1_SOURCE:
	case ARIZONA_ISRC3DEC3MIX_INPUT_1_SOURCE: case ARIZONA_ISRC3DEC4MIX_INPUT_1_SOURCE:
	case ARIZONA_ISRC3INT1MIX_INPUT_1_SOURCE: case ARIZONA_ISRC3INT2MIX_INPUT_1_SOURCE:
	case ARIZONA_ISRC3INT3MIX_INPUT_1_SOURCE: case ARIZONA_ISRC3INT4MIX_INPUT_1_SOURCE:
	/* GPIO / pad control */
	case ARIZONA_GPIO1_CTRL: case ARIZONA_GPIO2_CTRL: case ARIZONA_GPIO3_CTRL:
	case ARIZONA_GPIO4_CTRL: case ARIZONA_GPIO5_CTRL:
	case ARIZONA_IRQ_CTRL_1: case ARIZONA_GPIO_DEBOUNCE_CONFIG:
	case ARIZONA_MISC_PAD_CTRL_1: case ARIZONA_MISC_PAD_CTRL_2:
	case ARIZONA_MISC_PAD_CTRL_3: case ARIZONA_MISC_PAD_CTRL_4:
	case ARIZONA_MISC_PAD_CTRL_5: case ARIZONA_MISC_PAD_CTRL_6:
	case ARIZONA_MISC_PAD_CTRL_7: case ARIZONA_MISC_PAD_CTRL_8:
	case ARIZONA_MISC_PAD_CTRL_9: case ARIZONA_MISC_PAD_CTRL_10:
	case ARIZONA_MISC_PAD_CTRL_11: case ARIZONA_MISC_PAD_CTRL_12:
	case ARIZONA_MISC_PAD_CTRL_13: case ARIZONA_MISC_PAD_CTRL_14:
	case ARIZONA_MISC_PAD_CTRL_15: case ARIZONA_MISC_PAD_CTRL_16:
	case ARIZONA_MISC_PAD_CTRL_17: case ARIZONA_MISC_PAD_CTRL_18:
	/* Interrupt controller */
	case ARIZONA_INTERRUPT_STATUS_1: case ARIZONA_INTERRUPT_STATUS_2:
	case ARIZONA_INTERRUPT_STATUS_3: case ARIZONA_INTERRUPT_STATUS_4:
	case ARIZONA_INTERRUPT_STATUS_5:
	case ARIZONA_INTERRUPT_STATUS_1_MASK: case ARIZONA_INTERRUPT_STATUS_2_MASK:
	case ARIZONA_INTERRUPT_STATUS_3_MASK: case ARIZONA_INTERRUPT_STATUS_4_MASK:
	case ARIZONA_INTERRUPT_STATUS_5_MASK:
	case ARIZONA_INTERRUPT_CONTROL:
	case ARIZONA_IRQ2_STATUS_1: case ARIZONA_IRQ2_STATUS_2:
	case ARIZONA_IRQ2_STATUS_3: case ARIZONA_IRQ2_STATUS_4:
	case ARIZONA_IRQ2_STATUS_5:
	case ARIZONA_IRQ2_STATUS_1_MASK: case ARIZONA_IRQ2_STATUS_2_MASK:
	case ARIZONA_IRQ2_STATUS_3_MASK: case ARIZONA_IRQ2_STATUS_4_MASK:
	case ARIZONA_IRQ2_STATUS_5_MASK:
	case ARIZONA_IRQ2_CONTROL:
	case ARIZONA_INTERRUPT_RAW_STATUS_2: case ARIZONA_INTERRUPT_RAW_STATUS_3:
	case ARIZONA_INTERRUPT_RAW_STATUS_4: case ARIZONA_INTERRUPT_RAW_STATUS_5:
	case ARIZONA_INTERRUPT_RAW_STATUS_6: case ARIZONA_INTERRUPT_RAW_STATUS_7:
	case ARIZONA_INTERRUPT_RAW_STATUS_8:
	case ARIZONA_IRQ_PIN_STATUS:
	case ARIZONA_AOD_WKUP_AND_TRIG:
	case ARIZONA_AOD_IRQ1: case ARIZONA_AOD_IRQ2:
	case ARIZONA_AOD_IRQ_MASK_IRQ1: case ARIZONA_AOD_IRQ_MASK_IRQ2:
	case ARIZONA_AOD_IRQ_RAW_STATUS:
	case ARIZONA_JACK_DETECT_DEBOUNCE:
	/* Effects: EQ coefficients */
	case ARIZONA_FX_CTRL1: case ARIZONA_FX_CTRL2:
	case ARIZONA_EQ1_1: case ARIZONA_EQ1_2: case ARIZONA_EQ1_3: case ARIZONA_EQ1_4:
	case ARIZONA_EQ1_5: case ARIZONA_EQ1_6: case ARIZONA_EQ1_7: case ARIZONA_EQ1_8:
	case ARIZONA_EQ1_9: case ARIZONA_EQ1_10: case ARIZONA_EQ1_11: case ARIZONA_EQ1_12:
	case ARIZONA_EQ1_13: case ARIZONA_EQ1_14: case ARIZONA_EQ1_15: case ARIZONA_EQ1_16:
	case ARIZONA_EQ1_17: case ARIZONA_EQ1_18: case ARIZONA_EQ1_19: case ARIZONA_EQ1_20:
	case ARIZONA_EQ1_21:
	case ARIZONA_EQ2_1: case ARIZONA_EQ2_2: case ARIZONA_EQ2_3: case ARIZONA_EQ2_4:
	case ARIZONA_EQ2_5: case ARIZONA_EQ2_6: case ARIZONA_EQ2_7: case ARIZONA_EQ2_8:
	case ARIZONA_EQ2_9: case ARIZONA_EQ2_10: case ARIZONA_EQ2_11: case ARIZONA_EQ2_12:
	case ARIZONA_EQ2_13: case ARIZONA_EQ2_14: case ARIZONA_EQ2_15: case ARIZONA_EQ2_16:
	case ARIZONA_EQ2_17: case ARIZONA_EQ2_18: case ARIZONA_EQ2_19: case ARIZONA_EQ2_20:
	case ARIZONA_EQ2_21:
	case ARIZONA_EQ3_1: case ARIZONA_EQ3_2: case ARIZONA_EQ3_3: case ARIZONA_EQ3_4:
	case ARIZONA_EQ3_5: case ARIZONA_EQ3_6: case ARIZONA_EQ3_7: case ARIZONA_EQ3_8:
	case ARIZONA_EQ3_9: case ARIZONA_EQ3_10: case ARIZONA_EQ3_11: case ARIZONA_EQ3_12:
	case ARIZONA_EQ3_13: case ARIZONA_EQ3_14: case ARIZONA_EQ3_15: case ARIZONA_EQ3_16:
	case ARIZONA_EQ3_17: case ARIZONA_EQ3_18: case ARIZONA_EQ3_19: case ARIZONA_EQ3_20:
	case ARIZONA_EQ3_21:
	case ARIZONA_EQ4_1: case ARIZONA_EQ4_2: case ARIZONA_EQ4_3: case ARIZONA_EQ4_4:
	case ARIZONA_EQ4_5: case ARIZONA_EQ4_6: case ARIZONA_EQ4_7: case ARIZONA_EQ4_8:
	case ARIZONA_EQ4_9: case ARIZONA_EQ4_10: case ARIZONA_EQ4_11: case ARIZONA_EQ4_12:
	case ARIZONA_EQ4_13: case ARIZONA_EQ4_14: case ARIZONA_EQ4_15: case ARIZONA_EQ4_16:
	case ARIZONA_EQ4_17: case ARIZONA_EQ4_18: case ARIZONA_EQ4_19: case ARIZONA_EQ4_20:
	case ARIZONA_EQ4_21:
	/* Effects: DRC / HPLPF / sample-rate converters */
	case ARIZONA_DRC1_CTRL1: case ARIZONA_DRC1_CTRL2: case ARIZONA_DRC1_CTRL3:
	case ARIZONA_DRC1_CTRL4: case ARIZONA_DRC1_CTRL5:
	case ARIZONA_DRC2_CTRL1: case ARIZONA_DRC2_CTRL2: case ARIZONA_DRC2_CTRL3:
	case ARIZONA_DRC2_CTRL4: case ARIZONA_DRC2_CTRL5:
	case ARIZONA_HPLPF1_1: case ARIZONA_HPLPF1_2:
	case ARIZONA_HPLPF2_1: case ARIZONA_HPLPF2_2:
	case ARIZONA_HPLPF3_1: case ARIZONA_HPLPF3_2:
	case ARIZONA_HPLPF4_1: case ARIZONA_HPLPF4_2:
	case ARIZONA_ASRC_ENABLE: case ARIZONA_ASRC_STATUS: case ARIZONA_ASRC_RATE1:
	case ARIZONA_ISRC_1_CTRL_1: case ARIZONA_ISRC_1_CTRL_2: case ARIZONA_ISRC_1_CTRL_3:
	case ARIZONA_ISRC_2_CTRL_1: case ARIZONA_ISRC_2_CTRL_2: case ARIZONA_ISRC_2_CTRL_3:
	case ARIZONA_ISRC_3_CTRL_1: case ARIZONA_ISRC_3_CTRL_2: case ARIZONA_ISRC_3_CTRL_3:
	case ARIZONA_CLOCK_CONTROL: case ARIZONA_ANC_SRC:
	/* DSP core control/status */
	case ARIZONA_DSP_STATUS:
	case ARIZONA_DSP1_CONTROL_1: case ARIZONA_DSP1_CLOCKING_1:
	case ARIZONA_DSP1_STATUS_1: case ARIZONA_DSP1_STATUS_2:
	case ARIZONA_DSP1_STATUS_3:
	case ARIZONA_DSP1_SCRATCH_0: case ARIZONA_DSP1_SCRATCH_1:
	case ARIZONA_DSP1_SCRATCH_2: case ARIZONA_DSP1_SCRATCH_3:
	case ARIZONA_DSP2_CONTROL_1: case ARIZONA_DSP2_CLOCKING_1:
	case ARIZONA_DSP2_STATUS_1: case ARIZONA_DSP2_STATUS_2:
	case ARIZONA_DSP2_STATUS_3:
	case ARIZONA_DSP2_SCRATCH_0: case ARIZONA_DSP2_SCRATCH_1:
	case ARIZONA_DSP2_SCRATCH_2: case ARIZONA_DSP2_SCRATCH_3:
	case ARIZONA_DSP3_CONTROL_1: case ARIZONA_DSP3_CLOCKING_1:
	case ARIZONA_DSP3_STATUS_1: case ARIZONA_DSP3_STATUS_2:
	case ARIZONA_DSP3_STATUS_3:
	case ARIZONA_DSP3_SCRATCH_0: case ARIZONA_DSP3_SCRATCH_1:
	case ARIZONA_DSP3_SCRATCH_2: case ARIZONA_DSP3_SCRATCH_3:
	case ARIZONA_DSP4_CONTROL_1: case ARIZONA_DSP4_CLOCKING_1:
	case ARIZONA_DSP4_STATUS_1: case ARIZONA_DSP4_STATUS_2:
	case ARIZONA_DSP4_STATUS_3:
	case ARIZONA_DSP4_SCRATCH_0: case ARIZONA_DSP4_SCRATCH_1:
	case ARIZONA_DSP4_SCRATCH_2: case ARIZONA_DSP4_SCRATCH_3:
		return true;
	default:
		/* Not an enumerated register: readable only inside ADSP memory */
		return wm5110_is_adsp_memory(dev, reg);
	}
}

/*
 * regmap .volatile_reg callback: registers whose value must not be
 * cached by regmap (hardware status, IRQ and DSP scratch registers).
 * NOTE: this function continues beyond this chunk of the file.
 */
static bool wm5110_volatile_register(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case ARIZONA_SOFTWARE_RESET:
	case ARIZONA_DEVICE_REVISION:
	case ARIZONA_HAPTICS_STATUS:
	case
ARIZONA_SAMPLE_RATE_1_STATUS: case ARIZONA_SAMPLE_RATE_2_STATUS: case ARIZONA_SAMPLE_RATE_3_STATUS: case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS: case ARIZONA_MIC_DETECT_3: case ARIZONA_HEADPHONE_DETECT_2: case ARIZONA_INPUT_ENABLES_STATUS: case ARIZONA_OUTPUT_STATUS_1: case ARIZONA_RAW_OUTPUT_STATUS_1: case ARIZONA_SLIMBUS_RX_PORT_STATUS: case ARIZONA_SLIMBUS_TX_PORT_STATUS: case ARIZONA_INTERRUPT_STATUS_1: case ARIZONA_INTERRUPT_STATUS_2: case ARIZONA_INTERRUPT_STATUS_3: case ARIZONA_INTERRUPT_STATUS_4: case ARIZONA_INTERRUPT_STATUS_5: case ARIZONA_IRQ2_STATUS_1: case ARIZONA_IRQ2_STATUS_2: case ARIZONA_IRQ2_STATUS_3: case ARIZONA_IRQ2_STATUS_4: case ARIZONA_IRQ2_STATUS_5: case ARIZONA_INTERRUPT_RAW_STATUS_2: case ARIZONA_INTERRUPT_RAW_STATUS_3: case ARIZONA_INTERRUPT_RAW_STATUS_4: case ARIZONA_INTERRUPT_RAW_STATUS_5: case ARIZONA_INTERRUPT_RAW_STATUS_6: case ARIZONA_INTERRUPT_RAW_STATUS_7: case ARIZONA_INTERRUPT_RAW_STATUS_8: case ARIZONA_IRQ_PIN_STATUS: case ARIZONA_AOD_WKUP_AND_TRIG: case ARIZONA_AOD_IRQ1: case ARIZONA_AOD_IRQ2: case ARIZONA_AOD_IRQ_RAW_STATUS: case ARIZONA_FX_CTRL2: case ARIZONA_ASRC_STATUS: case ARIZONA_DSP_STATUS: case ARIZONA_DSP1_STATUS_1: case ARIZONA_DSP1_STATUS_2: case ARIZONA_DSP1_STATUS_3: case ARIZONA_DSP1_SCRATCH_0: case ARIZONA_DSP1_SCRATCH_1: case ARIZONA_DSP1_SCRATCH_2: case ARIZONA_DSP1_SCRATCH_3: case ARIZONA_DSP2_STATUS_1: case ARIZONA_DSP2_STATUS_2: case ARIZONA_DSP2_STATUS_3: case ARIZONA_DSP2_SCRATCH_0: case ARIZONA_DSP2_SCRATCH_1: case ARIZONA_DSP2_SCRATCH_2: case ARIZONA_DSP2_SCRATCH_3: case ARIZONA_DSP3_STATUS_1: case ARIZONA_DSP3_STATUS_2: case ARIZONA_DSP3_STATUS_3: case ARIZONA_DSP3_SCRATCH_0: case ARIZONA_DSP3_SCRATCH_1: case ARIZONA_DSP3_SCRATCH_2: case ARIZONA_DSP3_SCRATCH_3: case ARIZONA_DSP4_STATUS_1: case ARIZONA_DSP4_STATUS_2: case ARIZONA_DSP4_STATUS_3: case ARIZONA_DSP4_SCRATCH_0: case ARIZONA_DSP4_SCRATCH_1: case ARIZONA_DSP4_SCRATCH_2: case ARIZONA_DSP4_SCRATCH_3: return true; default: return 
wm5110_is_adsp_memory(dev, reg); } }	/* completes the "default: return" begun on the previous source line: unknown registers are volatile only if they fall in ADSP memory */
/* Highest register address on the WM5110 (covers the ADSP memory regions). */
#define WM5110_MAX_REGISTER 0x4a9fff
/*
 * regmap description for an SPI-attached WM5110.
 * NOTE(review): .pad_bits = 16 appears only in the SPI variant, not the I2C
 * one below -- presumably dummy bits between address and data phases on the
 * SPI bus; confirm against the WM5110 datasheet.
 */
const struct regmap_config wm5110_spi_regmap = {
	.reg_bits = 32,		/* 32-bit register addresses */
	.pad_bits = 16,
	.val_bits = 16,		/* 16-bit register values */
	.max_register = WM5110_MAX_REGISTER,
	.readable_reg = wm5110_readable_register,
	.volatile_reg = wm5110_volatile_register,
	.cache_type = REGCACHE_RBTREE,	/* cache non-volatile registers */
	.reg_defaults = wm5110_reg_default,
	.num_reg_defaults = ARRAY_SIZE(wm5110_reg_default),
};
EXPORT_SYMBOL_GPL(wm5110_spi_regmap);
/* regmap description for an I2C-attached WM5110 (same layout, no pad bits). */
const struct regmap_config wm5110_i2c_regmap = {
	.reg_bits = 32,
	.val_bits = 16,
	.max_register = WM5110_MAX_REGISTER,
	.readable_reg = wm5110_readable_register,
	.volatile_reg = wm5110_volatile_register,
	.cache_type = REGCACHE_RBTREE,
	.reg_defaults = wm5110_reg_default,
	.num_reg_defaults = ARRAY_SIZE(wm5110_reg_default),
};
EXPORT_SYMBOL_GPL(wm5110_i2c_regmap);
gpl-2.0
cogsy23/linux-hg
drivers/ata/ata_piix.c
243
51319
/* * ata_piix.c - Intel PATA/SATA controllers * * Maintained by: Tejun Heo <tj@kernel.org> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * * * Copyright 2003-2005 Red Hat Inc * Copyright 2003-2005 Jeff Garzik * * * Copyright header from piix.c: * * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org> * Copyright (C) 2003 Red Hat Inc * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * * * libata documentation is available via 'make {ps|pdf}docs', * as Documentation/DocBook/libata.* * * Hardware documentation available at http://developer.intel.com/ * * Documentation * Publicly available from Intel web site. Errata documentation * is also publicly available. As an aide to anyone hacking on this * driver the list of errata that are relevant is below, going back to * PIIX4. Older device documentation is now a bit tricky to find. * * The chipsets all follow very much the same design. The original Triton * series chipsets do _not_ support independent device timings, but this * is fixed in Triton II. With the odd mobile exception the chips then * change little except in gaining more modes until SATA arrives. This * driver supports only the chips with independent timing (that is those * with SITRE and the 0x44 timing register). 
See pata_oldpiix and pata_mpiix * for the early chip drivers. * * Errata of note: * * Unfixable * PIIX4 errata #9 - Only on ultra obscure hw * ICH3 errata #13 - Not observed to affect real hw * by Intel * * Things we must deal with * PIIX4 errata #10 - BM IDE hang with non UDMA * (must stop/start dma to recover) * 440MX errata #15 - As PIIX4 errata #10 * PIIX4 errata #15 - Must not read control registers * during a PIO transfer * 440MX errata #13 - As PIIX4 errata #15 * ICH2 errata #21 - DMA mode 0 doesn't work right * ICH0/1 errata #55 - As ICH2 errata #21 * ICH2 spec c #9 - Extra operations needed to handle * drive hotswap [NOT YET SUPPORTED] * ICH2 spec c #20 - IDE PRD must not cross a 64K boundary * and must be dword aligned * ICH2 spec c #24 - UDMA mode 4,5 t85/86 should be 6ns not 3.3 * ICH7 errata #16 - MWDMA1 timings are incorrect * * Should have been BIOS fixed: * 450NX: errata #19 - DMA hangs on old 450NX * 450NX: errata #20 - DMA hangs on old 450NX * 450NX: errata #25 - Corruption with DMA on old 450NX * ICH3 errata #15 - IDE deadlock under high load * (BIOS must set dev 31 fn 0 bit 23) * ICH3 errata #18 - Don't use native mode */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/gfp.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #include <linux/dmi.h> #define DRV_NAME "ata_piix" #define DRV_VERSION "2.13" enum { PIIX_IOCFG = 0x54, /* IDE I/O configuration register */ ICH5_PMR = 0x90, /* address map register */ ICH5_PCS = 0x92, /* port control and status */ PIIX_SIDPR_BAR = 5, PIIX_SIDPR_LEN = 16, PIIX_SIDPR_IDX = 0, PIIX_SIDPR_DATA = 4, PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */ PIIX_FLAG_SIDPR = (1 << 29), /* SATA idx/data pair regs */ PIIX_PATA_FLAGS = ATA_FLAG_SLAVE_POSS, PIIX_SATA_FLAGS = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR, PIIX_FLAG_PIO16 = (1 << 30), /*support 16bit PIO 
only*/ PIIX_80C_PRI = (1 << 5) | (1 << 4), PIIX_80C_SEC = (1 << 7) | (1 << 6), /* constants for mapping table */ P0 = 0, /* port 0 */ P1 = 1, /* port 1 */ P2 = 2, /* port 2 */ P3 = 3, /* port 3 */ IDE = -1, /* IDE */ NA = -2, /* not available */ RV = -3, /* reserved */ PIIX_AHCI_DEVICE = 6, /* host->flags bits */ PIIX_HOST_BROKEN_SUSPEND = (1 << 24), }; enum piix_controller_ids { /* controller IDs */ piix_pata_mwdma, /* PIIX3 MWDMA only */ piix_pata_33, /* PIIX4 at 33Mhz */ ich_pata_33, /* ICH up to UDMA 33 only */ ich_pata_66, /* ICH up to 66 Mhz */ ich_pata_100, /* ICH up to UDMA 100 */ ich_pata_100_nomwdma1, /* ICH up to UDMA 100 but with no MWDMA1*/ ich5_sata, ich6_sata, ich6m_sata, ich8_sata, ich8_2port_sata, ich8m_apple_sata, /* locks up on second port enable */ tolapai_sata, piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */ ich8_sata_snb, ich8_2port_sata_snb, ich8_2port_sata_byt, }; struct piix_map_db { const u32 mask; const u16 port_enable; const int map[][4]; }; struct piix_host_priv { const int *map; u32 saved_iocfg; void __iomem *sidpr; }; static unsigned int in_module_init = 1; static const struct pci_device_id piix_pci_tbl[] = { /* Intel PIIX3 for the 430HX etc */ { 0x8086, 0x7010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_mwdma }, /* VMware ICH4 */ { 0x8086, 0x7111, 0x15ad, 0x1976, 0, 0, piix_pata_vmw }, /* Intel PIIX4 for the 430TX/440BX/MX chipset: UDMA 33 */ /* Also PIIX4E (fn3 rev 2) and PIIX4M (fn3 rev 3) */ { 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 }, /* Intel PIIX4 */ { 0x8086, 0x7199, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 }, /* Intel PIIX4 */ { 0x8086, 0x7601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 }, /* Intel PIIX */ { 0x8086, 0x84CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 }, /* Intel ICH (i810, i815, i840) UDMA 66*/ { 0x8086, 0x2411, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_66 }, /* Intel ICH0 : UDMA 33*/ { 0x8086, 0x2421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_33 }, /* Intel ICH2M */ { 0x8086, 0x244A, 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, /* Intel ICH2 (i810E2, i845, 850, 860) UDMA 100 */ { 0x8086, 0x244B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, /* Intel ICH3M */ { 0x8086, 0x248A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, /* Intel ICH3 (E7500/1) UDMA 100 */ { 0x8086, 0x248B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, /* Intel ICH4-L */ { 0x8086, 0x24C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, /* Intel ICH4 (i845GV, i845E, i852, i855) UDMA 100 */ { 0x8086, 0x24CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, { 0x8086, 0x24CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, /* Intel ICH5 */ { 0x8086, 0x24DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, /* C-ICH (i810E2) */ { 0x8086, 0x245B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, /* ESB (855GME/875P + 6300ESB) UDMA 100 */ { 0x8086, 0x25A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, /* ICH6 (and 6) (i915) UDMA 100 */ { 0x8086, 0x266F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, /* ICH7/7-R (i945, i975) UDMA 100*/ { 0x8086, 0x27DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100_nomwdma1 }, { 0x8086, 0x269E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100_nomwdma1 }, /* ICH8 Mobile PATA Controller */ { 0x8086, 0x2850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, /* SATA ports */ /* 82801EB (ICH5) */ { 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, /* 82801EB (ICH5) */ { 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, /* 6300ESB (ICH5 variant with broken PCS present bits) */ { 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, /* 6300ESB pretending RAID */ { 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, /* 82801FB/FW (ICH6/ICH6W) */ { 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata }, /* 82801FR/FRW (ICH6R/ICH6RW) */ { 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata }, /* 82801FBM ICH6M (ICH6R with only port 0 and 2 implemented). * Attach iff the controller is in IDE mode. 
*/ { 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, ich6m_sata }, /* 82801GB/GR/GH (ICH7, identical to ICH6) */ { 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata }, /* 82801GBM/GHM (ICH7M, identical to ICH6M) */ { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata }, /* Enterprise Southbridge 2 (631xESB/632xESB) */ { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata }, /* SATA Controller 1 IDE (ICH8) */ { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, /* SATA Controller 2 IDE (ICH8) */ { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* Mobile SATA Controller IDE (ICH8M), Apple */ { 0x8086, 0x2828, 0x106b, 0x00a0, 0, 0, ich8m_apple_sata }, { 0x8086, 0x2828, 0x106b, 0x00a1, 0, 0, ich8m_apple_sata }, { 0x8086, 0x2828, 0x106b, 0x00a3, 0, 0, ich8m_apple_sata }, /* Mobile SATA Controller IDE (ICH8M) */ { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, /* SATA Controller IDE (ICH9) */ { 0x8086, 0x2920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, /* SATA Controller IDE (ICH9) */ { 0x8086, 0x2921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (ICH9) */ { 0x8086, 0x2926, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (ICH9M) */ { 0x8086, 0x2928, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (ICH9M) */ { 0x8086, 0x292d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (ICH9M) */ { 0x8086, 0x292e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, /* SATA Controller IDE (Tolapai) */ { 0x8086, 0x5028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, tolapai_sata }, /* SATA Controller IDE (ICH10) */ { 0x8086, 0x3a00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, /* SATA Controller IDE (ICH10) */ { 0x8086, 0x3a06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (ICH10) */ { 0x8086, 0x3a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, /* SATA Controller IDE (ICH10) */ { 0x8086, 0x3a26, PCI_ANY_ID, 
PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (PCH) */ { 0x8086, 0x3b20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, /* SATA Controller IDE (PCH) */ { 0x8086, 0x3b21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (PCH) */ { 0x8086, 0x3b26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (PCH) */ { 0x8086, 0x3b28, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, /* SATA Controller IDE (PCH) */ { 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (PCH) */ { 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, /* SATA Controller IDE (CPT) */ { 0x8086, 0x1c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, /* SATA Controller IDE (CPT) */ { 0x8086, 0x1c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, /* SATA Controller IDE (CPT) */ { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (CPT) */ { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (PBG) */ { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, /* SATA Controller IDE (PBG) */ { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (Panther Point) */ { 0x8086, 0x1e00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, /* SATA Controller IDE (Panther Point) */ { 0x8086, 0x1e01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, /* SATA Controller IDE (Panther Point) */ { 0x8086, 0x1e08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (Panther Point) */ { 0x8086, 0x1e09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (Lynx Point) */ { 0x8086, 0x8c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, /* SATA Controller IDE (Lynx Point) */ { 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, /* SATA Controller IDE (Lynx Point) */ { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb }, /* SATA Controller IDE (Lynx Point) */ { 0x8086, 
0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (Lynx Point-LP) */ { 0x8086, 0x9c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, /* SATA Controller IDE (Lynx Point-LP) */ { 0x8086, 0x9c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, /* SATA Controller IDE (Lynx Point-LP) */ { 0x8086, 0x9c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (Lynx Point-LP) */ { 0x8086, 0x9c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (DH89xxCC) */ { 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (Avoton) */ { 0x8086, 0x1f20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, /* SATA Controller IDE (Avoton) */ { 0x8086, 0x1f21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, /* SATA Controller IDE (Avoton) */ { 0x8086, 0x1f30, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (Avoton) */ { 0x8086, 0x1f31, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (Wellsburg) */ { 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, /* SATA Controller IDE (Wellsburg) */ { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb }, /* SATA Controller IDE (Wellsburg) */ { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, /* SATA Controller IDE (Wellsburg) */ { 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (BayTrail) */ { 0x8086, 0x0F20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt }, { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt }, /* SATA Controller IDE (Coleto Creek) */ { 0x8086, 0x23a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, { } /* terminate list */ }; static const struct piix_map_db ich5_map_db = { .mask = 0x7, .port_enable = 0x3, .map = { /* PM PS SM SS MAP */ { P0, NA, P1, NA }, /* 000b */ { P1, NA, P0, NA }, /* 001b */ { RV, RV, RV, RV }, { RV, RV, RV, RV }, { P0, P1, IDE, IDE }, /* 100b */ { P1, P0, IDE, IDE 
}, /* 101b */ { IDE, IDE, P0, P1 }, /* 110b */ { IDE, IDE, P1, P0 }, /* 111b */ }, }; static const struct piix_map_db ich6_map_db = { .mask = 0x3, .port_enable = 0xf, .map = { /* PM PS SM SS MAP */ { P0, P2, P1, P3 }, /* 00b */ { IDE, IDE, P1, P3 }, /* 01b */ { P0, P2, IDE, IDE }, /* 10b */ { RV, RV, RV, RV }, }, }; static const struct piix_map_db ich6m_map_db = { .mask = 0x3, .port_enable = 0x5, /* Map 01b isn't specified in the doc but some notebooks use * it anyway. MAP 01b have been spotted on both ICH6M and * ICH7M. */ .map = { /* PM PS SM SS MAP */ { P0, P2, NA, NA }, /* 00b */ { IDE, IDE, P1, P3 }, /* 01b */ { P0, P2, IDE, IDE }, /* 10b */ { RV, RV, RV, RV }, }, }; static const struct piix_map_db ich8_map_db = { .mask = 0x3, .port_enable = 0xf, .map = { /* PM PS SM SS MAP */ { P0, P2, P1, P3 }, /* 00b (hardwired when in AHCI) */ { RV, RV, RV, RV }, { P0, P2, IDE, IDE }, /* 10b (IDE mode) */ { RV, RV, RV, RV }, }, }; static const struct piix_map_db ich8_2port_map_db = { .mask = 0x3, .port_enable = 0x3, .map = { /* PM PS SM SS MAP */ { P0, NA, P1, NA }, /* 00b */ { RV, RV, RV, RV }, /* 01b */ { RV, RV, RV, RV }, /* 10b */ { RV, RV, RV, RV }, }, }; static const struct piix_map_db ich8m_apple_map_db = { .mask = 0x3, .port_enable = 0x1, .map = { /* PM PS SM SS MAP */ { P0, NA, NA, NA }, /* 00b */ { RV, RV, RV, RV }, { P0, P2, IDE, IDE }, /* 10b */ { RV, RV, RV, RV }, }, }; static const struct piix_map_db tolapai_map_db = { .mask = 0x3, .port_enable = 0x3, .map = { /* PM PS SM SS MAP */ { P0, NA, P1, NA }, /* 00b */ { RV, RV, RV, RV }, /* 01b */ { RV, RV, RV, RV }, /* 10b */ { RV, RV, RV, RV }, }, }; static const struct piix_map_db *piix_map_db_table[] = { [ich5_sata] = &ich5_map_db, [ich6_sata] = &ich6_map_db, [ich6m_sata] = &ich6m_map_db, [ich8_sata] = &ich8_map_db, [ich8_2port_sata] = &ich8_2port_map_db, [ich8m_apple_sata] = &ich8m_apple_map_db, [tolapai_sata] = &tolapai_map_db, [ich8_sata_snb] = &ich8_map_db, [ich8_2port_sata_snb] = &ich8_2port_map_db, 
[ich8_2port_sata_byt] = &ich8_2port_map_db, }; static struct pci_bits piix_enable_bits[] = { { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */ { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */ }; MODULE_AUTHOR("Andre Hedrick, Alan Cox, Andrzej Krzysztofowicz, Jeff Garzik"); MODULE_DESCRIPTION("SCSI low-level driver for Intel PIIX/ICH ATA controllers"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, piix_pci_tbl); MODULE_VERSION(DRV_VERSION); struct ich_laptop { u16 device; u16 subvendor; u16 subdevice; }; /* * List of laptops that use short cables rather than 80 wire */ static const struct ich_laptop ich_laptop[] = { /* devid, subvendor, subdev */ { 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */ { 0x27DF, 0x1025, 0x0102 }, /* ICH7 on Acer 5602aWLMi */ { 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */ { 0x27DF, 0x1028, 0x02b0 }, /* ICH7 on unknown Dell */ { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */ { 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */ { 0x27DF, 0x103C, 0x361a }, /* ICH7 on unknown HP */ { 0x27DF, 0x1071, 0xD221 }, /* ICH7 on Hercules EC-900 */ { 0x27DF, 0x152D, 0x0778 }, /* ICH7 on unknown Intel */ { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */ { 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */ { 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */ { 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */ { 0x27df, 0x104d, 0x900e }, /* ICH7 on Sony TZ-90 */ /* end marker */ { 0, } }; static int piix_port_start(struct ata_port *ap) { if (!(ap->flags & PIIX_FLAG_PIO16)) ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE; return ata_bmdma_port_start(ap); } /** * ich_pata_cable_detect - Probe host controller cable detect info * @ap: Port for which cable detect info is desired * * Read 80c cable indicator from ATA PCI device's PCI config * register. This register is normally set by firmware (BIOS). * * LOCKING: * None (inherited from caller). 
*/
static int ich_pata_cable_detect(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct piix_host_priv *hpriv = ap->host->private_data;
	const struct ich_laptop *lap = &ich_laptop[0];
	u8 mask;

	/* Check for specials: laptops known to use short 40-wire cables */
	while (lap->device) {
		if (lap->device == pdev->device &&
		    lap->subvendor == pdev->subsystem_vendor &&
		    lap->subdevice == pdev->subsystem_device)
			return ATA_CBL_PATA40_SHORT;
		lap++;
	}

	/* check BIOS cable detect results (80c bits saved from PIIX_IOCFG) */
	mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC;
	if ((hpriv->saved_iocfg & mask) == 0)
		return ATA_CBL_PATA40;
	return ATA_CBL_PATA80;
}

/**
 * piix_pata_prereset - prereset for PATA host controller
 * @link: Target link
 * @deadline: deadline jiffies for the operation
 *
 * Returns -ENOENT when the port is not enabled in PCI config space,
 * otherwise defers to the generic SFF prereset.
 *
 * LOCKING:
 * None (inherited from caller).
 */
static int piix_pata_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no]))
		return -ENOENT;
	return ata_sff_prereset(link, deadline);
}

/* Serializes PCI config read-modify-write of the shared timing registers. */
static DEFINE_SPINLOCK(piix_lock);

/* Program PIO timing (and clear DTE/UDMA state) for one device. */
static void piix_set_timings(struct ata_port *ap, struct ata_device *adev,
			     u8 pio)
{
	struct pci_dev *dev = to_pci_dev(ap->host->dev);
	unsigned long flags;
	unsigned int is_slave = (adev->devno != 0);
	unsigned int master_port = ap->port_no ? 0x42 : 0x40;
	unsigned int slave_port = 0x44;
	u16 master_data;
	u8 slave_data;
	u8 udma_enable;
	int control = 0;

	/*
	 * See Intel Document 298600-004 for the timing programing rules
	 * for ICH controllers.
	 */
	static const u8 timings[][2] = {	/* ISP  RTC */
		{ 0, 0 },
		{ 0, 0 },
		{ 1, 0 },
		{ 2, 1 },
		{ 2, 3 }, };

	if (pio >= 2)
		control |= 1;	/* TIME1 enable */
	if (ata_pio_need_iordy(adev))
		control |= 2;	/* IE enable */
	/* Intel specifies that the PPE functionality is for disk only */
	if (adev->class == ATA_DEV_ATA)
		control |= 4;	/* PPE enable */
	/*
	 * If the drive MWDMA is faster than it can do PIO then
	 * we must force PIO into PIO0
	 */
	if (adev->pio_mode < XFER_PIO_0 + pio)
		/* Enable DMA timing only */
		control |= 8;	/* PIO cycles in PIO0 */

	spin_lock_irqsave(&piix_lock, flags);

	/* PIO configuration clears DTE unconditionally.  It will be
	 * programmed in set_dmamode which is guaranteed to be called
	 * after set_piomode if any DMA mode is available.
	 */
	pci_read_config_word(dev, master_port, &master_data);
	if (is_slave) {
		/* clear TIME1|IE1|PPE1|DTE1 */
		master_data &= 0xff0f;
		/* enable PPE1, IE1 and TIME1 as needed */
		master_data |= (control << 4);
		pci_read_config_byte(dev, slave_port, &slave_data);
		slave_data &= (ap->port_no ? 0x0f : 0xf0);
		/* Load the timing nibble for this slave */
		slave_data |= ((timings[pio][0] << 2) | timings[pio][1])
						<< (ap->port_no ? 4 : 0);
	} else {
		/* clear ISP|RCT|TIME0|IE0|PPE0|DTE0 */
		master_data &= 0xccf0;
		/* Enable PPE, IE and TIME as appropriate */
		master_data |= control;
		/* load ISP and RCT */
		master_data |= (timings[pio][0] << 12) |
			(timings[pio][1] << 8);
	}

	/* Enable SITRE (separate slave timing register) */
	master_data |= 0x4000;
	pci_write_config_word(dev, master_port, master_data);
	if (is_slave)
		pci_write_config_byte(dev, slave_port, slave_data);

	/* Ensure the UDMA bit is off - it will be turned back on if
	   UDMA is selected */
	if (ap->udma_mask) {
		pci_read_config_byte(dev, 0x48, &udma_enable);
		udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
		pci_write_config_byte(dev, 0x48, udma_enable);
	}

	spin_unlock_irqrestore(&piix_lock, flags);
}

/**
 * piix_set_piomode - Initialize host controller PATA PIO timings
 * @ap: Port whose timings we are configuring
 * @adev: Drive in question
 *
 * Set PIO mode for device, in host controller PCI config space.
 *
 * LOCKING:
 * None (inherited from caller).
 */
static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	piix_set_timings(ap, adev, adev->pio_mode - XFER_PIO_0);
}

/**
 * do_pata_set_dmamode - Initialize host controller PATA DMA timings
 * @ap: Port whose timings we are configuring
 * @adev: Drive in question
 * @isich: set if the chip is an ICH device
 *
 * Set UDMA mode for device, in host controller PCI config space.
 *
 * LOCKING:
 * None (inherited from caller).
*/
static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, int isich)
{
	struct pci_dev *dev = to_pci_dev(ap->host->dev);
	unsigned long flags;
	u8 speed = adev->dma_mode;
	/* one UDMA enable/timing slot per device: two per port, primary first */
	int devid = adev->devno + 2 * ap->port_no;
	u8 udma_enable = 0;

	if (speed >= XFER_UDMA_0) {
		unsigned int udma = speed - XFER_UDMA_0;
		u16 udma_timing;
		u16 ideconf;
		int u_clock, u_speed;

		spin_lock_irqsave(&piix_lock, flags);

		pci_read_config_byte(dev, 0x48, &udma_enable);

		/*
		 * UDMA is handled by a combination of clock switching and
		 * selection of dividers
		 *
		 * Handy rule: Odd modes are UDMATIMx 01, even are 02
		 * except UDMA0 which is 00
		 */
		u_speed = min(2 - (udma & 1), udma);
		if (udma == 5)
			u_clock = 0x1000;	/* 100Mhz */
		else if (udma > 2)
			u_clock = 1;		/* 66Mhz */
		else
			u_clock = 0;		/* 33Mhz */

		udma_enable |= (1 << devid);

		/* Load the CT/RP selection */
		pci_read_config_word(dev, 0x4A, &udma_timing);
		udma_timing &= ~(3 << (4 * devid));
		udma_timing |= u_speed << (4 * devid);
		pci_write_config_word(dev, 0x4A, udma_timing);

		if (isich) {
			/* Select a 33/66/100Mhz clock */
			pci_read_config_word(dev, 0x54, &ideconf);
			ideconf &= ~(0x1001 << devid);
			ideconf |= u_clock << devid;
			/* For ICH or later we should set bit 10 for better
			   performance (WR_PingPong_En) */
			pci_write_config_word(dev, 0x54, ideconf);
		}

		pci_write_config_byte(dev, 0x48, udma_enable);

		spin_unlock_irqrestore(&piix_lock, flags);
	} else {
		/* MWDMA is driven by the PIO timings. */
		unsigned int mwdma = speed - XFER_MW_DMA_0;
		const unsigned int needed_pio[3] = {
			XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
		};
		int pio = needed_pio[mwdma] - XFER_PIO_0;

		/* XFER_PIO_0 is never used currently */
		piix_set_timings(ap, adev, pio);
	}
}

/**
 * piix_set_dmamode - Initialize host controller PATA DMA timings
 * @ap: Port whose timings we are configuring
 * @adev: Drive in question
 *
 * Set MW/UDMA mode for device, in host controller PCI config space.
 *
 * LOCKING:
 * None (inherited from caller).
*/
static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	/* non-ICH (PIIX-style) parts: skip the 0x54 clock-selection writes */
	do_pata_set_dmamode(ap, adev, 0);
}

/**
 * ich_set_dmamode - Initialize host controller PATA DMA timings
 * @ap: Port whose timings we are configuring
 * @adev: Drive in question
 *
 * Set MW/UDMA mode for device, in host controller PCI config space.
 *
 * LOCKING:
 * None (inherited from caller).
 */
static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	/* ICH parts: additionally program the 33/66/100MHz clock selection */
	do_pata_set_dmamode(ap, adev, 1);
}

/*
 * Serial ATA Index/Data Pair Superset Registers access
 *
 * Beginning from ICH8, there's a sane way to access SCRs using index
 * and data register pair located at BAR5 which means that we have
 * separate SCRs for master and slave.  This is handled using libata
 * slave_link facility.
 */
static const int piix_sidx_map[] = {
	[SCR_STATUS]	= 0,
	[SCR_ERROR]	= 2,
	[SCR_CONTROL]	= 1,
};

/* Latch the SIDPR index for @link's copy of SCR @reg. */
static void piix_sidpr_sel(struct ata_link *link, unsigned int reg)
{
	struct ata_port *ap = link->ap;
	struct piix_host_priv *hpriv = ap->host->private_data;

	iowrite32(((ap->port_no * 2 + link->pmp) << 8) | piix_sidx_map[reg],
		  hpriv->sidpr + PIIX_SIDPR_IDX);
}

/* Read SCR @reg via the index/data pair; returns 0 or -EINVAL. */
static int piix_sidpr_scr_read(struct ata_link *link,
			       unsigned int reg, u32 *val)
{
	struct piix_host_priv *hpriv = link->ap->host->private_data;

	if (reg >= ARRAY_SIZE(piix_sidx_map))
		return -EINVAL;

	piix_sidpr_sel(link, reg);
	*val = ioread32(hpriv->sidpr + PIIX_SIDPR_DATA);
	return 0;
}

/* Write SCR @reg via the index/data pair; returns 0 or -EINVAL. */
static int piix_sidpr_scr_write(struct ata_link *link,
				unsigned int reg, u32 val)
{
	struct piix_host_priv *hpriv = link->ap->host->private_data;

	if (reg >= ARRAY_SIZE(piix_sidx_map))
		return -EINVAL;

	piix_sidpr_sel(link, reg);
	iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
	return 0;
}

/* Delegate link power management policy changes to the generic SCR helper. */
static int piix_sidpr_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
			      unsigned hints)
{
	return sata_link_scr_lpm(link, policy, false);
}

/* Check BMDMA status to see whether this port has an interrupt pending. */
static bool piix_irq_check(struct ata_port *ap)
{
	if (unlikely(!ap->ioaddr.bmdma_addr))
		return false;

	return ap->ops->bmdma_status(ap) & ATA_DMA_INTR;
}

/* NOTE(review): "#ifdef" below and "CONFIG_PM" on the next source line were
 * split across lines by whatever mangled this file; a preprocessor directive
 * cannot span lines without a backslash continuation -- the two tokens need
 * re-joining onto one line when the next line is repaired. */
#ifdef
CONFIG_PM
/*
 * Identify machines whose ACPI/BIOS suspend is known to misbehave with
 * this controller.  Matches a DMI blacklist, an OEM-string list, and one
 * manual match for a TECRA M4 that reports bogus DMI data.  Returns 1 if
 * the suspend workaround is needed, 0 otherwise.
 */
static int piix_broken_suspend(void)
{
	static const struct dmi_system_id sysids[] = {
		{
			.ident = "TECRA M3",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M3"),
			},
		},
		{
			.ident = "TECRA M3",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Tecra M3"),
			},
		},
		{
			.ident = "TECRA M4",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Tecra M4"),
			},
		},
		{
			.ident = "TECRA M4",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M4"),
			},
		},
		{
			.ident = "TECRA M5",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M5"),
			},
		},
		{
			.ident = "TECRA M6",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M6"),
			},
		},
		{
			.ident = "TECRA M7",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M7"),
			},
		},
		{
			.ident = "TECRA A8",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A8"),
			},
		},
		{
			.ident = "Satellite R20",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Satellite R20"),
			},
		},
		{
			.ident = "Satellite R25",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Satellite R25"),
			},
		},
		{
			.ident = "Satellite U200",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U200"),
			},
		},
		{
			.ident = "Satellite U200",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE U200"),
			},
		},
		{
			.ident = "Satellite Pro U200",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE PRO U200"),
			},
		},
		{
			.ident = "Satellite U205",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U205"),
			},
		},
		{
			.ident = "SATELLITE U205",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE U205"),
			},
		},
		{
			.ident = "Satellite Pro A120",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Satellite Pro A120"),
			},
		},
		{
			.ident = "Portege M500",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M500"),
			},
		},
		{
			.ident = "VGN-BX297XP",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
				DMI_MATCH(DMI_PRODUCT_NAME, "VGN-BX297XP"),
			},
		},

		{ }	/* terminate list */
	};
	static const char *oemstrs[] = {
		"Tecra M3,",
	};
	int i;

	if (dmi_check_system(sysids))
		return 1;

	for (i = 0; i < ARRAY_SIZE(oemstrs); i++)
		if (dmi_find_device(DMI_DEV_TYPE_OEM_STRING,
				    oemstrs[i], NULL))
			return 1;

	/* TECRA M4 sometimes forgets its identify and reports bogus
	 * DMI information.  As the bogus information is a bit
	 * generic, match as many entries as possible.  This manual
	 * matching is necessary because dmi_system_id.matches is
	 * limited to four entries.
	 */
	if (dmi_match(DMI_SYS_VENDOR, "TOSHIBA") &&
	    dmi_match(DMI_PRODUCT_NAME, "000000") &&
	    dmi_match(DMI_PRODUCT_VERSION, "000000") &&
	    dmi_match(DMI_PRODUCT_SERIAL, "000000") &&
	    dmi_match(DMI_BOARD_VENDOR, "TOSHIBA") &&
	    dmi_match(DMI_BOARD_NAME, "Portable PC") &&
	    dmi_match(DMI_BOARD_VERSION, "Version A0"))
		return 1;

	return 0;
}

/*
 * PCI suspend hook.  Suspends the ata_host, then either leaves the
 * controller awake (blacklisted machines, system sleep only) or performs
 * the normal PCI device suspend.
 */
static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	unsigned long flags;
	int rc = 0;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	/* Some braindamaged ACPI suspend implementations expect the
	 * controller to be awake on entry; otherwise, it burns cpu
	 * cycles and power trying to do something to the sleeping
	 * beauty.
	 */
	if (piix_broken_suspend() && (mesg.event & PM_EVENT_SLEEP)) {
		pci_save_state(pdev);

		/* mark its power state as "unknown", since we don't
		 * know if e.g. the BIOS will change its device state
		 * when we suspend.
		 */
		if (pdev->current_state == PCI_D0)
			pdev->current_state = PCI_UNKNOWN;

		/* tell resume that it's waking up from broken suspend */
		spin_lock_irqsave(&host->lock, flags);
		host->flags |= PIIX_HOST_BROKEN_SUSPEND;
		spin_unlock_irqrestore(&host->lock, flags);
	} else
		ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}

/*
 * PCI resume hook.  Undoes whichever suspend path was taken (the
 * broken-suspend workaround re-enables the still-enabled device via
 * pci_reenable_device()), then resumes the ata_host.
 */
static int piix_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	unsigned long flags;
	int rc;

	if (host->flags & PIIX_HOST_BROKEN_SUSPEND) {
		spin_lock_irqsave(&host->lock, flags);
		host->flags &= ~PIIX_HOST_BROKEN_SUSPEND;
		spin_unlock_irqrestore(&host->lock, flags);

		pci_set_power_state(pdev, PCI_D0);
		pci_restore_state(pdev);

		/* PCI device wasn't disabled during suspend.  Use
		 * pci_reenable_device() to avoid affecting the enable
		 * count.
		 */
		rc = pci_reenable_device(pdev);
		if (rc)
			dev_err(&pdev->dev,
				"failed to enable device after resume (%d)\n",
				rc);
	} else
		rc = ata_pci_device_do_resume(pdev);

	if (rc == 0)
		ata_host_resume(host);

	return rc;
}
#endif

/*
 * VMware's emulated BMDMA sets spurious DMA error bits; mask them out of
 * the status read.
 */
static u8 piix_vmw_bmdma_status(struct ata_port *ap)
{
	return ata_bmdma_status(ap) & ~ATA_DMA_ERR;
}

static struct scsi_host_template piix_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations piix_sata_ops = {
	.inherits		= &ata_bmdma32_port_ops,
	.sff_irq_check		= piix_irq_check,
	.port_start		= piix_port_start,
};

static struct ata_port_operations piix_pata_ops = {
	.inherits		= &piix_sata_ops,
	.cable_detect		= ata_cable_40wire,
	.set_piomode		= piix_set_piomode,
	.set_dmamode		= piix_set_dmamode,
	.prereset		= piix_pata_prereset,
};

static struct ata_port_operations piix_vmw_ops = {
	.inherits		= &piix_pata_ops,
	.bmdma_status		= piix_vmw_bmdma_status,
};

static struct ata_port_operations ich_pata_ops = {
	.inherits		= &piix_pata_ops,
	.cable_detect		= ich_pata_cable_detect,
	.set_dmamode		= ich_set_dmamode,
};

static struct device_attribute *piix_sidpr_shost_attrs[] = {
	&dev_attr_link_power_management_policy,
	NULL
};

static struct scsi_host_template piix_sidpr_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
	.shost_attrs		= piix_sidpr_shost_attrs,
};

static struct ata_port_operations piix_sidpr_sata_ops = {
	.inherits		= &piix_sata_ops,
	.hardreset		= sata_std_hardreset,
	.scr_read		= piix_sidpr_scr_read,
	.scr_write		= piix_sidpr_scr_write,
	.set_lpm		= piix_sidpr_set_lpm,
};

/* per-controller-variant capabilities, indexed by driver_data in the
 * PCI ID table */
static struct ata_port_info piix_port_info[] = {
	[piix_pata_mwdma] =	/* PIIX3 MWDMA only */
	{
		.flags		= PIIX_PATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
		.port_ops	= &piix_pata_ops,
	},

	[piix_pata_33] =	/* PIIX4 at 33MHz */
	{
		.flags		= PIIX_PATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
		.udma_mask	= ATA_UDMA2,
		.port_ops	= &piix_pata_ops,
	},

	[ich_pata_33] =		/* ICH0 - ICH at 33Mhz*/
	{
		.flags		= PIIX_PATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA12_ONLY, /* Check: maybe MWDMA0 is ok  */
		.udma_mask	= ATA_UDMA2,
		.port_ops	= &ich_pata_ops,
	},

	[ich_pata_66] =		/* ICH controllers up to 66MHz */
	{
		.flags		= PIIX_PATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA12_ONLY, /* MWDMA0 is broken on chip */
		.udma_mask	= ATA_UDMA4,
		.port_ops	= &ich_pata_ops,
	},

	[ich_pata_100] =
	{
		.flags		= PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA12_ONLY,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &ich_pata_ops,
	},

	[ich_pata_100_nomwdma1] =
	{
		.flags		= PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2_ONLY,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &ich_pata_ops,
	},

	[ich5_sata] =
	{
		.flags		= PIIX_SATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &piix_sata_ops,
	},

	[ich6_sata] =
	{
		.flags		= PIIX_SATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &piix_sata_ops,
	},

	[ich6m_sata] =
	{
		.flags		= PIIX_SATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &piix_sata_ops,
	},

	[ich8_sata] =
	{
		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &piix_sata_ops,
	},

	[ich8_2port_sata] =
	{
		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &piix_sata_ops,
	},

	[tolapai_sata] =
	{
		.flags		= PIIX_SATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &piix_sata_ops,
	},

	[ich8m_apple_sata] =
	{
		.flags		= PIIX_SATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &piix_sata_ops,
	},

	[piix_pata_vmw] =
	{
		.flags		= PIIX_PATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
		.udma_mask	= ATA_UDMA2,
		.port_ops	= &piix_vmw_ops,
	},

	/*
	 * some Sandybridge chipsets have broken 32 mode up to now,
	 * see https://bugzilla.kernel.org/show_bug.cgi?id=40592
	 */
	[ich8_sata_snb] =
	{
		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &piix_sata_ops,
	},

	[ich8_2port_sata_snb] =
	{
		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &piix_sata_ops,
	},

	[ich8_2port_sata_byt] =
	{
		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &piix_sata_ops,
	},
};

#define AHCI_PCI_BAR 5
#define AHCI_GLOBAL_CTL 0x04
#define AHCI_ENABLE (1 << 31)

/*
 * Clear the AHCI-enable bit in the controller's AHCI MMIO region so the
 * device operates in legacy IDE mode.  Returns 0 on success (or if the
 * AHCI BAR is absent), -ENOMEM on iomap failure, -EIO if the bit refuses
 * to clear.
 */
static int piix_disable_ahci(struct pci_dev *pdev)
{
	void __iomem *mmio;
	u32 tmp;
	int rc = 0;

	/* BUG: pci_enable_device has not yet been called.  This
	 * works because this device is usually set up by BIOS.
	 */

	if (!pci_resource_start(pdev, AHCI_PCI_BAR) ||
	    !pci_resource_len(pdev, AHCI_PCI_BAR))
		return 0;

	mmio = pci_iomap(pdev, AHCI_PCI_BAR, 64);
	if (!mmio)
		return -ENOMEM;

	tmp = ioread32(mmio + AHCI_GLOBAL_CTL);
	if (tmp & AHCI_ENABLE) {
		tmp &= ~AHCI_ENABLE;
		iowrite32(tmp, mmio + AHCI_GLOBAL_CTL);

		tmp = ioread32(mmio + AHCI_GLOBAL_CTL);
		if (tmp & AHCI_ENABLE)
			rc = -EIO;
	}

	pci_iounmap(pdev, mmio);
	return rc;
}

/**
 *	piix_check_450nx_errata	-	Check for problem 450NX setup
 *	@ata_dev: the PCI device to check
 *
 *	Check for the present of 450NX errata #19 and errata #25. If
 *	they are found return an error code so we can turn off DMA
 */
static int piix_check_450nx_errata(struct pci_dev *ata_dev)
{
	struct pci_dev *pdev = NULL;
	u16 cfg;
	int no_piix_dma = 0;

	while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_82454NX,
				      pdev)) != NULL) {
		/* Look for 450NX PXB. Check for problem configurations
		   A PCI quirk checks bit 6 already */
		pci_read_config_word(pdev, 0x41, &cfg);
		/* Only on the original revision: IDE DMA can hang */
		if (pdev->revision == 0x00)
			no_piix_dma = 1;
		/* On all revisions below 5 PXB bus lock must be disabled for IDE */
		else if (cfg & (1<<14) && pdev->revision < 5)
			no_piix_dma = 2;
	}
	if (no_piix_dma)
		dev_warn(&ata_dev->dev,
			 "450NX errata present, disabling IDE DMA%s\n",
			 no_piix_dma == 2 ? " - a BIOS update may resolve this"
			 : "");

	return no_piix_dma;
}

/*
 * Enable the SATA ports listed in the map db's port_enable mask via the
 * ICH5 PCS register, waiting 150ms for links to come up if anything was
 * newly enabled.
 */
static void piix_init_pcs(struct ata_host *host,
			  const struct piix_map_db *map_db)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u16 pcs, new_pcs;

	pci_read_config_word(pdev, ICH5_PCS, &pcs);

	new_pcs = pcs | map_db->port_enable;

	if (new_pcs != pcs) {
		DPRINTK("updating PCS from 0x%x to 0x%x\n", pcs, new_pcs);
		pci_write_config_word(pdev, ICH5_PCS, new_pcs);
		msleep(150);
	}
}

/*
 * Read the port-mapping register (ICH5_PMR), look the value up in the
 * controller's map db and configure @pinfo accordingly (IDE vs SATA per
 * channel, slave possibility).  Logs the resulting map and returns it.
 */
static const int *piix_init_sata_map(struct pci_dev *pdev,
				     struct ata_port_info *pinfo,
				     const struct piix_map_db *map_db)
{
	const int *map;
	int i, invalid_map = 0;
	u8 map_value;
	char buf[32];
	char *p = buf, *end = buf + sizeof(buf);

	pci_read_config_byte(pdev, ICH5_PMR, &map_value);

	map = map_db->map[map_value & map_db->mask];

	for (i = 0; i < 4; i++) {
		switch (map[i]) {
		case RV:
			invalid_map = 1;
			p += scnprintf(p, end - p, " XX");
			break;

		case NA:
			p += scnprintf(p, end - p, " --");
			break;

		case IDE:
			WARN_ON((i & 1) || map[i + 1] != IDE);
			pinfo[i / 2] = piix_port_info[ich_pata_100];
			i++;
			p += scnprintf(p, end - p, " IDE IDE");
			break;

		default:
			p += scnprintf(p, end - p, " P%d", map[i]);
			if (i & 1)
				pinfo[i / 2].flags |= ATA_FLAG_SLAVE_POSS;
			break;
		}
	}
	dev_info(&pdev->dev, "MAP [%s ]\n", buf);

	if (invalid_map)
		dev_err(&pdev->dev, "invalid MAP value %u\n", map_value);

	return map;
}

static bool piix_no_sidpr(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);

	/*
	 * Samsung DB-P70 only has three ATA ports exposed and
	 * curiously the unconnected first port reports link online
	 * while not responding to SRST protocol causing excessive
	 * detection delay.
	 *
	 * Unfortunately, the system doesn't carry enough DMI
	 * information to identify the machine but does have subsystem
	 * vendor and device set.  As it's unclear whether the
	 * subsystem vendor/device is used only for this specific
	 * board, the port can't be disabled solely with the
	 * information; however, turning off SIDPR access works around
	 * the problem.  Turn it off.
	 *
	 * This problem is reported in bnc#441240.
	 *
	 * https://bugzilla.novell.com/show_bug.cgi?id=441420
	 */
	if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x2920 &&
	    pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
	    pdev->subsystem_device == 0xb049) {
		dev_warn(host->dev,
			 "Samsung DB-P70 detected, disabling SIDPR\n");
		return true;
	}

	return false;
}

/*
 * Probe and initialize SIDPR (Serial ATA Index/Data Pair Register) SCR
 * access.  On success switches the ports to piix_sidpr_sata_ops and sets
 * up slave links; returns 0 whether or not SIDPR is usable, negative
 * errno only on slave-link init failure.
 */
static int piix_init_sidpr(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct piix_host_priv *hpriv = host->private_data;
	struct ata_link *link0 = &host->ports[0]->link;
	u32 scontrol;
	int i, rc;

	/* check for availability */
	for (i = 0; i < 4; i++)
		if (hpriv->map[i] == IDE)
			return 0;

	/* is it blacklisted? */
	if (piix_no_sidpr(host))
		return 0;

	if (!(host->ports[0]->flags & PIIX_FLAG_SIDPR))
		return 0;

	if (pci_resource_start(pdev, PIIX_SIDPR_BAR) == 0 ||
	    pci_resource_len(pdev, PIIX_SIDPR_BAR) != PIIX_SIDPR_LEN)
		return 0;

	if (pcim_iomap_regions(pdev, 1 << PIIX_SIDPR_BAR, DRV_NAME))
		return 0;

	hpriv->sidpr = pcim_iomap_table(pdev)[PIIX_SIDPR_BAR];

	/* SCR access via SIDPR doesn't work on some configurations.
	 * Give it a test drive by inhibiting power save modes which
	 * we'll do anyway.
	 */
	piix_sidpr_scr_read(link0, SCR_CONTROL, &scontrol);

	/* if IPM is already 3, SCR access is probably working.  Don't
	 * un-inhibit power save modes as BIOS might have inhibited
	 * them for a reason.
	 */
	if ((scontrol & 0xf00) != 0x300) {
		scontrol |= 0x300;
		piix_sidpr_scr_write(link0, SCR_CONTROL, scontrol);
		piix_sidpr_scr_read(link0, SCR_CONTROL, &scontrol);

		if ((scontrol & 0xf00) != 0x300) {
			dev_info(host->dev,
				 "SCR access via SIDPR is available but doesn't work\n");
			return 0;
		}
	}

	/* okay, SCRs available, set ops and ask libata for slave_link */
	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];

		ap->ops = &piix_sidpr_sata_ops;

		if (ap->flags & ATA_FLAG_SLAVE_POSS) {
			rc = ata_slave_link_init(ap);
			if (rc)
				return rc;
		}
	}

	return 0;
}

/*
 * Clear IOCFG bit 18 on machines where the BIOS abuses it to disable an
 * IDE channel (Clevo M570U).
 */
static void piix_iocfg_bit18_quirk(struct ata_host *host)
{
	static const struct dmi_system_id sysids[] = {
		{
			/* Clevo M570U sets IOCFG bit 18 if the cdrom
			 * isn't used to boot the system which
			 * disables the channel.
			 */
			.ident = "M570U",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Clevo Co."),
				DMI_MATCH(DMI_PRODUCT_NAME, "M570U"),
			},
		},

		{ }	/* terminate list */
	};
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct piix_host_priv *hpriv = host->private_data;

	if (!dmi_check_system(sysids))
		return;

	/* The datasheet says that bit 18 is NOOP but certain systems
	 * seem to use it to disable a channel.  Clear the bit on the
	 * affected systems.
	 */
	if (hpriv->saved_iocfg & (1 << 18)) {
		dev_info(&pdev->dev, "applying IOCFG bit18 quirk\n");
		pci_write_config_dword(pdev, PIIX_IOCFG,
				       hpriv->saved_iocfg & ~(1 << 18));
	}
}

/*
 * True if this machine's BIOS spins down disks itself on poweroff and
 * the quirk should apply to this on-board controller (matched by PCI
 * slot number stashed in driver_data).
 */
static bool piix_broken_system_poweroff(struct pci_dev *pdev)
{
	static const struct dmi_system_id broken_systems[] = {
		{
			.ident = "HP Compaq 2510p",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
				DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 2510p"),
			},
			/* PCI slot number of the controller */
			.driver_data = (void *)0x1FUL,
		},
		{
			.ident = "HP Compaq nc6000",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
				DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nc6000"),
			},
			/* PCI slot number of the controller */
			.driver_data = (void *)0x1FUL,
		},

		{ }	/* terminate list */
	};
	const struct dmi_system_id *dmi = dmi_first_match(broken_systems);

	if (dmi) {
		unsigned long slot = (unsigned long)dmi->driver_data;
		/* apply the quirk only to on-board controllers */
		return slot == PCI_SLOT(pdev->devfn);
	}

	return false;
}

static int prefer_ms_hyperv = 1;
module_param(prefer_ms_hyperv, int, 0);
MODULE_PARM_DESC(prefer_ms_hyperv,
	"Prefer Hyper-V paravirtualization drivers instead of ATA, "
	"0 - Use ATA drivers, "
	"1 (Default) - Use the paravirtualization drivers.");

/*
 * On Hyper-V guests (but not Virtual PC 2007 guests), mark the host so
 * ATA devices are ignored and left to the paravirtualised storage
 * driver.  Controlled by the prefer_ms_hyperv module parameter.
 */
static void piix_ignore_devices_quirk(struct ata_host *host)
{
#if IS_ENABLED(CONFIG_HYPERV_STORAGE)
	static const struct dmi_system_id ignore_hyperv[] = {
		{
			/* On Hyper-V hypervisors the disks are exposed on
			 * both the emulated SATA controller and on the
			 * paravirtualised drivers.  The CD/DVD devices
			 * are only exposed on the emulated controller.
			 * Request we ignore ATA devices on this host.
			 */
			.ident = "Hyper-V Virtual Machine",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR,
						"Microsoft Corporation"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
			},
		},
		{ }	/* terminate list */
	};
	static const struct dmi_system_id allow_virtual_pc[] = {
		{
			/* In MS Virtual PC guests the DMI ident is nearly
			 * identical to a Hyper-V guest. One difference is the
			 * product version which is used here to identify
			 * a Virtual PC guest. This entry allows ata_piix to
			 * drive the emulated hardware.
			 */
			.ident = "MS Virtual PC 2007",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR,
						"Microsoft Corporation"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
				DMI_MATCH(DMI_PRODUCT_VERSION, "VS2005R2"),
			},
		},
		{ }	/* terminate list */
	};
	const struct dmi_system_id *ignore = dmi_first_match(ignore_hyperv);
	const struct dmi_system_id *allow = dmi_first_match(allow_virtual_pc);

	if (ignore && !allow && prefer_ms_hyperv) {
		host->flags |= ATA_HOST_IGNORE_ATA;
		dev_info(host->dev, "%s detected, ATA device ignore set\n",
			ignore->ident);
	}
#endif
}

/**
 *	piix_init_one - Register PIIX ATA PCI device with kernel services
 *	@pdev: PCI device to register
 *	@ent: Entry in piix_pci_tbl matching with @pdev
 *
 *	Called from kernel PCI layer.  We probe for combined mode (sigh),
 *	and then hand over control to libata, for it to do the rest.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 *
 *	RETURNS:
 *	Zero on success, or -ERRNO value.
 */
static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct ata_port_info port_info[2];
	const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
	struct scsi_host_template *sht = &piix_sht;
	unsigned long port_flags;
	struct ata_host *host;
	struct piix_host_priv *hpriv;
	int rc;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* no hotplugging support for later devices (FIXME) */
	if (!in_module_init && ent->driver_data >= ich5_sata)
		return -ENODEV;

	if (piix_broken_system_poweroff(pdev)) {
		piix_port_info[ent->driver_data].flags |=
				ATA_FLAG_NO_POWEROFF_SPINDOWN |
					ATA_FLAG_NO_HIBERNATE_SPINDOWN;
		dev_info(&pdev->dev, "quirky BIOS, skipping spindown "
				"on poweroff and hibernation\n");
	}

	port_info[0] = piix_port_info[ent->driver_data];
	port_info[1] = piix_port_info[ent->driver_data];

	port_flags = port_info[0].flags;

	/* enable device and prepare host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;

	/* Save IOCFG, this will be used for cable detection, quirk
	 * detection and restoration on detach.  This is necessary
	 * because some ACPI implementations mess up cable related
	 * bits on _STM.  Reported on kernel bz#11879.
	 */
	pci_read_config_dword(pdev, PIIX_IOCFG, &hpriv->saved_iocfg);

	/* ICH6R may be driven by either ata_piix or ahci driver
	 * regardless of BIOS configuration.  Make sure AHCI mode is
	 * off.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x2652) {
		rc = piix_disable_ahci(pdev);
		if (rc)
			return rc;
	}

	/* SATA map init can change port_info, do it before prepping host */
	if (port_flags & ATA_FLAG_SATA)
		hpriv->map = piix_init_sata_map(pdev, port_info,
					piix_map_db_table[ent->driver_data]);

	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	host->private_data = hpriv;

	/* initialize controller */
	if (port_flags & ATA_FLAG_SATA) {
		piix_init_pcs(host, piix_map_db_table[ent->driver_data]);
		rc = piix_init_sidpr(host);
		if (rc)
			return rc;
		if (host->ports[0]->ops == &piix_sidpr_sata_ops)
			sht = &piix_sidpr_sht;
	}

	/* apply IOCFG bit18 quirk */
	piix_iocfg_bit18_quirk(host);

	/* On ICH5, some BIOSen disable the interrupt using the
	 * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3.
	 * On ICH6, this bit has the same effect, but only when
	 * MSI is disabled (and it is disabled, as we don't use
	 * message-signalled interrupts currently).
	 */
	if (port_flags & PIIX_FLAG_CHECKINTR)
		pci_intx(pdev, 1);

	if (piix_check_450nx_errata(pdev)) {
		/* This writes into the master table but it does not
		   really matter for this errata as we will apply it to
		   all the PIIX devices on the board */
		host->ports[0]->mwdma_mask = 0;
		host->ports[0]->udma_mask = 0;
		host->ports[1]->mwdma_mask = 0;
		host->ports[1]->udma_mask = 0;
	}
	host->flags |= ATA_HOST_PARALLEL_SCAN;

	/* Allow hosts to specify device types to ignore when scanning. */
	piix_ignore_devices_quirk(host);

	pci_set_master(pdev);
	return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
}

/* PCI remove hook: restore the IOCFG register saved at probe time, then
 * perform the generic libata PCI teardown. */
static void piix_remove_one(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	struct piix_host_priv *hpriv = host->private_data;

	pci_write_config_dword(pdev, PIIX_IOCFG, hpriv->saved_iocfg);

	ata_pci_remove_one(pdev);
}

static struct pci_driver piix_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= piix_pci_tbl,
	.probe			= piix_init_one,
	.remove			= piix_remove_one,
#ifdef CONFIG_PM
	.suspend		= piix_pci_device_suspend,
	.resume			= piix_pci_device_resume,
#endif
};

/* module init: register the PCI driver and drop the in_module_init flag
 * that gates hotplug of later (>= ich5_sata) devices */
static int __init piix_init(void)
{
	int rc;

	DPRINTK("pci_register_driver\n");
	rc = pci_register_driver(&piix_pci_driver);
	if (rc)
		return rc;

	in_module_init = 0;

	DPRINTK("done\n");
	return 0;
}

static void __exit piix_exit(void)
{
	pci_unregister_driver(&piix_pci_driver);
}

module_init(piix_init);
module_exit(piix_exit);
gpl-2.0
bas-t/media_tree
sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.c
243
15455
/*
 * mt2701-afe-clock-ctrl.c  --  Mediatek 2701 afe clock ctrl
 *
 * Copyright (c) 2016 MediaTek Inc.
 * Author: Garlic Tseng <garlic.tseng@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <sound/soc.h>
#include <linux/regmap.h>
#include <linux/pm_runtime.h>

#include "mt2701-afe-common.h"
#include "mt2701-afe-clock-ctrl.h"

/* clock-consumer names, indexed by the MT2701_AUD_* enum */
static const char *aud_clks[MT2701_CLOCK_NUM] = {
	[MT2701_AUD_INFRA_SYS_AUDIO] = "infra_sys_audio_clk",
	[MT2701_AUD_AUD_MUX1_SEL] = "top_audio_mux1_sel",
	[MT2701_AUD_AUD_MUX2_SEL] = "top_audio_mux2_sel",
	[MT2701_AUD_AUD_MUX1_DIV] = "top_audio_mux1_div",
	[MT2701_AUD_AUD_MUX2_DIV] = "top_audio_mux2_div",
	[MT2701_AUD_AUD_48K_TIMING] = "top_audio_48k_timing",
	[MT2701_AUD_AUD_44K_TIMING] = "top_audio_44k_timing",
	[MT2701_AUD_AUDPLL_MUX_SEL] = "top_audpll_mux_sel",
	[MT2701_AUD_APLL_SEL] = "top_apll_sel",
	[MT2701_AUD_AUD1PLL_98M] = "top_aud1_pll_98M",
	[MT2701_AUD_AUD2PLL_90M] = "top_aud2_pll_90M",
	[MT2701_AUD_HADDS2PLL_98M] = "top_hadds2_pll_98M",
	[MT2701_AUD_HADDS2PLL_294M] = "top_hadds2_pll_294M",
	[MT2701_AUD_AUDPLL] = "top_audpll",
	[MT2701_AUD_AUDPLL_D4] = "top_audpll_d4",
	[MT2701_AUD_AUDPLL_D8] = "top_audpll_d8",
	[MT2701_AUD_AUDPLL_D16] = "top_audpll_d16",
	[MT2701_AUD_AUDPLL_D24] = "top_audpll_d24",
	[MT2701_AUD_AUDINTBUS] = "top_audintbus_sel",
	[MT2701_AUD_CLK_26M] = "clk_26m",
	[MT2701_AUD_SYSPLL1_D4] = "top_syspll1_d4",
	[MT2701_AUD_AUD_K1_SRC_SEL] = "top_aud_k1_src_sel",
	[MT2701_AUD_AUD_K2_SRC_SEL] = "top_aud_k2_src_sel",
	[MT2701_AUD_AUD_K3_SRC_SEL] = "top_aud_k3_src_sel",
	[MT2701_AUD_AUD_K4_SRC_SEL] = "top_aud_k4_src_sel",
	[MT2701_AUD_AUD_K5_SRC_SEL] = "top_aud_k5_src_sel",
	[MT2701_AUD_AUD_K6_SRC_SEL] = "top_aud_k6_src_sel",
	[MT2701_AUD_AUD_K1_SRC_DIV] = "top_aud_k1_src_div",
	[MT2701_AUD_AUD_K2_SRC_DIV] = "top_aud_k2_src_div",
	[MT2701_AUD_AUD_K3_SRC_DIV] = "top_aud_k3_src_div",
	[MT2701_AUD_AUD_K4_SRC_DIV] = "top_aud_k4_src_div",
	[MT2701_AUD_AUD_K5_SRC_DIV] = "top_aud_k5_src_div",
	[MT2701_AUD_AUD_K6_SRC_DIV] = "top_aud_k6_src_div",
	[MT2701_AUD_AUD_I2S1_MCLK] = "top_aud_i2s1_mclk",
	[MT2701_AUD_AUD_I2S2_MCLK] = "top_aud_i2s2_mclk",
	[MT2701_AUD_AUD_I2S3_MCLK] = "top_aud_i2s3_mclk",
	[MT2701_AUD_AUD_I2S4_MCLK] = "top_aud_i2s4_mclk",
	[MT2701_AUD_AUD_I2S5_MCLK] = "top_aud_i2s5_mclk",
	[MT2701_AUD_AUD_I2S6_MCLK] = "top_aud_i2s6_mclk",
	[MT2701_AUD_ASM_M_SEL] = "top_asm_m_sel",
	[MT2701_AUD_ASM_H_SEL] = "top_asm_h_sel",
	[MT2701_AUD_UNIVPLL2_D4] = "top_univpll2_d4",
	[MT2701_AUD_UNIVPLL2_D2] = "top_univpll2_d2",
	[MT2701_AUD_SYSPLL_D5] = "top_syspll_d5",
};

/*
 * Look up every clock in aud_clks[] and cache the handles in
 * afe_priv->clocks[].  Returns 0 on success, negative errno if any
 * clock is missing.
 */
int mt2701_init_clock(struct mtk_base_afe *afe)
{
	struct mt2701_afe_private *afe_priv = afe->platform_priv;
	int i = 0;

	for (i = 0; i < MT2701_CLOCK_NUM; i++) {
		afe_priv->clocks[i] = devm_clk_get(afe->dev, aud_clks[i]);
		/* FIX: check the handle returned by devm_clk_get(), not
		 * the constant name string (the old IS_ERR(aud_clks[i])
		 * could never trigger and PTR_ERR on it was meaningless).
		 */
		if (IS_ERR(afe_priv->clocks[i])) {
			dev_warn(afe->dev, "%s devm_clk_get %s fail\n",
				 __func__, aud_clks[i]);
			return PTR_ERR(afe_priv->clocks[i]);
		}
	}

	return 0;
}

/*
 * Bring up the a1sys, a2sys and AFE clock domains, unwinding on failure,
 * then switch on the AFE and program the power/ASM defaults.
 * Returns 0 on success, negative errno on failure.
 */
int mt2701_afe_enable_clock(struct mtk_base_afe *afe)
{
	int ret = 0;

	ret = mt2701_turn_on_a1sys_clock(afe);
	if (ret) {
		dev_err(afe->dev, "%s turn_on_a1sys_clock fail %d\n",
			__func__, ret);
		return ret;
	}

	ret = mt2701_turn_on_a2sys_clock(afe);
	if (ret) {
		dev_err(afe->dev, "%s turn_on_a2sys_clock fail %d\n",
			__func__, ret);
		mt2701_turn_off_a1sys_clock(afe);
		return ret;
	}

	ret = mt2701_turn_on_afe_clock(afe);
	if (ret) {
		dev_err(afe->dev, "%s turn_on_afe_clock fail %d\n",
			__func__, ret);
		mt2701_turn_off_a1sys_clock(afe);
		mt2701_turn_off_a2sys_clock(afe);
		return ret;
	}

	regmap_update_bits(afe->regmap, ASYS_TOP_CON,
			   AUDIO_TOP_CON0_A1SYS_A2SYS_ON,
			   AUDIO_TOP_CON0_A1SYS_A2SYS_ON);
	regmap_update_bits(afe->regmap, AFE_DAC_CON0,
			   AFE_DAC_CON0_AFE_ON,
			   AFE_DAC_CON0_AFE_ON);
	regmap_write(afe->regmap, PWR2_TOP_CON,
		     PWR2_TOP_CON_INIT_VAL);
	regmap_write(afe->regmap, PWR1_ASM_CON1,
		     PWR1_ASM_CON1_INIT_VAL);
	regmap_write(afe->regmap, PWR2_ASM_CON1,
		     PWR2_ASM_CON1_INIT_VAL);

	return 0;
}

/* Reverse of mt2701_afe_enable_clock(): switch the AFE off and release
 * all three clock domains. */
void mt2701_afe_disable_clock(struct mtk_base_afe *afe)
{
	mt2701_turn_off_afe_clock(afe);
	mt2701_turn_off_a1sys_clock(afe);
	mt2701_turn_off_a2sys_clock(afe);
	regmap_update_bits(afe->regmap, ASYS_TOP_CON,
			   AUDIO_TOP_CON0_A1SYS_A2SYS_ON, 0);
	regmap_update_bits(afe->regmap, AFE_DAC_CON0,
			   AFE_DAC_CON0_AFE_ON, 0);
}

/*
 * Enable the 48 kHz (a1sys) domain: mux1 -> aud1 PLL, set the mux1
 * divider rate, gate on 48k timing and infra audio.
 *
 * FIX: on clk_prepare_enable() failure the old code jumped to the label
 * that *disabled the very clock that failed to enable*, unbalancing its
 * enable count.  The unwind ladder now only releases clocks that were
 * actually enabled.
 */
int mt2701_turn_on_a1sys_clock(struct mtk_base_afe *afe)
{
	struct mt2701_afe_private *afe_priv = afe->platform_priv;
	int ret = 0;

	/* Set Mux */
	ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);
	if (ret) {
		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
			__func__, aud_clks[MT2701_AUD_AUD_MUX1_SEL], ret);
		return ret;	/* nothing enabled yet, nothing to unwind */
	}

	ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL],
			     afe_priv->clocks[MT2701_AUD_AUD1PLL_98M]);
	if (ret) {
		dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
			__func__, aud_clks[MT2701_AUD_AUD_MUX1_SEL],
			aud_clks[MT2701_AUD_AUD1PLL_98M], ret);
		goto err_mux1_sel;
	}

	/* Set Divider */
	ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV]);
	if (ret) {
		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
			__func__,
			aud_clks[MT2701_AUD_AUD_MUX1_DIV],
			ret);
		goto err_mux1_sel;
	}

	ret = clk_set_rate(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV],
			   MT2701_AUD_AUD_MUX1_DIV_RATE);
	if (ret) {
		dev_err(afe->dev, "%s clk_set_parent %s-%d fail %d\n",
			__func__, aud_clks[MT2701_AUD_AUD_MUX1_DIV],
			MT2701_AUD_AUD_MUX1_DIV_RATE, ret);
		goto err_mux1_div;
	}

	/* Enable clock gate */
	ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_48K_TIMING]);
	if (ret) {
		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
			__func__, aud_clks[MT2701_AUD_AUD_48K_TIMING], ret);
		goto err_mux1_div;
	}

	/* Enable infra audio */
	ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
	if (ret) {
		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
			__func__, aud_clks[MT2701_AUD_INFRA_SYS_AUDIO], ret);
		goto err_48k_timing;
	}

	return 0;

err_48k_timing:
	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_48K_TIMING]);
err_mux1_div:
	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV]);
err_mux1_sel:
	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);

	return ret;
}

/* Release the clocks taken by mt2701_turn_on_a1sys_clock(). */
void mt2701_turn_off_a1sys_clock(struct mtk_base_afe *afe)
{
	struct mt2701_afe_private *afe_priv = afe->platform_priv;

	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_48K_TIMING]);
	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV]);
	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);
}

/*
 * Enable the 44.1 kHz (a2sys) domain: mux2 -> aud2 PLL, set the mux2
 * divider rate, gate on 44k timing and infra audio.
 *
 * FIX: same unwind-ladder correction as mt2701_turn_on_a1sys_clock() --
 * a clock whose clk_prepare_enable() failed is no longer disabled.
 */
int mt2701_turn_on_a2sys_clock(struct mtk_base_afe *afe)
{
	struct mt2701_afe_private *afe_priv = afe->platform_priv;
	int ret = 0;

	/* Set Mux */
	ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);
	if (ret) {
		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
			__func__, aud_clks[MT2701_AUD_AUD_MUX2_SEL], ret);
		return ret;	/* nothing enabled yet, nothing to unwind */
	}

	ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL],
			     afe_priv->clocks[MT2701_AUD_AUD2PLL_90M]);
	if (ret) {
		dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
			__func__, aud_clks[MT2701_AUD_AUD_MUX2_SEL],
			aud_clks[MT2701_AUD_AUD2PLL_90M], ret);
		goto err_mux2_sel;
	}

	/* Set Divider */
	ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV]);
	if (ret) {
		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
			__func__, aud_clks[MT2701_AUD_AUD_MUX2_DIV], ret);
		goto err_mux2_sel;
	}

	ret = clk_set_rate(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV],
			   MT2701_AUD_AUD_MUX2_DIV_RATE);
	if (ret) {
		dev_err(afe->dev, "%s clk_set_parent %s-%d fail %d\n",
			__func__, aud_clks[MT2701_AUD_AUD_MUX2_DIV],
			MT2701_AUD_AUD_MUX2_DIV_RATE, ret);
		goto err_mux2_div;
	}

	/* Enable clock gate */
	ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_44K_TIMING]);
	if (ret) {
		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
			__func__, aud_clks[MT2701_AUD_AUD_44K_TIMING], ret);
		goto err_mux2_div;
	}

	/* Enable infra audio */
	ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
	if (ret) {
		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
			__func__, aud_clks[MT2701_AUD_INFRA_SYS_AUDIO], ret);
		goto err_44k_timing;
	}

	return 0;

err_44k_timing:
	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_44K_TIMING]);
err_mux2_div:
	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV]);
err_mux2_sel:
	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);

	return ret;
}

/* Release the clocks taken by mt2701_turn_on_a2sys_clock(). */
void mt2701_turn_off_a2sys_clock(struct mtk_base_afe *afe)
{
	struct mt2701_afe_private *afe_priv = afe->platform_priv;

	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_44K_TIMING]);
	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV]);
	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);
}

/*
 * Enable the AFE bus clocks (infra, audintbus, ASM H/M muxes), reparent
 * them, then clear the power-down bits in AUDIO_TOP_CON0/4.
 *
 * FIX: same unwind-ladder correction as above -- each error path now
 * releases only the clocks that were successfully enabled before the
 * failure point.
 */
int mt2701_turn_on_afe_clock(struct mtk_base_afe *afe)
{
	struct mt2701_afe_private *afe_priv = afe->platform_priv;
	int ret;

	/* enable INFRA_SYS */
	ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
	if (ret) {
		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
			__func__, aud_clks[MT2701_AUD_INFRA_SYS_AUDIO], ret);
		return ret;	/* nothing enabled yet, nothing to unwind */
	}

	/* Set MT2701_AUD_AUDINTBUS to MT2701_AUD_SYSPLL1_D4 */
	ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUDINTBUS]);
	if (ret) {
		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
			__func__, aud_clks[MT2701_AUD_AUDINTBUS], ret);
		goto err_infra;
	}

	ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_AUDINTBUS],
			     afe_priv->clocks[MT2701_AUD_SYSPLL1_D4]);
	if (ret) {
		dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
			__func__, aud_clks[MT2701_AUD_AUDINTBUS],
			aud_clks[MT2701_AUD_SYSPLL1_D4], ret);
		goto err_audintbus;
	}

	/* Set MT2701_AUD_ASM_H_SEL to MT2701_AUD_UNIVPLL2_D2 */
	ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_ASM_H_SEL]);
	if (ret) {
		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
			__func__, aud_clks[MT2701_AUD_ASM_H_SEL], ret);
		goto err_audintbus;
	}

	ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_ASM_H_SEL],
			     afe_priv->clocks[MT2701_AUD_UNIVPLL2_D2]);
	if (ret) {
		dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
			__func__, aud_clks[MT2701_AUD_ASM_H_SEL],
			aud_clks[MT2701_AUD_UNIVPLL2_D2], ret);
		goto err_asm_h;
	}

	/* Set MT2701_AUD_ASM_M_SEL to MT2701_AUD_UNIVPLL2_D4 */
	ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_ASM_M_SEL]);
	if (ret) {
		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
			__func__, aud_clks[MT2701_AUD_ASM_M_SEL], ret);
		goto err_asm_h;
	}

	ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_ASM_M_SEL],
			     afe_priv->clocks[MT2701_AUD_UNIVPLL2_D4]);
	if (ret) {
		dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
			__func__, aud_clks[MT2701_AUD_ASM_M_SEL],
			aud_clks[MT2701_AUD_UNIVPLL2_D4], ret);
		goto err_asm_m;
	}

	regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
			   AUDIO_TOP_CON0_PDN_AFE, 0);
	regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
			   AUDIO_TOP_CON0_PDN_APLL_CK, 0);
	regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
			   AUDIO_TOP_CON4_PDN_A1SYS, 0);
	regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
			   AUDIO_TOP_CON4_PDN_A2SYS, 0);
	regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
			   AUDIO_TOP_CON4_PDN_AFE_CONN, 0);

	return 0;

err_asm_m:
	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_M_SEL]);
err_asm_h:
	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_H_SEL]);
err_audintbus:
	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUDINTBUS]);
err_infra:
	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);

	return ret;
}

/* Reverse of mt2701_turn_on_afe_clock(): set the power-down bits again
 * and release the bus clocks. */
void mt2701_turn_off_afe_clock(struct mtk_base_afe *afe)
{
	struct mt2701_afe_private *afe_priv = afe->platform_priv;

	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUDINTBUS]);
	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_H_SEL]);
	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_M_SEL]);

	regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
			   AUDIO_TOP_CON0_PDN_AFE, AUDIO_TOP_CON0_PDN_AFE);
	regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
			   AUDIO_TOP_CON0_PDN_APLL_CK,
			   AUDIO_TOP_CON0_PDN_APLL_CK);
	regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
			   AUDIO_TOP_CON4_PDN_A1SYS, AUDIO_TOP_CON4_PDN_A1SYS);
	regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
			   AUDIO_TOP_CON4_PDN_A2SYS, AUDIO_TOP_CON4_PDN_A2SYS);
	regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
			   AUDIO_TOP_CON4_PDN_AFE_CONN,
			   AUDIO_TOP_CON4_PDN_AFE_CONN);
}

/*
 * Configure I2S MCLK @id: parent its Kx_SRC mux on @domain (0 = a1sys
 * mux1, else a2sys mux2) and set its Kx_SRC divider to @mclk Hz.
 * Errors are logged but not propagated (void return kept for
 * interface compatibility).
 */
void mt2701_mclk_configuration(struct mtk_base_afe *afe, int id, int domain,
			       int mclk)
{
	struct mt2701_afe_private *afe_priv = afe->platform_priv;
	int ret;
	int aud_src_div_id = MT2701_AUD_AUD_K1_SRC_DIV + id;
	int aud_src_clk_id = MT2701_AUD_AUD_K1_SRC_SEL + id;

	/* Set MCLK Kx_SRC_SEL(domain) */
	ret = clk_prepare_enable(afe_priv->clocks[aud_src_clk_id]);
	if (ret)
		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
			__func__, aud_clks[aud_src_clk_id], ret);

	if (domain == 0) {
		ret = clk_set_parent(afe_priv->clocks[aud_src_clk_id],
				     afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);
		if (ret)
			dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
				__func__, aud_clks[aud_src_clk_id],
				aud_clks[MT2701_AUD_AUD_MUX1_SEL], ret);
	} else {
		ret = clk_set_parent(afe_priv->clocks[aud_src_clk_id],
				     afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);
		if (ret)
			dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
				__func__, aud_clks[aud_src_clk_id],
				aud_clks[MT2701_AUD_AUD_MUX2_SEL], ret);
	}
	clk_disable_unprepare(afe_priv->clocks[aud_src_clk_id]);

	/* Set MCLK Kx_SRC_DIV(divider) */
	ret = clk_prepare_enable(afe_priv->clocks[aud_src_div_id]);
	if (ret)
		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
			__func__, aud_clks[aud_src_div_id], ret);

	ret = clk_set_rate(afe_priv->clocks[aud_src_div_id], mclk);
	if (ret)
		dev_err(afe->dev, "%s clk_set_rate %s-%d fail %d\n",
			__func__, aud_clks[aud_src_div_id], mclk, ret);
	clk_disable_unprepare(afe_priv->clocks[aud_src_div_id]);
}

MODULE_DESCRIPTION("MT2701 afe clock control");
MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>");
MODULE_LICENSE("GPL v2");
gpl-2.0
GuojianZhou/linux-yocto-3.14
drivers/mtd/nand/bf5xx_nand.c
243
20476
/* linux/drivers/mtd/nand/bf5xx_nand.c * * Copyright 2006-2008 Analog Devices Inc. * http://blackfin.uclinux.org/ * Bryan Wu <bryan.wu@analog.com> * * Blackfin BF5xx on-chip NAND flash controller driver * * Derived from drivers/mtd/nand/s3c2410.c * Copyright (c) 2007 Ben Dooks <ben@simtec.co.uk> * * Derived from drivers/mtd/nand/cafe.c * Copyright © 2006 Red Hat, Inc. * Copyright © 2006 David Woodhouse <dwmw2@infradead.org> * * Changelog: * 12-Jun-2007 Bryan Wu: Initial version * 18-Jul-2007 Bryan Wu: * - ECC_HW and ECC_SW supported * - DMA supported in ECC_HW * - YAFFS tested as rootfs in both ECC_HW and ECC_SW * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/types.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/bitops.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <asm/blackfin.h> #include <asm/dma.h> #include <asm/cacheflush.h> #include <asm/nand.h> #include <asm/portmux.h> #define DRV_NAME "bf5xx-nand" #define DRV_VERSION "1.2" #define DRV_AUTHOR "Bryan Wu <bryan.wu@analog.com>" #define DRV_DESC "BF5xx on-chip NAND FLash Controller Driver" /* NFC_STAT Masks */ #define NBUSY 0x01 /* Not Busy */ #define WB_FULL 0x02 /* Write Buffer Full */ #define PG_WR_STAT 0x04 /* Page Write Pending */ #define PG_RD_STAT 0x08 /* Page Read Pending */ #define WB_EMPTY 0x10 /* Write Buffer Empty */ /* NFC_IRQSTAT Masks */ #define NBUSYIRQ 0x01 /* Not Busy IRQ */ #define WB_OVF 0x02 /* Write Buffer Overflow */ #define WB_EDGE 0x04 /* Write Buffer Edge Detect */ #define RD_RDY 0x08 /* Read Data Ready */ #define WR_DONE 0x10 /* Page Write Done */ /* NFC_RST Masks */ #define ECC_RST 0x01 /* ECC (and NFC counters) Reset */ /* NFC_PGCTL Masks */ #define PG_RD_START 0x01 /* Page Read Start */ #define PG_WR_START 0x02 /* Page Write Start */ #ifdef CONFIG_MTD_NAND_BF5XX_HWECC static int hardware_ecc = 1; #else static int hardware_ecc; #endif static const unsigned short bfin_nfc_pin_req[] = {P_NAND_CE, P_NAND_RB, P_NAND_D0, P_NAND_D1, P_NAND_D2, P_NAND_D3, P_NAND_D4, P_NAND_D5, P_NAND_D6, P_NAND_D7, P_NAND_WE, P_NAND_RE, P_NAND_CLE, P_NAND_ALE, 0}; #ifdef 
CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC static struct nand_ecclayout bootrom_ecclayout = { .eccbytes = 24, .eccpos = { 0x8 * 0, 0x8 * 0 + 1, 0x8 * 0 + 2, 0x8 * 1, 0x8 * 1 + 1, 0x8 * 1 + 2, 0x8 * 2, 0x8 * 2 + 1, 0x8 * 2 + 2, 0x8 * 3, 0x8 * 3 + 1, 0x8 * 3 + 2, 0x8 * 4, 0x8 * 4 + 1, 0x8 * 4 + 2, 0x8 * 5, 0x8 * 5 + 1, 0x8 * 5 + 2, 0x8 * 6, 0x8 * 6 + 1, 0x8 * 6 + 2, 0x8 * 7, 0x8 * 7 + 1, 0x8 * 7 + 2 }, .oobfree = { { 0x8 * 0 + 3, 5 }, { 0x8 * 1 + 3, 5 }, { 0x8 * 2 + 3, 5 }, { 0x8 * 3 + 3, 5 }, { 0x8 * 4 + 3, 5 }, { 0x8 * 5 + 3, 5 }, { 0x8 * 6 + 3, 5 }, { 0x8 * 7 + 3, 5 }, } }; #endif /* * Data structures for bf5xx nand flash controller driver */ /* bf5xx nand info */ struct bf5xx_nand_info { /* mtd info */ struct nand_hw_control controller; struct mtd_info mtd; struct nand_chip chip; /* platform info */ struct bf5xx_nand_platform *platform; /* device info */ struct device *device; /* DMA stuff */ struct completion dma_completion; }; /* * Conversion functions */ static struct bf5xx_nand_info *mtd_to_nand_info(struct mtd_info *mtd) { return container_of(mtd, struct bf5xx_nand_info, mtd); } static struct bf5xx_nand_info *to_nand_info(struct platform_device *pdev) { return platform_get_drvdata(pdev); } static struct bf5xx_nand_platform *to_nand_plat(struct platform_device *pdev) { return dev_get_platdata(&pdev->dev); } /* * struct nand_chip interface function pointers */ /* * bf5xx_nand_hwcontrol * * Issue command and address cycles to the chip */ static void bf5xx_nand_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) { if (cmd == NAND_CMD_NONE) return; while (bfin_read_NFC_STAT() & WB_FULL) cpu_relax(); if (ctrl & NAND_CLE) bfin_write_NFC_CMD(cmd); else if (ctrl & NAND_ALE) bfin_write_NFC_ADDR(cmd); SSYNC(); } /* * bf5xx_nand_devready() * * returns 0 if the nand is busy, 1 if it is ready */ static int bf5xx_nand_devready(struct mtd_info *mtd) { unsigned short val = bfin_read_NFC_STAT(); if ((val & NBUSY) == NBUSY) return 1; else return 0; } /* * ECC functions * These 
allow the bf5xx to use the controller's ECC * generator block to ECC the data as it passes through */ /* * ECC error correction function */ static int bf5xx_nand_correct_data_256(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc) { struct bf5xx_nand_info *info = mtd_to_nand_info(mtd); u32 syndrome[5]; u32 calced, stored; int i; unsigned short failing_bit, failing_byte; u_char data; calced = calc_ecc[0] | (calc_ecc[1] << 8) | (calc_ecc[2] << 16); stored = read_ecc[0] | (read_ecc[1] << 8) | (read_ecc[2] << 16); syndrome[0] = (calced ^ stored); /* * syndrome 0: all zero * No error in data * No action */ if (!syndrome[0] || !calced || !stored) return 0; /* * sysdrome 0: only one bit is one * ECC data was incorrect * No action */ if (hweight32(syndrome[0]) == 1) { dev_err(info->device, "ECC data was incorrect!\n"); return 1; } syndrome[1] = (calced & 0x7FF) ^ (stored & 0x7FF); syndrome[2] = (calced & 0x7FF) ^ ((calced >> 11) & 0x7FF); syndrome[3] = (stored & 0x7FF) ^ ((stored >> 11) & 0x7FF); syndrome[4] = syndrome[2] ^ syndrome[3]; for (i = 0; i < 5; i++) dev_info(info->device, "syndrome[%d] 0x%08x\n", i, syndrome[i]); dev_info(info->device, "calced[0x%08x], stored[0x%08x]\n", calced, stored); /* * sysdrome 0: exactly 11 bits are one, each parity * and parity' pair is 1 & 0 or 0 & 1. 
* 1-bit correctable error * Correct the error */ if (hweight32(syndrome[0]) == 11 && syndrome[4] == 0x7FF) { dev_info(info->device, "1-bit correctable error, correct it.\n"); dev_info(info->device, "syndrome[1] 0x%08x\n", syndrome[1]); failing_bit = syndrome[1] & 0x7; failing_byte = syndrome[1] >> 0x3; data = *(dat + failing_byte); data = data ^ (0x1 << failing_bit); *(dat + failing_byte) = data; return 0; } /* * sysdrome 0: random data * More than 1-bit error, non-correctable error * Discard data, mark bad block */ dev_err(info->device, "More than 1-bit error, non-correctable error.\n"); dev_err(info->device, "Please discard data, mark bad block\n"); return 1; } static int bf5xx_nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc) { struct nand_chip *chip = mtd->priv; int ret; ret = bf5xx_nand_correct_data_256(mtd, dat, read_ecc, calc_ecc); /* If ecc size is 512, correct second 256 bytes */ if (chip->ecc.size == 512) { dat += 256; read_ecc += 3; calc_ecc += 3; ret |= bf5xx_nand_correct_data_256(mtd, dat, read_ecc, calc_ecc); } return ret; } static void bf5xx_nand_enable_hwecc(struct mtd_info *mtd, int mode) { return; } static int bf5xx_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code) { struct bf5xx_nand_info *info = mtd_to_nand_info(mtd); struct nand_chip *chip = mtd->priv; u16 ecc0, ecc1; u32 code[2]; u8 *p; /* first 3 bytes ECC code for 256 page size */ ecc0 = bfin_read_NFC_ECC0(); ecc1 = bfin_read_NFC_ECC1(); code[0] = (ecc0 & 0x7ff) | ((ecc1 & 0x7ff) << 11); dev_dbg(info->device, "returning ecc 0x%08x\n", code[0]); p = (u8 *) code; memcpy(ecc_code, p, 3); /* second 3 bytes ECC code for 512 ecc size */ if (chip->ecc.size == 512) { ecc0 = bfin_read_NFC_ECC2(); ecc1 = bfin_read_NFC_ECC3(); code[1] = (ecc0 & 0x7ff) | ((ecc1 & 0x7ff) << 11); /* second 3 bytes in ecc_code for second 256 * bytes of 512 page size */ p = (u8 *) (code + 1); memcpy((ecc_code + 3), p, 3); dev_dbg(info->device, "returning ecc 
0x%08x\n", code[1]); } return 0; } /* * PIO mode for buffer writing and reading */ static void bf5xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) { int i; unsigned short val; /* * Data reads are requested by first writing to NFC_DATA_RD * and then reading back from NFC_READ. */ for (i = 0; i < len; i++) { while (bfin_read_NFC_STAT() & WB_FULL) cpu_relax(); /* Contents do not matter */ bfin_write_NFC_DATA_RD(0x0000); SSYNC(); while ((bfin_read_NFC_IRQSTAT() & RD_RDY) != RD_RDY) cpu_relax(); buf[i] = bfin_read_NFC_READ(); val = bfin_read_NFC_IRQSTAT(); val |= RD_RDY; bfin_write_NFC_IRQSTAT(val); SSYNC(); } } static uint8_t bf5xx_nand_read_byte(struct mtd_info *mtd) { uint8_t val; bf5xx_nand_read_buf(mtd, &val, 1); return val; } static void bf5xx_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) { int i; for (i = 0; i < len; i++) { while (bfin_read_NFC_STAT() & WB_FULL) cpu_relax(); bfin_write_NFC_DATA_WR(buf[i]); SSYNC(); } } static void bf5xx_nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len) { int i; u16 *p = (u16 *) buf; len >>= 1; /* * Data reads are requested by first writing to NFC_DATA_RD * and then reading back from NFC_READ. 
*/ bfin_write_NFC_DATA_RD(0x5555); SSYNC(); for (i = 0; i < len; i++) p[i] = bfin_read_NFC_READ(); } static void bf5xx_nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len) { int i; u16 *p = (u16 *) buf; len >>= 1; for (i = 0; i < len; i++) bfin_write_NFC_DATA_WR(p[i]); SSYNC(); } /* * DMA functions for buffer writing and reading */ static irqreturn_t bf5xx_nand_dma_irq(int irq, void *dev_id) { struct bf5xx_nand_info *info = dev_id; clear_dma_irqstat(CH_NFC); disable_dma(CH_NFC); complete(&info->dma_completion); return IRQ_HANDLED; } static void bf5xx_nand_dma_rw(struct mtd_info *mtd, uint8_t *buf, int is_read) { struct bf5xx_nand_info *info = mtd_to_nand_info(mtd); struct nand_chip *chip = mtd->priv; unsigned short val; dev_dbg(info->device, " mtd->%p, buf->%p, is_read %d\n", mtd, buf, is_read); /* * Before starting a dma transfer, be sure to invalidate/flush * the cache over the address range of your DMA buffer to * prevent cache coherency problems. Otherwise very subtle bugs * can be introduced to your driver. */ if (is_read) invalidate_dcache_range((unsigned int)buf, (unsigned int)(buf + chip->ecc.size)); else flush_dcache_range((unsigned int)buf, (unsigned int)(buf + chip->ecc.size)); /* * This register must be written before each page is * transferred to generate the correct ECC register * values. 
*/ bfin_write_NFC_RST(ECC_RST); SSYNC(); while (bfin_read_NFC_RST() & ECC_RST) cpu_relax(); disable_dma(CH_NFC); clear_dma_irqstat(CH_NFC); /* setup DMA register with Blackfin DMA API */ set_dma_config(CH_NFC, 0x0); set_dma_start_addr(CH_NFC, (unsigned long) buf); /* The DMAs have different size on BF52x and BF54x */ #ifdef CONFIG_BF52x set_dma_x_count(CH_NFC, (chip->ecc.size >> 1)); set_dma_x_modify(CH_NFC, 2); val = DI_EN | WDSIZE_16; #endif #ifdef CONFIG_BF54x set_dma_x_count(CH_NFC, (chip->ecc.size >> 2)); set_dma_x_modify(CH_NFC, 4); val = DI_EN | WDSIZE_32; #endif /* setup write or read operation */ if (is_read) val |= WNR; set_dma_config(CH_NFC, val); enable_dma(CH_NFC); /* Start PAGE read/write operation */ if (is_read) bfin_write_NFC_PGCTL(PG_RD_START); else bfin_write_NFC_PGCTL(PG_WR_START); wait_for_completion(&info->dma_completion); } static void bf5xx_nand_dma_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) { struct bf5xx_nand_info *info = mtd_to_nand_info(mtd); struct nand_chip *chip = mtd->priv; dev_dbg(info->device, "mtd->%p, buf->%p, int %d\n", mtd, buf, len); if (len == chip->ecc.size) bf5xx_nand_dma_rw(mtd, buf, 1); else bf5xx_nand_read_buf(mtd, buf, len); } static void bf5xx_nand_dma_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) { struct bf5xx_nand_info *info = mtd_to_nand_info(mtd); struct nand_chip *chip = mtd->priv; dev_dbg(info->device, "mtd->%p, buf->%p, len %d\n", mtd, buf, len); if (len == chip->ecc.size) bf5xx_nand_dma_rw(mtd, (uint8_t *)buf, 0); else bf5xx_nand_write_buf(mtd, buf, len); } static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf, int oob_required, int page) { bf5xx_nand_read_buf(mtd, buf, mtd->writesize); bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize); return 0; } static int bf5xx_nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf, int oob_required) { bf5xx_nand_write_buf(mtd, buf, mtd->writesize); 
bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize); return 0; } /* * System initialization functions */ static int bf5xx_nand_dma_init(struct bf5xx_nand_info *info) { int ret; /* Do not use dma */ if (!hardware_ecc) return 0; init_completion(&info->dma_completion); /* Request NFC DMA channel */ ret = request_dma(CH_NFC, "BF5XX NFC driver"); if (ret < 0) { dev_err(info->device, " unable to get DMA channel\n"); return ret; } #ifdef CONFIG_BF54x /* Setup DMAC1 channel mux for NFC which shared with SDH */ bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() & ~1); SSYNC(); #endif set_dma_callback(CH_NFC, bf5xx_nand_dma_irq, info); /* Turn off the DMA channel first */ disable_dma(CH_NFC); return 0; } static void bf5xx_nand_dma_remove(struct bf5xx_nand_info *info) { /* Free NFC DMA channel */ if (hardware_ecc) free_dma(CH_NFC); } /* * BF5XX NFC hardware initialization * - pin mux setup * - clear interrupt status */ static int bf5xx_nand_hw_init(struct bf5xx_nand_info *info) { int err = 0; unsigned short val; struct bf5xx_nand_platform *plat = info->platform; /* setup NFC_CTL register */ dev_info(info->device, "data_width=%d, wr_dly=%d, rd_dly=%d\n", (plat->data_width ? 
16 : 8), plat->wr_dly, plat->rd_dly); val = (1 << NFC_PG_SIZE_OFFSET) | (plat->data_width << NFC_NWIDTH_OFFSET) | (plat->rd_dly << NFC_RDDLY_OFFSET) | (plat->wr_dly << NFC_WRDLY_OFFSET); dev_dbg(info->device, "NFC_CTL is 0x%04x\n", val); bfin_write_NFC_CTL(val); SSYNC(); /* clear interrupt status */ bfin_write_NFC_IRQMASK(0x0); SSYNC(); val = bfin_read_NFC_IRQSTAT(); bfin_write_NFC_IRQSTAT(val); SSYNC(); /* DMA initialization */ if (bf5xx_nand_dma_init(info)) err = -ENXIO; return err; } /* * Device management interface */ static int bf5xx_nand_add_partition(struct bf5xx_nand_info *info) { struct mtd_info *mtd = &info->mtd; struct mtd_partition *parts = info->platform->partitions; int nr = info->platform->nr_partitions; return mtd_device_register(mtd, parts, nr); } static int bf5xx_nand_remove(struct platform_device *pdev) { struct bf5xx_nand_info *info = to_nand_info(pdev); /* first thing we need to do is release all our mtds * and their partitions, then go through freeing the * resources used */ nand_release(&info->mtd); peripheral_free_list(bfin_nfc_pin_req); bf5xx_nand_dma_remove(info); /* free the common resources */ kfree(info); return 0; } static int bf5xx_nand_scan(struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; int ret; ret = nand_scan_ident(mtd, 1, NULL); if (ret) return ret; if (hardware_ecc) { /* * for nand with page size > 512B, think it as several sections with 512B */ if (likely(mtd->writesize >= 512)) { chip->ecc.size = 512; chip->ecc.bytes = 6; chip->ecc.strength = 2; } else { chip->ecc.size = 256; chip->ecc.bytes = 3; chip->ecc.strength = 1; bfin_write_NFC_CTL(bfin_read_NFC_CTL() & ~(1 << NFC_PG_SIZE_OFFSET)); SSYNC(); } } return nand_scan_tail(mtd); } /* * bf5xx_nand_probe * * called by device layer when it finds a device matching * one our driver can handled. 
This code checks to see if * it can allocate all necessary resources then calls the * nand layer to look for devices */ static int bf5xx_nand_probe(struct platform_device *pdev) { struct bf5xx_nand_platform *plat = to_nand_plat(pdev); struct bf5xx_nand_info *info = NULL; struct nand_chip *chip = NULL; struct mtd_info *mtd = NULL; int err = 0; dev_dbg(&pdev->dev, "(%p)\n", pdev); if (!plat) { dev_err(&pdev->dev, "no platform specific information\n"); return -EINVAL; } if (peripheral_request_list(bfin_nfc_pin_req, DRV_NAME)) { dev_err(&pdev->dev, "requesting Peripherals failed\n"); return -EFAULT; } info = kzalloc(sizeof(*info), GFP_KERNEL); if (info == NULL) { err = -ENOMEM; goto out_err_kzalloc; } platform_set_drvdata(pdev, info); spin_lock_init(&info->controller.lock); init_waitqueue_head(&info->controller.wq); info->device = &pdev->dev; info->platform = plat; /* initialise chip data struct */ chip = &info->chip; if (plat->data_width) chip->options |= NAND_BUSWIDTH_16; chip->options |= NAND_CACHEPRG | NAND_SKIP_BBTSCAN; chip->read_buf = (plat->data_width) ? bf5xx_nand_read_buf16 : bf5xx_nand_read_buf; chip->write_buf = (plat->data_width) ? 
bf5xx_nand_write_buf16 : bf5xx_nand_write_buf; chip->read_byte = bf5xx_nand_read_byte; chip->cmd_ctrl = bf5xx_nand_hwcontrol; chip->dev_ready = bf5xx_nand_devready; chip->priv = &info->mtd; chip->controller = &info->controller; chip->IO_ADDR_R = (void __iomem *) NFC_READ; chip->IO_ADDR_W = (void __iomem *) NFC_DATA_WR; chip->chip_delay = 0; /* initialise mtd info data struct */ mtd = &info->mtd; mtd->priv = chip; mtd->owner = THIS_MODULE; /* initialise the hardware */ err = bf5xx_nand_hw_init(info); if (err) goto out_err_hw_init; /* setup hardware ECC data struct */ if (hardware_ecc) { #ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC chip->ecc.layout = &bootrom_ecclayout; #endif chip->read_buf = bf5xx_nand_dma_read_buf; chip->write_buf = bf5xx_nand_dma_write_buf; chip->ecc.calculate = bf5xx_nand_calculate_ecc; chip->ecc.correct = bf5xx_nand_correct_data; chip->ecc.mode = NAND_ECC_HW; chip->ecc.hwctl = bf5xx_nand_enable_hwecc; chip->ecc.read_page_raw = bf5xx_nand_read_page_raw; chip->ecc.write_page_raw = bf5xx_nand_write_page_raw; } else { chip->ecc.mode = NAND_ECC_SOFT; } /* scan hardware nand chip and setup mtd info data struct */ if (bf5xx_nand_scan(mtd)) { err = -ENXIO; goto out_err_nand_scan; } #ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC chip->badblockpos = 63; #endif /* add NAND partition */ bf5xx_nand_add_partition(info); dev_dbg(&pdev->dev, "initialised ok\n"); return 0; out_err_nand_scan: bf5xx_nand_dma_remove(info); out_err_hw_init: kfree(info); out_err_kzalloc: peripheral_free_list(bfin_nfc_pin_req); return err; } /* PM Support */ #ifdef CONFIG_PM static int bf5xx_nand_suspend(struct platform_device *dev, pm_message_t pm) { struct bf5xx_nand_info *info = platform_get_drvdata(dev); return 0; } static int bf5xx_nand_resume(struct platform_device *dev) { struct bf5xx_nand_info *info = platform_get_drvdata(dev); return 0; } #else #define bf5xx_nand_suspend NULL #define bf5xx_nand_resume NULL #endif /* driver device registration */ static struct platform_driver 
bf5xx_nand_driver = { .probe = bf5xx_nand_probe, .remove = bf5xx_nand_remove, .suspend = bf5xx_nand_suspend, .resume = bf5xx_nand_resume, .driver = { .name = DRV_NAME, .owner = THIS_MODULE, }, }; module_platform_driver(bf5xx_nand_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR(DRV_AUTHOR); MODULE_DESCRIPTION(DRV_DESC); MODULE_ALIAS("platform:" DRV_NAME);
gpl-2.0
virt2real/linux-3.10
kernel/trace/trace_sched_wakeup.c
243
17356
/* * trace task wakeup timings * * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> * * Based on code from the latency_tracer, that is: * * Copyright (C) 2004-2006 Ingo Molnar * Copyright (C) 2004 Nadia Yvette Chambers */ #include <linux/module.h> #include <linux/fs.h> #include <linux/debugfs.h> #include <linux/kallsyms.h> #include <linux/uaccess.h> #include <linux/ftrace.h> #include <linux/sched/rt.h> #include <linux/sched/deadline.h> #include <trace/events/sched.h> #include "trace.h" static struct trace_array *wakeup_trace; static int __read_mostly tracer_enabled; static struct task_struct *wakeup_task; static int wakeup_cpu; static int wakeup_current_cpu; static unsigned wakeup_prio = -1; static int wakeup_rt; static int wakeup_dl; static int tracing_dl = 0; static arch_spinlock_t wakeup_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; static void wakeup_reset(struct trace_array *tr); static void __wakeup_reset(struct trace_array *tr); static int wakeup_graph_entry(struct ftrace_graph_ent *trace); static void wakeup_graph_return(struct ftrace_graph_ret *trace); static int save_flags; static bool function_enabled; #define TRACE_DISPLAY_GRAPH 1 static struct tracer_opt trace_opts[] = { #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* display latency trace as call graph */ { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) }, #endif { } /* Empty entry */ }; static struct tracer_flags tracer_flags = { .val = 0, .opts = trace_opts, }; #define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH) #ifdef CONFIG_FUNCTION_TRACER /* * Prologue for the wakeup function tracers. * * Returns 1 if it is OK to continue, and preemption * is disabled and data->disabled is incremented. * 0 if the trace is to be ignored, and preemption * is not disabled and data->disabled is * kept the same. * * Note, this function is also used outside this ifdef but * inside the #ifdef of the function graph tracer below. 
* This is OK, since the function graph tracer is * dependent on the function tracer. */ static int func_prolog_preempt_disable(struct trace_array *tr, struct trace_array_cpu **data, int *pc) { long disabled; int cpu; if (likely(!wakeup_task)) return 0; *pc = preempt_count(); preempt_disable_notrace(); cpu = raw_smp_processor_id(); if (cpu != wakeup_current_cpu) goto out_enable; *data = per_cpu_ptr(tr->trace_buffer.data, cpu); disabled = atomic_inc_return(&(*data)->disabled); if (unlikely(disabled != 1)) goto out; return 1; out: atomic_dec(&(*data)->disabled); out_enable: preempt_enable_notrace(); return 0; } /* * wakeup uses its own tracer function to keep the overhead down: */ static void wakeup_tracer_call(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct pt_regs *pt_regs) { struct trace_array *tr = wakeup_trace; struct trace_array_cpu *data; unsigned long flags; int pc; if (!func_prolog_preempt_disable(tr, &data, &pc)) return; local_irq_save(flags); trace_function(tr, ip, parent_ip, flags, pc); local_irq_restore(flags); atomic_dec(&data->disabled); preempt_enable_notrace(); } static struct ftrace_ops trace_ops __read_mostly = { .func = wakeup_tracer_call, .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE, }; #endif /* CONFIG_FUNCTION_TRACER */ static int register_wakeup_function(int graph, int set) { int ret; /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */ if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION))) return 0; if (graph) ret = register_ftrace_graph(&wakeup_graph_return, &wakeup_graph_entry); else ret = register_ftrace_function(&trace_ops); if (!ret) function_enabled = true; return ret; } static void unregister_wakeup_function(int graph) { if (!function_enabled) return; if (graph) unregister_ftrace_graph(); else unregister_ftrace_function(&trace_ops); function_enabled = false; } static void wakeup_function_set(int set) { if (set) register_wakeup_function(is_graph(), 1); else 
unregister_wakeup_function(is_graph()); } static int wakeup_flag_changed(struct tracer *tracer, u32 mask, int set) { if (mask & TRACE_ITER_FUNCTION) wakeup_function_set(set); return trace_keep_overwrite(tracer, mask, set); } static int start_func_tracer(int graph) { int ret; ret = register_wakeup_function(graph, 0); if (!ret && tracing_is_enabled()) tracer_enabled = 1; else tracer_enabled = 0; return ret; } static void stop_func_tracer(int graph) { tracer_enabled = 0; unregister_wakeup_function(graph); } #ifdef CONFIG_FUNCTION_GRAPH_TRACER static int wakeup_set_flag(u32 old_flags, u32 bit, int set) { if (!(bit & TRACE_DISPLAY_GRAPH)) return -EINVAL; if (!(is_graph() ^ set)) return 0; stop_func_tracer(!set); wakeup_reset(wakeup_trace); tracing_max_latency = 0; return start_func_tracer(set); } static int wakeup_graph_entry(struct ftrace_graph_ent *trace) { struct trace_array *tr = wakeup_trace; struct trace_array_cpu *data; unsigned long flags; int pc, ret = 0; if (!func_prolog_preempt_disable(tr, &data, &pc)) return 0; local_save_flags(flags); ret = __trace_graph_entry(tr, trace, flags, pc); atomic_dec(&data->disabled); preempt_enable_notrace(); return ret; } static void wakeup_graph_return(struct ftrace_graph_ret *trace) { struct trace_array *tr = wakeup_trace; struct trace_array_cpu *data; unsigned long flags; int pc; if (!func_prolog_preempt_disable(tr, &data, &pc)) return; local_save_flags(flags); __trace_graph_return(tr, trace, flags, pc); atomic_dec(&data->disabled); preempt_enable_notrace(); return; } static void wakeup_trace_open(struct trace_iterator *iter) { if (is_graph()) graph_trace_open(iter); } static void wakeup_trace_close(struct trace_iterator *iter) { if (iter->private) graph_trace_close(iter); } #define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \ TRACE_GRAPH_PRINT_ABS_TIME | \ TRACE_GRAPH_PRINT_DURATION) static enum print_line_t wakeup_print_line(struct trace_iterator *iter) { /* * In graph mode call the graph tracer output function, * 
otherwise go with the TRACE_FN event handler */ if (is_graph()) return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS); return TRACE_TYPE_UNHANDLED; } static void wakeup_print_header(struct seq_file *s) { if (is_graph()) print_graph_headers_flags(s, GRAPH_TRACER_FLAGS); else trace_default_header(s); } static void __trace_function(struct trace_array *tr, unsigned long ip, unsigned long parent_ip, unsigned long flags, int pc) { if (is_graph()) trace_graph_function(tr, ip, parent_ip, flags, pc); else trace_function(tr, ip, parent_ip, flags, pc); } #else #define __trace_function trace_function static int wakeup_set_flag(u32 old_flags, u32 bit, int set) { return -EINVAL; } static int wakeup_graph_entry(struct ftrace_graph_ent *trace) { return -1; } static enum print_line_t wakeup_print_line(struct trace_iterator *iter) { return TRACE_TYPE_UNHANDLED; } static void wakeup_graph_return(struct ftrace_graph_ret *trace) { } static void wakeup_trace_open(struct trace_iterator *iter) { } static void wakeup_trace_close(struct trace_iterator *iter) { } #ifdef CONFIG_FUNCTION_TRACER static void wakeup_print_header(struct seq_file *s) { trace_default_header(s); } #else static void wakeup_print_header(struct seq_file *s) { trace_latency_header(s); } #endif /* CONFIG_FUNCTION_TRACER */ #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ /* * Should this new latency be reported/recorded? 
*/

/*
 * Decide whether the measured wakeup latency @delta is worth reporting.
 * With a user-set tracing_thresh, anything at or above the threshold is
 * reported; otherwise only a new maximum latency is.
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}

/* Track which CPU the traced task migrates to while we wait for it to run. */
static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}

/*
 * sched_switch tracepoint probe: when the task the tracer is armed on
 * finally gets a CPU, compute the wakeup-to-schedule latency and, if it
 * is worth reporting, snapshot the trace via update_max_tr().
 */
static void notrace
probe_wakeup_sched_switch(void *ignore,
			  struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	if (!report_latency(delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

/*
 * Drop the reference on the currently traced task and disarm the tracer.
 * Caller must hold wakeup_lock with interrupts disabled.
 */
static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;
	tracing_dl = 0;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

/* Clear the trace buffers and re-arm the tracer for a new candidate task. */
static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(&tr->trace_buffer);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}

/*
 * sched_wakeup/sched_wakeup_new tracepoint probe: if @p is a better
 * candidate than the task currently traced (by scheduling class and
 * priority), arm the tracer on it and timestamp the wakeup.
 */
static void
probe_wakeup(void *ignore, struct task_struct *p, int success)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	/*
	 * Semantic is like this:
	 *  - wakeup tracer handles all tasks in the system, independently
	 *    from their scheduling class;
	 *  - wakeup_rt tracer handles tasks belonging to sched_dl and
	 *    sched_rt class;
	 *  - wakeup_dl handles tasks belonging to sched_dl class only.
	 */
	if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
	    (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || tracing_dl ||
	    (!dl_task(p) && p->prio >= wakeup_prio))
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	/*
	 * Once you start tracing a -deadline task, don't bother tracing
	 * another task until the first one wakes up.
	 */
	if (dl_task(p))
		tracing_dl = 1;
	else
		tracing_dl = 0;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (where as schedule is)
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

/* Register all tracepoint probes and start the function tracer. */
static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_migrate_task\n");
		/*
		 * NOTE(review): unlike the failure paths above, this one
		 * returns with the wakeup/wakeup_new/switch probes still
		 * registered — confirm this asymmetry is intentional.
		 */
		return;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	if (start_func_tracer(is_graph()))
		printk(KERN_ERR "failed to start wakeup tracer\n");

	return;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
}

/* Tear down everything start_wakeup_tracer() set up. */
static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	stop_func_tracer(is_graph());
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}

/*
 * Common init for all three wakeup tracer flavours: force the overwrite
 * and latency-format flags, reset the recorded maximum, then start.
 */
static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_flags = trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tracing_max_latency = 0;
	wakeup_trace = tr;
	start_wakeup_tracer(tr);
	return 0;
}

/* "wakeup": trace all tasks regardless of scheduling class. */
static int wakeup_tracer_init(struct trace_array *tr)
{
	wakeup_dl = 0;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

/* "wakeup_rt": trace only sched_rt and sched_dl tasks. */
static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	wakeup_dl = 0;
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}

/* "wakeup_dl": trace only sched_dl tasks. */
static int wakeup_dl_tracer_init(struct trace_array *tr)
{
	wakeup_dl = 1;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

/* Stop tracing and restore the trace flags saved at init time. */
static void wakeup_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flags		= &tracer_flags,
	.set_flag	= wakeup_set_flag,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.use_max_tr	= true,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.wait_pipe	= poll_wait_pipe,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flags		= &tracer_flags,
	.set_flag	= wakeup_set_flag,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.use_max_tr	= true,
};

static struct tracer wakeup_dl_tracer __read_mostly =
{
	.name		= "wakeup_dl",
	.init		= wakeup_dl_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.wait_pipe	= poll_wait_pipe,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flags		= &tracer_flags,
	.set_flag	= wakeup_set_flag,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.use_max_tr	= true,
};

/* Register all three tracer flavours at boot. */
__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_dl_tracer);
	if (ret)
		return ret;

	return 0;
}
core_initcall(init_wakeup_tracer);
gpl-2.0
pulsarkernel/kernel-enrc2b
arch/arm/mach-davinci/board-tnetv107x-evm.c
499
6586
/*
 * Texas Instruments TNETV107X EVM Board Support
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/ratelimit.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <linux/spi/spi.h>

#include <asm/mach/arch.h>
#include <asm/mach-types.h>

#include <mach/irqs.h>
#include <mach/edma.h>
#include <mach/mux.h>
#include <mach/cp_intc.h>
#include <mach/tnetv107x.h>

/* Board GPIO lines */
#define EVM_MMC_WP_GPIO		21
#define EVM_MMC_CD_GPIO		24
#define EVM_SPI_CS_GPIO		54

/*
 * Claim @gpio as an input line and return its number, or a negative
 * errno when the request fails.
 */
static int initialize_gpio(int gpio, char *desc)
{
	int ret;

	ret = gpio_request(gpio, desc);
	if (ret < 0) {
		pr_err_ratelimited("cannot open %s gpio\n", desc);
		return -ENOSYS;
	}
	gpio_direction_input(gpio);
	return gpio;
}

/*
 * MMC card-detect callback. The CD GPIO is requested lazily on the
 * first call; a failed request is cached as a negative value and
 * returned on every subsequent call. Returns 1 when the line reads low.
 */
static int mmc_get_cd(int index)
{
	static int gpio;

	if (!gpio)
		gpio = initialize_gpio(EVM_MMC_CD_GPIO, "mmc card detect");

	if (gpio < 0)
		return gpio;

	return gpio_get_value(gpio) ? 0 : 1;
}

/*
 * MMC write-protect callback; same lazy-request scheme as mmc_get_cd().
 * Returns 1 when the line reads high.
 */
static int mmc_get_ro(int index)
{
	static int gpio;

	if (!gpio)
		gpio = initialize_gpio(EVM_MMC_WP_GPIO, "mmc write protect");

	if (gpio < 0)
		return gpio;

	return gpio_get_value(gpio) ? 1 : 0;
}

static struct davinci_mmc_config mmc_config = {
	.get_cd		= mmc_get_cd,
	.get_ro		= mmc_get_ro,
	.wires		= 4,
	.max_freq	= 50000000,
	.caps		= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED,
	.version	= MMC_CTLR_VERSION_1,
};

/* Pin-mux tables consumed by davinci_cfg_reg_list(); -1 terminates. */
static const short sdio1_pins[] __initdata = {
	TNETV107X_SDIO1_CLK_1,		TNETV107X_SDIO1_CMD_1,
	TNETV107X_SDIO1_DATA0_1,	TNETV107X_SDIO1_DATA1_1,
	TNETV107X_SDIO1_DATA2_1,	TNETV107X_SDIO1_DATA3_1,
	TNETV107X_GPIO21,		TNETV107X_GPIO24,
	-1
};

static const short uart1_pins[] __initdata = {
	TNETV107X_UART1_RD,		TNETV107X_UART1_TD,
	-1
};

static const short ssp_pins[] __initdata = {
	TNETV107X_SSP0_0, TNETV107X_SSP0_1, TNETV107X_SSP0_2,
	TNETV107X_SSP1_0, TNETV107X_SSP1_1, TNETV107X_SSP1_2,
	TNETV107X_SSP1_3, -1
};

static struct mtd_partition nand_partitions[] = {
	/* bootloader (U-Boot, etc) in first 12 sectors */
	{
		.name		= "bootloader",
		.offset		= 0,
		.size		= (12*SZ_128K),
		.mask_flags	= MTD_WRITEABLE,	/* force read-only */
	},
	/* bootloader params in the next sector */
	{
		.name		= "params",
		.offset		= MTDPART_OFS_NXTBLK,
		.size		= SZ_128K,
		.mask_flags	= MTD_WRITEABLE,	/* force read-only */
	},
	/* kernel */
	{
		.name		= "kernel",
		.offset		= MTDPART_OFS_NXTBLK,
		.size		= SZ_4M,
		.mask_flags	= 0,
	},
	/* file system */
	{
		.name		= "filesystem",
		.offset		= MTDPART_OFS_NXTBLK,
		.size		= MTDPART_SIZ_FULL,
		.mask_flags	= 0,
	}
};

static struct davinci_nand_pdata nand_config = {
	.mask_cle	= 0x4000,
	.mask_ale	= 0x2000,
	.parts		= nand_partitions,
	.nr_parts	= ARRAY_SIZE(nand_partitions),
	.ecc_mode	= NAND_ECC_HW,
	.options	= NAND_USE_FLASH_BBT,
	.ecc_bits	= 1,
};

static struct davinci_uart_config serial_config __initconst = {
	.enabled_uarts	= BIT(1),
};

/* 6x5 matrix keypad layout: KEY(row, col, keycode) */
static const uint32_t keymap[] = {
	KEY(0, 0, KEY_NUMERIC_1),
	KEY(0, 1, KEY_NUMERIC_2),
	KEY(0, 2, KEY_NUMERIC_3),
	KEY(0, 3, KEY_FN_F1),
	KEY(0, 4, KEY_MENU),

	KEY(1, 0, KEY_NUMERIC_4),
	KEY(1, 1, KEY_NUMERIC_5),
	KEY(1, 2, KEY_NUMERIC_6),
	KEY(1, 3, KEY_UP),
	KEY(1, 4, KEY_FN_F2),

	KEY(2, 0, KEY_NUMERIC_7),
	KEY(2, 1, KEY_NUMERIC_8),
	KEY(2, 2, KEY_NUMERIC_9),
	KEY(2, 3, KEY_LEFT),
	KEY(2, 4, KEY_ENTER),

	KEY(3, 0, KEY_NUMERIC_STAR),
	KEY(3, 1, KEY_NUMERIC_0),
	KEY(3, 2, KEY_NUMERIC_POUND),
	KEY(3, 3, KEY_DOWN),
	KEY(3, 4, KEY_RIGHT),

	KEY(4, 0, KEY_FN_F3),
	KEY(4, 1, KEY_FN_F4),
	KEY(4, 2, KEY_MUTE),
	KEY(4, 3, KEY_HOME),
	KEY(4, 4, KEY_BACK),

	KEY(5, 0, KEY_VOLUMEDOWN),
	KEY(5, 1, KEY_VOLUMEUP),
	KEY(5, 2, KEY_F1),
	KEY(5, 3, KEY_F2),
	KEY(5, 4, KEY_F3),
};

static const struct matrix_keymap_data keymap_data = {
	.keymap		= keymap,
	.keymap_size	= ARRAY_SIZE(keymap),
};

static struct matrix_keypad_platform_data keypad_config = {
	.keymap_data	= &keymap_data,
	.num_row_gpios	= 6,
	.num_col_gpios	= 5,
	.debounce_ms	= 0,		/* minimum */
	.active_low	= 0,		/* pull up realization */
	.no_autorepeat	= 0,
};

/*
 * SSP SPI chip-select hook. The CS GPIO is requested lazily on first
 * use; a failed request is cached as -ENOSYS and the hook then becomes
 * a no-op.
 */
static void spi_select_device(int cs)
{
	static int gpio;

	if (!gpio) {
		int ret;

		ret = gpio_request(EVM_SPI_CS_GPIO, "spi chipsel");
		if (ret < 0) {
			pr_err("cannot open spi chipsel gpio\n");
			gpio = -ENOSYS;
			return;
		} else {
			gpio = EVM_SPI_CS_GPIO;
			gpio_direction_output(gpio, 0);
		}
	}

	if (gpio < 0)
		return;

	return gpio_set_value(gpio, cs ? 1 : 0);
}

static struct ti_ssp_spi_data spi_master_data = {
	.num_cs	= 2,
	.select	= spi_select_device,
	.iosel	= SSP_PIN_SEL(0, SSP_CLOCK)	| SSP_PIN_SEL(1, SSP_DATA) |
		  SSP_PIN_SEL(2, SSP_CHIPSEL)	| SSP_PIN_SEL(3, SSP_IN)   |
		  SSP_INPUT_SEL(3),
};

static struct ti_ssp_data ssp_config = {
	.out_clock	= 250 * 1000,
	.dev_data	= {
		[1] = {
			.dev_name	= "ti-ssp-spi",
			.pdata		= &spi_master_data,
			.pdata_size	= sizeof(spi_master_data),
		},
	},
};

static struct tnetv107x_device_info evm_device_info __initconst = {
	.serial_config		= &serial_config,
	.mmc_config[1]		= &mmc_config,	/* controller 1 */
	.nand_config[0]		= &nand_config,	/* chip select 0 */
	.keypad_config		= &keypad_config,
	.ssp_config		= &ssp_config,
};

static struct spi_board_info spi_info[] __initconst = {
};

/* Apply the pin-mux tables and register all on-board devices. */
static __init void tnetv107x_evm_board_init(void)
{
	davinci_cfg_reg_list(sdio1_pins);
	davinci_cfg_reg_list(uart1_pins);
	davinci_cfg_reg_list(ssp_pins);

	tnetv107x_devices_init(&evm_device_info);

	spi_register_board_info(spi_info, ARRAY_SIZE(spi_info));
}

#ifdef CONFIG_SERIAL_8250_CONSOLE
/* Default the kernel console to ttyS0 at 115200 baud. */
static int __init tnetv107x_evm_console_init(void)
{
	return add_preferred_console("ttyS", 0, "115200");
}
console_initcall(tnetv107x_evm_console_init);
#endif

MACHINE_START(TNETV107X, "TNETV107X EVM")
	.boot_params	= (TNETV107X_DDR_BASE + 0x100),
	.map_io		= tnetv107x_init,
	.init_irq	= cp_intc_init,
	.timer		= &davinci_timer,
	.init_machine	= tnetv107x_evm_board_init,
	.dma_zone_size	= SZ_128M,
MACHINE_END
gpl-2.0
DESHONOR/android_kernel_huawei_msm8916_Blefish
arch/arm/mach-at91/board-dt-sama5.c
2035
1963
/* * Setup code for SAMA5 Evaluation Kits with Device Tree support * * Copyright (C) 2013 Atmel, * 2013 Ludovic Desroches <ludovic.desroches@atmel.com> * * Licensed under GPLv2 or later. */ #include <linux/types.h> #include <linux/init.h> #include <linux/module.h> #include <linux/gpio.h> #include <linux/micrel_phy.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/phy.h> #include <asm/setup.h> #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include "at91_aic.h" #include "generic.h" static const struct of_device_id irq_of_match[] __initconst = { { .compatible = "atmel,sama5d3-aic", .data = at91_aic5_of_init }, { /*sentinel*/ } }; static void __init at91_dt_init_irq(void) { of_irq_init(irq_of_match); } static int ksz9021rn_phy_fixup(struct phy_device *phy) { int value; #define GMII_RCCPSR 260 #define GMII_RRDPSR 261 #define GMII_ERCR 11 #define GMII_ERDWR 12 /* Set delay values */ value = GMII_RCCPSR | 0x8000; phy_write(phy, GMII_ERCR, value); value = 0xF2F4; phy_write(phy, GMII_ERDWR, value); value = GMII_RRDPSR | 0x8000; phy_write(phy, GMII_ERCR, value); value = 0x2222; phy_write(phy, GMII_ERDWR, value); return 0; } static void __init sama5_dt_device_init(void) { if (of_machine_is_compatible("atmel,sama5d3xcm")) phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK, ksz9021rn_phy_fixup); of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); } static const char *sama5_dt_board_compat[] __initdata = { "atmel,sama5", NULL }; DT_MACHINE_START(sama5_dt, "Atmel SAMA5 (Device Tree)") /* Maintainer: Atmel */ .init_time = at91sam926x_pit_init, .map_io = at91_map_io, .handle_irq = at91_aic5_handle_irq, .init_early = at91_dt_initialize, .init_irq = at91_dt_init_irq, .init_machine = sama5_dt_device_init, .dt_compat = sama5_dt_board_compat, MACHINE_END
gpl-2.0
rr-zenfone2/android_kernel_asus_moorefield
kernel/compat.c
2547
29196
/*
 *  linux/kernel/compat.c
 *
 *  Kernel compatibility routines for e.g. 32 bit syscall support
 *  on 64 bit kernels.
 *
 *  Copyright (C) 2002-2003 Stephen Rothwell, IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/signal.h>
#include <linux/sched.h>	/* for MAX_SCHEDULE_TIMEOUT */
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/security.h>
#include <linux/timex.h>
#include <linux/export.h>
#include <linux/migrate.h>
#include <linux/posix-timers.h>
#include <linux/times.h>
#include <linux/ptrace.h>
#include <linux/gfp.h>

#include <asm/uaccess.h>

/*
 * Get/set struct timeval with struct timespec on the native side
 */
static int compat_get_timeval_convert(struct timespec *o,
				      struct compat_timeval __user *i)
{
	long usec;

	if (get_user(o->tv_sec, &i->tv_sec) ||
	    get_user(usec, &i->tv_usec))
		return -EFAULT;
	o->tv_nsec = usec * 1000;
	return 0;
}

static int compat_put_timeval_convert(struct compat_timeval __user *o,
				      struct timeval *i)
{
	return (put_user(i->tv_sec, &o->tv_sec) ||
		put_user(i->tv_usec, &o->tv_usec)) ? -EFAULT : 0;
}

/* Copy a userspace compat_timex into a zeroed native struct timex. */
static int compat_get_timex(struct timex *txc, struct compat_timex __user *utp)
{
	memset(txc, 0, sizeof(struct timex));

	if (!access_ok(VERIFY_READ, utp, sizeof(struct compat_timex)) ||
			__get_user(txc->modes, &utp->modes) ||
			__get_user(txc->offset, &utp->offset) ||
			__get_user(txc->freq, &utp->freq) ||
			__get_user(txc->maxerror, &utp->maxerror) ||
			__get_user(txc->esterror, &utp->esterror) ||
			__get_user(txc->status, &utp->status) ||
			__get_user(txc->constant, &utp->constant) ||
			__get_user(txc->precision, &utp->precision) ||
			__get_user(txc->tolerance, &utp->tolerance) ||
			__get_user(txc->time.tv_sec, &utp->time.tv_sec) ||
			__get_user(txc->time.tv_usec, &utp->time.tv_usec) ||
			__get_user(txc->tick, &utp->tick) ||
			__get_user(txc->ppsfreq, &utp->ppsfreq) ||
			__get_user(txc->jitter, &utp->jitter) ||
			__get_user(txc->shift, &utp->shift) ||
			__get_user(txc->stabil, &utp->stabil) ||
			__get_user(txc->jitcnt, &utp->jitcnt) ||
			__get_user(txc->calcnt, &utp->calcnt) ||
			__get_user(txc->errcnt, &utp->errcnt) ||
			__get_user(txc->stbcnt, &utp->stbcnt))
		return -EFAULT;

	return 0;
}

/*
 * Copy a native struct timex back out to userspace.  Note: also copies
 * the tai field, which compat_get_timex() above does not read.
 */
static int compat_put_timex(struct compat_timex __user *utp, struct timex *txc)
{
	if (!access_ok(VERIFY_WRITE, utp, sizeof(struct compat_timex)) ||
			__put_user(txc->modes, &utp->modes) ||
			__put_user(txc->offset, &utp->offset) ||
			__put_user(txc->freq, &utp->freq) ||
			__put_user(txc->maxerror, &utp->maxerror) ||
			__put_user(txc->esterror, &utp->esterror) ||
			__put_user(txc->status, &utp->status) ||
			__put_user(txc->constant, &utp->constant) ||
			__put_user(txc->precision, &utp->precision) ||
			__put_user(txc->tolerance, &utp->tolerance) ||
			__put_user(txc->time.tv_sec, &utp->time.tv_sec) ||
			__put_user(txc->time.tv_usec, &utp->time.tv_usec) ||
			__put_user(txc->tick, &utp->tick) ||
			__put_user(txc->ppsfreq, &utp->ppsfreq) ||
			__put_user(txc->jitter, &utp->jitter) ||
			__put_user(txc->shift, &utp->shift) ||
			__put_user(txc->stabil, &utp->stabil) ||
			__put_user(txc->jitcnt, &utp->jitcnt) ||
			__put_user(txc->calcnt, &utp->calcnt) ||
			__put_user(txc->errcnt, &utp->errcnt) ||
			__put_user(txc->stbcnt, &utp->stbcnt) ||
			__put_user(txc->tai, &utp->tai))
		return -EFAULT;
	return 0;
}

asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
		struct timezone __user *tz)
{
	if (tv) {
		struct timeval ktv;
		do_gettimeofday(&ktv);
		if (compat_put_timeval_convert(tv, &ktv))
			return -EFAULT;
	}
	if (tz) {
		if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
			return -EFAULT;
	}

	return 0;
}

asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
		struct timezone __user *tz)
{
	struct timespec kts;
	struct timezone ktz;

	if (tv) {
		if (compat_get_timeval_convert(&kts, tv))
			return -EFAULT;
	}
	if (tz) {
		if (copy_from_user(&ktz, tz, sizeof(ktz)))
			return -EFAULT;
	}

	return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
}

int get_compat_timeval(struct timeval *tv, const struct compat_timeval __user *ctv)
{
	return (!access_ok(VERIFY_READ, ctv, sizeof(*ctv)) ||
			__get_user(tv->tv_sec, &ctv->tv_sec) ||
			__get_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(get_compat_timeval);

int put_compat_timeval(const struct timeval *tv, struct compat_timeval __user *ctv)
{
	return (!access_ok(VERIFY_WRITE, ctv, sizeof(*ctv)) ||
			__put_user(tv->tv_sec, &ctv->tv_sec) ||
			__put_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(put_compat_timeval);

int get_compat_timespec(struct timespec *ts, const struct compat_timespec __user *cts)
{
	return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) ||
			__get_user(ts->tv_sec, &cts->tv_sec) ||
			__get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(get_compat_timespec);

int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user *cts)
{
	return (!access_ok(VERIFY_WRITE, cts, sizeof(*cts)) ||
			__put_user(ts->tv_sec, &cts->tv_sec) ||
			__put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(put_compat_timespec);

/*
 * The compat_{get,put}_time{val,spec} wrappers below fall back to a
 * plain copy when the ABI uses 64-bit time (COMPAT_USE_64BIT_TIME).
 */
int compat_get_timeval(struct timeval *tv, const void __user *utv)
{
	if (COMPAT_USE_64BIT_TIME)
		return copy_from_user(tv, utv, sizeof *tv) ? -EFAULT : 0;
	else
		return get_compat_timeval(tv, utv);
}
EXPORT_SYMBOL_GPL(compat_get_timeval);

int compat_put_timeval(const struct timeval *tv, void __user *utv)
{
	if (COMPAT_USE_64BIT_TIME)
		return copy_to_user(utv, tv, sizeof *tv) ? -EFAULT : 0;
	else
		return put_compat_timeval(tv, utv);
}
EXPORT_SYMBOL_GPL(compat_put_timeval);

int compat_get_timespec(struct timespec *ts, const void __user *uts)
{
	if (COMPAT_USE_64BIT_TIME)
		return copy_from_user(ts, uts, sizeof *ts) ? -EFAULT : 0;
	else
		return get_compat_timespec(ts, uts);
}
EXPORT_SYMBOL_GPL(compat_get_timespec);

int compat_put_timespec(const struct timespec *ts, void __user *uts)
{
	if (COMPAT_USE_64BIT_TIME)
		return copy_to_user(uts, ts, sizeof *ts) ? -EFAULT : 0;
	else
		return put_compat_timespec(ts, uts);
}
EXPORT_SYMBOL_GPL(compat_put_timespec);

/*
 * Restart a compat nanosleep: run the native restart handler against a
 * kernel-space remainder (under KERNEL_DS), then convert the remainder
 * back to the saved compat pointer.
 */
static long compat_nanosleep_restart(struct restart_block *restart)
{
	struct compat_timespec __user *rmtp;
	struct timespec rmt;
	mm_segment_t oldfs;
	long ret;

	restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	ret = hrtimer_nanosleep_restart(restart);
	set_fs(oldfs);

	if (ret) {
		rmtp = restart->nanosleep.compat_rmtp;

		if (rmtp && put_compat_timespec(&rmt, rmtp))
			return -EFAULT;
	}

	return ret;
}

asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
				     struct compat_timespec __user *rmtp)
{
	struct timespec tu, rmt;
	mm_segment_t oldfs;
	long ret;

	if (get_compat_timespec(&tu, rqtp))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	ret = hrtimer_nanosleep(&tu,
				rmtp ? (struct timespec __user *)&rmt : NULL,
				HRTIMER_MODE_REL, CLOCK_MONOTONIC);
	set_fs(oldfs);

	if (ret) {
		struct restart_block *restart
			= &current_thread_info()->restart_block;

		restart->fn = compat_nanosleep_restart;
		restart->nanosleep.compat_rmtp = rmtp;

		if (rmtp && put_compat_timespec(&rmt, rmtp))
			return -EFAULT;
	}

	return ret;
}

static inline long get_compat_itimerval(struct itimerval *o,
		struct compat_itimerval __user *i)
{
	return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
		(__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) |
		 __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) |
		 __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) |
		 __get_user(o->it_value.tv_usec, &i->it_value.tv_usec)));
}

static inline long put_compat_itimerval(struct compat_itimerval __user *o,
		struct itimerval *i)
{
	return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
		(__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) |
		 __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) |
		 __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) |
		 __put_user(i->it_value.tv_usec, &o->it_value.tv_usec)));
}

COMPAT_SYSCALL_DEFINE2(getitimer, int, which,
		struct compat_itimerval __user *, it)
{
	struct itimerval kit;
	int error;

	error = do_getitimer(which, &kit);
	if (!error && put_compat_itimerval(it, &kit))
		error = -EFAULT;
	return error;
}

COMPAT_SYSCALL_DEFINE3(setitimer, int, which,
		struct compat_itimerval __user *, in,
		struct compat_itimerval __user *, out)
{
	struct itimerval kin, kout;
	int error;

	if (in) {
		if (get_compat_itimerval(&kin, in))
			return -EFAULT;
	} else
		memset(&kin, 0, sizeof(kin));

	error = do_setitimer(which, &kin, out ? &kout : NULL);
	if (error || !out)
		return error;
	if (put_compat_itimerval(out, &kout))
		return -EFAULT;
	return 0;
}

static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
{
	return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
}

asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
{
	if (tbuf) {
		struct tms tms;
		struct compat_tms tmp;

		do_sys_times(&tms);
		/* Convert our struct tms to the compat version. */
		tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
		tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
		tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
		tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return compat_jiffies_to_clock_t(jiffies);
}

#ifdef __ARCH_WANT_SYS_SIGPENDING

/*
 * Assumption: old_sigset_t and compat_old_sigset_t are both
 * types that can be passed to put_user()/get_user().
 */

asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
{
	old_sigset_t s;
	long ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_sigpending((old_sigset_t __user *) &s);
	set_fs(old_fs);
	if (ret == 0)
		ret = put_user(s, set);
	return ret;
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK

/*
 * sys_sigprocmask SIG_SETMASK sets the first (compat) word of the
 * blocked set of signals to the supplied signal set
 */
static inline void compat_sig_setmask(sigset_t *blocked, compat_sigset_word set)
{
	memcpy(blocked->sig, &set, sizeof(set));
}

COMPAT_SYSCALL_DEFINE3(sigprocmask, int, how,
		       compat_old_sigset_t __user *, nset,
		       compat_old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (get_user(new_set, nset))
			return -EFAULT;
		/* SIGKILL and SIGSTOP can never be blocked */
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			compat_sig_setmask(&new_blocked, new_set);
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (put_user(old_set, oset))
			return -EFAULT;
	}

	return 0;
}

#endif

asmlinkage long compat_sys_setrlimit(unsigned int resource,
		struct compat_rlimit __user *rlim)
{
	struct rlimit r;

	if (!access_ok(VERIFY_READ, rlim, sizeof(*rlim)) ||
	    __get_user(r.rlim_cur, &rlim->rlim_cur) ||
	    __get_user(r.rlim_max, &rlim->rlim_max))
		return -EFAULT;

	/* translate the compat "infinity" sentinel to the native one */
	if (r.rlim_cur == COMPAT_RLIM_INFINITY)
		r.rlim_cur = RLIM_INFINITY;
	if (r.rlim_max == COMPAT_RLIM_INFINITY)
		r.rlim_max = RLIM_INFINITY;
	return do_prlimit(current, resource, &r, NULL);
}

#ifdef COMPAT_RLIM_OLD_INFINITY

asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
		struct compat_rlimit __user *rlim)
{
	struct rlimit r;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_old_getrlimit(resource, &r);
	set_fs(old_fs);

	if (!ret) {
		/* clamp values the old compat ABI cannot represent */
		if (r.rlim_cur > COMPAT_RLIM_OLD_INFINITY)
			r.rlim_cur = COMPAT_RLIM_INFINITY;
		if (r.rlim_max > COMPAT_RLIM_OLD_INFINITY)
			r.rlim_max = COMPAT_RLIM_INFINITY;

		if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
		    __put_user(r.rlim_cur, &rlim->rlim_cur) ||
		    __put_user(r.rlim_max, &rlim->rlim_max))
			return -EFAULT;
	}
	return ret;
}

#endif

asmlinkage long compat_sys_getrlimit(unsigned int resource,
		struct compat_rlimit __user *rlim)
{
	struct rlimit r;
	int ret;

	ret = do_prlimit(current, resource, NULL, &r);
	if (!ret) {
		if (r.rlim_cur > COMPAT_RLIM_INFINITY)
			r.rlim_cur = COMPAT_RLIM_INFINITY;
		if (r.rlim_max > COMPAT_RLIM_INFINITY)
			r.rlim_max = COMPAT_RLIM_INFINITY;

		if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
		    __put_user(r.rlim_cur, &rlim->rlim_cur) ||
		    __put_user(r.rlim_max, &rlim->rlim_max))
			return -EFAULT;
	}
	return ret;
}

/* Copy a native struct rusage out to a userspace compat_rusage. */
int put_compat_rusage(const struct rusage *r, struct compat_rusage __user *ru)
{
	if (!access_ok(VERIFY_WRITE, ru, sizeof(*ru)) ||
	    __put_user(r->ru_utime.tv_sec, &ru->ru_utime.tv_sec) ||
	    __put_user(r->ru_utime.tv_usec, &ru->ru_utime.tv_usec) ||
	    __put_user(r->ru_stime.tv_sec, &ru->ru_stime.tv_sec) ||
	    __put_user(r->ru_stime.tv_usec, &ru->ru_stime.tv_usec) ||
	    __put_user(r->ru_maxrss, &ru->ru_maxrss) ||
	    __put_user(r->ru_ixrss, &ru->ru_ixrss) ||
	    __put_user(r->ru_idrss, &ru->ru_idrss) ||
	    __put_user(r->ru_isrss, &ru->ru_isrss) ||
	    __put_user(r->ru_minflt, &ru->ru_minflt) ||
	    __put_user(r->ru_majflt, &ru->ru_majflt) ||
	    __put_user(r->ru_nswap, &ru->ru_nswap) ||
	    __put_user(r->ru_inblock, &ru->ru_inblock) ||
	    __put_user(r->ru_oublock, &ru->ru_oublock) ||
	    __put_user(r->ru_msgsnd, &ru->ru_msgsnd) ||
	    __put_user(r->ru_msgrcv, &ru->ru_msgrcv) ||
	    __put_user(r->ru_nsignals, &ru->ru_nsignals) ||
	    __put_user(r->ru_nvcsw, &ru->ru_nvcsw) ||
	    __put_user(r->ru_nivcsw, &ru->ru_nivcsw))
		return -EFAULT;
	return 0;
}

COMPAT_SYSCALL_DEFINE4(wait4,
	compat_pid_t, pid,
	compat_uint_t __user *, stat_addr,
	int, options,
	struct compat_rusage __user *, ru)
{
	if (!ru) {
		return sys_wait4(pid, stat_addr, options, NULL);
	} else {
		struct rusage r;
		int ret;
		unsigned int status;
		mm_segment_t old_fs = get_fs();

		set_fs (KERNEL_DS);
		ret = sys_wait4(pid,
				(stat_addr ?
				 (unsigned int __user *) &status : NULL),
				options, (struct rusage __user *) &r);
		set_fs (old_fs);

		if (ret > 0) {
			if (put_compat_rusage(&r, ru))
				return -EFAULT;
			if (stat_addr && put_user(status, stat_addr))
				return -EFAULT;
		}
		return ret;
	}
}

COMPAT_SYSCALL_DEFINE5(waitid,
		int, which, compat_pid_t, pid,
		struct compat_siginfo __user *, uinfo, int, options,
		struct compat_rusage __user *, uru)
{
	siginfo_t info;
	struct rusage ru;
	long ret;
	mm_segment_t old_fs = get_fs();

	memset(&info, 0, sizeof(info));

	set_fs(KERNEL_DS);
	ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
			 uru ? (struct rusage __user *)&ru : NULL);
	set_fs(old_fs);

	if ((ret < 0) || (info.si_signo == 0))
		return ret;

	if (uru) {
		/* sys_waitid() overwrites everything in ru */
		if (COMPAT_USE_64BIT_TIME)
			ret = copy_to_user(uru, &ru, sizeof(ru));
		else
			ret = put_compat_rusage(&ru, uru);
		if (ret)
			return -EFAULT;
	}

	BUG_ON(info.si_code & __SI_MASK);
	info.si_code |= __SI_CHLD;
	return copy_siginfo_to_user32(uinfo, &info);
}

/*
 * Read a user-supplied CPU bitmap of @len bytes into @new_mask,
 * zero-filling when the user buffer is shorter than a cpumask.
 */
static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr,
				    unsigned len, struct cpumask *new_mask)
{
	unsigned long *k;

	if (len < cpumask_size())
		memset(new_mask, 0, cpumask_size());
	else if (len > cpumask_size())
		len = cpumask_size();

	k = cpumask_bits(new_mask);
	return compat_get_bitmap(k, user_mask_ptr, len * 8);
}

asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
					     unsigned int len,
					     compat_ulong_t __user *user_mask_ptr)
{
	cpumask_var_t new_mask;
	int retval;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	retval = compat_get_user_cpu_mask(user_mask_ptr, len, new_mask);
	if (retval)
		goto out;

	retval = sched_setaffinity(pid, new_mask);
out:
	free_cpumask_var(new_mask);
	return retval;
}

asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
					     compat_ulong_t __user *user_mask_ptr)
{
	int ret;
	cpumask_var_t mask;

	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
		return -EINVAL;
	if (len & (sizeof(compat_ulong_t)-1))
		return -EINVAL;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
		size_t retlen = min_t(size_t, len, cpumask_size());

		if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8))
			ret = -EFAULT;
		else
			ret = retlen;
	}
	free_cpumask_var(mask);

	return ret;
}

int get_compat_itimerspec(struct itimerspec *dst,
			  const struct compat_itimerspec __user *src)
{
	if (get_compat_timespec(&dst->it_interval, &src->it_interval) ||
	    get_compat_timespec(&dst->it_value, &src->it_value))
		return -EFAULT;
	return 0;
}

int put_compat_itimerspec(struct compat_itimerspec __user *dst,
			  const struct itimerspec *src)
{
	if (put_compat_timespec(&src->it_interval, &dst->it_interval) ||
	    put_compat_timespec(&src->it_value, &dst->it_value))
		return -EFAULT;
	return 0;
}

/*
 * Build a native sigevent in compat-allocated user space and hand it to
 * the native timer_create syscall.
 */
long compat_sys_timer_create(clockid_t which_clock,
			struct compat_sigevent __user *timer_event_spec,
			timer_t __user *created_timer_id)
{
	struct sigevent __user *event = NULL;

	if (timer_event_spec) {
		struct sigevent kevent;

		event = compat_alloc_user_space(sizeof(*event));
		if (get_compat_sigevent(&kevent, timer_event_spec) ||
		    copy_to_user(event, &kevent, sizeof(*event)))
			return -EFAULT;
	}

	return sys_timer_create(which_clock, event, created_timer_id);
}

long compat_sys_timer_settime(timer_t timer_id, int flags,
			  struct compat_itimerspec __user *new,
			  struct compat_itimerspec __user *old)
{
	long err;
	mm_segment_t oldfs;
	struct itimerspec newts, oldts;

	if (!new)
		return -EINVAL;
	if (get_compat_itimerspec(&newts, new))
		return -EFAULT;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_timer_settime(timer_id, flags,
				(struct itimerspec __user *) &newts,
				(struct itimerspec __user *) &oldts);
	set_fs(oldfs);
	if (!err && old && put_compat_itimerspec(old, &oldts))
		return -EFAULT;
	return err;
}

long compat_sys_timer_gettime(timer_t timer_id,
		struct compat_itimerspec __user *setting)
{
	long err;
	mm_segment_t oldfs;
	struct itimerspec ts;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_timer_gettime(timer_id,
				(struct itimerspec __user *) &ts);
	set_fs(oldfs);
	if (!err && put_compat_itimerspec(setting, &ts))
		return -EFAULT;
	return err;
}

long compat_sys_clock_settime(clockid_t which_clock,
		struct compat_timespec __user *tp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec ts;

	if (get_compat_timespec(&ts, tp))
		return -EFAULT;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_settime(which_clock,
				(struct timespec __user *) &ts);
	set_fs(oldfs);
	return err;
}

long compat_sys_clock_gettime(clockid_t which_clock,
		struct compat_timespec __user *tp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec ts;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_gettime(which_clock,
				(struct timespec __user *) &ts);
	set_fs(oldfs);
	if (!err && put_compat_timespec(&ts, tp))
		return -EFAULT;
	return err;
}

long compat_sys_clock_adjtime(clockid_t which_clock,
		struct compat_timex __user *utp)
{
	struct timex txc;
	mm_segment_t oldfs;
	int err, ret;

	err = compat_get_timex(&txc, utp);
	if (err)
		return err;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
	set_fs(oldfs);

	err = compat_put_timex(utp, &txc);
	if (err)
		return err;

	return ret;
}

long compat_sys_clock_getres(clockid_t which_clock,
		struct compat_timespec __user *tp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec ts;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_getres(which_clock,
			       (struct timespec __user *) &ts);
	set_fs(oldfs);
	if (!err && tp && put_compat_timespec(&ts, tp))
		return -EFAULT;
	return err;
}

static long compat_clock_nanosleep_restart(struct restart_block *restart)
{
	long err;
	mm_segment_t oldfs;
	struct timespec tu;
	struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;

	restart->nanosleep.rmtp = (struct timespec __user *) &tu;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = clock_nanosleep_restart(restart);
	set_fs(oldfs);

	if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
	    put_compat_timespec(&tu, rmtp))
		return -EFAULT;

	if (err == -ERESTART_RESTARTBLOCK) {
		restart->fn = compat_clock_nanosleep_restart;
		restart->nanosleep.compat_rmtp = rmtp;
	}
	return err;
}

long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
			    struct compat_timespec __user *rqtp,
			    struct compat_timespec __user *rmtp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec in, out;
	struct restart_block *restart;

	if (get_compat_timespec(&in, rqtp))
		return -EFAULT;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_nanosleep(which_clock, flags,
				  (struct timespec __user *) &in,
				  (struct timespec __user *) &out);
	set_fs(oldfs);

	if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
	    put_compat_timespec(&out,
rmtp)) return -EFAULT; if (err == -ERESTART_RESTARTBLOCK) { restart = &current_thread_info()->restart_block; restart->fn = compat_clock_nanosleep_restart; restart->nanosleep.compat_rmtp = rmtp; } return err; } /* * We currently only need the following fields from the sigevent * structure: sigev_value, sigev_signo, sig_notify and (sometimes * sigev_notify_thread_id). The others are handled in user mode. * We also assume that copying sigev_value.sival_int is sufficient * to keep all the bits of sigev_value.sival_ptr intact. */ int get_compat_sigevent(struct sigevent *event, const struct compat_sigevent __user *u_event) { memset(event, 0, sizeof(*event)); return (!access_ok(VERIFY_READ, u_event, sizeof(*u_event)) || __get_user(event->sigev_value.sival_int, &u_event->sigev_value.sival_int) || __get_user(event->sigev_signo, &u_event->sigev_signo) || __get_user(event->sigev_notify, &u_event->sigev_notify) || __get_user(event->sigev_notify_thread_id, &u_event->sigev_notify_thread_id)) ? -EFAULT : 0; } long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, unsigned long bitmap_size) { int i, j; unsigned long m; compat_ulong_t um; unsigned long nr_compat_longs; /* align bitmap up to nearest compat_long_t boundary */ bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG); if (!access_ok(VERIFY_READ, umask, bitmap_size / 8)) return -EFAULT; nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size); for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) { m = 0; for (j = 0; j < sizeof(m)/sizeof(um); j++) { /* * We dont want to read past the end of the userspace * bitmap. We must however ensure the end of the * kernel bitmap is zeroed. 
*/ if (nr_compat_longs-- > 0) { if (__get_user(um, umask)) return -EFAULT; } else { um = 0; } umask++; m |= (long)um << (j * BITS_PER_COMPAT_LONG); } *mask++ = m; } return 0; } long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, unsigned long bitmap_size) { int i, j; unsigned long m; compat_ulong_t um; unsigned long nr_compat_longs; /* align bitmap up to nearest compat_long_t boundary */ bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG); if (!access_ok(VERIFY_WRITE, umask, bitmap_size / 8)) return -EFAULT; nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size); for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) { m = *mask++; for (j = 0; j < sizeof(m)/sizeof(um); j++) { um = m; /* * We dont want to write past the end of the userspace * bitmap. */ if (nr_compat_longs-- > 0) { if (__put_user(um, umask)) return -EFAULT; } umask++; m >>= 4*sizeof(um); m >>= 4*sizeof(um); } } return 0; } void sigset_from_compat(sigset_t *set, const compat_sigset_t *compat) { switch (_NSIG_WORDS) { case 4: set->sig[3] = compat->sig[6] | (((long)compat->sig[7]) << 32 ); case 3: set->sig[2] = compat->sig[4] | (((long)compat->sig[5]) << 32 ); case 2: set->sig[1] = compat->sig[2] | (((long)compat->sig[3]) << 32 ); case 1: set->sig[0] = compat->sig[0] | (((long)compat->sig[1]) << 32 ); } } EXPORT_SYMBOL_GPL(sigset_from_compat); void sigset_to_compat(compat_sigset_t *compat, const sigset_t *set) { switch (_NSIG_WORDS) { case 4: compat->sig[7] = (set->sig[3] >> 32); compat->sig[6] = set->sig[3]; case 3: compat->sig[5] = (set->sig[2] >> 32); compat->sig[4] = set->sig[2]; case 2: compat->sig[3] = (set->sig[1] >> 32); compat->sig[2] = set->sig[1]; case 1: compat->sig[1] = (set->sig[0] >> 32); compat->sig[0] = set->sig[0]; } } COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese, struct compat_siginfo __user *, uinfo, struct compat_timespec __user *, uts, compat_size_t, sigsetsize) { compat_sigset_t s32; sigset_t s; struct timespec t; siginfo_t info; long 
ret; if (sigsetsize != sizeof(sigset_t)) return -EINVAL; if (copy_from_user(&s32, uthese, sizeof(compat_sigset_t))) return -EFAULT; sigset_from_compat(&s, &s32); if (uts) { if (compat_get_timespec(&t, uts)) return -EFAULT; } ret = do_sigtimedwait(&s, &info, uts ? &t : NULL); if (ret > 0 && uinfo) { if (copy_siginfo_to_user32(uinfo, &info)) ret = -EFAULT; } return ret; } #ifdef __ARCH_WANT_COMPAT_SYS_TIME /* compat_time_t is a 32 bit "long" and needs to get converted. */ asmlinkage long compat_sys_time(compat_time_t __user * tloc) { compat_time_t i; struct timeval tv; do_gettimeofday(&tv); i = tv.tv_sec; if (tloc) { if (put_user(i,tloc)) return -EFAULT; } force_successful_syscall_return(); return i; } asmlinkage long compat_sys_stime(compat_time_t __user *tptr) { struct timespec tv; int err; if (get_user(tv.tv_sec, tptr)) return -EFAULT; tv.tv_nsec = 0; err = security_settime(&tv, NULL); if (err) return err; do_settimeofday(&tv); return 0; } #endif /* __ARCH_WANT_COMPAT_SYS_TIME */ asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp) { struct timex txc; int err, ret; err = compat_get_timex(&txc, utp); if (err) return err; ret = do_adjtimex(&txc); err = compat_put_timex(utp, &txc); if (err) return err; return ret; } #ifdef CONFIG_NUMA asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_pages, compat_uptr_t __user *pages32, const int __user *nodes, int __user *status, int flags) { const void __user * __user *pages; int i; pages = compat_alloc_user_space(nr_pages * sizeof(void *)); for (i = 0; i < nr_pages; i++) { compat_uptr_t p; if (get_user(p, pages32 + i) || put_user(compat_ptr(p), pages + i)) return -EFAULT; } return sys_move_pages(pid, nr_pages, pages, nodes, status, flags); } asmlinkage long compat_sys_migrate_pages(compat_pid_t pid, compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes, const compat_ulong_t __user *new_nodes) { unsigned long __user *old = NULL; unsigned long __user *new = NULL; nodemask_t tmp_mask; 
unsigned long nr_bits; unsigned long size; nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES); size = ALIGN(nr_bits, BITS_PER_LONG) / 8; if (old_nodes) { if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits)) return -EFAULT; old = compat_alloc_user_space(new_nodes ? size * 2 : size); if (new_nodes) new = old + size / sizeof(unsigned long); if (copy_to_user(old, nodes_addr(tmp_mask), size)) return -EFAULT; } if (new_nodes) { if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits)) return -EFAULT; if (new == NULL) new = compat_alloc_user_space(size); if (copy_to_user(new, nodes_addr(tmp_mask), size)) return -EFAULT; } return sys_migrate_pages(pid, nr_bits + 1, old, new); } #endif COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval, compat_pid_t, pid, struct compat_timespec __user *, interval) { struct timespec t; int ret; mm_segment_t old_fs = get_fs(); set_fs(KERNEL_DS); ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t); set_fs(old_fs); if (put_compat_timespec(&t, interval)) return -EFAULT; return ret; } /* * Allocate user-space memory for the duration of a single system call, * in order to marshall parameters inside a compat thunk. */ void __user *compat_alloc_user_space(unsigned long len) { void __user *ptr; /* If len would occupy more than half of the entire compat space... */ if (unlikely(len > (((compat_uptr_t)~0) >> 1))) return NULL; ptr = arch_compat_alloc_user_space(len); if (unlikely(!access_ok(VERIFY_WRITE, ptr, len))) return NULL; return ptr; } EXPORT_SYMBOL_GPL(compat_alloc_user_space);
gpl-2.0
partner-seco/linux_SBC
drivers/staging/msm/lcdc_gordon.c
3059
11803
/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

/*
 * LCDC driver for the "Gordon" VGA (480x640) panel.
 *
 * The panel controller is programmed through a bit-banged 3-wire
 * serial interface (chip select, clock, data) on GPIOs supplied via
 * platform data.  Each register write is a 16-bit frame: register
 * address in the high byte, data value in the low byte.
 */

#include <linux/delay.h>
#include <mach/gpio.h>
#include "msm_fb.h"

/* Panel controller registers */
#define GORDON_REG_NOP          0x00
#define GORDON_REG_IMGCTL1      0x10
#define GORDON_REG_IMGCTL2      0x11
#define GORDON_REG_IMGSET1      0x12
#define GORDON_REG_IMGSET2      0x13
#define GORDON_REG_IVBP1        0x14
#define GORDON_REG_IHBP1        0x15
#define GORDON_REG_IVNUM1       0x16
#define GORDON_REG_IHNUM1       0x17
#define GORDON_REG_IVBP2        0x18
#define GORDON_REG_IHBP2        0x19
#define GORDON_REG_IVNUM2       0x1A
#define GORDON_REG_IHNUM2       0x1B
#define GORDON_REG_LCDIFCTL1    0x30
#define GORDON_REG_VALTRAN      0x31
#define GORDON_REG_AVCTL        0x33
#define GORDON_REG_LCDIFCTL2    0x34
#define GORDON_REG_LCDIFCTL3    0x35
#define GORDON_REG_LCDIFSET1    0x36
#define GORDON_REG_PCCTL        0x3C
#define GORDON_REG_TPARAM1      0x40
#define GORDON_REG_TLCDIF1      0x41
#define GORDON_REG_TSSPB_ST1    0x42
#define GORDON_REG_TSSPB_ED1    0x43
#define GORDON_REG_TSCK_ST1     0x44
#define GORDON_REG_TSCK_WD1     0x45
#define GORDON_REG_TGSPB_VST1   0x46
#define GORDON_REG_TGSPB_VED1   0x47
#define GORDON_REG_TGSPB_CH1    0x48
#define GORDON_REG_TGCK_ST1     0x49
#define GORDON_REG_TGCK_ED1     0x4A
#define GORDON_REG_TPCTL_ST1    0x4B
#define GORDON_REG_TPCTL_ED1    0x4C
#define GORDON_REG_TPCHG_ED1    0x4D
#define GORDON_REG_TCOM_CH1     0x4E
#define GORDON_REG_THBP1        0x4F
#define GORDON_REG_TPHCTL1      0x50
#define GORDON_REG_EVPH1        0x51
#define GORDON_REG_EVPL1        0x52
#define GORDON_REG_EVNH1        0x53
#define GORDON_REG_EVNL1        0x54
#define GORDON_REG_TBIAS1       0x55
#define GORDON_REG_TPARAM2      0x56
#define GORDON_REG_TLCDIF2      0x57
#define GORDON_REG_TSSPB_ST2    0x58
#define GORDON_REG_TSSPB_ED2    0x59
#define GORDON_REG_TSCK_ST2     0x5A
#define GORDON_REG_TSCK_WD2     0x5B
#define GORDON_REG_TGSPB_VST2   0x5C
#define GORDON_REG_TGSPB_VED2   0x5D
#define GORDON_REG_TGSPB_CH2    0x5E
#define GORDON_REG_TGCK_ST2     0x5F
#define GORDON_REG_TGCK_ED2     0x60
#define GORDON_REG_TPCTL_ST2    0x61
#define GORDON_REG_TPCTL_ED2    0x62
#define GORDON_REG_TPCHG_ED2    0x63
#define GORDON_REG_TCOM_CH2     0x64
#define GORDON_REG_THBP2        0x65
#define GORDON_REG_TPHCTL2      0x66
#define GORDON_REG_POWCTL       0x80

static int lcdc_gordon_panel_off(struct platform_device *pdev);

/* GPIO numbers for the bit-banged serial bus; filled in by spi_init() /
 * lcdc_gordon_panel_on() from platform data. */
static int spi_cs;
static int spi_sclk;
static int spi_sdo;	/* requested from platform data but not driven here */
static int spi_sdi;
static int spi_dac;

/* MSB-first bit masks used when shifting a byte out on the data line. */
static const unsigned char bit_shift[8] = {
	(1 << 7),	/* MSB */
	(1 << 6),
	(1 << 5),
	(1 << 4),
	(1 << 3),
	(1 << 2),
	(1 << 1),
	(1 << 0)	/* LSB */
};

/* Tracks panel power/initialisation state across on/off calls. */
struct gordon_state_type{
	boolean disp_initialized;
	boolean display_on;
	boolean disp_powered_up;
};

static struct gordon_state_type gordon_state = { 0 };
static struct msm_panel_common_pdata *lcdc_gordon_pdata;

/*
 * Shift one byte out on the bit-banged data/clock lines, MSB first.
 * The clock must be low on entry; it is left low on exit and the data
 * line is returned to its idle (low) state.
 */
static void serigo_send_byte(unsigned char val)
{
	unsigned char i;

	for (i = 0; i < 8; i++) {
		/* #1: Drive the Data (High or Low) */
		if (val & bit_shift[i])
			gpio_set_value(spi_sdi, 1);
		else
			gpio_set_value(spi_sdi, 0);
		/* #2: Drive the Clk High and then Low */
		udelay(33);
		gpio_set_value(spi_sclk, 1);
		udelay(33);
		gpio_set_value(spi_sclk, 0);
	}
	/* Idle state of the data (MOSI) line is Low */
	gpio_set_value(spi_sdi, 0);
}

/*
 * Write @data to panel register @reg.  A 16-bit frame is clocked out
 * under chip select: register address in the high byte, data in the
 * low byte.
 */
static void serigo(uint16 reg, uint8 data)
{
	unsigned int tx_val = ((0x00FF & reg) << 8) | data;

	/* Enable the Chip Select */
	gpio_set_value(spi_cs, 1);
	udelay(33);

	/* Transmit in two parts: higher byte first, then lower byte.
	 * The clock is low before each byte is sent. */
	serigo_send_byte((unsigned char)((tx_val & 0xFF00) >> 8));
	serigo_send_byte((uint8)(tx_val & 0x00FF));

	/* Now disable the Chip Select */
	udelay(33);
	gpio_set_value(spi_cs, 0);
}

/* Resolve the serial-bus GPIO numbers from platform data and park all
 * lines in their idle states. */
static void spi_init(void)
{
	/* Setting the Default GPIO's */
	spi_sclk = *(lcdc_gordon_pdata->gpio_num);
	spi_cs   = *(lcdc_gordon_pdata->gpio_num + 1);
	spi_sdi  = *(lcdc_gordon_pdata->gpio_num + 2);
	spi_sdo  = *(lcdc_gordon_pdata->gpio_num + 3);

	/* Set the output so that we don't disturb the slave device */
	gpio_set_value(spi_sclk, 0);
	gpio_set_value(spi_sdi, 0);

	/* Set the Chip Select De-asserted */
	gpio_set_value(spi_cs, 0);
}

/* Mark the display power rail as up.  The actual DAC power-up sequence
 * is board specific and would be added here. */
static void gordon_disp_powerup(void)
{
	if (!gordon_state.disp_powered_up && !gordon_state.display_on) {
		/* Reset the hardware first */
		/* Include DAC power up implementation here */
		gordon_state.disp_powered_up = TRUE;
	}
}

/* Program the image-interface and pre-charge registers. */
static void gordon_init(void)
{
	/* Image interface settings */
	serigo(GORDON_REG_IMGCTL2, 0x00);
	serigo(GORDON_REG_IMGSET1, 0x00);

	/* Exchange the RGB signal for J510(Softbank mobile) */
	serigo(GORDON_REG_IMGSET2, 0x12);
	serigo(GORDON_REG_LCDIFSET1, 0x00);

	/* Pre-charge settings */
	serigo(GORDON_REG_PCCTL, 0x09);
	serigo(GORDON_REG_LCDIFCTL2, 0x7B);

	mdelay(1);
}

/*
 * Full display-on sequence: timing parameters for VGA and QVGA modes,
 * then the staged charge-pump power-up.  The mdelay() spacing between
 * POWCTL steps lets each charge-pump stage settle.
 */
static void gordon_disp_on(void)
{
	if (gordon_state.disp_powered_up && !gordon_state.display_on) {
		gordon_init();
		mdelay(20);

		/* gordon_dispmode setting */
		serigo(GORDON_REG_TPARAM1, 0x30);
		serigo(GORDON_REG_TLCDIF1, 0x00);
		serigo(GORDON_REG_TSSPB_ST1, 0x8B);
		serigo(GORDON_REG_TSSPB_ED1, 0x93);
		serigo(GORDON_REG_TSCK_ST1, 0x88);
		serigo(GORDON_REG_TSCK_WD1, 0x00);
		serigo(GORDON_REG_TGSPB_VST1, 0x01);
		serigo(GORDON_REG_TGSPB_VED1, 0x02);
		serigo(GORDON_REG_TGSPB_CH1, 0x5E);
		serigo(GORDON_REG_TGCK_ST1, 0x80);
		serigo(GORDON_REG_TGCK_ED1, 0x3C);
		serigo(GORDON_REG_TPCTL_ST1, 0x50);
		serigo(GORDON_REG_TPCTL_ED1, 0x74);
		serigo(GORDON_REG_TPCHG_ED1, 0x78);
		serigo(GORDON_REG_TCOM_CH1, 0x50);
		serigo(GORDON_REG_THBP1, 0x84);
		serigo(GORDON_REG_TPHCTL1, 0x00);
		serigo(GORDON_REG_EVPH1, 0x70);
		serigo(GORDON_REG_EVPL1, 0x64);
		serigo(GORDON_REG_EVNH1, 0x56);
		serigo(GORDON_REG_EVNL1, 0x48);
		serigo(GORDON_REG_TBIAS1, 0x88);

		/* QVGA settings */
		serigo(GORDON_REG_TPARAM2, 0x28);
		serigo(GORDON_REG_TLCDIF2, 0x14);
		serigo(GORDON_REG_TSSPB_ST2, 0x49);
		serigo(GORDON_REG_TSSPB_ED2, 0x4B);
		serigo(GORDON_REG_TSCK_ST2, 0x4A);
		serigo(GORDON_REG_TSCK_WD2, 0x02);
		serigo(GORDON_REG_TGSPB_VST2, 0x02);
		serigo(GORDON_REG_TGSPB_VED2, 0x03);
		serigo(GORDON_REG_TGSPB_CH2, 0x2F);
		serigo(GORDON_REG_TGCK_ST2, 0x40);
		serigo(GORDON_REG_TGCK_ED2, 0x1E);
		serigo(GORDON_REG_TPCTL_ST2, 0x2C);
		serigo(GORDON_REG_TPCTL_ED2, 0x3A);
		serigo(GORDON_REG_TPCHG_ED2, 0x3C);
		serigo(GORDON_REG_TCOM_CH2, 0x28);
		serigo(GORDON_REG_THBP2, 0x4D);
		serigo(GORDON_REG_TPHCTL2, 0x1A);

		/* VGA settings */
		serigo(GORDON_REG_IVBP1, 0x02);
		serigo(GORDON_REG_IHBP1, 0x90);
		serigo(GORDON_REG_IVNUM1, 0xA0);
		serigo(GORDON_REG_IHNUM1, 0x78);

		/* QVGA settings */
		serigo(GORDON_REG_IVBP2, 0x02);
		serigo(GORDON_REG_IHBP2, 0x48);
		serigo(GORDON_REG_IVNUM2, 0x50);
		serigo(GORDON_REG_IHNUM2, 0x3C);

		/* Gordon Charge pump settings and ON */
		serigo(GORDON_REG_POWCTL, 0x03);
		mdelay(15);
		serigo(GORDON_REG_POWCTL, 0x07);
		mdelay(15);
		serigo(GORDON_REG_POWCTL, 0x0F);
		mdelay(15);
		serigo(GORDON_REG_AVCTL, 0x03);
		mdelay(15);
		serigo(GORDON_REG_POWCTL, 0x1F);
		mdelay(15);
		serigo(GORDON_REG_POWCTL, 0x5F);
		mdelay(15);
		serigo(GORDON_REG_POWCTL, 0x7F);
		mdelay(15);
		serigo(GORDON_REG_LCDIFCTL1, 0x02);
		mdelay(15);
		serigo(GORDON_REG_IMGCTL1, 0x00);
		mdelay(15);
		serigo(GORDON_REG_LCDIFCTL3, 0x00);
		mdelay(15);
		serigo(GORDON_REG_VALTRAN, 0x01);
		mdelay(15);
		serigo(GORDON_REG_LCDIFCTL1, 0x03);
		mdelay(1);
		gordon_state.display_on = TRUE;
	}
}

/*
 * Panel-on callback: on first invocation, configure the GPIOs, pulse
 * the DAC reset line, set up the serial bus and run the full power-up
 * and display-on sequences.  Subsequent calls are no-ops until the
 * panel is turned off again.
 */
static int lcdc_gordon_panel_on(struct platform_device *pdev)
{
	if (!gordon_state.disp_initialized) {
		/* Configure reset GPIO that drives DAC */
		lcdc_gordon_pdata->panel_config_gpio(1);
		spi_dac = *(lcdc_gordon_pdata->gpio_num + 4);
		gpio_set_value(spi_dac, 0);
		udelay(15);
		gpio_set_value(spi_dac, 1);
		spi_init();	/* LCD needs SPI */
		gordon_disp_powerup();
		gordon_disp_on();
		gordon_state.disp_initialized = TRUE;
	}
	return 0;
}

/*
 * Panel-off callback: reverse of lcdc_gordon_panel_on() - stop the
 * LCD interface, then wind the charge pump back down in stages before
 * releasing the GPIOs.
 */
static int lcdc_gordon_panel_off(struct platform_device *pdev)
{
	if (gordon_state.disp_powered_up && gordon_state.display_on) {
		serigo(GORDON_REG_LCDIFCTL2, 0x7B);
		serigo(GORDON_REG_VALTRAN, 0x01);
		serigo(GORDON_REG_LCDIFCTL1, 0x02);
		serigo(GORDON_REG_LCDIFCTL3, 0x01);
		mdelay(20);
		serigo(GORDON_REG_VALTRAN, 0x01);
		serigo(GORDON_REG_IMGCTL1, 0x01);
		serigo(GORDON_REG_LCDIFCTL1, 0x00);
		mdelay(20);

		serigo(GORDON_REG_POWCTL, 0x1F);
		mdelay(40);
		serigo(GORDON_REG_POWCTL, 0x07);
		mdelay(40);
		serigo(GORDON_REG_POWCTL, 0x03);
		mdelay(40);
		serigo(GORDON_REG_POWCTL, 0x00);
		mdelay(40);
		lcdc_gordon_pdata->panel_config_gpio(0);
		gordon_state.display_on = FALSE;
		gordon_state.disp_initialized = FALSE;
	}
	return 0;
}

/*
 * Backlight control: levels <= 1 switch the backlight off, anything
 * higher switches it on.  Intermediate brightness levels are not
 * supported by the hardware path used here.
 */
static void lcdc_gordon_set_backlight(struct msm_fb_data_type *mfd)
{
	int bl_level = mfd->bl_level;

	if (bl_level <= 1) {
		/* keep back light OFF */
		serigo(GORDON_REG_LCDIFCTL2, 0x0B);
		udelay(15);
		serigo(GORDON_REG_VALTRAN, 0x01);
	} else {
		/* keep back light ON */
		serigo(GORDON_REG_LCDIFCTL2, 0x7B);
		udelay(15);
		serigo(GORDON_REG_VALTRAN, 0x01);
	}
}

/*
 * Platform-driver probe.  pdev id 0 carries the board platform data;
 * id 1 is the panel device registered below, which is handed to the
 * framebuffer core.
 *
 * FIX: this was annotated __init, but a .probe callback may be invoked
 * after .init.text has been discarded (section mismatch); the
 * annotation has been dropped.
 */
static int gordon_probe(struct platform_device *pdev)
{
	if (pdev->id == 0) {
		lcdc_gordon_pdata = pdev->dev.platform_data;
		return 0;
	}
	msm_fb_add_device(pdev);
	return 0;
}

static struct platform_driver this_driver = {
	.probe  = gordon_probe,
	.driver = {
		.name   = "lcdc_gordon_vga",
	},
};

static struct msm_fb_panel_data gordon_panel_data = {
	.on = lcdc_gordon_panel_on,
	.off = lcdc_gordon_panel_off,
	.set_backlight = lcdc_gordon_set_backlight,
};

static struct platform_device this_device = {
	.name   = "lcdc_gordon_vga",
	.id	= 1,
	.dev	= {
		.platform_data = &gordon_panel_data,
	}
};

/*
 * Module init: register the driver, fill in the fixed 480x640 LCDC
 * panel timing description, then register the panel device.  On device
 * registration failure the driver registration is rolled back.
 */
static int __init lcdc_gordon_panel_init(void)
{
	int ret;
	struct msm_panel_info *pinfo;

#ifdef CONFIG_FB_MSM_TRY_MDDI_CATCH_LCDC_PRISM
	if (msm_fb_detect_client("lcdc_gordon_vga"))
		return 0;
#endif
	ret = platform_driver_register(&this_driver);
	if (ret)
		return ret;

	pinfo = &gordon_panel_data.panel_info;
	pinfo->xres = 480;
	pinfo->yres = 640;
	pinfo->type = LCDC_PANEL;
	pinfo->pdest = DISPLAY_1;
	pinfo->wait_cycle = 0;
	pinfo->bpp = 24;
	pinfo->fb_num = 2;
	pinfo->clk_rate = 24500000;
	pinfo->bl_max = 4;
	pinfo->bl_min = 1;

	pinfo->lcdc.h_back_porch = 84;
	pinfo->lcdc.h_front_porch = 33;
	pinfo->lcdc.h_pulse_width = 60;
	pinfo->lcdc.v_back_porch = 0;
	pinfo->lcdc.v_front_porch = 2;
	pinfo->lcdc.v_pulse_width = 2;
	pinfo->lcdc.border_clr = 0;		/* blk */
	pinfo->lcdc.underflow_clr = 0xff;	/* blue */
	pinfo->lcdc.hsync_skew = 0;

	ret = platform_device_register(&this_device);
	if (ret)
		platform_driver_unregister(&this_driver);

	return ret;
}

module_init(lcdc_gordon_panel_init);
gpl-2.0
Schischu/xoom-ElementalX
drivers/staging/msm/mddi_prism.c
3059
2763
/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

/*
 * MDDI panel driver for the "Prism" WVGA (800x480) panel.
 */

#include "msm_fb.h"
#include "mddihost.h"
#include "mddihosti.h"

static int prism_lcd_on(struct platform_device *pdev);
static int prism_lcd_off(struct platform_device *pdev);

/* Panel-on hook: program the MDP pixel-data attributes for the
 * primary display; the panel itself needs no further setup. */
static int prism_lcd_on(struct platform_device *pdev)
{
	/* Set the MDP pixel data attributes for Primary Display */
	mddi_host_write_pix_attr_reg(0x00C3);
	return 0;
}

/* Panel-off hook: nothing to do for this panel. */
static int prism_lcd_off(struct platform_device *pdev)
{
	return 0;
}

/*
 * Platform-driver probe: just hand the device to the framebuffer core.
 *
 * FIX: this was annotated __init, but a .probe callback may be invoked
 * after .init.text has been discarded (section mismatch); the
 * annotation has been dropped.
 */
static int prism_probe(struct platform_device *pdev)
{
	msm_fb_add_device(pdev);
	return 0;
}

static struct platform_driver this_driver = {
	.probe  = prism_probe,
	.driver = {
		.name   = "mddi_prism_wvga",
	},
};

static struct msm_fb_panel_data prism_panel_data = {
	.on = prism_lcd_on,
	.off = prism_lcd_off,
};

static struct platform_device this_device = {
	.name   = "mddi_prism_wvga",
	.id	= 0,
	.dev	= {
		.platform_data = &prism_panel_data,
	}
};

/*
 * Module init: optionally auto-detect the attached MDDI client (bail
 * out if the client id does not match this panel), then register the
 * driver, fill in the fixed WVGA panel timing description and register
 * the panel device.  On device registration failure the driver
 * registration is rolled back.
 */
static int __init prism_init(void)
{
	int ret;
	struct msm_panel_info *pinfo;

#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
	u32 id;

	ret = msm_fb_detect_client("mddi_prism_wvga");
	if (ret == -ENODEV)
		return 0;

	if (ret) {
		id = mddi_get_client_id();
		/* Not a Prism client: wrong manufacturer id, or a product
		 * id explicitly excluded here. */
		if (((id >> 16) != 0x4474) || ((id & 0xffff) == 0x8960))
			return 0;
	}
#endif
	ret = platform_driver_register(&this_driver);
	if (!ret) {
		pinfo = &prism_panel_data.panel_info;
		pinfo->xres = 800;
		pinfo->yres = 480;
		pinfo->type = MDDI_PANEL;
		pinfo->pdest = DISPLAY_1;
		pinfo->mddi.vdopkt = MDDI_DEFAULT_PRIM_PIX_ATTR;
		pinfo->wait_cycle = 0;
		pinfo->bpp = 18;
		pinfo->fb_num = 2;
		pinfo->clk_rate = 153600000;
		pinfo->clk_min = 150000000;
		pinfo->clk_max = 160000000;
		pinfo->lcd.vsync_enable = TRUE;
		pinfo->lcd.refx100 = 6050;
		pinfo->lcd.v_back_porch = 23;
		pinfo->lcd.v_front_porch = 20;
		pinfo->lcd.v_pulse_width = 105;
		pinfo->lcd.hw_vsync_mode = TRUE;
		pinfo->lcd.vsync_notifier_period = 0;

		ret = platform_device_register(&this_device);
		if (ret)
			platform_driver_unregister(&this_driver);
	}

	return ret;
}

module_init(prism_init);
gpl-2.0
Team-Hydra/sultan-kernel-pyramid-pure-3.4
arch/mn10300/unit-asb2305/pci.c
4595
14151
/* ASB2305 PCI support * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * Derived from arch/i386/kernel/pci-pc.c * (c) 1999--2000 Martin Mares <mj@suse.cz> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/delay.h> #include <asm/io.h> #include "pci-asb2305.h" unsigned int pci_probe = 1; int pcibios_last_bus = -1; struct pci_bus *pci_root_bus; struct pci_ops *pci_root_ops; /* * The accessible PCI window does not cover the entire CPU address space, but * there are devices we want to access outside of that window, so we need to * insert specific PCI bus resources instead of using the platform-level bus * resources directly for the PCI root bus. * * These are configured and inserted by pcibios_init(). 
*/ static struct resource pci_ioport_resource = { .name = "PCI IO", .start = 0xbe000000, .end = 0xbe03ffff, .flags = IORESOURCE_IO, }; static struct resource pci_iomem_resource = { .name = "PCI mem", .start = 0xb8000000, .end = 0xbbffffff, .flags = IORESOURCE_MEM, }; /* * Functions for accessing PCI configuration space */ #define CONFIG_CMD(bus, devfn, where) \ (0x80000000 | (bus->number << 16) | (devfn << 8) | (where & ~3)) #define MEM_PAGING_REG (*(volatile __u32 *) 0xBFFFFFF4) #define CONFIG_ADDRESS (*(volatile __u32 *) 0xBFFFFFF8) #define CONFIG_DATAL(X) (*(volatile __u32 *) 0xBFFFFFFC) #define CONFIG_DATAW(X) (*(volatile __u16 *) (0xBFFFFFFC + ((X) & 2))) #define CONFIG_DATAB(X) (*(volatile __u8 *) (0xBFFFFFFC + ((X) & 3))) #define BRIDGEREGB(X) (*(volatile __u8 *) (0xBE040000 + (X))) #define BRIDGEREGW(X) (*(volatile __u16 *) (0xBE040000 + (X))) #define BRIDGEREGL(X) (*(volatile __u32 *) (0xBE040000 + (X))) static inline int __query(const struct pci_bus *bus, unsigned int devfn) { #if 0 return bus->number == 0 && (devfn == PCI_DEVFN(0, 0)); return bus->number == 1; return bus->number == 0 && (devfn == PCI_DEVFN(2, 0) || devfn == PCI_DEVFN(3, 0)); #endif return 1; } /* * */ static int pci_ampci_read_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u32 *_value) { u32 rawval, value; if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { value = BRIDGEREGB(where); __pcbdebug("=> %02hx", &BRIDGEREGL(where), value); } else { CONFIG_ADDRESS = CONFIG_CMD(bus, devfn, where); rawval = CONFIG_ADDRESS; value = CONFIG_DATAB(where); if (__query(bus, devfn)) __pcidebug("=> %02hx", bus, devfn, where, value); } *_value = value; return PCIBIOS_SUCCESSFUL; } static int pci_ampci_read_config_word(struct pci_bus *bus, unsigned int devfn, int where, u32 *_value) { u32 rawval, value; if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { value = BRIDGEREGW(where); __pcbdebug("=> %04hx", &BRIDGEREGL(where), value); } else { CONFIG_ADDRESS = CONFIG_CMD(bus, devfn, where); 
rawval = CONFIG_ADDRESS; value = CONFIG_DATAW(where); if (__query(bus, devfn)) __pcidebug("=> %04hx", bus, devfn, where, value); } *_value = value; return PCIBIOS_SUCCESSFUL; } static int pci_ampci_read_config_dword(struct pci_bus *bus, unsigned int devfn, int where, u32 *_value) { u32 rawval, value; if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { value = BRIDGEREGL(where); __pcbdebug("=> %08x", &BRIDGEREGL(where), value); } else { CONFIG_ADDRESS = CONFIG_CMD(bus, devfn, where); rawval = CONFIG_ADDRESS; value = CONFIG_DATAL(where); if (__query(bus, devfn)) __pcidebug("=> %08x", bus, devfn, where, value); } *_value = value; return PCIBIOS_SUCCESSFUL; } static int pci_ampci_write_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u8 value) { u32 rawval; if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { __pcbdebug("<= %02x", &BRIDGEREGB(where), value); BRIDGEREGB(where) = value; } else { if (bus->number == 0 && (devfn == PCI_DEVFN(2, 0) || devfn == PCI_DEVFN(3, 0)) ) __pcidebug("<= %02x", bus, devfn, where, value); CONFIG_ADDRESS = CONFIG_CMD(bus, devfn, where); rawval = CONFIG_ADDRESS; CONFIG_DATAB(where) = value; } return PCIBIOS_SUCCESSFUL; } static int pci_ampci_write_config_word(struct pci_bus *bus, unsigned int devfn, int where, u16 value) { u32 rawval; if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { __pcbdebug("<= %04hx", &BRIDGEREGW(where), value); BRIDGEREGW(where) = value; } else { if (__query(bus, devfn)) __pcidebug("<= %04hx", bus, devfn, where, value); CONFIG_ADDRESS = CONFIG_CMD(bus, devfn, where); rawval = CONFIG_ADDRESS; CONFIG_DATAW(where) = value; } return PCIBIOS_SUCCESSFUL; } static int pci_ampci_write_config_dword(struct pci_bus *bus, unsigned int devfn, int where, u32 value) { u32 rawval; if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { __pcbdebug("<= %08x", &BRIDGEREGL(where), value); BRIDGEREGL(where) = value; } else { if (__query(bus, devfn)) __pcidebug("<= %08x", bus, devfn, where, value); CONFIG_ADDRESS = 
CONFIG_CMD(bus, devfn, where); rawval = CONFIG_ADDRESS; CONFIG_DATAL(where) = value; } return PCIBIOS_SUCCESSFUL; } static int pci_ampci_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { switch (size) { case 1: return pci_ampci_read_config_byte(bus, devfn, where, val); case 2: return pci_ampci_read_config_word(bus, devfn, where, val); case 4: return pci_ampci_read_config_dword(bus, devfn, where, val); default: BUG(); return -EOPNOTSUPP; } } static int pci_ampci_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { switch (size) { case 1: return pci_ampci_write_config_byte(bus, devfn, where, val); case 2: return pci_ampci_write_config_word(bus, devfn, where, val); case 4: return pci_ampci_write_config_dword(bus, devfn, where, val); default: BUG(); return -EOPNOTSUPP; } } static struct pci_ops pci_direct_ampci = { pci_ampci_read_config, pci_ampci_write_config, }; /* * Before we decide to use direct hardware access mechanisms, we try to do some * trivial checks to ensure it at least _seems_ to be working -- we just test * whether bus 00 contains a host bridge (this is similar to checking * techniques used in XFree86, but ours should be more reliable since we * attempt to make use of direct access hints provided by the PCI BIOS). * * This should be close to trivial, but it isn't, because there are buggy * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID. */ static int __init pci_sanity_check(struct pci_ops *o) { struct pci_bus bus; /* Fake bus and device */ u32 x; bus.number = 0; if ((!o->read(&bus, 0, PCI_CLASS_DEVICE, 2, &x) && (x == PCI_CLASS_BRIDGE_HOST || x == PCI_CLASS_DISPLAY_VGA)) || (!o->read(&bus, 0, PCI_VENDOR_ID, 2, &x) && (x == PCI_VENDOR_ID_INTEL || x == PCI_VENDOR_ID_COMPAQ))) return 1; printk(KERN_ERR "PCI: Sanity check failed\n"); return 0; } static int __init pci_check_direct(void) { unsigned long flags; local_irq_save(flags); /* * Check if access works. 
*/ if (pci_sanity_check(&pci_direct_ampci)) { local_irq_restore(flags); printk(KERN_INFO "PCI: Using configuration ampci\n"); request_mem_region(0xBE040000, 256, "AMPCI bridge"); request_mem_region(0xBFFFFFF4, 12, "PCI ampci"); request_mem_region(0xBC000000, 32 * 1024 * 1024, "PCI SRAM"); return 0; } local_irq_restore(flags); return -ENODEV; } static int __devinit is_valid_resource(struct pci_dev *dev, int idx) { unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM; struct resource *devr = &dev->resource[idx], *busr; if (dev->bus) { pci_bus_for_each_resource(dev->bus, busr, i) { if (!busr || (busr->flags ^ devr->flags) & type_mask) continue; if (devr->start && devr->start >= busr->start && devr->end <= busr->end) return 1; } } return 0; } static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev) { struct pci_bus_region region; int i; int limit; if (dev->bus->number != 0) return; limit = (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) ? PCI_BRIDGE_RESOURCES : PCI_NUM_RESOURCES; for (i = 0; i < limit; i++) { if (!dev->resource[i].flags) continue; if (is_valid_resource(dev, i)) pci_claim_resource(dev, i); } } /* * Called after each bus is probed, but before its children * are examined. */ void __devinit pcibios_fixup_bus(struct pci_bus *bus) { struct pci_dev *dev; if (bus->self) { pci_read_bridge_bases(bus); pcibios_fixup_device_resources(bus->self); } list_for_each_entry(dev, &bus->devices, bus_list) pcibios_fixup_device_resources(dev); } /* * Initialization. Try all known PCI access methods. Note that we support * using both PCI BIOS and direct access: in such cases, we use I/O ports * to access config space, but we still keep BIOS order of cards to be * compatible with 2.0.X. This should go away some day. 
*/ static int __init pcibios_init(void) { resource_size_t io_offset, mem_offset; LIST_HEAD(resources); ioport_resource.start = 0xA0000000; ioport_resource.end = 0xDFFFFFFF; iomem_resource.start = 0xA0000000; iomem_resource.end = 0xDFFFFFFF; if (insert_resource(&iomem_resource, &pci_iomem_resource) < 0) panic("Unable to insert PCI IOMEM resource\n"); if (insert_resource(&ioport_resource, &pci_ioport_resource) < 0) panic("Unable to insert PCI IOPORT resource\n"); if (!pci_probe) return 0; if (pci_check_direct() < 0) { printk(KERN_WARNING "PCI: No PCI bus detected\n"); return 0; } printk(KERN_INFO "PCI: Probing PCI hardware [mempage %08x]\n", MEM_PAGING_REG); io_offset = pci_ioport_resource.start - (pci_ioport_resource.start & 0x00ffffff); mem_offset = pci_iomem_resource.start - ((pci_iomem_resource.start & 0x03ffffff) | MEM_PAGING_REG); pci_add_resource_offset(&resources, &pci_ioport_resource, io_offset); pci_add_resource_offset(&resources, &pci_iomem_resource, mem_offset); pci_root_bus = pci_scan_root_bus(NULL, 0, &pci_direct_ampci, NULL, &resources); pcibios_irq_init(); pcibios_fixup_irqs(); pcibios_resource_survey(); return 0; } arch_initcall(pcibios_init); char *__init pcibios_setup(char *str) { if (!strcmp(str, "off")) { pci_probe = 0; return NULL; } else if (!strncmp(str, "lastbus=", 8)) { pcibios_last_bus = simple_strtol(str+8, NULL, 0); return NULL; } return str; } int pcibios_enable_device(struct pci_dev *dev, int mask) { int err; err = pci_enable_resources(dev, mask); if (err == 0) pcibios_enable_irq(dev); return err; } /* * disable the ethernet chipset */ static void __init unit_disable_pcnet(struct pci_bus *bus, struct pci_ops *o) { u32 x; bus->number = 0; o->read (bus, PCI_DEVFN(2, 0), PCI_VENDOR_ID, 4, &x); o->read (bus, PCI_DEVFN(2, 0), PCI_COMMAND, 2, &x); x |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_SERR | PCI_COMMAND_PARITY; o->write(bus, PCI_DEVFN(2, 0), PCI_COMMAND, 2, x); o->read (bus, PCI_DEVFN(2, 0), PCI_COMMAND, 
2, &x); o->write(bus, PCI_DEVFN(2, 0), PCI_BASE_ADDRESS_0, 4, 0x00030001); o->read (bus, PCI_DEVFN(2, 0), PCI_BASE_ADDRESS_0, 4, &x); #define RDP (*(volatile u32 *) 0xBE030010) #define RAP (*(volatile u32 *) 0xBE030014) #define __set_RAP(X) do { RAP = (X); x = RAP; } while (0) #define __set_RDP(X) do { RDP = (X); x = RDP; } while (0) #define __get_RDP() ({ RDP & 0xffff; }) __set_RAP(0); __set_RDP(0x0004); /* CSR0 = STOP */ __set_RAP(88); /* check CSR88 indicates an Am79C973 */ BUG_ON(__get_RDP() != 0x5003); for (x = 0; x < 100; x++) asm volatile("nop"); __set_RDP(0x0004); /* CSR0 = STOP */ } /* * initialise the unit hardware */ asmlinkage void __init unit_pci_init(void) { struct pci_bus bus; /* Fake bus and device */ struct pci_ops *o = &pci_direct_ampci; u32 x; set_intr_level(XIRQ1, NUM2GxICR_LEVEL(CONFIG_PCI_IRQ_LEVEL)); memset(&bus, 0, sizeof(bus)); MEM_PAGING_REG = 0xE8000000; /* we need to set up the bridge _now_ or we won't be able to access the * PCI config registers */ BRIDGEREGW(PCI_COMMAND) |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_MEMORY | PCI_COMMAND_IO | PCI_COMMAND_MASTER; BRIDGEREGW(PCI_STATUS) = 0xF800; BRIDGEREGB(PCI_LATENCY_TIMER) = 0x10; BRIDGEREGL(PCI_BASE_ADDRESS_0) = 0x80000000; BRIDGEREGB(PCI_INTERRUPT_LINE) = 1; BRIDGEREGL(0x48) = 0x98000000; /* AMPCI base addr */ BRIDGEREGB(0x41) = 0x00; /* secondary bus * number */ BRIDGEREGB(0x42) = 0x01; /* subordinate bus * number */ BRIDGEREGB(0x44) = 0x01; BRIDGEREGL(0x50) = 0x00000001; BRIDGEREGL(0x58) = 0x00001002; BRIDGEREGL(0x5C) = 0x00000011; /* we also need to set up the PCI-PCI bridge */ bus.number = 0; /* IO: 0x00000000-0x00020000 */ o->read (&bus, PCI_DEVFN(3, 0), PCI_COMMAND, 2, &x); x |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_SERR | PCI_COMMAND_PARITY; o->write(&bus, PCI_DEVFN(3, 0), PCI_COMMAND, 2, x); o->read (&bus, PCI_DEVFN(3, 0), PCI_IO_BASE, 1, &x); o->read (&bus, PCI_DEVFN(3, 0), PCI_IO_BASE_UPPER16, 4, &x); o->read (&bus, PCI_DEVFN(3, 0), 
PCI_MEMORY_BASE, 4, &x); o->read (&bus, PCI_DEVFN(3, 0), PCI_PREF_MEMORY_BASE, 4, &x); o->write(&bus, PCI_DEVFN(3, 0), PCI_IO_BASE, 1, 0x01); o->read (&bus, PCI_DEVFN(3, 0), PCI_IO_BASE, 1, &x); o->write(&bus, PCI_DEVFN(3, 0), PCI_IO_BASE_UPPER16, 4, 0x00020000); o->read (&bus, PCI_DEVFN(3, 0), PCI_IO_BASE_UPPER16, 4, &x); o->write(&bus, PCI_DEVFN(3, 0), PCI_MEMORY_BASE, 4, 0xEBB0EA00); o->read (&bus, PCI_DEVFN(3, 0), PCI_MEMORY_BASE, 4, &x); o->write(&bus, PCI_DEVFN(3, 0), PCI_PREF_MEMORY_BASE, 4, 0xE9F0E800); o->read (&bus, PCI_DEVFN(3, 0), PCI_PREF_MEMORY_BASE, 4, &x); unit_disable_pcnet(&bus, o); }
gpl-2.0
ausdim/GE-Edition-I9505-jfltexx-new
sound/drivers/mtpav.c
5107
20528
/* * MOTU Midi Timepiece ALSA Main routines * Copyright by Michael T. Mayers (c) Jan 09, 2000 * mail: michael@tweakoz.com * Thanks to John Galbraith * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * This driver is for the 'Mark Of The Unicorn' (MOTU) * MidiTimePiece AV multiport MIDI interface * * IOPORTS * ------- * 8 MIDI Ins and 8 MIDI outs * Video Sync In (BNC), Word Sync Out (BNC), * ADAT Sync Out (DB9) * SMPTE in/out (1/4") * 2 programmable pedal/footswitch inputs and 4 programmable MIDI controller knobs. * Macintosh RS422 serial port * RS422 "network" port for ganging multiple MTP's * PC Parallel Port ( which this driver currently uses ) * * MISC FEATURES * ------------- * Hardware MIDI routing, merging, and filtering * MIDI Synchronization to Video, ADAT, SMPTE and other Clock sources * 128 'scene' memories, recallable from MIDI program change * * * ChangeLog * Jun 11 2001 Takashi Iwai <tiwai@suse.de> * - Recoded & debugged * - Added timer interrupt for midi outputs * - hwports is between 1 and 8, which specifies the number of hardware ports. * The three global ports, computer, adat and broadcast ports, are created * always after h/w and remote ports. 
* */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/ioport.h> #include <linux/io.h> #include <linux/moduleparam.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/rawmidi.h> #include <linux/delay.h> /* * globals */ MODULE_AUTHOR("Michael T. Mayers"); MODULE_DESCRIPTION("MOTU MidiTimePiece AV multiport MIDI"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{MOTU,MidiTimePiece AV multiport MIDI}}"); // io resources #define MTPAV_IOBASE 0x378 #define MTPAV_IRQ 7 #define MTPAV_MAX_PORTS 8 static int index = SNDRV_DEFAULT_IDX1; static char *id = SNDRV_DEFAULT_STR1; static long port = MTPAV_IOBASE; /* 0x378, 0x278 */ static int irq = MTPAV_IRQ; /* 7, 5 */ static int hwports = MTPAV_MAX_PORTS; /* use hardware ports 1-8 */ module_param(index, int, 0444); MODULE_PARM_DESC(index, "Index value for MotuMTPAV MIDI."); module_param(id, charp, 0444); MODULE_PARM_DESC(id, "ID string for MotuMTPAV MIDI."); module_param(port, long, 0444); MODULE_PARM_DESC(port, "Parallel port # for MotuMTPAV MIDI."); module_param(irq, int, 0444); MODULE_PARM_DESC(irq, "Parallel IRQ # for MotuMTPAV MIDI."); module_param(hwports, int, 0444); MODULE_PARM_DESC(hwports, "Hardware ports # for MotuMTPAV MIDI."); static struct platform_device *device; /* * defines */ //#define USE_FAKE_MTP // don't actually read/write to MTP device (for debugging without an actual unit) (does not work yet) // parallel port usage masks #define SIGS_BYTE 0x08 #define SIGS_RFD 0x80 #define SIGS_IRQ 0x40 #define SIGS_IN0 0x10 #define SIGS_IN1 0x20 #define SIGC_WRITE 0x04 #define SIGC_READ 0x08 #define SIGC_INTEN 0x10 #define DREG 0 #define SREG 1 #define CREG 2 // #define MTPAV_MODE_INPUT_OPENED 0x01 #define MTPAV_MODE_OUTPUT_OPENED 0x02 #define MTPAV_MODE_INPUT_TRIGGERED 0x04 #define MTPAV_MODE_OUTPUT_TRIGGERED 0x08 #define NUMPORTS (0x12+1) /* */ struct mtpav_port { u8 number; u8 hwport; u8 mode; u8 
running_status; struct snd_rawmidi_substream *input; struct snd_rawmidi_substream *output; }; struct mtpav { struct snd_card *card; unsigned long port; struct resource *res_port; int irq; /* interrupt (for inputs) */ spinlock_t spinlock; int share_irq; /* number of accesses to input interrupts */ int istimer; /* number of accesses to timer interrupts */ struct timer_list timer; /* timer interrupts for outputs */ struct snd_rawmidi *rmidi; int num_ports; /* number of hw ports (1-8) */ struct mtpav_port ports[NUMPORTS]; /* all ports including computer, adat and bc */ u32 inmidiport; /* selected input midi port */ u32 inmidistate; /* during midi command 0xf5 */ u32 outmidihwport; /* selected output midi hw port */ }; /* * possible hardware ports (selected by 0xf5 port message) * 0x00 all ports * 0x01 .. 0x08 this MTP's ports 1..8 * 0x09 .. 0x10 networked MTP's ports (9..16) * 0x11 networked MTP's computer port * 0x63 to ADAT * * mappig: * subdevice 0 - (X-1) ports * X - (2*X-1) networked ports * X computer * X+1 ADAT * X+2 all ports * * where X = chip->num_ports */ #define MTPAV_PIDX_COMPUTER 0 #define MTPAV_PIDX_ADAT 1 #define MTPAV_PIDX_BROADCAST 2 static int translate_subdevice_to_hwport(struct mtpav *chip, int subdev) { if (subdev < 0) return 0x01; /* invalid - use port 0 as default */ else if (subdev < chip->num_ports) return subdev + 1; /* single mtp port */ else if (subdev < chip->num_ports * 2) return subdev - chip->num_ports + 0x09; /* remote port */ else if (subdev == chip->num_ports * 2 + MTPAV_PIDX_COMPUTER) return 0x11; /* computer port */ else if (subdev == chip->num_ports + MTPAV_PIDX_ADAT) return 0x63; /* ADAT */ return 0; /* all ports */ } static int translate_hwport_to_subdevice(struct mtpav *chip, int hwport) { int p; if (hwport <= 0x00) /* all ports */ return chip->num_ports + MTPAV_PIDX_BROADCAST; else if (hwport <= 0x08) { /* single port */ p = hwport - 1; if (p >= chip->num_ports) p = 0; return p; } else if (hwport <= 0x10) { /* remote port */ p 
= hwport - 0x09 + chip->num_ports; if (p >= chip->num_ports * 2) p = chip->num_ports; return p; } else if (hwport == 0x11) /* computer port */ return chip->num_ports + MTPAV_PIDX_COMPUTER; else /* ADAT */ return chip->num_ports + MTPAV_PIDX_ADAT; } /* */ static u8 snd_mtpav_getreg(struct mtpav *chip, u16 reg) { u8 rval = 0; if (reg == SREG) { rval = inb(chip->port + SREG); rval = (rval & 0xf8); } else if (reg == CREG) { rval = inb(chip->port + CREG); rval = (rval & 0x1c); } return rval; } /* */ static inline void snd_mtpav_mputreg(struct mtpav *chip, u16 reg, u8 val) { if (reg == DREG || reg == CREG) outb(val, chip->port + reg); } /* */ static void snd_mtpav_wait_rfdhi(struct mtpav *chip) { int counts = 10000; u8 sbyte; sbyte = snd_mtpav_getreg(chip, SREG); while (!(sbyte & SIGS_RFD) && counts--) { sbyte = snd_mtpav_getreg(chip, SREG); udelay(10); } } static void snd_mtpav_send_byte(struct mtpav *chip, u8 byte) { u8 tcbyt; u8 clrwrite; u8 setwrite; snd_mtpav_wait_rfdhi(chip); ///////////////// tcbyt = snd_mtpav_getreg(chip, CREG); clrwrite = tcbyt & (SIGC_WRITE ^ 0xff); setwrite = tcbyt | SIGC_WRITE; snd_mtpav_mputreg(chip, DREG, byte); snd_mtpav_mputreg(chip, CREG, clrwrite); // clear write bit snd_mtpav_mputreg(chip, CREG, setwrite); // set write bit } /* */ /* call this with spin lock held */ static void snd_mtpav_output_port_write(struct mtpav *mtp_card, struct mtpav_port *portp, struct snd_rawmidi_substream *substream) { u8 outbyte; // Get the outbyte first, so we can emulate running status if // necessary if (snd_rawmidi_transmit(substream, &outbyte, 1) != 1) return; // send port change command if necessary if (portp->hwport != mtp_card->outmidihwport) { mtp_card->outmidihwport = portp->hwport; snd_mtpav_send_byte(mtp_card, 0xf5); snd_mtpav_send_byte(mtp_card, portp->hwport); /* snd_printk(KERN_DEBUG "new outport: 0x%x\n", (unsigned int) portp->hwport); */ if (!(outbyte & 0x80) && portp->running_status) snd_mtpav_send_byte(mtp_card, portp->running_status); } 
// send data do { if (outbyte & 0x80) portp->running_status = outbyte; snd_mtpav_send_byte(mtp_card, outbyte); } while (snd_rawmidi_transmit(substream, &outbyte, 1) == 1); } static void snd_mtpav_output_write(struct snd_rawmidi_substream *substream) { struct mtpav *mtp_card = substream->rmidi->private_data; struct mtpav_port *portp = &mtp_card->ports[substream->number]; unsigned long flags; spin_lock_irqsave(&mtp_card->spinlock, flags); snd_mtpav_output_port_write(mtp_card, portp, substream); spin_unlock_irqrestore(&mtp_card->spinlock, flags); } /* * mtpav control */ static void snd_mtpav_portscan(struct mtpav *chip) // put mtp into smart routing mode { u8 p; for (p = 0; p < 8; p++) { snd_mtpav_send_byte(chip, 0xf5); snd_mtpav_send_byte(chip, p); snd_mtpav_send_byte(chip, 0xfe); } } /* */ static int snd_mtpav_input_open(struct snd_rawmidi_substream *substream) { struct mtpav *mtp_card = substream->rmidi->private_data; struct mtpav_port *portp = &mtp_card->ports[substream->number]; unsigned long flags; spin_lock_irqsave(&mtp_card->spinlock, flags); portp->mode |= MTPAV_MODE_INPUT_OPENED; portp->input = substream; if (mtp_card->share_irq++ == 0) snd_mtpav_mputreg(mtp_card, CREG, (SIGC_INTEN | SIGC_WRITE)); // enable pport interrupts spin_unlock_irqrestore(&mtp_card->spinlock, flags); return 0; } /* */ static int snd_mtpav_input_close(struct snd_rawmidi_substream *substream) { struct mtpav *mtp_card = substream->rmidi->private_data; struct mtpav_port *portp = &mtp_card->ports[substream->number]; unsigned long flags; spin_lock_irqsave(&mtp_card->spinlock, flags); portp->mode &= ~MTPAV_MODE_INPUT_OPENED; portp->input = NULL; if (--mtp_card->share_irq == 0) snd_mtpav_mputreg(mtp_card, CREG, 0); // disable pport interrupts spin_unlock_irqrestore(&mtp_card->spinlock, flags); return 0; } /* */ static void snd_mtpav_input_trigger(struct snd_rawmidi_substream *substream, int up) { struct mtpav *mtp_card = substream->rmidi->private_data; struct mtpav_port *portp = 
&mtp_card->ports[substream->number]; unsigned long flags; spin_lock_irqsave(&mtp_card->spinlock, flags); if (up) portp->mode |= MTPAV_MODE_INPUT_TRIGGERED; else portp->mode &= ~MTPAV_MODE_INPUT_TRIGGERED; spin_unlock_irqrestore(&mtp_card->spinlock, flags); } /* * timer interrupt for outputs */ static void snd_mtpav_output_timer(unsigned long data) { unsigned long flags; struct mtpav *chip = (struct mtpav *)data; int p; spin_lock_irqsave(&chip->spinlock, flags); /* reprogram timer */ chip->timer.expires = 1 + jiffies; add_timer(&chip->timer); /* process each port */ for (p = 0; p <= chip->num_ports * 2 + MTPAV_PIDX_BROADCAST; p++) { struct mtpav_port *portp = &chip->ports[p]; if ((portp->mode & MTPAV_MODE_OUTPUT_TRIGGERED) && portp->output) snd_mtpav_output_port_write(chip, portp, portp->output); } spin_unlock_irqrestore(&chip->spinlock, flags); } /* spinlock held! */ static void snd_mtpav_add_output_timer(struct mtpav *chip) { chip->timer.expires = 1 + jiffies; add_timer(&chip->timer); } /* spinlock held! 
*/ static void snd_mtpav_remove_output_timer(struct mtpav *chip) { del_timer(&chip->timer); } /* */ static int snd_mtpav_output_open(struct snd_rawmidi_substream *substream) { struct mtpav *mtp_card = substream->rmidi->private_data; struct mtpav_port *portp = &mtp_card->ports[substream->number]; unsigned long flags; spin_lock_irqsave(&mtp_card->spinlock, flags); portp->mode |= MTPAV_MODE_OUTPUT_OPENED; portp->output = substream; spin_unlock_irqrestore(&mtp_card->spinlock, flags); return 0; }; /* */ static int snd_mtpav_output_close(struct snd_rawmidi_substream *substream) { struct mtpav *mtp_card = substream->rmidi->private_data; struct mtpav_port *portp = &mtp_card->ports[substream->number]; unsigned long flags; spin_lock_irqsave(&mtp_card->spinlock, flags); portp->mode &= ~MTPAV_MODE_OUTPUT_OPENED; portp->output = NULL; spin_unlock_irqrestore(&mtp_card->spinlock, flags); return 0; }; /* */ static void snd_mtpav_output_trigger(struct snd_rawmidi_substream *substream, int up) { struct mtpav *mtp_card = substream->rmidi->private_data; struct mtpav_port *portp = &mtp_card->ports[substream->number]; unsigned long flags; spin_lock_irqsave(&mtp_card->spinlock, flags); if (up) { if (! 
(portp->mode & MTPAV_MODE_OUTPUT_TRIGGERED)) { if (mtp_card->istimer++ == 0) snd_mtpav_add_output_timer(mtp_card); portp->mode |= MTPAV_MODE_OUTPUT_TRIGGERED; } } else { portp->mode &= ~MTPAV_MODE_OUTPUT_TRIGGERED; if (--mtp_card->istimer == 0) snd_mtpav_remove_output_timer(mtp_card); } spin_unlock_irqrestore(&mtp_card->spinlock, flags); if (up) snd_mtpav_output_write(substream); } /* * midi interrupt for inputs */ static void snd_mtpav_inmidi_process(struct mtpav *mcrd, u8 inbyte) { struct mtpav_port *portp; if ((int)mcrd->inmidiport > mcrd->num_ports * 2 + MTPAV_PIDX_BROADCAST) return; portp = &mcrd->ports[mcrd->inmidiport]; if (portp->mode & MTPAV_MODE_INPUT_TRIGGERED) snd_rawmidi_receive(portp->input, &inbyte, 1); } static void snd_mtpav_inmidi_h(struct mtpav *mcrd, u8 inbyte) { if (inbyte >= 0xf8) { /* real-time midi code */ snd_mtpav_inmidi_process(mcrd, inbyte); return; } if (mcrd->inmidistate == 0) { // awaiting command if (inbyte == 0xf5) // MTP port # mcrd->inmidistate = 1; else snd_mtpav_inmidi_process(mcrd, inbyte); } else if (mcrd->inmidistate) { mcrd->inmidiport = translate_hwport_to_subdevice(mcrd, inbyte); mcrd->inmidistate = 0; } } static void snd_mtpav_read_bytes(struct mtpav *mcrd) { u8 clrread, setread; u8 mtp_read_byte; u8 sr, cbyt; int i; u8 sbyt = snd_mtpav_getreg(mcrd, SREG); /* printk(KERN_DEBUG "snd_mtpav_read_bytes() sbyt: 0x%x\n", sbyt); */ if (!(sbyt & SIGS_BYTE)) return; cbyt = snd_mtpav_getreg(mcrd, CREG); clrread = cbyt & (SIGC_READ ^ 0xff); setread = cbyt | SIGC_READ; do { mtp_read_byte = 0; for (i = 0; i < 4; i++) { snd_mtpav_mputreg(mcrd, CREG, setread); sr = snd_mtpav_getreg(mcrd, SREG); snd_mtpav_mputreg(mcrd, CREG, clrread); sr &= SIGS_IN0 | SIGS_IN1; sr >>= 4; mtp_read_byte |= sr << (i * 2); } snd_mtpav_inmidi_h(mcrd, mtp_read_byte); sbyt = snd_mtpav_getreg(mcrd, SREG); } while (sbyt & SIGS_BYTE); } static irqreturn_t snd_mtpav_irqh(int irq, void *dev_id) { struct mtpav *mcard = dev_id; spin_lock(&mcard->spinlock); 
snd_mtpav_read_bytes(mcard); spin_unlock(&mcard->spinlock); return IRQ_HANDLED; } /* * get ISA resources */ static int __devinit snd_mtpav_get_ISA(struct mtpav * mcard) { if ((mcard->res_port = request_region(port, 3, "MotuMTPAV MIDI")) == NULL) { snd_printk(KERN_ERR "MTVAP port 0x%lx is busy\n", port); return -EBUSY; } mcard->port = port; if (request_irq(irq, snd_mtpav_irqh, 0, "MOTU MTPAV", mcard)) { snd_printk(KERN_ERR "MTVAP IRQ %d busy\n", irq); return -EBUSY; } mcard->irq = irq; return 0; } /* */ static struct snd_rawmidi_ops snd_mtpav_output = { .open = snd_mtpav_output_open, .close = snd_mtpav_output_close, .trigger = snd_mtpav_output_trigger, }; static struct snd_rawmidi_ops snd_mtpav_input = { .open = snd_mtpav_input_open, .close = snd_mtpav_input_close, .trigger = snd_mtpav_input_trigger, }; /* * get RAWMIDI resources */ static void __devinit snd_mtpav_set_name(struct mtpav *chip, struct snd_rawmidi_substream *substream) { if (substream->number >= 0 && substream->number < chip->num_ports) sprintf(substream->name, "MTP direct %d", (substream->number % chip->num_ports) + 1); else if (substream->number >= 8 && substream->number < chip->num_ports * 2) sprintf(substream->name, "MTP remote %d", (substream->number % chip->num_ports) + 1); else if (substream->number == chip->num_ports * 2) strcpy(substream->name, "MTP computer"); else if (substream->number == chip->num_ports * 2 + 1) strcpy(substream->name, "MTP ADAT"); else strcpy(substream->name, "MTP broadcast"); } static int __devinit snd_mtpav_get_RAWMIDI(struct mtpav *mcard) { int rval; struct snd_rawmidi *rawmidi; struct snd_rawmidi_substream *substream; struct list_head *list; if (hwports < 1) hwports = 1; else if (hwports > 8) hwports = 8; mcard->num_ports = hwports; if ((rval = snd_rawmidi_new(mcard->card, "MotuMIDI", 0, mcard->num_ports * 2 + MTPAV_PIDX_BROADCAST + 1, mcard->num_ports * 2 + MTPAV_PIDX_BROADCAST + 1, &mcard->rmidi)) < 0) return rval; rawmidi = mcard->rmidi; rawmidi->private_data = 
mcard; list_for_each(list, &rawmidi->streams[SNDRV_RAWMIDI_STREAM_INPUT].substreams) { substream = list_entry(list, struct snd_rawmidi_substream, list); snd_mtpav_set_name(mcard, substream); substream->ops = &snd_mtpav_input; } list_for_each(list, &rawmidi->streams[SNDRV_RAWMIDI_STREAM_OUTPUT].substreams) { substream = list_entry(list, struct snd_rawmidi_substream, list); snd_mtpav_set_name(mcard, substream); substream->ops = &snd_mtpav_output; mcard->ports[substream->number].hwport = translate_subdevice_to_hwport(mcard, substream->number); } rawmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT | SNDRV_RAWMIDI_INFO_INPUT | SNDRV_RAWMIDI_INFO_DUPLEX; sprintf(rawmidi->name, "MTP AV MIDI"); return 0; } /* */ static void snd_mtpav_free(struct snd_card *card) { struct mtpav *crd = card->private_data; unsigned long flags; spin_lock_irqsave(&crd->spinlock, flags); if (crd->istimer > 0) snd_mtpav_remove_output_timer(crd); spin_unlock_irqrestore(&crd->spinlock, flags); if (crd->irq >= 0) free_irq(crd->irq, (void *)crd); release_and_free_resource(crd->res_port); } /* */ static int __devinit snd_mtpav_probe(struct platform_device *dev) { struct snd_card *card; int err; struct mtpav *mtp_card; err = snd_card_create(index, id, THIS_MODULE, sizeof(*mtp_card), &card); if (err < 0) return err; mtp_card = card->private_data; spin_lock_init(&mtp_card->spinlock); init_timer(&mtp_card->timer); mtp_card->card = card; mtp_card->irq = -1; mtp_card->share_irq = 0; mtp_card->inmidistate = 0; mtp_card->outmidihwport = 0xffffffff; init_timer(&mtp_card->timer); mtp_card->timer.function = snd_mtpav_output_timer; mtp_card->timer.data = (unsigned long) mtp_card; card->private_free = snd_mtpav_free; err = snd_mtpav_get_RAWMIDI(mtp_card); if (err < 0) goto __error; mtp_card->inmidiport = mtp_card->num_ports + MTPAV_PIDX_BROADCAST; err = snd_mtpav_get_ISA(mtp_card); if (err < 0) goto __error; strcpy(card->driver, "MTPAV"); strcpy(card->shortname, "MTPAV on parallel port"); snprintf(card->longname, 
sizeof(card->longname), "MTPAV on parallel port at 0x%lx", port); snd_mtpav_portscan(mtp_card); snd_card_set_dev(card, &dev->dev); err = snd_card_register(mtp_card->card); if (err < 0) goto __error; platform_set_drvdata(dev, card); printk(KERN_INFO "Motu MidiTimePiece on parallel port irq: %d ioport: 0x%lx\n", irq, port); return 0; __error: snd_card_free(card); return err; } static int __devexit snd_mtpav_remove(struct platform_device *devptr) { snd_card_free(platform_get_drvdata(devptr)); platform_set_drvdata(devptr, NULL); return 0; } #define SND_MTPAV_DRIVER "snd_mtpav" static struct platform_driver snd_mtpav_driver = { .probe = snd_mtpav_probe, .remove = __devexit_p(snd_mtpav_remove), .driver = { .name = SND_MTPAV_DRIVER }, }; static int __init alsa_card_mtpav_init(void) { int err; if ((err = platform_driver_register(&snd_mtpav_driver)) < 0) return err; device = platform_device_register_simple(SND_MTPAV_DRIVER, -1, NULL, 0); if (!IS_ERR(device)) { if (platform_get_drvdata(device)) return 0; platform_device_unregister(device); err = -ENODEV; } else err = PTR_ERR(device); platform_driver_unregister(&snd_mtpav_driver); return err; } static void __exit alsa_card_mtpav_exit(void) { platform_device_unregister(device); platform_driver_unregister(&snd_mtpav_driver); } module_init(alsa_card_mtpav_init) module_exit(alsa_card_mtpav_exit)
gpl-2.0
EPDCenterSpain/kernel_Archos_97b_Titan
fs/ceph/locks.c
6899
7876
#include <linux/ceph/ceph_debug.h> #include <linux/file.h> #include <linux/namei.h> #include "super.h" #include "mds_client.h" #include <linux/ceph/pagelist.h> /** * Implement fcntl and flock locking functions. */ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file, int cmd, u8 wait, struct file_lock *fl) { struct inode *inode = file->f_dentry->d_inode; struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; struct ceph_mds_request *req; int err; u64 length = 0; req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS); if (IS_ERR(req)) return PTR_ERR(req); req->r_inode = inode; ihold(inode); /* mds requires start and length rather than start and end */ if (LLONG_MAX == fl->fl_end) length = 0; else length = fl->fl_end - fl->fl_start + 1; dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, " "length: %llu, wait: %d, type: %d", (int)lock_type, (int)operation, (u64)fl->fl_pid, fl->fl_start, length, wait, fl->fl_type); req->r_args.filelock_change.rule = lock_type; req->r_args.filelock_change.type = cmd; req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid); /* This should be adjusted, but I'm not sure if namespaces actually get id numbers*/ req->r_args.filelock_change.pid_namespace = cpu_to_le64((u64)(unsigned long)fl->fl_nspid); req->r_args.filelock_change.start = cpu_to_le64(fl->fl_start); req->r_args.filelock_change.length = cpu_to_le64(length); req->r_args.filelock_change.wait = wait; err = ceph_mdsc_do_request(mdsc, inode, req); if ( operation == CEPH_MDS_OP_GETFILELOCK){ fl->fl_pid = le64_to_cpu(req->r_reply_info.filelock_reply->pid); if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type) fl->fl_type = F_RDLCK; else if (CEPH_LOCK_EXCL == req->r_reply_info.filelock_reply->type) fl->fl_type = F_WRLCK; else fl->fl_type = F_UNLCK; fl->fl_start = le64_to_cpu(req->r_reply_info.filelock_reply->start); length = le64_to_cpu(req->r_reply_info.filelock_reply->start) + 
le64_to_cpu(req->r_reply_info.filelock_reply->length); if (length >= 1) fl->fl_end = length -1; else fl->fl_end = 0; } ceph_mdsc_put_request(req); dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, " "length: %llu, wait: %d, type: %d, err code %d", (int)lock_type, (int)operation, (u64)fl->fl_pid, fl->fl_start, length, wait, fl->fl_type, err); return err; } /** * Attempt to set an fcntl lock. * For now, this just goes away to the server. Later it may be more awesome. */ int ceph_lock(struct file *file, int cmd, struct file_lock *fl) { u8 lock_cmd; int err; u8 wait = 0; u16 op = CEPH_MDS_OP_SETFILELOCK; fl->fl_nspid = get_pid(task_tgid(current)); dout("ceph_lock, fl_pid:%d", fl->fl_pid); /* set wait bit as appropriate, then make command as Ceph expects it*/ if (F_SETLKW == cmd) wait = 1; if (F_GETLK == cmd) op = CEPH_MDS_OP_GETFILELOCK; if (F_RDLCK == fl->fl_type) lock_cmd = CEPH_LOCK_SHARED; else if (F_WRLCK == fl->fl_type) lock_cmd = CEPH_LOCK_EXCL; else lock_cmd = CEPH_LOCK_UNLOCK; err = ceph_lock_message(CEPH_LOCK_FCNTL, op, file, lock_cmd, wait, fl); if (!err) { if ( op != CEPH_MDS_OP_GETFILELOCK ){ dout("mds locked, locking locally"); err = posix_lock_file(file, fl, NULL); if (err && (CEPH_MDS_OP_SETFILELOCK == op)) { /* undo! This should only happen if * the kernel detects local * deadlock. 
*/ ceph_lock_message(CEPH_LOCK_FCNTL, op, file, CEPH_LOCK_UNLOCK, 0, fl); dout("got %d on posix_lock_file, undid lock", err); } } } else if (err == -ERESTARTSYS) { dout("undoing lock\n"); ceph_lock_message(CEPH_LOCK_FCNTL, op, file, CEPH_LOCK_UNLOCK, 0, fl); } return err; } int ceph_flock(struct file *file, int cmd, struct file_lock *fl) { u8 lock_cmd; int err; u8 wait = 1; fl->fl_nspid = get_pid(task_tgid(current)); dout("ceph_flock, fl_pid:%d", fl->fl_pid); /* set wait bit, then clear it out of cmd*/ if (cmd & LOCK_NB) wait = 0; cmd = cmd & (LOCK_SH | LOCK_EX | LOCK_UN); /* set command sequence that Ceph wants to see: shared lock, exclusive lock, or unlock */ if (LOCK_SH == cmd) lock_cmd = CEPH_LOCK_SHARED; else if (LOCK_EX == cmd) lock_cmd = CEPH_LOCK_EXCL; else lock_cmd = CEPH_LOCK_UNLOCK; err = ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK, file, lock_cmd, wait, fl); if (!err) { err = flock_lock_file_wait(file, fl); if (err) { ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK, file, CEPH_LOCK_UNLOCK, 0, fl); dout("got %d on flock_lock_file_wait, undid lock", err); } } else if (err == -ERESTARTSYS) { dout("undoing lock\n"); ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK, file, CEPH_LOCK_UNLOCK, 0, fl); } return err; } /** * Must be called with BKL already held. Fills in the passed * counter variables, so you can prepare pagelist metadata before calling * ceph_encode_locks. */ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count) { struct file_lock *lock; *fcntl_count = 0; *flock_count = 0; for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { if (lock->fl_flags & FL_POSIX) ++(*fcntl_count); else if (lock->fl_flags & FL_FLOCK) ++(*flock_count); } dout("counted %d flock locks and %d fcntl locks", *flock_count, *fcntl_count); } /** * Encode the flock and fcntl locks for the given inode into the pagelist. * Format is: #fcntl locks, sequential fcntl locks, #flock locks, * sequential flock locks. 
* Must be called with lock_flocks() already held. * If we encounter more of a specific lock type than expected, * we return the value 1. */ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist, int num_fcntl_locks, int num_flock_locks) { struct file_lock *lock; struct ceph_filelock cephlock; int err = 0; int seen_fcntl = 0; int seen_flock = 0; dout("encoding %d flock and %d fcntl locks", num_flock_locks, num_fcntl_locks); err = ceph_pagelist_append(pagelist, &num_fcntl_locks, sizeof(u32)); if (err) goto fail; for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { if (lock->fl_flags & FL_POSIX) { ++seen_fcntl; if (seen_fcntl > num_fcntl_locks) { err = -ENOSPC; goto fail; } err = lock_to_ceph_filelock(lock, &cephlock); if (err) goto fail; err = ceph_pagelist_append(pagelist, &cephlock, sizeof(struct ceph_filelock)); } if (err) goto fail; } err = ceph_pagelist_append(pagelist, &num_flock_locks, sizeof(u32)); if (err) goto fail; for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { if (lock->fl_flags & FL_FLOCK) { ++seen_flock; if (seen_flock > num_flock_locks) { err = -ENOSPC; goto fail; } err = lock_to_ceph_filelock(lock, &cephlock); if (err) goto fail; err = ceph_pagelist_append(pagelist, &cephlock, sizeof(struct ceph_filelock)); } if (err) goto fail; } fail: return err; } /* * Given a pointer to a lock, convert it to a ceph filelock */ int lock_to_ceph_filelock(struct file_lock *lock, struct ceph_filelock *cephlock) { int err = 0; cephlock->start = cpu_to_le64(lock->fl_start); cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1); cephlock->client = cpu_to_le64(0); cephlock->pid = cpu_to_le64(lock->fl_pid); cephlock->pid_namespace = cpu_to_le64((u64)(unsigned long)lock->fl_nspid); switch (lock->fl_type) { case F_RDLCK: cephlock->type = CEPH_LOCK_SHARED; break; case F_WRLCK: cephlock->type = CEPH_LOCK_EXCL; break; case F_UNLCK: cephlock->type = CEPH_LOCK_UNLOCK; break; default: dout("Have unknown lock type %d", 
lock->fl_type); err = -EINVAL; } return err; }
gpl-2.0
profglavcho/mt6735-kernel-3.10.61
drivers/isdn/sc/timer.c
9203
3858
/* $Id: timer.c,v 1.3.6.1 2001/09/23 22:24:59 kai Exp $ * * Copyright (C) 1996 SpellCaster Telecommunications Inc. * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * For more information, please contact gpl-info@spellcast.com or write: * * SpellCaster Telecommunications Inc. * 5621 Finch Avenue East, Unit #3 * Scarborough, Ontario Canada * M1B 2T9 * +1 (416) 297-8565 * +1 (416) 297-6433 Facsimile */ #include "includes.h" #include "hardware.h" #include "message.h" #include "card.h" /* * Write the proper values into the I/O ports following a reset */ static void setup_ports(int card) { outb((sc_adapter[card]->rambase >> 12), sc_adapter[card]->ioport[EXP_BASE]); /* And the IRQ */ outb((sc_adapter[card]->interrupt | 0x80), sc_adapter[card]->ioport[IRQ_SELECT]); } /* * Timed function to check the status of a previous reset * Must be very fast as this function runs in the context of * an interrupt handler. * * Setup the ioports for the board that were cleared by the reset. * Then, check to see if the signate has been set. Next, set the * signature to a known value and issue a startproc if needed. 
*/ void sc_check_reset(unsigned long data) { unsigned long flags; unsigned long sig; int card = (unsigned int) data; pr_debug("%s: check_timer timer called\n", sc_adapter[card]->devicename); /* Setup the io ports */ setup_ports(card); spin_lock_irqsave(&sc_adapter[card]->lock, flags); outb(sc_adapter[card]->ioport[sc_adapter[card]->shmem_pgport], (sc_adapter[card]->shmem_magic >> 14) | 0x80); sig = (unsigned long) *((unsigned long *)(sc_adapter[card]->rambase + SIG_OFFSET)); /* check the signature */ if (sig == SIGNATURE) { flushreadfifo(card); spin_unlock_irqrestore(&sc_adapter[card]->lock, flags); /* See if we need to do a startproc */ if (sc_adapter[card]->StartOnReset) startproc(card); } else { pr_debug("%s: No signature yet, waiting another %lu jiffies.\n", sc_adapter[card]->devicename, CHECKRESET_TIME); mod_timer(&sc_adapter[card]->reset_timer, jiffies + CHECKRESET_TIME); spin_unlock_irqrestore(&sc_adapter[card]->lock, flags); } } /* * Timed function to check the status of a previous reset * Must be very fast as this function runs in the context of * an interrupt handler. * * Send check sc_adapter->phystat to see if the channels are up * If they are, tell ISDN4Linux that the board is up. If not, * tell IADN4Linux that it is up. Always reset the timer to * fire again (endless loop). 
*/ void check_phystat(unsigned long data) { unsigned long flags; int card = (unsigned int) data; pr_debug("%s: Checking status...\n", sc_adapter[card]->devicename); /* * check the results of the last PhyStat and change only if * has changed drastically */ if (sc_adapter[card]->nphystat && !sc_adapter[card]->phystat) { /* All is well */ pr_debug("PhyStat transition to RUN\n"); pr_info("%s: Switch contacted, transmitter enabled\n", sc_adapter[card]->devicename); indicate_status(card, ISDN_STAT_RUN, 0, NULL); } else if (!sc_adapter[card]->nphystat && sc_adapter[card]->phystat) { /* All is not well */ pr_debug("PhyStat transition to STOP\n"); pr_info("%s: Switch connection lost, transmitter disabled\n", sc_adapter[card]->devicename); indicate_status(card, ISDN_STAT_STOP, 0, NULL); } sc_adapter[card]->phystat = sc_adapter[card]->nphystat; /* Reinitialize the timer */ spin_lock_irqsave(&sc_adapter[card]->lock, flags); mod_timer(&sc_adapter[card]->stat_timer, jiffies + CHECKSTAT_TIME); spin_unlock_irqrestore(&sc_adapter[card]->lock, flags); /* Send a new cePhyStatus message */ sendmessage(card, CEPID, ceReqTypePhy, ceReqClass2, ceReqPhyStatus, 0, 0, NULL); }
gpl-2.0
free-z4u/android_kernel_htc_z4u
drivers/isdn/sc/interrupt.c
9203
6631
/* $Id: interrupt.c,v 1.4.8.3 2001/09/23 22:24:59 kai Exp $ * * Copyright (C) 1996 SpellCaster Telecommunications Inc. * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * For more information, please contact gpl-info@spellcast.com or write: * * SpellCaster Telecommunications Inc. * 5621 Finch Avenue East, Unit #3 * Scarborough, Ontario Canada * M1B 2T9 * +1 (416) 297-8565 * +1 (416) 297-6433 Facsimile */ #include "includes.h" #include "hardware.h" #include "message.h" #include "card.h" #include <linux/interrupt.h> /* * */ irqreturn_t interrupt_handler(int dummy, void *card_inst) { RspMessage rcvmsg; int channel; int card = (int)(unsigned long) card_inst; if (!IS_VALID_CARD(card)) { pr_debug("Invalid param: %d is not a valid card id\n", card); return IRQ_NONE; } pr_debug("%s: Entered Interrupt handler\n", sc_adapter[card]->devicename); /* * Pull all of the waiting messages off the response queue */ while (!receivemessage(card, &rcvmsg)) { /* * Push the message to the adapter structure for * send_and_receive to snoop */ if (sc_adapter[card]->want_async_messages) memcpy(&(sc_adapter[card]->async_msg), &rcvmsg, sizeof(RspMessage)); channel = (unsigned int) rcvmsg.phy_link_no; /* * Trap Invalid request messages */ if (IS_CM_MESSAGE(rcvmsg, 0, 0, Invalid)) { pr_debug("%s: Invalid request Message, rsp_status = %d\n", sc_adapter[card]->devicename, rcvmsg.rsp_status); break; } /* * Check for a linkRead message */ if (IS_CE_MESSAGE(rcvmsg, Lnk, 1, Read)) { pr_debug("%s: Received packet 0x%x bytes long at 0x%lx\n", sc_adapter[card]->devicename, rcvmsg.msg_data.response.msg_len, rcvmsg.msg_data.response.buff_offset); rcvpkt(card, &rcvmsg); continue; } /* * Handle a write acknoledgement */ if (IS_CE_MESSAGE(rcvmsg, Lnk, 1, Write)) { pr_debug("%s: Packet Send ACK on channel %d\n", sc_adapter[card]->devicename, rcvmsg.phy_link_no); sc_adapter[card]->channel[rcvmsg.phy_link_no - 
1].free_sendbufs++; continue; } /* * Handle a connection message */ if (IS_CE_MESSAGE(rcvmsg, Phy, 1, Connect)) { unsigned int callid; setup_parm setup; pr_debug("%s: Connect message: line %d: status %d: cause 0x%x\n", sc_adapter[card]->devicename, rcvmsg.phy_link_no, rcvmsg.rsp_status, rcvmsg.msg_data.byte_array[2]); memcpy(&callid, rcvmsg.msg_data.byte_array, sizeof(int)); if (callid >= 0x8000 && callid <= 0xFFFF) { pr_debug("%s: Got Dial-Out Rsp\n", sc_adapter[card]->devicename); indicate_status(card, ISDN_STAT_DCONN, (unsigned long)rcvmsg.phy_link_no - 1, NULL); } else if (callid >= 0x0000 && callid <= 0x7FFF) { int len; pr_debug("%s: Got Incoming Call\n", sc_adapter[card]->devicename); len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]), sizeof(setup.phone)); if (len >= sizeof(setup.phone)) continue; len = strlcpy(setup.eazmsn, sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn, sizeof(setup.eazmsn)); if (len >= sizeof(setup.eazmsn)) continue; setup.si1 = 7; setup.si2 = 0; setup.plan = 0; setup.screen = 0; indicate_status(card, ISDN_STAT_ICALL, (unsigned long)rcvmsg.phy_link_no - 1, (char *)&setup); indicate_status(card, ISDN_STAT_DCONN, (unsigned long)rcvmsg.phy_link_no - 1, NULL); } continue; } /* * Handle a disconnection message */ if (IS_CE_MESSAGE(rcvmsg, Phy, 1, Disconnect)) { pr_debug("%s: disconnect message: line %d: status %d: cause 0x%x\n", sc_adapter[card]->devicename, rcvmsg.phy_link_no, rcvmsg.rsp_status, rcvmsg.msg_data.byte_array[2]); indicate_status(card, ISDN_STAT_BHUP, (unsigned long)rcvmsg.phy_link_no - 1, NULL); indicate_status(card, ISDN_STAT_DHUP, (unsigned long)rcvmsg.phy_link_no - 1, NULL); continue; } /* * Handle a startProc engine up message */ if (IS_CM_MESSAGE(rcvmsg, 5, 0, MiscEngineUp)) { pr_debug("%s: Received EngineUp message\n", sc_adapter[card]->devicename); sc_adapter[card]->EngineUp = 1; sendmessage(card, CEPID, ceReqTypeCall, ceReqClass0, ceReqCallGetMyNumber, 1, 0, NULL); sendmessage(card, CEPID, ceReqTypeCall, 
ceReqClass0, ceReqCallGetMyNumber, 2, 0, NULL); init_timer(&sc_adapter[card]->stat_timer); sc_adapter[card]->stat_timer.function = check_phystat; sc_adapter[card]->stat_timer.data = card; sc_adapter[card]->stat_timer.expires = jiffies + CHECKSTAT_TIME; add_timer(&sc_adapter[card]->stat_timer); continue; } /* * Start proc response */ if (IS_CM_MESSAGE(rcvmsg, 2, 0, StartProc)) { pr_debug("%s: StartProc Response Status %d\n", sc_adapter[card]->devicename, rcvmsg.rsp_status); continue; } /* * Handle a GetMyNumber Rsp */ if (IS_CE_MESSAGE(rcvmsg, Call, 0, GetMyNumber)) { strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn, rcvmsg.msg_data.byte_array, sizeof(rcvmsg.msg_data.byte_array)); continue; } /* * PhyStatus response */ if (IS_CE_MESSAGE(rcvmsg, Phy, 2, Status)) { unsigned int b1stat, b2stat; /* * Covert the message data to the adapter->phystat code */ b1stat = (unsigned int) rcvmsg.msg_data.byte_array[0]; b2stat = (unsigned int) rcvmsg.msg_data.byte_array[1]; sc_adapter[card]->nphystat = (b2stat >> 8) | b1stat; /* endian?? */ pr_debug("%s: PhyStat is 0x%2x\n", sc_adapter[card]->devicename, sc_adapter[card]->nphystat); continue; } /* * Handle a GetFramFormat */ if (IS_CE_MESSAGE(rcvmsg, Call, 0, GetFrameFormat)) { if (rcvmsg.msg_data.byte_array[0] != HDLC_PROTO) { unsigned int proto = HDLC_PROTO; /* * Set board format to HDLC if it wasn't already */ pr_debug("%s: current frame format: 0x%x, will change to HDLC\n", sc_adapter[card]->devicename, rcvmsg.msg_data.byte_array[0]); sendmessage(card, CEPID, ceReqTypeCall, ceReqClass0, ceReqCallSetFrameFormat, (unsigned char)channel + 1, 1, &proto); } continue; } /* * Hmm... */ pr_debug("%s: Received unhandled message (%d,%d,%d) link %d\n", sc_adapter[card]->devicename, rcvmsg.type, rcvmsg.class, rcvmsg.code, rcvmsg.phy_link_no); } /* while */ pr_debug("%s: Exiting Interrupt Handler\n", sc_adapter[card]->devicename); return IRQ_HANDLED; }
gpl-2.0
joryb/android_kernel_samsung_jf
arch/x86/pci/irq.c
10483
33408
/* * Low-Level PCI Support for PC -- Routing of Interrupts * * (c) 1999--2000 Martin Mares <mj@ucw.cz> */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/dmi.h> #include <linux/io.h> #include <linux/smp.h> #include <asm/io_apic.h> #include <linux/irq.h> #include <linux/acpi.h> #include <asm/pci_x86.h> #define PIRQ_SIGNATURE (('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24)) #define PIRQ_VERSION 0x0100 static int broken_hp_bios_irq9; static int acer_tm360_irqrouting; static struct irq_routing_table *pirq_table; static int pirq_enable_irq(struct pci_dev *dev); /* * Never use: 0, 1, 2 (timer, keyboard, and cascade) * Avoid using: 13, 14 and 15 (FP error and IDE). * Penalize: 3, 4, 6, 7, 12 (known ISA uses: serial, floppy, parallel and mouse) */ unsigned int pcibios_irq_mask = 0xfff8; static int pirq_penalty[16] = { 1000000, 1000000, 1000000, 1000, 1000, 0, 1000, 1000, 0, 0, 0, 0, 1000, 100000, 100000, 100000 }; struct irq_router { char *name; u16 vendor, device; int (*get)(struct pci_dev *router, struct pci_dev *dev, int pirq); int (*set)(struct pci_dev *router, struct pci_dev *dev, int pirq, int new); }; struct irq_router_handler { u16 vendor; int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device); }; int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq; void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL; /* * Check passed address for the PCI IRQ Routing Table signature * and perform checksum verification. 
*/ static inline struct irq_routing_table *pirq_check_routing_table(u8 *addr) { struct irq_routing_table *rt; int i; u8 sum; rt = (struct irq_routing_table *) addr; if (rt->signature != PIRQ_SIGNATURE || rt->version != PIRQ_VERSION || rt->size % 16 || rt->size < sizeof(struct irq_routing_table)) return NULL; sum = 0; for (i = 0; i < rt->size; i++) sum += addr[i]; if (!sum) { DBG(KERN_DEBUG "PCI: Interrupt Routing Table found at 0x%p\n", rt); return rt; } return NULL; } /* * Search 0xf0000 -- 0xfffff for the PCI IRQ Routing Table. */ static struct irq_routing_table * __init pirq_find_routing_table(void) { u8 *addr; struct irq_routing_table *rt; if (pirq_table_addr) { rt = pirq_check_routing_table((u8 *) __va(pirq_table_addr)); if (rt) return rt; printk(KERN_WARNING "PCI: PIRQ table NOT found at pirqaddr\n"); } for (addr = (u8 *) __va(0xf0000); addr < (u8 *) __va(0x100000); addr += 16) { rt = pirq_check_routing_table(addr); if (rt) return rt; } return NULL; } /* * If we have a IRQ routing table, use it to search for peer host * bridges. It's a gross hack, but since there are no other known * ways how to get a list of buses, we have to go this way. 
*/ static void __init pirq_peer_trick(void) { struct irq_routing_table *rt = pirq_table; u8 busmap[256]; int i; struct irq_info *e; memset(busmap, 0, sizeof(busmap)); for (i = 0; i < (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); i++) { e = &rt->slots[i]; #ifdef DEBUG { int j; DBG(KERN_DEBUG "%02x:%02x slot=%02x", e->bus, e->devfn/8, e->slot); for (j = 0; j < 4; j++) DBG(" %d:%02x/%04x", j, e->irq[j].link, e->irq[j].bitmap); DBG("\n"); } #endif busmap[e->bus] = 1; } for (i = 1; i < 256; i++) { int node; if (!busmap[i] || pci_find_bus(0, i)) continue; node = get_mp_bus_to_node(i); if (pci_scan_bus_on_node(i, &pci_root_ops, node)) printk(KERN_INFO "PCI: Discovered primary peer " "bus %02x [IRQ]\n", i); } pcibios_last_bus = -1; } /* * Code for querying and setting of IRQ routes on various interrupt routers. */ void eisa_set_level_irq(unsigned int irq) { unsigned char mask = 1 << (irq & 7); unsigned int port = 0x4d0 + (irq >> 3); unsigned char val; static u16 eisa_irq_mask; if (irq >= 16 || (1 << irq) & eisa_irq_mask) return; eisa_irq_mask |= (1 << irq); printk(KERN_DEBUG "PCI: setting IRQ %u as level-triggered\n", irq); val = inb(port); if (!(val & mask)) { DBG(KERN_DEBUG " -> edge"); outb(val | mask, port); } } /* * Common IRQ routing practice: nibbles in config space, * offset by some magic constant. */ static unsigned int read_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr) { u8 x; unsigned reg = offset + (nr >> 1); pci_read_config_byte(router, reg, &x); return (nr & 1) ? (x >> 4) : (x & 0xf); } static void write_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr, unsigned int val) { u8 x; unsigned reg = offset + (nr >> 1); pci_read_config_byte(router, reg, &x); x = (nr & 1) ? ((x & 0x0f) | (val << 4)) : ((x & 0xf0) | val); pci_write_config_byte(router, reg, x); } /* * ALI pirq entries are damn ugly, and completely undocumented. 
* This has been figured out from pirq tables, and it's not a pretty * picture. */ static int pirq_ali_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { static const unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 }; WARN_ON_ONCE(pirq > 16); return irqmap[read_config_nybble(router, 0x48, pirq-1)]; } static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { static const unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 }; unsigned int val = irqmap[irq]; WARN_ON_ONCE(pirq > 16); if (val) { write_config_nybble(router, 0x48, pirq-1, val); return 1; } return 0; } /* * The Intel PIIX4 pirq rules are fairly simple: "pirq" is * just a pointer to the config space. */ static int pirq_piix_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { u8 x; pci_read_config_byte(router, pirq, &x); return (x < 16) ? x : 0; } static int pirq_piix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { pci_write_config_byte(router, pirq, irq); return 1; } /* * The VIA pirq rules are nibble-based, like ALI, * but without the ugly irq number munging. * However, PIRQD is in the upper instead of lower 4 bits. */ static int pirq_via_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { return read_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq); } static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { write_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq, irq); return 1; } /* * The VIA pirq rules are nibble-based, like ALI, * but without the ugly irq number munging. * However, for 82C586, nibble map is different . 
*/ static int pirq_via586_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 }; WARN_ON_ONCE(pirq > 5); return read_config_nybble(router, 0x55, pirqmap[pirq-1]); } static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 }; WARN_ON_ONCE(pirq > 5); write_config_nybble(router, 0x55, pirqmap[pirq-1], irq); return 1; } /* * ITE 8330G pirq rules are nibble-based * FIXME: pirqmap may be { 1, 0, 3, 2 }, * 2+3 are both mapped to irq 9 on my system */ static int pirq_ite_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { static const unsigned char pirqmap[4] = { 1, 0, 2, 3 }; WARN_ON_ONCE(pirq > 4); return read_config_nybble(router, 0x43, pirqmap[pirq-1]); } static int pirq_ite_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { static const unsigned char pirqmap[4] = { 1, 0, 2, 3 }; WARN_ON_ONCE(pirq > 4); write_config_nybble(router, 0x43, pirqmap[pirq-1], irq); return 1; } /* * OPTI: high four bits are nibble pointer.. * I wonder what the low bits do? */ static int pirq_opti_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { return read_config_nybble(router, 0xb8, pirq >> 4); } static int pirq_opti_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { write_config_nybble(router, 0xb8, pirq >> 4, irq); return 1; } /* * Cyrix: nibble offset 0x5C * 0x5C bits 7:4 is INTB bits 3:0 is INTA * 0x5D bits 7:4 is INTD bits 3:0 is INTC */ static int pirq_cyrix_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { return read_config_nybble(router, 0x5C, (pirq-1)^1); } static int pirq_cyrix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { write_config_nybble(router, 0x5C, (pirq-1)^1, irq); return 1; } /* * PIRQ routing for SiS 85C503 router used in several SiS chipsets. 
* We have to deal with the following issues here: * - vendors have different ideas about the meaning of link values * - some onboard devices (integrated in the chipset) have special * links and are thus routed differently (i.e. not via PCI INTA-INTD) * - different revision of the router have a different layout for * the routing registers, particularly for the onchip devices * * For all routing registers the common thing is we have one byte * per routeable link which is defined as: * bit 7 IRQ mapping enabled (0) or disabled (1) * bits [6:4] reserved (sometimes used for onchip devices) * bits [3:0] IRQ to map to * allowed: 3-7, 9-12, 14-15 * reserved: 0, 1, 2, 8, 13 * * The config-space registers located at 0x41/0x42/0x43/0x44 are * always used to route the normal PCI INT A/B/C/D respectively. * Apparently there are systems implementing PCI routing table using * link values 0x01-0x04 and others using 0x41-0x44 for PCI INTA..D. * We try our best to handle both link mappings. * * Currently (2003-05-21) it appears most SiS chipsets follow the * definition of routing registers from the SiS-5595 southbridge. * According to the SiS 5595 datasheets the revision id's of the * router (ISA-bridge) should be 0x01 or 0xb0. * * Furthermore we've also seen lspci dumps with revision 0x00 and 0xb1. * Looks like these are used in a number of SiS 5xx/6xx/7xx chipsets. * They seem to work with the current routing code. However there is * some concern because of the two USB-OHCI HCs (original SiS 5595 * had only one). YMMV. * * Onchip routing for router rev-id 0x01/0xb0 and probably 0x00/0xb1: * * 0x61: IDEIRQ: * bits [6:5] must be written 01 * bit 4 channel-select primary (0), secondary (1) * * 0x62: USBIRQ: * bit 6 OHCI function disabled (0), enabled (1) * * 0x6a: ACPI/SCI IRQ: bits 4-6 reserved * * 0x7e: Data Acq. Module IRQ - bits 4-6 reserved * * We support USBIRQ (in addition to INTA-INTD) and keep the * IDE, ACPI and DAQ routing untouched as set by the BIOS. 
* * Currently the only reported exception is the new SiS 65x chipset * which includes the SiS 69x southbridge. Here we have the 85C503 * router revision 0x04 and there are changes in the register layout * mostly related to the different USB HCs with USB 2.0 support. * * Onchip routing for router rev-id 0x04 (try-and-error observation) * * 0x60/0x61/0x62/0x63: 1xEHCI and 3xOHCI (companion) USB-HCs * bit 6-4 are probably unused, not like 5595 */ #define PIRQ_SIS_IRQ_MASK 0x0f #define PIRQ_SIS_IRQ_DISABLE 0x80 #define PIRQ_SIS_USB_ENABLE 0x40 static int pirq_sis_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { u8 x; int reg; reg = pirq; if (reg >= 0x01 && reg <= 0x04) reg += 0x40; pci_read_config_byte(router, reg, &x); return (x & PIRQ_SIS_IRQ_DISABLE) ? 0 : (x & PIRQ_SIS_IRQ_MASK); } static int pirq_sis_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { u8 x; int reg; reg = pirq; if (reg >= 0x01 && reg <= 0x04) reg += 0x40; pci_read_config_byte(router, reg, &x); x &= ~(PIRQ_SIS_IRQ_MASK | PIRQ_SIS_IRQ_DISABLE); x |= irq ? irq: PIRQ_SIS_IRQ_DISABLE; pci_write_config_byte(router, reg, x); return 1; } /* * VLSI: nibble offset 0x74 - educated guess due to routing table and * config space of VLSI 82C534 PCI-bridge/router (1004:0102) * Tested on HP OmniBook 800 covering PIRQ 1, 2, 4, 8 for onboard * devices, PIRQ 3 for non-pci(!) soundchip and (untested) PIRQ 6 * for the busbridge to the docking station. 
*/ static int pirq_vlsi_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { WARN_ON_ONCE(pirq >= 9); if (pirq > 8) { dev_info(&dev->dev, "VLSI router PIRQ escape (%d)\n", pirq); return 0; } return read_config_nybble(router, 0x74, pirq-1); } static int pirq_vlsi_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { WARN_ON_ONCE(pirq >= 9); if (pirq > 8) { dev_info(&dev->dev, "VLSI router PIRQ escape (%d)\n", pirq); return 0; } write_config_nybble(router, 0x74, pirq-1, irq); return 1; } /* * ServerWorks: PCI interrupts mapped to system IRQ lines through Index * and Redirect I/O registers (0x0c00 and 0x0c01). The Index register * format is (PCIIRQ## | 0x10), e.g.: PCIIRQ10=0x1a. The Redirect * register is a straight binary coding of desired PIC IRQ (low nibble). * * The 'link' value in the PIRQ table is already in the correct format * for the Index register. There are some special index values: * 0x00 for ACPI (SCI), 0x01 for USB, 0x02 for IDE0, 0x04 for IDE1, * and 0x03 for SMBus. */ static int pirq_serverworks_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { outb(pirq, 0xc00); return inb(0xc01) & 0xf; } static int pirq_serverworks_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { outb(pirq, 0xc00); outb(irq, 0xc01); return 1; } /* Support for AMD756 PCI IRQ Routing * Jhon H. Caicedo <jhcaiced@osso.org.co> * Jun/21/2001 0.2.0 Release, fixed to use "nybble" functions... 
(jhcaiced) * Jun/19/2001 Alpha Release 0.1.0 (jhcaiced) * The AMD756 pirq rules are nibble-based * offset 0x56 0-3 PIRQA 4-7 PIRQB * offset 0x57 0-3 PIRQC 4-7 PIRQD */ static int pirq_amd756_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { u8 irq; irq = 0; if (pirq <= 4) irq = read_config_nybble(router, 0x56, pirq - 1); dev_info(&dev->dev, "AMD756: dev [%04x:%04x], router PIRQ %d get IRQ %d\n", dev->vendor, dev->device, pirq, irq); return irq; } static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { dev_info(&dev->dev, "AMD756: dev [%04x:%04x], router PIRQ %d set IRQ %d\n", dev->vendor, dev->device, pirq, irq); if (pirq <= 4) write_config_nybble(router, 0x56, pirq - 1, irq); return 1; } /* * PicoPower PT86C523 */ static int pirq_pico_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { outb(0x10 + ((pirq - 1) >> 1), 0x24); return ((pirq - 1) & 1) ? (inb(0x26) >> 4) : (inb(0x26) & 0xf); } static int pirq_pico_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { unsigned int x; outb(0x10 + ((pirq - 1) >> 1), 0x24); x = inb(0x26); x = ((pirq - 1) & 1) ? 
((x & 0x0f) | (irq << 4)) : ((x & 0xf0) | (irq)); outb(x, 0x26); return 1; } #ifdef CONFIG_PCI_BIOS static int pirq_bios_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { struct pci_dev *bridge; int pin = pci_get_interrupt_pin(dev, &bridge); return pcibios_set_irq_routing(bridge, pin - 1, irq); } #endif static __init int intel_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { static struct pci_device_id __initdata pirq_440gx[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2) }, { }, }; /* 440GX has a proprietary PIRQ router -- don't use it */ if (pci_dev_present(pirq_440gx)) return 0; switch (device) { case PCI_DEVICE_ID_INTEL_82371FB_0: case PCI_DEVICE_ID_INTEL_82371SB_0: case PCI_DEVICE_ID_INTEL_82371AB_0: case PCI_DEVICE_ID_INTEL_82371MX: case PCI_DEVICE_ID_INTEL_82443MX_0: case PCI_DEVICE_ID_INTEL_82801AA_0: case PCI_DEVICE_ID_INTEL_82801AB_0: case PCI_DEVICE_ID_INTEL_82801BA_0: case PCI_DEVICE_ID_INTEL_82801BA_10: case PCI_DEVICE_ID_INTEL_82801CA_0: case PCI_DEVICE_ID_INTEL_82801CA_12: case PCI_DEVICE_ID_INTEL_82801DB_0: case PCI_DEVICE_ID_INTEL_82801E_0: case PCI_DEVICE_ID_INTEL_82801EB_0: case PCI_DEVICE_ID_INTEL_ESB_1: case PCI_DEVICE_ID_INTEL_ICH6_0: case PCI_DEVICE_ID_INTEL_ICH6_1: case PCI_DEVICE_ID_INTEL_ICH7_0: case PCI_DEVICE_ID_INTEL_ICH7_1: case PCI_DEVICE_ID_INTEL_ICH7_30: case PCI_DEVICE_ID_INTEL_ICH7_31: case PCI_DEVICE_ID_INTEL_TGP_LPC: case PCI_DEVICE_ID_INTEL_ESB2_0: case PCI_DEVICE_ID_INTEL_ICH8_0: case PCI_DEVICE_ID_INTEL_ICH8_1: case PCI_DEVICE_ID_INTEL_ICH8_2: case PCI_DEVICE_ID_INTEL_ICH8_3: case PCI_DEVICE_ID_INTEL_ICH8_4: case PCI_DEVICE_ID_INTEL_ICH9_0: case PCI_DEVICE_ID_INTEL_ICH9_1: case PCI_DEVICE_ID_INTEL_ICH9_2: case PCI_DEVICE_ID_INTEL_ICH9_3: case PCI_DEVICE_ID_INTEL_ICH9_4: case PCI_DEVICE_ID_INTEL_ICH9_5: case PCI_DEVICE_ID_INTEL_EP80579_0: case PCI_DEVICE_ID_INTEL_ICH10_0: case 
PCI_DEVICE_ID_INTEL_ICH10_1: case PCI_DEVICE_ID_INTEL_ICH10_2: case PCI_DEVICE_ID_INTEL_ICH10_3: case PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0: case PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1: r->name = "PIIX/ICH"; r->get = pirq_piix_get; r->set = pirq_piix_set; return 1; } if ((device >= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN && device <= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX) || (device >= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN && device <= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX) || (device >= PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN && device <= PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX) || (device >= PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MIN && device <= PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MAX)) { r->name = "PIIX/ICH"; r->get = pirq_piix_get; r->set = pirq_piix_set; return 1; } return 0; } static __init int via_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { /* FIXME: We should move some of the quirk fixup stuff here */ /* * workarounds for some buggy BIOSes */ if (device == PCI_DEVICE_ID_VIA_82C586_0) { switch (router->device) { case PCI_DEVICE_ID_VIA_82C686: /* * Asus k7m bios wrongly reports 82C686A * as 586-compatible */ device = PCI_DEVICE_ID_VIA_82C686; break; case PCI_DEVICE_ID_VIA_8235: /** * Asus a7v-x bios wrongly reports 8235 * as 586-compatible */ device = PCI_DEVICE_ID_VIA_8235; break; case PCI_DEVICE_ID_VIA_8237: /** * Asus a7v600 bios wrongly reports 8237 * as 586-compatible */ device = PCI_DEVICE_ID_VIA_8237; break; } } switch (device) { case PCI_DEVICE_ID_VIA_82C586_0: r->name = "VIA"; r->get = pirq_via586_get; r->set = pirq_via586_set; return 1; case PCI_DEVICE_ID_VIA_82C596: case PCI_DEVICE_ID_VIA_82C686: case PCI_DEVICE_ID_VIA_8231: case PCI_DEVICE_ID_VIA_8233A: case PCI_DEVICE_ID_VIA_8235: case PCI_DEVICE_ID_VIA_8237: /* FIXME: add new ones for 8233/5 */ r->name = "VIA"; r->get = pirq_via_get; r->set = pirq_via_set; return 1; } return 0; } static __init int vlsi_router_probe(struct irq_router *r, struct pci_dev *router, u16 
device) { switch (device) { case PCI_DEVICE_ID_VLSI_82C534: r->name = "VLSI 82C534"; r->get = pirq_vlsi_get; r->set = pirq_vlsi_set; return 1; } return 0; } static __init int serverworks_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { switch (device) { case PCI_DEVICE_ID_SERVERWORKS_OSB4: case PCI_DEVICE_ID_SERVERWORKS_CSB5: r->name = "ServerWorks"; r->get = pirq_serverworks_get; r->set = pirq_serverworks_set; return 1; } return 0; } static __init int sis_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { if (device != PCI_DEVICE_ID_SI_503) return 0; r->name = "SIS"; r->get = pirq_sis_get; r->set = pirq_sis_set; return 1; } static __init int cyrix_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { switch (device) { case PCI_DEVICE_ID_CYRIX_5520: r->name = "NatSemi"; r->get = pirq_cyrix_get; r->set = pirq_cyrix_set; return 1; } return 0; } static __init int opti_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { switch (device) { case PCI_DEVICE_ID_OPTI_82C700: r->name = "OPTI"; r->get = pirq_opti_get; r->set = pirq_opti_set; return 1; } return 0; } static __init int ite_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { switch (device) { case PCI_DEVICE_ID_ITE_IT8330G_0: r->name = "ITE"; r->get = pirq_ite_get; r->set = pirq_ite_set; return 1; } return 0; } static __init int ali_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { switch (device) { case PCI_DEVICE_ID_AL_M1533: case PCI_DEVICE_ID_AL_M1563: r->name = "ALI"; r->get = pirq_ali_get; r->set = pirq_ali_set; return 1; } return 0; } static __init int amd_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { switch (device) { case PCI_DEVICE_ID_AMD_VIPER_740B: r->name = "AMD756"; break; case PCI_DEVICE_ID_AMD_VIPER_7413: r->name = "AMD766"; break; case PCI_DEVICE_ID_AMD_VIPER_7443: r->name = "AMD768"; break; default: return 0; } r->get = pirq_amd756_get; 
r->set = pirq_amd756_set; return 1; } static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { switch (device) { case PCI_DEVICE_ID_PICOPOWER_PT86C523: r->name = "PicoPower PT86C523"; r->get = pirq_pico_get; r->set = pirq_pico_set; return 1; case PCI_DEVICE_ID_PICOPOWER_PT86C523BBP: r->name = "PicoPower PT86C523 rev. BB+"; r->get = pirq_pico_get; r->set = pirq_pico_set; return 1; } return 0; } static __initdata struct irq_router_handler pirq_routers[] = { { PCI_VENDOR_ID_INTEL, intel_router_probe }, { PCI_VENDOR_ID_AL, ali_router_probe }, { PCI_VENDOR_ID_ITE, ite_router_probe }, { PCI_VENDOR_ID_VIA, via_router_probe }, { PCI_VENDOR_ID_OPTI, opti_router_probe }, { PCI_VENDOR_ID_SI, sis_router_probe }, { PCI_VENDOR_ID_CYRIX, cyrix_router_probe }, { PCI_VENDOR_ID_VLSI, vlsi_router_probe }, { PCI_VENDOR_ID_SERVERWORKS, serverworks_router_probe }, { PCI_VENDOR_ID_AMD, amd_router_probe }, { PCI_VENDOR_ID_PICOPOWER, pico_router_probe }, /* Someone with docs needs to add the ATI Radeon IGP */ { 0, NULL } }; static struct irq_router pirq_router; static struct pci_dev *pirq_router_dev; /* * FIXME: should we have an option to say "generic for * chipset" ? 
*/ static void __init pirq_find_router(struct irq_router *r) { struct irq_routing_table *rt = pirq_table; struct irq_router_handler *h; #ifdef CONFIG_PCI_BIOS if (!rt->signature) { printk(KERN_INFO "PCI: Using BIOS for IRQ routing\n"); r->set = pirq_bios_set; r->name = "BIOS"; return; } #endif /* Default unless a driver reloads it */ r->name = "default"; r->get = NULL; r->set = NULL; DBG(KERN_DEBUG "PCI: Attempting to find IRQ router for [%04x:%04x]\n", rt->rtr_vendor, rt->rtr_device); pirq_router_dev = pci_get_bus_and_slot(rt->rtr_bus, rt->rtr_devfn); if (!pirq_router_dev) { DBG(KERN_DEBUG "PCI: Interrupt router not found at " "%02x:%02x\n", rt->rtr_bus, rt->rtr_devfn); return; } for (h = pirq_routers; h->vendor; h++) { /* First look for a router match */ if (rt->rtr_vendor == h->vendor && h->probe(r, pirq_router_dev, rt->rtr_device)) break; /* Fall back to a device match */ if (pirq_router_dev->vendor == h->vendor && h->probe(r, pirq_router_dev, pirq_router_dev->device)) break; } dev_info(&pirq_router_dev->dev, "%s IRQ router [%04x:%04x]\n", pirq_router.name, pirq_router_dev->vendor, pirq_router_dev->device); /* The device remains referenced for the kernel lifetime */ } static struct irq_info *pirq_get_info(struct pci_dev *dev) { struct irq_routing_table *rt = pirq_table; int entries = (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); struct irq_info *info; for (info = rt->slots; entries--; info++) if (info->bus == dev->bus->number && PCI_SLOT(info->devfn) == PCI_SLOT(dev->devfn)) return info; return NULL; } static int pcibios_lookup_irq(struct pci_dev *dev, int assign) { u8 pin; struct irq_info *info; int i, pirq, newirq; int irq = 0; u32 mask; struct irq_router *r = &pirq_router; struct pci_dev *dev2 = NULL; char *msg = NULL; /* Find IRQ pin */ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); if (!pin) { dev_dbg(&dev->dev, "no interrupt pin\n"); return 0; } if (io_apic_assign_pci_irqs) return 0; /* Find IRQ routing entry */ if 
(!pirq_table) return 0; info = pirq_get_info(dev); if (!info) { dev_dbg(&dev->dev, "PCI INT %c not found in routing table\n", 'A' + pin - 1); return 0; } pirq = info->irq[pin - 1].link; mask = info->irq[pin - 1].bitmap; if (!pirq) { dev_dbg(&dev->dev, "PCI INT %c not routed\n", 'A' + pin - 1); return 0; } dev_dbg(&dev->dev, "PCI INT %c -> PIRQ %02x, mask %04x, excl %04x", 'A' + pin - 1, pirq, mask, pirq_table->exclusive_irqs); mask &= pcibios_irq_mask; /* Work around broken HP Pavilion Notebooks which assign USB to IRQ 9 even though it is actually wired to IRQ 11 */ if (broken_hp_bios_irq9 && pirq == 0x59 && dev->irq == 9) { dev->irq = 11; pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11); r->set(pirq_router_dev, dev, pirq, 11); } /* same for Acer Travelmate 360, but with CB and irq 11 -> 10 */ if (acer_tm360_irqrouting && dev->irq == 11 && dev->vendor == PCI_VENDOR_ID_O2) { pirq = 0x68; mask = 0x400; dev->irq = r->get(pirq_router_dev, dev, pirq); pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); } /* * Find the best IRQ to assign: use the one * reported by the device if possible. 
*/ newirq = dev->irq; if (newirq && !((1 << newirq) & mask)) { if (pci_probe & PCI_USE_PIRQ_MASK) newirq = 0; else dev_warn(&dev->dev, "IRQ %d doesn't match PIRQ mask " "%#x; try pci=usepirqmask\n", newirq, mask); } if (!newirq && assign) { for (i = 0; i < 16; i++) { if (!(mask & (1 << i))) continue; if (pirq_penalty[i] < pirq_penalty[newirq] && can_request_irq(i, IRQF_SHARED)) newirq = i; } } dev_dbg(&dev->dev, "PCI INT %c -> newirq %d", 'A' + pin - 1, newirq); /* Check if it is hardcoded */ if ((pirq & 0xf0) == 0xf0) { irq = pirq & 0xf; msg = "hardcoded"; } else if (r->get && (irq = r->get(pirq_router_dev, dev, pirq)) && \ ((!(pci_probe & PCI_USE_PIRQ_MASK)) || ((1 << irq) & mask))) { msg = "found"; eisa_set_level_irq(irq); } else if (newirq && r->set && (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) { if (r->set(pirq_router_dev, dev, pirq, newirq)) { eisa_set_level_irq(newirq); msg = "assigned"; irq = newirq; } } if (!irq) { if (newirq && mask == (1 << newirq)) { msg = "guessed"; irq = newirq; } else { dev_dbg(&dev->dev, "can't route interrupt\n"); return 0; } } dev_info(&dev->dev, "%s PCI INT %c -> IRQ %d\n", msg, 'A' + pin - 1, irq); /* Update IRQ for all devices with the same pirq value */ for_each_pci_dev(dev2) { pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin); if (!pin) continue; info = pirq_get_info(dev2); if (!info) continue; if (info->irq[pin - 1].link == pirq) { /* * We refuse to override the dev->irq * information. Give a warning! 
*/ if (dev2->irq && dev2->irq != irq && \ (!(pci_probe & PCI_USE_PIRQ_MASK) || \ ((1 << dev2->irq) & mask))) { #ifndef CONFIG_PCI_MSI dev_info(&dev2->dev, "IRQ routing conflict: " "have IRQ %d, want IRQ %d\n", dev2->irq, irq); #endif continue; } dev2->irq = irq; pirq_penalty[irq]++; if (dev != dev2) dev_info(&dev->dev, "sharing IRQ %d with %s\n", irq, pci_name(dev2)); } } return 1; } void __init pcibios_fixup_irqs(void) { struct pci_dev *dev = NULL; u8 pin; DBG(KERN_DEBUG "PCI: IRQ fixup\n"); for_each_pci_dev(dev) { /* * If the BIOS has set an out of range IRQ number, just * ignore it. Also keep track of which IRQ's are * already in use. */ if (dev->irq >= 16) { dev_dbg(&dev->dev, "ignoring bogus IRQ %d\n", dev->irq); dev->irq = 0; } /* * If the IRQ is already assigned to a PCI device, * ignore its ISA use penalty */ if (pirq_penalty[dev->irq] >= 100 && pirq_penalty[dev->irq] < 100000) pirq_penalty[dev->irq] = 0; pirq_penalty[dev->irq]++; } if (io_apic_assign_pci_irqs) return; dev = NULL; for_each_pci_dev(dev) { pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); if (!pin) continue; /* * Still no IRQ? Try to lookup one... 
*/ if (!dev->irq) pcibios_lookup_irq(dev, 0); } } /* * Work around broken HP Pavilion Notebooks which assign USB to * IRQ 9 even though it is actually wired to IRQ 11 */ static int __init fix_broken_hp_bios_irq9(const struct dmi_system_id *d) { if (!broken_hp_bios_irq9) { broken_hp_bios_irq9 = 1; printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident); } return 0; } /* * Work around broken Acer TravelMate 360 Notebooks which assign * Cardbus to IRQ 11 even though it is actually wired to IRQ 10 */ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d) { if (!acer_tm360_irqrouting) { acer_tm360_irqrouting = 1; printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident); } return 0; } static struct dmi_system_id __initdata pciirq_dmi_table[] = { { .callback = fix_broken_hp_bios_irq9, .ident = "HP Pavilion N5400 Series Laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_BIOS_VERSION, "GE.M1.03"), DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook Model GE"), DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"), }, }, { .callback = fix_acer_tm360_irqrouting, .ident = "Acer TravelMate 36x Laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), }, }, { } }; void __init pcibios_irq_init(void) { DBG(KERN_DEBUG "PCI: IRQ init\n"); if (raw_pci_ops == NULL) return; dmi_check_system(pciirq_dmi_table); pirq_table = pirq_find_routing_table(); #ifdef CONFIG_PCI_BIOS if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN)) pirq_table = pcibios_get_irq_routing_table(); #endif if (pirq_table) { pirq_peer_trick(); pirq_find_router(&pirq_router); if (pirq_table->exclusive_irqs) { int i; for (i = 0; i < 16; i++) if (!(pirq_table->exclusive_irqs & (1 << i))) pirq_penalty[i] += 100; } /* * If we're using the I/O APIC, avoid using the PCI IRQ * routing table */ if (io_apic_assign_pci_irqs) pirq_table = NULL; } x86_init.pci.fixup_irqs(); if (io_apic_assign_pci_irqs && 
pci_routeirq) { struct pci_dev *dev = NULL; /* * PCI IRQ routing is set up by pci_enable_device(), but we * also do it here in case there are still broken drivers that * don't use pci_enable_device(). */ printk(KERN_INFO "PCI: Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n"); for_each_pci_dev(dev) pirq_enable_irq(dev); } } static void pirq_penalize_isa_irq(int irq, int active) { /* * If any ISAPnP device reports an IRQ in its list of possible * IRQ's, we try to avoid assigning it to PCI devices. */ if (irq < 16) { if (active) pirq_penalty[irq] += 1000; else pirq_penalty[irq] += 100; } } void pcibios_penalize_isa_irq(int irq, int active) { #ifdef CONFIG_ACPI if (!acpi_noirq) acpi_penalize_isa_irq(irq, active); else #endif pirq_penalize_isa_irq(irq, active); } static int pirq_enable_irq(struct pci_dev *dev) { u8 pin; pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); if (pin && !pcibios_lookup_irq(dev, 1)) { char *msg = ""; if (!io_apic_assign_pci_irqs && dev->irq) return 0; if (io_apic_assign_pci_irqs) { #ifdef CONFIG_X86_IO_APIC struct pci_dev *temp_dev; int irq; struct io_apic_irq_attr irq_attr; irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin - 1, &irq_attr); /* * Busses behind bridges are typically not listed in the MP-table. * In this case we have to look up the IRQ based on the parent bus, * parent slot, and pin number. The SMP code detects such bridged * busses itself so we should get into this branch reliably. 
*/ temp_dev = dev; while (irq < 0 && dev->bus->parent) { /* go back to the bridge */ struct pci_dev *bridge = dev->bus->self; pin = pci_swizzle_interrupt_pin(dev, pin); irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, PCI_SLOT(bridge->devfn), pin - 1, &irq_attr); if (irq >= 0) dev_warn(&dev->dev, "using bridge %s " "INT %c to get IRQ %d\n", pci_name(bridge), 'A' + pin - 1, irq); dev = bridge; } dev = temp_dev; if (irq >= 0) { io_apic_set_pci_routing(&dev->dev, irq, &irq_attr); dev->irq = irq; dev_info(&dev->dev, "PCI->APIC IRQ transform: " "INT %c -> IRQ %d\n", 'A' + pin - 1, irq); return 0; } else msg = "; probably buggy MP table"; #endif } else if (pci_probe & PCI_BIOS_IRQ_SCAN) msg = ""; else msg = "; please try using pci=biosirq"; /* * With IDE legacy devices the IRQ lookup failure is not * a problem.. */ if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE && !(dev->class & 0x5)) return 0; dev_warn(&dev->dev, "can't find IRQ for PCI INT %c%s\n", 'A' + pin - 1, msg); } return 0; }
gpl-2.0
droidcore/Hydra
drivers/mtd/ubi/misc.c
244
4661
/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Author: Artem Bityutskiy (Битюцкий Артём)
 */

/* Miscellaneous helper functions shared across the UBI code */

#include "ubi.h"

/**
 * ubi_calc_data_len - find how much of a buffer holds real data.
 * @ubi: UBI device description object
 * @buf: buffer with the contents of a physical eraseblock
 * @length: length of @buf in bytes
 *
 * Trailing 0xFF bytes are treated as erased flash rather than data.  The
 * function returns the number of leading bytes up to and including the last
 * non-0xFF byte, rounded up to the minimum flash I/O unit.  @length must be
 * a multiple of @ubi->min_io_size.
 */
int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
		      int length)
{
	const uint8_t *bytes = buf;
	int data_bytes = length;

	ubi_assert(!(length & (ubi->min_io_size - 1)));

	/* Strip the run of erased (0xFF) bytes at the tail */
	while (data_bytes > 0 && bytes[data_bytes - 1] == 0xFF)
		data_bytes -= 1;

	/* The result must be aligned to the minimum flash I/O size */
	return ALIGN(data_bytes, ubi->min_io_size);
}

/**
 * ubi_check_volume - check the contents of a static volume.
 * @ubi: UBI device description object
 * @vol_id: ID of the volume to check
 *
 * Fully reads static volume @vol_id, LEB by LEB, so that the EBA layer can
 * verify the stored data (the last argument of ubi_eba_read_leb() requests
 * checking).  Returns %0 if the volume is intact, %1 if corrupted data was
 * detected (an ECC error from the MTD layer), and a negative error code on
 * other failures.  Dynamic volumes are not checked and %0 is returned
 * immediately.
 */
int ubi_check_volume(struct ubi_device *ubi, int vol_id)
{
	struct ubi_volume *vol = ubi->volumes[vol_id];
	void *leb_buf;
	int lnum, err = 0;

	if (vol->vol_type != UBI_STATIC_VOLUME)
		return 0;

	leb_buf = vmalloc(vol->usable_leb_size);
	if (!leb_buf)
		return -ENOMEM;

	for (lnum = 0; lnum < vol->used_ebs; lnum++) {
		int to_read = vol->usable_leb_size;

		cond_resched();

		/* The final LEB may contain fewer bytes than a full LEB */
		if (lnum == vol->used_ebs - 1)
			to_read = vol->last_eb_bytes;

		err = ubi_eba_read_leb(ubi, vol, lnum, leb_buf, 0, to_read, 1);
		if (err) {
			/* ECC error means corrupted data, not an I/O error */
			if (mtd_is_eccerr(err))
				err = 1;
			break;
		}
	}

	vfree(leb_buf);
	return err;
}

/**
 * ubi_update_reserved - update bad eraseblock handling accounting data.
 * @ubi: UBI device description object
 *
 * Computes the shortfall between the number of PEBs currently reserved for
 * bad eraseblock handling and the required reserve level, then moves as many
 * available PEBs as possible into the reserve to close that gap.  Must be
 * called with @ubi->volumes_lock held.
 */
void ubi_update_reserved(struct ubi_device *ubi)
{
	int shortfall = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;

	if (shortfall <= 0 || ubi->avail_pebs == 0)
		return;

	/* Take only as many PEBs as are actually available */
	shortfall = min_t(int, shortfall, ubi->avail_pebs);
	ubi->avail_pebs -= shortfall;
	ubi->rsvd_pebs += shortfall;
	ubi->beb_rsvd_pebs += shortfall;
	ubi_msg(ubi->ubi_num, "reserved more %d PEBs for bad PEB handling",
		shortfall);
}

/**
 * ubi_calculate_reserved - calculate how many PEBs must be reserved for bad
 * eraseblock handling.
 * @ubi: UBI device description object
 *
 * Sets @ubi->beb_rsvd_level to the number of PEBs that still need to be
 * reserved given how many bad PEBs already exist.  If the bad-PEB count has
 * already exceeded the limit, the level is clamped to zero and a warning is
 * printed.
 */
void ubi_calculate_reserved(struct ubi_device *ubi)
{
	int level = ubi->bad_peb_limit - ubi->bad_peb_count;

	if (level < 0) {
		level = 0;
		ubi_warn(ubi->ubi_num,
			 "number of bad PEBs (%d) is above the expected limit "
			 "(%d), not reserving any PEBs for bad PEB handling, "
			 "will use available PEBs (if any)",
			 ubi->bad_peb_count, ubi->bad_peb_limit);
	}
	ubi->beb_rsvd_level = level;
}

/**
 * ubi_check_pattern - check if buffer contains only a certain byte pattern.
 * @buf: buffer to check
 * @patt: the pattern to check
 * @size: buffer size in bytes
 *
 * Returns %1 when every byte of @buf equals @patt, and %0 as soon as any
 * other byte value is found.
 */
int ubi_check_pattern(const void *buf, uint8_t patt, int size)
{
	const uint8_t *p = buf;
	const uint8_t *end = p + size;

	while (p < end)
		if (*p++ != patt)
			return 0;
	return 1;
}
gpl-2.0
fkfk/linux_gt-i9000-gb
drivers/usb/gadget/g_ffs.c
244
10940
#include <linux/module.h>
#include <linux/utsname.h>

/*
 * kbuild is not very cooperative with respect to linking separately
 * compiled library objects into one module.  So for now we won't use
 * separate compilation ... ensuring init/exit sections work to shrink
 * the runtime footprint, and giving us at least some parts of what
 * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
 */
#include "composite.c"
#include "usbstring.c"
#include "config.c"
#include "epautoconf.c"

#if defined CONFIG_USB_FUNCTIONFS_ETH || defined CONFIG_USB_FUNCTIONFS_RNDIS
# if defined USB_ETH_RNDIS
#  undef USB_ETH_RNDIS
# endif
# ifdef CONFIG_USB_FUNCTIONFS_RNDIS
#  define USB_ETH_RNDIS y
# endif

# include "f_ecm.c"
# include "f_subset.c"
# ifdef USB_ETH_RNDIS
#  include "f_rndis.c"
#  include "rndis.c"
# endif
# include "u_ether.c"

/* MAC address shared by the Ethernet-style configurations below. */
static u8 gfs_hostaddr[ETH_ALEN];
#else
# if !defined CONFIG_USB_FUNCTIONFS_GENERIC
#  define CONFIG_USB_FUNCTIONFS_GENERIC
# endif
/* No Ethernet function is compiled in: make the u_ether hooks no-ops. */
# define gether_cleanup() do { } while (0)
# define gether_setup(gadget, hostaddr)   ((int)0)
#endif

#include "f_fs.c"

#define DRIVER_NAME	"g_ffs"
#define DRIVER_DESC	"USB Function Filesystem"
#define DRIVER_VERSION	"24 Aug 2004"

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Michal Nazarewicz");
MODULE_LICENSE("GPL");

/* Default USB IDs; both can be overridden via module parameters below. */
static unsigned short gfs_vendor_id  = 0x1d6b;	/* Linux Foundation */
static unsigned short gfs_product_id = 0x0105;	/* FunctionFS Gadget */

static struct usb_device_descriptor gfs_dev_desc = {
	.bLength		= sizeof gfs_dev_desc,
	.bDescriptorType	= USB_DT_DEVICE,

	.bcdUSB			= cpu_to_le16(0x0200),
	.bDeviceClass		= USB_CLASS_PER_INTERFACE,

	/* Vendor and product id can be overridden by module parameters. */
	/* .idVendor	= cpu_to_le16(gfs_vendor_id), */
	/* .idProduct	= cpu_to_le16(gfs_product_id), */
	/* .bcdDevice	= f(hardware) */
	/* .iManufacturer	= DYNAMIC */
	/* .iProduct	= DYNAMIC */
	/* NO SERIAL NUMBER */
	.bNumConfigurations	= 1,
};

#define GFS_MODULE_PARAM_DESC(name, field) \
	MODULE_PARM_DESC(name, "Value of the " #field " field of the device descriptor sent to the host. Takes effect only prior to the user-space driver registering to the FunctionFS.")

module_param_named(usb_class, gfs_dev_desc.bDeviceClass, byte, 0644);
GFS_MODULE_PARAM_DESC(usb_class, bDeviceClass);
module_param_named(usb_subclass, gfs_dev_desc.bDeviceSubClass, byte, 0644);
GFS_MODULE_PARAM_DESC(usb_subclass, bDeviceSubClass);
module_param_named(usb_protocol, gfs_dev_desc.bDeviceProtocol, byte, 0644);
GFS_MODULE_PARAM_DESC(usb_protocol, bDeviceProtocol);
module_param_named(usb_vendor, gfs_vendor_id, ushort, 0644);
GFS_MODULE_PARAM_DESC(usb_vendor, idVendor);
module_param_named(usb_product, gfs_product_id, ushort, 0644);
GFS_MODULE_PARAM_DESC(usb_product, idProduct);

static const struct usb_descriptor_header *gfs_otg_desc[] = {
	(const struct usb_descriptor_header *)
	&(const struct usb_otg_descriptor) {
		.bLength		= sizeof(struct usb_otg_descriptor),
		.bDescriptorType	= USB_DT_OTG,

		/*
		 * REVISIT SRP-only hardware is possible, although
		 * it would not be called "OTG" ...
		 */
		.bmAttributes		= USB_OTG_SRP | USB_OTG_HNP,
	},

	NULL
};

/* string IDs are assigned dynamically */
enum {
	GFS_STRING_MANUFACTURER_IDX,
	GFS_STRING_PRODUCT_IDX,
#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
	GFS_STRING_RNDIS_CONFIG_IDX,
#endif
#ifdef CONFIG_USB_FUNCTIONFS_ETH
	GFS_STRING_ECM_CONFIG_IDX,
#endif
#ifdef CONFIG_USB_FUNCTIONFS_GENERIC
	GFS_STRING_GENERIC_CONFIG_IDX,
#endif
};

static char gfs_manufacturer[50];
static const char gfs_driver_desc[] = DRIVER_DESC;
static const char gfs_short_name[] = DRIVER_NAME;

static struct usb_string gfs_strings[] = {
	[GFS_STRING_MANUFACTURER_IDX].s = gfs_manufacturer,
	[GFS_STRING_PRODUCT_IDX].s = gfs_driver_desc,
#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
	[GFS_STRING_RNDIS_CONFIG_IDX].s = "FunctionFS + RNDIS",
#endif
#ifdef CONFIG_USB_FUNCTIONFS_ETH
	[GFS_STRING_ECM_CONFIG_IDX].s = "FunctionFS + ECM",
#endif
#ifdef CONFIG_USB_FUNCTIONFS_GENERIC
	[GFS_STRING_GENERIC_CONFIG_IDX].s = "FunctionFS",
#endif
	{  } /* end of list */
};

static struct usb_gadget_strings *gfs_dev_strings[] = {
	&(struct usb_gadget_strings) {
		.language	= 0x0409,	/* en-us */
		.strings	= gfs_strings,
	},
	NULL,
};

/*
 * Up to three configurations are offered, depending on kconfig:
 * FunctionFS+RNDIS, FunctionFS+ECM and plain FunctionFS.  The
 * gfs_add_*_config() macros collapse to the constant 0 ("success,
 * nothing to do") when the corresponding option is disabled, which
 * lets gfs_bind() call them unconditionally.
 */
#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
static int gfs_do_rndis_config(struct usb_configuration *c);

static struct usb_configuration gfs_rndis_config_driver = {
	.label			= "FunctionFS + RNDIS",
	.bind			= gfs_do_rndis_config,
	.bConfigurationValue	= 1,
	/* .iConfiguration	= DYNAMIC */
	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
};
# define gfs_add_rndis_config(cdev) \
	usb_add_config(cdev, &gfs_rndis_config_driver)

#else
# define gfs_add_rndis_config(cdev)	0
#endif

#ifdef CONFIG_USB_FUNCTIONFS_ETH
static int gfs_do_ecm_config(struct usb_configuration *c);

static struct usb_configuration gfs_ecm_config_driver = {
	.label			= "FunctionFS + ECM",
	.bind			= gfs_do_ecm_config,
	.bConfigurationValue	= 1,
	/* .iConfiguration	= DYNAMIC */
	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
};
# define gfs_add_ecm_config(cdev) \
	usb_add_config(cdev, &gfs_ecm_config_driver)

#else
# define gfs_add_ecm_config(cdev)	0
#endif

#ifdef CONFIG_USB_FUNCTIONFS_GENERIC
static int gfs_do_generic_config(struct usb_configuration *c);

static struct usb_configuration gfs_generic_config_driver = {
	.label			= "FunctionFS",
	.bind			= gfs_do_generic_config,
	.bConfigurationValue	= 2,
	/* .iConfiguration	= DYNAMIC */
	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
};
# define gfs_add_generic_config(cdev) \
	usb_add_config(cdev, &gfs_generic_config_driver)

#else
# define gfs_add_generic_config(cdev)	0
#endif

static int gfs_bind(struct usb_composite_dev *cdev);
static int gfs_unbind(struct usb_composite_dev *cdev);

static struct usb_composite_driver gfs_driver = {
	.name		= gfs_short_name,
	.dev		= &gfs_dev_desc,
	.strings	= gfs_dev_strings,
	.bind		= gfs_bind,
	.unbind		= gfs_unbind,
};

/* FunctionFS state handed over by the ready callback; NULL when not bound. */
static struct ffs_data *gfs_ffs_data;
/* Bit 0 is set while gfs_driver is registered with the composite core. */
static unsigned long gfs_registered;

static int gfs_init(void)
{
	ENTER();

	return functionfs_init();
}
module_init(gfs_init);

static void gfs_exit(void)
{
	ENTER();

	if (test_and_clear_bit(0, &gfs_registered))
		usb_composite_unregister(&gfs_driver);

	functionfs_cleanup();
}
module_exit(gfs_exit);

/*
 * Called by f_fs when user space has written the descriptors: register
 * the composite driver.  Fails with -EBUSY if already registered.
 */
static int functionfs_ready_callback(struct ffs_data *ffs)
{
	int ret;

	ENTER();

	if (WARN_ON(test_and_set_bit(0, &gfs_registered)))
		return -EBUSY;

	gfs_ffs_data = ffs;
	ret = usb_composite_register(&gfs_driver);
	if (unlikely(ret < 0))
		clear_bit(0, &gfs_registered);
	return ret;
}

/* Called by f_fs when user space closes the endpoint files: unregister. */
static void functionfs_closed_callback(struct ffs_data *ffs)
{
	ENTER();

	if (test_and_clear_bit(0, &gfs_registered))
		usb_composite_unregister(&gfs_driver);
}

/* Any device name is accepted for this single-instance gadget. */
static int functionfs_check_dev_callback(const char *dev_name)
{
	return 0;
}

static int gfs_bind(struct usb_composite_dev *cdev)
{
	int ret;

	ENTER();

	if (WARN_ON(!gfs_ffs_data))
		return -ENODEV;

	ret = gether_setup(cdev->gadget, gfs_hostaddr);
	if (unlikely(ret < 0))
		goto error_quick;

	/* Pick up (possibly overridden) module-parameter IDs at bind time */
	gfs_dev_desc.idVendor = cpu_to_le16(gfs_vendor_id);
	gfs_dev_desc.idProduct = cpu_to_le16(gfs_product_id);

	snprintf(gfs_manufacturer, sizeof gfs_manufacturer, "%s %s with %s",
		 init_utsname()->sysname, init_utsname()->release,
		 cdev->gadget->name);

	/*
	 * Allocate string IDs in the same order as the GFS_STRING_*_IDX
	 * enum, one usb_string_id() call per compiled-in configuration.
	 */
	ret = usb_string_id(cdev);
	if (unlikely(ret < 0))
		goto error;
	gfs_strings[GFS_STRING_MANUFACTURER_IDX].id = ret;
	gfs_dev_desc.iManufacturer = ret;

	ret = usb_string_id(cdev);
	if (unlikely(ret < 0))
		goto error;
	gfs_strings[GFS_STRING_PRODUCT_IDX].id = ret;
	gfs_dev_desc.iProduct = ret;

#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
	ret = usb_string_id(cdev);
	if (unlikely(ret < 0))
		goto error;
	gfs_strings[GFS_STRING_RNDIS_CONFIG_IDX].id = ret;
	gfs_rndis_config_driver.iConfiguration = ret;
#endif

#ifdef CONFIG_USB_FUNCTIONFS_ETH
	ret = usb_string_id(cdev);
	if (unlikely(ret < 0))
		goto error;
	gfs_strings[GFS_STRING_ECM_CONFIG_IDX].id = ret;
	gfs_ecm_config_driver.iConfiguration = ret;
#endif

#ifdef CONFIG_USB_FUNCTIONFS_GENERIC
	ret = usb_string_id(cdev);
	if (unlikely(ret < 0))
		goto error;
	gfs_strings[GFS_STRING_GENERIC_CONFIG_IDX].id = ret;
	gfs_generic_config_driver.iConfiguration = ret;
#endif

	ret = functionfs_bind(gfs_ffs_data, cdev);
	if (unlikely(ret < 0))
		goto error;

	/* Disabled configurations are compile-time 0, see macros above */
	ret = gfs_add_rndis_config(cdev);
	if (unlikely(ret < 0))
		goto error_unbind;

	ret = gfs_add_ecm_config(cdev);
	if (unlikely(ret < 0))
		goto error_unbind;

	ret = gfs_add_generic_config(cdev);
	if (unlikely(ret < 0))
		goto error_unbind;

	return 0;

	/* Unwind in reverse order of acquisition */
error_unbind:
	functionfs_unbind(gfs_ffs_data);
error:
	gether_cleanup();
error_quick:
	gfs_ffs_data = NULL;
	return ret;
}

static int gfs_unbind(struct usb_composite_dev *cdev)
{
	ENTER();

	/*
	 * We may have been called in an error recovery from
	 * composite_bind() after gfs_unbind() failure, so we need to
	 * check if gfs_ffs_data is not NULL, since gfs_bind() handles
	 * all error recovery itself.  I'd rather we weren't called
	 * from composite on error recovery, but what are you gonna
	 * do...?
	 */
	if (gfs_ffs_data) {
		gether_cleanup();
		functionfs_unbind(gfs_ffs_data);
		gfs_ffs_data = NULL;
	}

	return 0;
}

/*
 * Common configuration setup: optionally bind an Ethernet function
 * (@eth may be NULL for the plain FunctionFS configuration), then add
 * the FunctionFS function itself.
 */
static int __gfs_do_config(struct usb_configuration *c,
			   int (*eth)(struct usb_configuration *c, u8 *ethaddr),
			   u8 *ethaddr)
{
	int ret;

	if (WARN_ON(!gfs_ffs_data))
		return -ENODEV;

	if (gadget_is_otg(c->cdev->gadget)) {
		c->descriptors = gfs_otg_desc;
		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
	}

	if (eth) {
		ret = eth(c, ethaddr);
		if (unlikely(ret < 0))
			return ret;
	}

	ret = functionfs_add(c->cdev, c, gfs_ffs_data);
	if (unlikely(ret < 0))
		return ret;

	/*
	 * After previous do_configs there may be some invalid
	 * pointers in c->interface array.  This happens every time
	 * a user space function with fewer interfaces than a user
	 * space function that was run before the new one is run.  The
	 * composite's set_config() assumes that if there are no more
	 * than MAX_CONFIG_INTERFACES interfaces in a configuration
	 * then there is a NULL pointer after the last interface in
	 * c->interface array.  We need to make sure this is true.
	 */
	if (c->next_interface_id < ARRAY_SIZE(c->interface))
		c->interface[c->next_interface_id] = NULL;

	return 0;
}

#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
static int gfs_do_rndis_config(struct usb_configuration *c)
{
	ENTER();

	return __gfs_do_config(c, rndis_bind_config, gfs_hostaddr);
}
#endif

#ifdef CONFIG_USB_FUNCTIONFS_ETH
static int gfs_do_ecm_config(struct usb_configuration *c)
{
	ENTER();

	/* Prefer CDC ECM; fall back to the CDC subset when unsupported */
	return __gfs_do_config(c,
			       can_support_ecm(c->cdev->gadget)
			     ? ecm_bind_config : geth_bind_config,
			       gfs_hostaddr);
}
#endif

#ifdef CONFIG_USB_FUNCTIONFS_GENERIC
static int gfs_do_generic_config(struct usb_configuration *c)
{
	ENTER();

	return __gfs_do_config(c, NULL, NULL);
}
#endif
gpl-2.0
vpeter4/linux-fslc
mm/sparse.c
244
21366
/* * sparse memory mappings. */ #include <linux/mm.h> #include <linux/slab.h> #include <linux/mmzone.h> #include <linux/bootmem.h> #include <linux/highmem.h> #include <linux/export.h> #include <linux/spinlock.h> #include <linux/vmalloc.h> #include "internal.h" #include <asm/dma.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> /* * Permanent SPARSEMEM data: * * 1) mem_section - memory sections, mem_map's for valid memory */ #ifdef CONFIG_SPARSEMEM_EXTREME struct mem_section *mem_section[NR_SECTION_ROOTS] ____cacheline_internodealigned_in_smp; #else struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT] ____cacheline_internodealigned_in_smp; #endif EXPORT_SYMBOL(mem_section); #ifdef NODE_NOT_IN_PAGE_FLAGS /* * If we did not store the node number in the page then we have to * do a lookup in the section_to_node_table in order to find which * node the page belongs to. */ #if MAX_NUMNODES <= 256 static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned; #else static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned; #endif int page_to_nid(const struct page *page) { return section_to_node_table[page_to_section(page)]; } EXPORT_SYMBOL(page_to_nid); static void set_section_nid(unsigned long section_nr, int nid) { section_to_node_table[section_nr] = nid; } #else /* !NODE_NOT_IN_PAGE_FLAGS */ static inline void set_section_nid(unsigned long section_nr, int nid) { } #endif #ifdef CONFIG_SPARSEMEM_EXTREME static struct mem_section noinline __init_refok *sparse_index_alloc(int nid) { struct mem_section *section = NULL; unsigned long array_size = SECTIONS_PER_ROOT * sizeof(struct mem_section); if (slab_is_available()) { if (node_state(nid, N_HIGH_MEMORY)) section = kzalloc_node(array_size, GFP_KERNEL, nid); else section = kzalloc(array_size, GFP_KERNEL); } else { section = memblock_virt_alloc_node(array_size, nid); } return section; } static int __meminit sparse_index_init(unsigned long section_nr, int nid) { unsigned long root = 
SECTION_NR_TO_ROOT(section_nr); struct mem_section *section; if (mem_section[root]) return -EEXIST; section = sparse_index_alloc(nid); if (!section) return -ENOMEM; mem_section[root] = section; return 0; } #else /* !SPARSEMEM_EXTREME */ static inline int sparse_index_init(unsigned long section_nr, int nid) { return 0; } #endif /* * Although written for the SPARSEMEM_EXTREME case, this happens * to also work for the flat array case because * NR_SECTION_ROOTS==NR_MEM_SECTIONS. */ int __section_nr(struct mem_section* ms) { unsigned long root_nr; struct mem_section* root; for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) { root = __nr_to_section(root_nr * SECTIONS_PER_ROOT); if (!root) continue; if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT))) break; } VM_BUG_ON(root_nr == NR_SECTION_ROOTS); return (root_nr * SECTIONS_PER_ROOT) + (ms - root); } /* * During early boot, before section_mem_map is used for an actual * mem_map, we use section_mem_map to store the section's NUMA * node. This keeps us from having to use another data structure. The * node information is cleared just before we store the real mem_map. 
*/ static inline unsigned long sparse_encode_early_nid(int nid) { return (nid << SECTION_NID_SHIFT); } static inline int sparse_early_nid(struct mem_section *section) { return (section->section_mem_map >> SECTION_NID_SHIFT); } /* Validate the physical addressing limitations of the model */ void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn, unsigned long *end_pfn) { unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT); /* * Sanity checks - do not allow an architecture to pass * in larger pfns than the maximum scope of sparsemem: */ if (*start_pfn > max_sparsemem_pfn) { mminit_dprintk(MMINIT_WARNING, "pfnvalidation", "Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n", *start_pfn, *end_pfn, max_sparsemem_pfn); WARN_ON_ONCE(1); *start_pfn = max_sparsemem_pfn; *end_pfn = max_sparsemem_pfn; } else if (*end_pfn > max_sparsemem_pfn) { mminit_dprintk(MMINIT_WARNING, "pfnvalidation", "End of range %lu -> %lu exceeds SPARSEMEM max %lu\n", *start_pfn, *end_pfn, max_sparsemem_pfn); WARN_ON_ONCE(1); *end_pfn = max_sparsemem_pfn; } } /* Record a memory area against a node. */ void __init memory_present(int nid, unsigned long start, unsigned long end) { unsigned long pfn; start &= PAGE_SECTION_MASK; mminit_validate_memmodel_limits(&start, &end); for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) { unsigned long section = pfn_to_section_nr(pfn); struct mem_section *ms; sparse_index_init(section, nid); set_section_nid(section, nid); ms = __nr_to_section(section); if (!ms->section_mem_map) ms->section_mem_map = sparse_encode_early_nid(nid) | SECTION_MARKED_PRESENT; } } /* * Only used by the i386 NUMA architecures, but relatively * generic code. 
*/ unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn, unsigned long end_pfn) { unsigned long pfn; unsigned long nr_pages = 0; mminit_validate_memmodel_limits(&start_pfn, &end_pfn); for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { if (nid != early_pfn_to_nid(pfn)) continue; if (pfn_present(pfn)) nr_pages += PAGES_PER_SECTION; } return nr_pages * sizeof(struct page); } /* * Subtle, we encode the real pfn into the mem_map such that * the identity pfn - section_mem_map will return the actual * physical page frame number. */ static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum) { return (unsigned long)(mem_map - (section_nr_to_pfn(pnum))); } /* * Decode mem_map from the coded memmap */ struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum) { /* mask off the extra low bits of information */ coded_mem_map &= SECTION_MAP_MASK; return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum); } static int __meminit sparse_init_one_section(struct mem_section *ms, unsigned long pnum, struct page *mem_map, unsigned long *pageblock_bitmap) { if (!present_section(ms)) return -EINVAL; ms->section_mem_map &= ~SECTION_MAP_MASK; ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) | SECTION_HAS_MEM_MAP; ms->pageblock_flags = pageblock_bitmap; return 1; } unsigned long usemap_size(void) { unsigned long size_bytes; size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8; size_bytes = roundup(size_bytes, sizeof(unsigned long)); return size_bytes; } #ifdef CONFIG_MEMORY_HOTPLUG static unsigned long *__kmalloc_section_usemap(void) { return kmalloc(usemap_size(), GFP_KERNEL); } #endif /* CONFIG_MEMORY_HOTPLUG */ #ifdef CONFIG_MEMORY_HOTREMOVE static unsigned long * __init sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat, unsigned long size) { unsigned long goal, limit; unsigned long *p; int nid; /* * A page may contain usemaps for other sections preventing the * page 
being freed and making a section unremovable while * other sections referencing the usemap retmain active. Similarly, * a pgdat can prevent a section being removed. If section A * contains a pgdat and section B contains the usemap, both * sections become inter-dependent. This allocates usemaps * from the same section as the pgdat where possible to avoid * this problem. */ goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT); limit = goal + (1UL << PA_SECTION_SHIFT); nid = early_pfn_to_nid(goal >> PAGE_SHIFT); again: p = memblock_virt_alloc_try_nid_nopanic(size, SMP_CACHE_BYTES, goal, limit, nid); if (!p && limit) { limit = 0; goto again; } return p; } static void __init check_usemap_section_nr(int nid, unsigned long *usemap) { unsigned long usemap_snr, pgdat_snr; static unsigned long old_usemap_snr = NR_MEM_SECTIONS; static unsigned long old_pgdat_snr = NR_MEM_SECTIONS; struct pglist_data *pgdat = NODE_DATA(nid); int usemap_nid; usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT); pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT); if (usemap_snr == pgdat_snr) return; if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr) /* skip redundant message */ return; old_usemap_snr = usemap_snr; old_pgdat_snr = pgdat_snr; usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr)); if (usemap_nid != nid) { printk(KERN_INFO "node %d must be removed before remove section %ld\n", nid, usemap_snr); return; } /* * There is a circular dependency. * Some platforms allow un-removable section because they will just * gather other removable sections for dynamic partitioning. * Just notify un-removable section's number here. 
*/ printk(KERN_INFO "Section %ld and %ld (node %d)", usemap_snr, pgdat_snr, nid); printk(KERN_CONT " have a circular dependency on usemap and pgdat allocations\n"); } #else static unsigned long * __init sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat, unsigned long size) { return memblock_virt_alloc_node_nopanic(size, pgdat->node_id); } static void __init check_usemap_section_nr(int nid, unsigned long *usemap) { } #endif /* CONFIG_MEMORY_HOTREMOVE */ static void __init sparse_early_usemaps_alloc_node(void *data, unsigned long pnum_begin, unsigned long pnum_end, unsigned long usemap_count, int nodeid) { void *usemap; unsigned long pnum; unsigned long **usemap_map = (unsigned long **)data; int size = usemap_size(); usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid), size * usemap_count); if (!usemap) { printk(KERN_WARNING "%s: allocation failed\n", __func__); return; } for (pnum = pnum_begin; pnum < pnum_end; pnum++) { if (!present_section_nr(pnum)) continue; usemap_map[pnum] = usemap; usemap += size; check_usemap_section_nr(nodeid, usemap_map[pnum]); } } #ifndef CONFIG_SPARSEMEM_VMEMMAP struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid) { struct page *map; unsigned long size; map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION); if (map) return map; size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION); map = memblock_virt_alloc_try_nid(size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); return map; } void __init sparse_mem_maps_populate_node(struct page **map_map, unsigned long pnum_begin, unsigned long pnum_end, unsigned long map_count, int nodeid) { void *map; unsigned long pnum; unsigned long size = sizeof(struct page) * PAGES_PER_SECTION; map = alloc_remap(nodeid, size * map_count); if (map) { for (pnum = pnum_begin; pnum < pnum_end; pnum++) { if (!present_section_nr(pnum)) continue; map_map[pnum] = map; map += size; } return; } size = PAGE_ALIGN(size); map = 
memblock_virt_alloc_try_nid(size * map_count, PAGE_SIZE, __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nodeid); if (map) { for (pnum = pnum_begin; pnum < pnum_end; pnum++) { if (!present_section_nr(pnum)) continue; map_map[pnum] = map; map += size; } return; } /* fallback */ for (pnum = pnum_begin; pnum < pnum_end; pnum++) { struct mem_section *ms; if (!present_section_nr(pnum)) continue; map_map[pnum] = sparse_mem_map_populate(pnum, nodeid); if (map_map[pnum]) continue; ms = __nr_to_section(pnum); printk(KERN_ERR "%s: sparsemem memory map backing failed " "some memory will not be available.\n", __func__); ms->section_mem_map = 0; } } #endif /* !CONFIG_SPARSEMEM_VMEMMAP */ #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER static void __init sparse_early_mem_maps_alloc_node(void *data, unsigned long pnum_begin, unsigned long pnum_end, unsigned long map_count, int nodeid) { struct page **map_map = (struct page **)data; sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end, map_count, nodeid); } #else static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum) { struct page *map; struct mem_section *ms = __nr_to_section(pnum); int nid = sparse_early_nid(ms); map = sparse_mem_map_populate(pnum, nid); if (map) return map; printk(KERN_ERR "%s: sparsemem memory map backing failed " "some memory will not be available.\n", __func__); ms->section_mem_map = 0; return NULL; } #endif void __attribute__((weak)) __meminit vmemmap_populate_print_last(void) { } /** * alloc_usemap_and_memmap - memory alloction for pageblock flags and vmemmap * @map: usemap_map for pageblock flags or mmap_map for vmemmap */ static void __init alloc_usemap_and_memmap(void (*alloc_func) (void *, unsigned long, unsigned long, unsigned long, int), void *data) { unsigned long pnum; unsigned long map_count; int nodeid_begin = 0; unsigned long pnum_begin = 0; for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) { struct mem_section *ms; if (!present_section_nr(pnum)) continue; ms = 
__nr_to_section(pnum); nodeid_begin = sparse_early_nid(ms); pnum_begin = pnum; break; } map_count = 1; for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) { struct mem_section *ms; int nodeid; if (!present_section_nr(pnum)) continue; ms = __nr_to_section(pnum); nodeid = sparse_early_nid(ms); if (nodeid == nodeid_begin) { map_count++; continue; } /* ok, we need to take cake of from pnum_begin to pnum - 1*/ alloc_func(data, pnum_begin, pnum, map_count, nodeid_begin); /* new start, update count etc*/ nodeid_begin = nodeid; pnum_begin = pnum; map_count = 1; } /* ok, last chunk */ alloc_func(data, pnum_begin, NR_MEM_SECTIONS, map_count, nodeid_begin); } /* * Allocate the accumulated non-linear sections, allocate a mem_map * for each and record the physical to section mapping. */ void __init sparse_init(void) { unsigned long pnum; struct page *map; unsigned long *usemap; unsigned long **usemap_map; int size; #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER int size2; struct page **map_map; #endif /* see include/linux/mmzone.h 'struct mem_section' definition */ BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section))); /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */ set_pageblock_order(); /* * map is using big page (aka 2M in x86 64 bit) * usemap is less one page (aka 24 bytes) * so alloc 2M (with 2M align) and 24 bytes in turn will * make next 2M slip to one more 2M later. * then in big system, the memory will have a lot of holes... * here try to allocate 2M pages continuously. * * powerpc need to call sparse_init_one_section right after each * sparse_early_mem_map_alloc, so allocate usemap_map at first. 
*/ size = sizeof(unsigned long *) * NR_MEM_SECTIONS; usemap_map = memblock_virt_alloc(size, 0); if (!usemap_map) panic("can not allocate usemap_map\n"); alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node, (void *)usemap_map); #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER size2 = sizeof(struct page *) * NR_MEM_SECTIONS; map_map = memblock_virt_alloc(size2, 0); if (!map_map) panic("can not allocate map_map\n"); alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node, (void *)map_map); #endif for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) { if (!present_section_nr(pnum)) continue; usemap = usemap_map[pnum]; if (!usemap) continue; #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER map = map_map[pnum]; #else map = sparse_early_mem_map_alloc(pnum); #endif if (!map) continue; sparse_init_one_section(__nr_to_section(pnum), pnum, map, usemap); } vmemmap_populate_print_last(); #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER memblock_free_early(__pa(map_map), size2); #endif memblock_free_early(__pa(usemap_map), size); } #ifdef CONFIG_MEMORY_HOTPLUG #ifdef CONFIG_SPARSEMEM_VMEMMAP static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid) { /* This will make the necessary allocations eventually. 
*/ return sparse_mem_map_populate(pnum, nid); } static void __kfree_section_memmap(struct page *memmap) { unsigned long start = (unsigned long)memmap; unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION); vmemmap_free(start, end); } #ifdef CONFIG_MEMORY_HOTREMOVE static void free_map_bootmem(struct page *memmap) { unsigned long start = (unsigned long)memmap; unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION); vmemmap_free(start, end); } #endif /* CONFIG_MEMORY_HOTREMOVE */ #else static struct page *__kmalloc_section_memmap(void) { struct page *page, *ret; unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION; page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size)); if (page) goto got_map_page; ret = vmalloc(memmap_size); if (ret) goto got_map_ptr; return NULL; got_map_page: ret = (struct page *)pfn_to_kaddr(page_to_pfn(page)); got_map_ptr: return ret; } static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid) { return __kmalloc_section_memmap(); } static void __kfree_section_memmap(struct page *memmap) { if (is_vmalloc_addr(memmap)) vfree(memmap); else free_pages((unsigned long)memmap, get_order(sizeof(struct page) * PAGES_PER_SECTION)); } #ifdef CONFIG_MEMORY_HOTREMOVE static void free_map_bootmem(struct page *memmap) { unsigned long maps_section_nr, removing_section_nr, i; unsigned long magic, nr_pages; struct page *page = virt_to_page(memmap); nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page)) >> PAGE_SHIFT; for (i = 0; i < nr_pages; i++, page++) { magic = (unsigned long) page->lru.next; BUG_ON(magic == NODE_INFO); maps_section_nr = pfn_to_section_nr(page_to_pfn(page)); removing_section_nr = page->private; /* * When this function is called, the removing section is * logical offlined state. This means all pages are isolated * from page allocator. If removing section's memmap is placed * on the same section, it must not be freed. 
* If it is freed, page allocator may allocate it which will * be removed physically soon. */ if (maps_section_nr != removing_section_nr) put_page_bootmem(page); } } #endif /* CONFIG_MEMORY_HOTREMOVE */ #endif /* CONFIG_SPARSEMEM_VMEMMAP */ /* * returns the number of sections whose mem_maps were properly * set. If this is <=0, then that means that the passed-in * map was not consumed and must be freed. */ int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn) { unsigned long section_nr = pfn_to_section_nr(start_pfn); struct pglist_data *pgdat = zone->zone_pgdat; struct mem_section *ms; struct page *memmap; unsigned long *usemap; unsigned long flags; int ret; /* * no locking for this, because it does its own * plus, it does a kmalloc */ ret = sparse_index_init(section_nr, pgdat->node_id); if (ret < 0 && ret != -EEXIST) return ret; memmap = kmalloc_section_memmap(section_nr, pgdat->node_id); if (!memmap) return -ENOMEM; usemap = __kmalloc_section_usemap(); if (!usemap) { __kfree_section_memmap(memmap); return -ENOMEM; } pgdat_resize_lock(pgdat, &flags); ms = __pfn_to_section(start_pfn); if (ms->section_mem_map & SECTION_MARKED_PRESENT) { ret = -EEXIST; goto out; } memset(memmap, 0, sizeof(struct page) * PAGES_PER_SECTION); ms->section_mem_map |= SECTION_MARKED_PRESENT; ret = sparse_init_one_section(ms, section_nr, memmap, usemap); out: pgdat_resize_unlock(pgdat, &flags); if (ret <= 0) { kfree(usemap); __kfree_section_memmap(memmap); } return ret; } #ifdef CONFIG_MEMORY_HOTREMOVE #ifdef CONFIG_MEMORY_FAILURE static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages) { int i; if (!memmap) return; for (i = 0; i < PAGES_PER_SECTION; i++) { if (PageHWPoison(&memmap[i])) { atomic_long_sub(1, &num_poisoned_pages); ClearPageHWPoison(&memmap[i]); } } } #else static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages) { } #endif static void free_section_usemap(struct page *memmap, unsigned long *usemap) { struct page 
*usemap_page; if (!usemap) return; usemap_page = virt_to_page(usemap); /* * Check to see if allocation came from hot-plug-add */ if (PageSlab(usemap_page) || PageCompound(usemap_page)) { kfree(usemap); if (memmap) __kfree_section_memmap(memmap); return; } /* * The usemap came from bootmem. This is packed with other usemaps * on the section which has pgdat at boot time. Just keep it as is now. */ if (memmap) free_map_bootmem(memmap); } void sparse_remove_one_section(struct zone *zone, struct mem_section *ms) { struct page *memmap = NULL; unsigned long *usemap = NULL, flags; struct pglist_data *pgdat = zone->zone_pgdat; pgdat_resize_lock(pgdat, &flags); if (ms->section_mem_map) { usemap = ms->pageblock_flags; memmap = sparse_decode_mem_map(ms->section_mem_map, __section_nr(ms)); ms->section_mem_map = 0; ms->pageblock_flags = NULL; } pgdat_resize_unlock(pgdat, &flags); clear_hwpoisoned_pages(memmap, PAGES_PER_SECTION); free_section_usemap(memmap, usemap); } #endif /* CONFIG_MEMORY_HOTREMOVE */ #endif /* CONFIG_MEMORY_HOTPLUG */
gpl-2.0
9kight/WoWSourceV9
dep/gsoap/soapServer.cpp
244
3447
/* soapServer.cpp
 * Generated by gSOAP 2.8.10 from gsoap.stub
 * Copyright(C) 2000-2012, Robert van Engelen, Genivia Inc. All Rights Reserved.
 * The generated code is released under one of the following licenses:
 * 1) GPL or 2) Genivia's license for commercial use.
 * This program is released under the GPL with the additional exemption that
 * compiling, linking, and/or using OpenSSL is allowed.
 *
 * NOTE: this file is machine-generated by soapcpp2; regenerate from the
 * .stub rather than hand-editing the dispatch logic.
 */
#if defined(__BORLANDC__)
#pragma option push -w-8060
#pragma option push -w-8004
#endif
#include "soapH.h"

SOAP_SOURCE_STAMP("@(#) soapServer.cpp ver 2.8.10 2012-09-02 20:48:00 GMT")

/*
 * soap_serve - top-level request loop for the generated SOAP server.
 *
 * Repeatedly parses an incoming request (soap_begin_serve), dispatches it
 * (soap_serve_request) and, on dispatch failure, sends a SOAP fault back.
 * Returns SOAP_OK, or the soap->error code of the first fatal failure.
 *
 * Loop termination differs by build: under WITH_FASTCGI the loop runs
 * forever (the FastCGI runtime owns the connection), otherwise it runs
 * while HTTP keep-alive is active, bounded by soap->max_keep_alive.
 */
SOAP_FMAC5 int SOAP_FMAC6 soap_serve(struct soap *soap)
{
#ifndef WITH_FASTCGI
	/* Countdown of remaining keep-alive requests for this connection. */
	unsigned int k = soap->max_keep_alive;
#endif
	do
	{
#ifndef WITH_FASTCGI
		/* Budget exhausted: tell the peer this is the last response. */
		if (soap->max_keep_alive > 0 && !--k)
			soap->keep_alive = 0;
#endif
		if (soap_begin_serve(soap))
		{	/* Errors >= SOAP_STOP are "skip this request", not fatal. */
			if (soap->error >= SOAP_STOP)
				continue;
			return soap->error;
		}
		/* Dispatch, then give the user's fserveloop hook a chance to veto. */
		if (soap_serve_request(soap) || (soap->fserveloop && soap->fserveloop(soap)))
		{
#ifdef WITH_FASTCGI
			/* FastCGI: report the fault but keep the worker alive. */
			soap_send_fault(soap);
#else
			return soap_send_fault(soap);
#endif
		}
		/* The two builds close the loop differently; the braces below are
		 * intentionally split across the #ifdef — do not "fix" them. */
#ifdef WITH_FASTCGI
		soap_destroy(soap);
		soap_end(soap);
	} while (1);
#else
	} while (soap->keep_alive);
#endif
	return SOAP_OK;
}

#ifndef WITH_NOSERVEREQUEST
/*
 * soap_serve_request - route the pending request to its skeleton.
 *
 * Peeks at the first element tag; the only operation in this service is
 * ns1:executeCommand.  Anything else yields SOAP_NO_METHOD.
 */
SOAP_FMAC5 int SOAP_FMAC6 soap_serve_request(struct soap *soap)
{
	soap_peek_element(soap);
	if (!soap_match_tag(soap, soap->tag, "ns1:executeCommand"))
		return soap_serve_ns1__executeCommand(soap);
	return soap->error = SOAP_NO_METHOD;
}
#endif

/*
 * soap_serve_ns1__executeCommand - skeleton for the executeCommand operation.
 *
 * Deserializes the request into soap_tmp_ns1__executeCommand, invokes the
 * user-implemented ns1__executeCommand(), then serializes the response.
 * The response is serialized twice: once with SOAP_IO_LENGTH set to compute
 * the HTTP Content-Length, then again for the actual send — both put-call
 * sequences must stay identical.
 */
SOAP_FMAC5 int SOAP_FMAC6 soap_serve_ns1__executeCommand(struct soap *soap)
{
	struct ns1__executeCommand soap_tmp_ns1__executeCommand;
	struct ns1__executeCommandResponse soap_tmp_ns1__executeCommandResponse;
	char * soap_tmp_string;
	soap_default_ns1__executeCommandResponse(soap, &soap_tmp_ns1__executeCommandResponse);
	soap_tmp_string = NULL;
	/* Response "result" points at a local char*; the service fills it in. */
	soap_tmp_ns1__executeCommandResponse.result = &soap_tmp_string;
	soap_default_ns1__executeCommand(soap, &soap_tmp_ns1__executeCommand);
	soap->encodingStyle = NULL;
	if (!soap_get_ns1__executeCommand(soap, &soap_tmp_ns1__executeCommand, "ns1:executeCommand", NULL))
		return soap->error;
	if (soap_body_end_in(soap)
	 || soap_envelope_end_in(soap)
	 || soap_end_recv(soap))
		return soap->error;
	/* Call the user's service implementation. */
	soap->error = ns1__executeCommand(soap, soap_tmp_ns1__executeCommand.command, soap_tmp_ns1__executeCommandResponse.result);
	if (soap->error)
		return soap->error;
	soap_serializeheader(soap);
	soap_serialize_ns1__executeCommandResponse(soap, &soap_tmp_ns1__executeCommandResponse);
	if (soap_begin_count(soap))
		return soap->error;
	/* First pass: length counting only (no bytes are emitted). */
	if (soap->mode & SOAP_IO_LENGTH)
	{	if (soap_envelope_begin_out(soap)
		 || soap_putheader(soap)
		 || soap_body_begin_out(soap)
		 || soap_put_ns1__executeCommandResponse(soap, &soap_tmp_ns1__executeCommandResponse, "ns1:executeCommandResponse", NULL)
		 || soap_body_end_out(soap)
		 || soap_envelope_end_out(soap))
			 return soap->error;
	};
	/* Second pass: real transmission. */
	if (soap_end_count(soap)
	 || soap_response(soap, SOAP_OK)
	 || soap_envelope_begin_out(soap)
	 || soap_putheader(soap)
	 || soap_body_begin_out(soap)
	 || soap_put_ns1__executeCommandResponse(soap, &soap_tmp_ns1__executeCommandResponse, "ns1:executeCommandResponse", NULL)
	 || soap_body_end_out(soap)
	 || soap_envelope_end_out(soap)
	 || soap_end_send(soap))
		return soap->error;
	return soap_closesock(soap);
}

#if defined(__BORLANDC__)
#pragma option pop
#pragma option pop
#endif

/* End of soapServer.cpp */
gpl-2.0
mirsys/amlogic_kernel
fs/gfs2/acl.c
244
2628
/* * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License version 2. */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/buffer_head.h> #include <linux/xattr.h> #include <linux/posix_acl.h> #include <linux/posix_acl_xattr.h> #include <linux/gfs2_ondisk.h> #include "gfs2.h" #include "incore.h" #include "acl.h" #include "xattr.h" #include "glock.h" #include "inode.h" #include "meta_io.h" #include "trans.h" #include "util.h" static const char *gfs2_acl_name(int type) { switch (type) { case ACL_TYPE_ACCESS: return GFS2_POSIX_ACL_ACCESS; case ACL_TYPE_DEFAULT: return GFS2_POSIX_ACL_DEFAULT; } return NULL; } struct posix_acl *gfs2_get_acl(struct inode *inode, int type) { struct gfs2_inode *ip = GFS2_I(inode); struct posix_acl *acl; const char *name; char *data; int len; if (!ip->i_eattr) return NULL; name = gfs2_acl_name(type); if (name == NULL) return ERR_PTR(-EINVAL); len = gfs2_xattr_acl_get(ip, name, &data); if (len < 0) return ERR_PTR(len); if (len == 0) return NULL; acl = posix_acl_from_xattr(&init_user_ns, data, len); kfree(data); return acl; } static int gfs2_set_mode(struct inode *inode, umode_t mode) { int error = 0; if (mode != inode->i_mode) { inode->i_mode = mode; mark_inode_dirty(inode); } return error; } int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type) { int error; int len; char *data; const char *name = gfs2_acl_name(type); BUG_ON(name == NULL); if (acl->a_count > GFS2_ACL_MAX_ENTRIES) return -EINVAL; if (type == ACL_TYPE_ACCESS) { umode_t mode = inode->i_mode; error = posix_acl_equiv_mode(acl, &mode); if (error < 0) return error; if (error == 0) acl = NULL; error = gfs2_set_mode(inode, mode); if (error) 
return error; } if (acl) { len = posix_acl_to_xattr(&init_user_ns, acl, NULL, 0); if (len == 0) return 0; data = kmalloc(len, GFP_NOFS); if (data == NULL) return -ENOMEM; error = posix_acl_to_xattr(&init_user_ns, acl, data, len); if (error < 0) goto out; } else { data = NULL; len = 0; } error = __gfs2_xattr_set(inode, name, data, len, 0, GFS2_EATYPE_SYS); if (error) goto out; if (acl) set_cached_acl(inode, type, acl); else forget_cached_acl(inode, type); out: kfree(data); return error; }
gpl-2.0
cminyard/linux-live-app-coredump
drivers/gpu/drm/i915/selftests/i915_syncmap.c
756
14600
/* * Copyright © 2017 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
* */ #include "../i915_selftest.h" #include "i915_random.h" static char * __sync_print(struct i915_syncmap *p, char *buf, unsigned long *sz, unsigned int depth, unsigned int last, unsigned int idx) { unsigned long len; unsigned int i, X; if (depth) { unsigned int d; for (d = 0; d < depth - 1; d++) { if (last & BIT(depth - d - 1)) len = scnprintf(buf, *sz, "| "); else len = scnprintf(buf, *sz, " "); buf += len; *sz -= len; } len = scnprintf(buf, *sz, "%x-> ", idx); buf += len; *sz -= len; } /* We mark bits after the prefix as "X" */ len = scnprintf(buf, *sz, "0x%016llx", p->prefix << p->height << SHIFT); buf += len; *sz -= len; X = (p->height + SHIFT) / 4; scnprintf(buf - X, *sz + X, "%*s", X, "XXXXXXXXXXXXXXXXX"); if (!p->height) { for_each_set_bit(i, (unsigned long *)&p->bitmap, KSYNCMAP) { len = scnprintf(buf, *sz, " %x:%x,", i, __sync_seqno(p)[i]); buf += len; *sz -= len; } buf -= 1; *sz += 1; } len = scnprintf(buf, *sz, "\n"); buf += len; *sz -= len; if (p->height) { for_each_set_bit(i, (unsigned long *)&p->bitmap, KSYNCMAP) { buf = __sync_print(__sync_child(p)[i], buf, sz, depth + 1, last << 1 | !!(p->bitmap >> (i + 1)), i); } } return buf; } static bool i915_syncmap_print_to_buf(struct i915_syncmap *p, char *buf, unsigned long sz) { if (!p) return false; while (p->parent) p = p->parent; __sync_print(p, buf, &sz, 0, 1, 0); return true; } static int check_syncmap_free(struct i915_syncmap **sync) { i915_syncmap_free(sync); if (*sync) { pr_err("sync not cleared after free\n"); return -EINVAL; } return 0; } static int dump_syncmap(struct i915_syncmap *sync, int err) { char *buf; if (!err) return check_syncmap_free(&sync); buf = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!buf) goto skip; if (i915_syncmap_print_to_buf(sync, buf, PAGE_SIZE)) pr_err("%s", buf); kfree(buf); skip: i915_syncmap_free(&sync); return err; } static int igt_syncmap_init(void *arg) { struct i915_syncmap *sync = (void *)~0ul; /* * Cursory check that we can initialise a random pointer and transform * 
it into the root pointer of a syncmap. */ i915_syncmap_init(&sync); return check_syncmap_free(&sync); } static int check_seqno(struct i915_syncmap *leaf, unsigned int idx, u32 seqno) { if (leaf->height) { pr_err("%s: not a leaf, height is %d\n", __func__, leaf->height); return -EINVAL; } if (__sync_seqno(leaf)[idx] != seqno) { pr_err("%s: seqno[%d], found %x, expected %x\n", __func__, idx, __sync_seqno(leaf)[idx], seqno); return -EINVAL; } return 0; } static int check_one(struct i915_syncmap **sync, u64 context, u32 seqno) { int err; err = i915_syncmap_set(sync, context, seqno); if (err) return err; if ((*sync)->height) { pr_err("Inserting first context=%llx did not return leaf (height=%d, prefix=%llx\n", context, (*sync)->height, (*sync)->prefix); return -EINVAL; } if ((*sync)->parent) { pr_err("Inserting first context=%llx created branches!\n", context); return -EINVAL; } if (hweight32((*sync)->bitmap) != 1) { pr_err("First bitmap does not contain a single entry, found %x (count=%d)!\n", (*sync)->bitmap, hweight32((*sync)->bitmap)); return -EINVAL; } err = check_seqno((*sync), ilog2((*sync)->bitmap), seqno); if (err) return err; if (!i915_syncmap_is_later(sync, context, seqno)) { pr_err("Lookup of first context=%llx/seqno=%x failed!\n", context, seqno); return -EINVAL; } return 0; } static int igt_syncmap_one(void *arg) { I915_RND_STATE(prng); IGT_TIMEOUT(end_time); struct i915_syncmap *sync; unsigned long max = 1; int err; /* * Check that inserting a new id, creates a leaf and only that leaf. 
*/ i915_syncmap_init(&sync); do { u64 context = i915_prandom_u64_state(&prng); unsigned long loop; err = check_syncmap_free(&sync); if (err) goto out; for (loop = 0; loop <= max; loop++) { err = check_one(&sync, context, prandom_u32_state(&prng)); if (err) goto out; } max++; } while (!__igt_timeout(end_time, NULL)); pr_debug("%s: Completed %lu single insertions\n", __func__, max * (max - 1) / 2); out: return dump_syncmap(sync, err); } static int check_leaf(struct i915_syncmap **sync, u64 context, u32 seqno) { int err; err = i915_syncmap_set(sync, context, seqno); if (err) return err; if ((*sync)->height) { pr_err("Inserting context=%llx did not return leaf (height=%d, prefix=%llx\n", context, (*sync)->height, (*sync)->prefix); return -EINVAL; } if (hweight32((*sync)->bitmap) != 1) { pr_err("First entry into leaf (context=%llx) does not contain a single entry, found %x (count=%d)!\n", context, (*sync)->bitmap, hweight32((*sync)->bitmap)); return -EINVAL; } err = check_seqno((*sync), ilog2((*sync)->bitmap), seqno); if (err) return err; if (!i915_syncmap_is_later(sync, context, seqno)) { pr_err("Lookup of first entry context=%llx/seqno=%x failed!\n", context, seqno); return -EINVAL; } return 0; } static int igt_syncmap_join_above(void *arg) { struct i915_syncmap *sync; unsigned int pass, order; int err; i915_syncmap_init(&sync); /* * When we have a new id that doesn't fit inside the existing tree, * we need to add a new layer above. * * 1: 0x00000001 * 2: 0x00000010 * 3: 0x00000100 * 4: 0x00001000 * ... * Each pass the common prefix shrinks and we have to insert a join. * Each join will only contain two branches, the latest of which * is always a leaf. * * If we then reuse the same set of contexts, we expect to build an * identical tree. 
*/ for (pass = 0; pass < 3; pass++) { for (order = 0; order < 64; order += SHIFT) { u64 context = BIT_ULL(order); struct i915_syncmap *join; err = check_leaf(&sync, context, 0); if (err) goto out; join = sync->parent; if (!join) /* very first insert will have no parents */ continue; if (!join->height) { pr_err("Parent with no height!\n"); err = -EINVAL; goto out; } if (hweight32(join->bitmap) != 2) { pr_err("Join does not have 2 children: %x (%d)\n", join->bitmap, hweight32(join->bitmap)); err = -EINVAL; goto out; } if (__sync_child(join)[__sync_branch_idx(join, context)] != sync) { pr_err("Leaf misplaced in parent!\n"); err = -EINVAL; goto out; } } } out: return dump_syncmap(sync, err); } static int igt_syncmap_join_below(void *arg) { struct i915_syncmap *sync; unsigned int step, order, idx; int err = -ENODEV; i915_syncmap_init(&sync); /* * Check that we can split a compacted branch by replacing it with * a join. */ for (step = 0; step < KSYNCMAP; step++) { for (order = 64 - SHIFT; order > 0; order -= SHIFT) { u64 context = step * BIT_ULL(order); err = i915_syncmap_set(&sync, context, 0); if (err) goto out; if (sync->height) { pr_err("Inserting context=%llx (order=%d, step=%d) did not return leaf (height=%d, prefix=%llx\n", context, order, step, sync->height, sync->prefix); err = -EINVAL; goto out; } } } for (step = 0; step < KSYNCMAP; step++) { for (order = SHIFT; order < 64; order += SHIFT) { u64 context = step * BIT_ULL(order); if (!i915_syncmap_is_later(&sync, context, 0)) { pr_err("1: context %llx (order=%d, step=%d) not found\n", context, order, step); err = -EINVAL; goto out; } for (idx = 1; idx < KSYNCMAP; idx++) { if (i915_syncmap_is_later(&sync, context + idx, 0)) { pr_err("1: context %llx (order=%d, step=%d) should not exist\n", context + idx, order, step); err = -EINVAL; goto out; } } } } for (order = SHIFT; order < 64; order += SHIFT) { for (step = 0; step < KSYNCMAP; step++) { u64 context = step * BIT_ULL(order); if (!i915_syncmap_is_later(&sync, 
context, 0)) { pr_err("2: context %llx (order=%d, step=%d) not found\n", context, order, step); err = -EINVAL; goto out; } } } out: return dump_syncmap(sync, err); } static int igt_syncmap_neighbours(void *arg) { I915_RND_STATE(prng); IGT_TIMEOUT(end_time); struct i915_syncmap *sync; int err = -ENODEV; /* * Each leaf holds KSYNCMAP seqno. Check that when we create KSYNCMAP * neighbouring ids, they all fit into the same leaf. */ i915_syncmap_init(&sync); do { u64 context = i915_prandom_u64_state(&prng) & ~MASK; unsigned int idx; if (i915_syncmap_is_later(&sync, context, 0)) /* Skip repeats */ continue; for (idx = 0; idx < KSYNCMAP; idx++) { err = i915_syncmap_set(&sync, context + idx, 0); if (err) goto out; if (sync->height) { pr_err("Inserting context=%llx did not return leaf (height=%d, prefix=%llx\n", context, sync->height, sync->prefix); err = -EINVAL; goto out; } if (sync->bitmap != BIT(idx + 1) - 1) { pr_err("Inserting neighbouring context=0x%llx+%d, did not fit into the same leaf bitmap=%x (%d), expected %lx (%d)\n", context, idx, sync->bitmap, hweight32(sync->bitmap), BIT(idx + 1) - 1, idx + 1); err = -EINVAL; goto out; } } } while (!__igt_timeout(end_time, NULL)); out: return dump_syncmap(sync, err); } static int igt_syncmap_compact(void *arg) { struct i915_syncmap *sync; unsigned int idx, order; int err = -ENODEV; i915_syncmap_init(&sync); /* * The syncmap are "space efficient" compressed radix trees - any * branch with only one child is skipped and replaced by the child. * * If we construct a tree with ids that are neighbouring at a non-zero * height, we form a join but each child of that join is directly a * leaf holding the single id. 
*/ for (order = SHIFT; order < 64; order += SHIFT) { err = check_syncmap_free(&sync); if (err) goto out; /* Create neighbours in the parent */ for (idx = 0; idx < KSYNCMAP; idx++) { u64 context = idx * BIT_ULL(order) + idx; err = i915_syncmap_set(&sync, context, 0); if (err) goto out; if (sync->height) { pr_err("Inserting context=%llx (order=%d, idx=%d) did not return leaf (height=%d, prefix=%llx\n", context, order, idx, sync->height, sync->prefix); err = -EINVAL; goto out; } } sync = sync->parent; if (sync->parent) { pr_err("Parent (join) of last leaf was not the sync!\n"); err = -EINVAL; goto out; } if (sync->height != order) { pr_err("Join does not have the expected height, found %d, expected %d\n", sync->height, order); err = -EINVAL; goto out; } if (sync->bitmap != BIT(KSYNCMAP) - 1) { pr_err("Join is not full!, found %x (%d) expected %lx (%d)\n", sync->bitmap, hweight32(sync->bitmap), BIT(KSYNCMAP) - 1, KSYNCMAP); err = -EINVAL; goto out; } /* Each of our children should be a leaf */ for (idx = 0; idx < KSYNCMAP; idx++) { struct i915_syncmap *leaf = __sync_child(sync)[idx]; if (leaf->height) { pr_err("Child %d is a not leaf!\n", idx); err = -EINVAL; goto out; } if (leaf->parent != sync) { pr_err("Child %d is not attached to us!\n", idx); err = -EINVAL; goto out; } if (!is_power_of_2(leaf->bitmap)) { pr_err("Child %d holds more than one id, found %x (%d)\n", idx, leaf->bitmap, hweight32(leaf->bitmap)); err = -EINVAL; goto out; } if (leaf->bitmap != BIT(idx)) { pr_err("Child %d has wrong seqno idx, found %d, expected %d\n", idx, ilog2(leaf->bitmap), idx); err = -EINVAL; goto out; } } } out: return dump_syncmap(sync, err); } static int igt_syncmap_random(void *arg) { I915_RND_STATE(prng); IGT_TIMEOUT(end_time); struct i915_syncmap *sync; unsigned long count, phase, i; u32 seqno; int err; i915_syncmap_init(&sync); /* * Having tried to test the individual operations within i915_syncmap, * run a smoketest exploring the entire u64 space with random * insertions. 
*/ count = 0; phase = jiffies + HZ/100 + 1; do { u64 context = i915_prandom_u64_state(&prng); err = i915_syncmap_set(&sync, context, 0); if (err) goto out; count++; } while (!time_after(jiffies, phase)); seqno = 0; phase = 0; do { I915_RND_STATE(ctx); u32 last_seqno = seqno; bool expect; seqno = prandom_u32_state(&prng); expect = seqno_later(last_seqno, seqno); for (i = 0; i < count; i++) { u64 context = i915_prandom_u64_state(&ctx); if (i915_syncmap_is_later(&sync, context, seqno) != expect) { pr_err("context=%llu, last=%u this=%u did not match expectation (%d)\n", context, last_seqno, seqno, expect); err = -EINVAL; goto out; } err = i915_syncmap_set(&sync, context, seqno); if (err) goto out; } phase++; } while (!__igt_timeout(end_time, NULL)); pr_debug("Completed %lu passes, each of %lu contexts\n", phase, count); out: return dump_syncmap(sync, err); } int i915_syncmap_mock_selftests(void) { static const struct i915_subtest tests[] = { SUBTEST(igt_syncmap_init), SUBTEST(igt_syncmap_one), SUBTEST(igt_syncmap_join_above), SUBTEST(igt_syncmap_join_below), SUBTEST(igt_syncmap_neighbours), SUBTEST(igt_syncmap_compact), SUBTEST(igt_syncmap_random), }; return i915_subtests(tests, NULL); }
gpl-2.0
silver-alx/ac100_kernel
fs/xfs/xfs_error.c
756
4637
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_utils.h"
#include "xfs_error.h"

#ifdef DEBUG

/* Table of error codes that should trigger a BUG() when returned;
 * zero-terminated (a 0 entry marks the end of the configured traps). */
int	xfs_etrap[XFS_ERROR_NTRAP] = {
	0,
};

/*
 * xfs_error_trap - debug hook on error-return paths.
 *
 * Returns @e unchanged.  If @e is non-zero and appears in xfs_etrap[],
 * logs it and calls BUG() so the failing path can be caught in a
 * debugger.  Scanning stops at the first 0 entry.
 */
int
xfs_error_trap(int e)
{
	int i;

	if (!e)
		return 0;
	for (i = 0; i < XFS_ERROR_NTRAP; i++) {
		if (xfs_etrap[i] == 0)
			break;
		if (e != xfs_etrap[i])
			continue;
		cmn_err(CE_NOTE, "xfs_error_trap: error %d", e);
		BUG();
		break;
	}
	return e;
}

/* Parallel tables describing the active error-injection points:
 * tag, owning filesystem id, and the fs name (heap-allocated copy).
 * A 0 in xfs_etest[] marks a free slot. */
int	xfs_etest[XFS_NUM_INJECT_ERROR];
int64_t	xfs_etest_fsid[XFS_NUM_INJECT_ERROR];
char *	xfs_etest_fsname[XFS_NUM_INJECT_ERROR];

/*
 * xfs_error_test - decide whether to inject an error at this call site.
 *
 * Returns 1 (inject) when a matching (tag, fsid) entry is armed AND the
 * 1-in-@randfactor random gate fires; returns 0 otherwise.  @expression,
 * @file and @line are only used for the log message.
 */
int
xfs_error_test(int error_tag, int *fsidp, char *expression,
	       int line, char *file, unsigned long randfactor)
{
	int i;
	int64_t fsid;

	/* Cheap random gate first, so armed tags fire only occasionally. */
	if (random32() % randfactor)
		return 0;

	memcpy(&fsid, fsidp, sizeof(xfs_fsid_t));

	for (i = 0; i < XFS_NUM_INJECT_ERROR; i++)  {
		if (xfs_etest[i] == error_tag && xfs_etest_fsid[i] == fsid) {
			cmn_err(CE_WARN,
	"Injecting error (%s) at file %s, line %d, on filesystem \"%s\"",
				expression, file, line, xfs_etest_fsname[i]);
			return 1;
		}
	}

	return 0;
}

/*
 * xfs_errortag_add - arm error injection for (@error_tag, @mp).
 *
 * Returns 0 on success or if the tag is already armed for this
 * filesystem; returns 1 when all XFS_NUM_INJECT_ERROR slots are in use.
 * The fs name is duplicated into the slot (freed by clearall).
 */
int
xfs_errortag_add(int error_tag, xfs_mount_t *mp)
{
	int i;
	int len;
	int64_t fsid;

	memcpy(&fsid, mp->m_fixedfsid, sizeof(xfs_fsid_t));

	/* Already armed for this fs? Nothing to do. */
	for (i = 0; i < XFS_NUM_INJECT_ERROR; i++)  {
		if (xfs_etest_fsid[i] == fsid && xfs_etest[i] == error_tag) {
			cmn_err(CE_WARN, "XFS error tag #%d on", error_tag);
			return 0;
		}
	}

	/* Claim the first free slot (xfs_etest[i] == 0). */
	for (i = 0; i < XFS_NUM_INJECT_ERROR; i++)  {
		if (xfs_etest[i] == 0) {
			cmn_err(CE_WARN, "Turned on XFS error tag #%d",
				error_tag);
			xfs_etest[i] = error_tag;
			xfs_etest_fsid[i] = fsid;
			len = strlen(mp->m_fsname);
			xfs_etest_fsname[i] = kmem_alloc(len + 1, KM_SLEEP);
			strcpy(xfs_etest_fsname[i], mp->m_fsname);
			return 0;
		}
	}

	cmn_err(CE_WARN, "error tag overflow, too many turned on");

	return 1;
}

/*
 * xfs_errortag_clearall - disarm every injection slot owned by @mp.
 *
 * A zero fsid in the slot comparison (fsid == 0LL) matches everything,
 * so this also clears globally-armed slots.  Frees each slot's fsname.
 * Logs a summary when @loud or when anything was cleared.  Returns 0.
 */
int
xfs_errortag_clearall(xfs_mount_t *mp, int loud)
{
	int64_t fsid;
	int cleared = 0;
	int i;

	memcpy(&fsid, mp->m_fixedfsid, sizeof(xfs_fsid_t));

	for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) {
		if ((fsid == 0LL || xfs_etest_fsid[i] == fsid) &&
		     xfs_etest[i] != 0) {
			cleared = 1;
			cmn_err(CE_WARN, "Clearing XFS error tag #%d",
				xfs_etest[i]);
			xfs_etest[i] = 0;
			xfs_etest_fsid[i] = 0LL;
			kmem_free(xfs_etest_fsname[i]);
			xfs_etest_fsname[i] = NULL;
		}
	}

	if (loud || cleared)
		cmn_err(CE_WARN,
			"Cleared all XFS error tags for filesystem \"%s\"",
			mp->m_fsname);

	return 0;
}
#endif /* DEBUG */

/*
 * xfs_fs_cmn_err - varargs front end for xfs_fs_vcmn_err.
 */
void
xfs_fs_cmn_err(int level, xfs_mount_t *mp, char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xfs_fs_vcmn_err(level, mp, fmt, ap);
	va_end(ap);
}

/*
 * xfs_cmn_err - log a message, optionally escalating an alert to a panic.
 *
 * If @panic_tag is enabled in xfs_panic_mask and @level carries CE_ALERT,
 * the level is rewritten to CE_PANIC so the message BUGs instead of just
 * logging.  DEBUG builds force SHUTDOWN_CORRUPT escalation on.
 */
void
xfs_cmn_err(int panic_tag, int level, xfs_mount_t *mp, char *fmt, ...)
{
	va_list ap;

#ifdef DEBUG
	xfs_panic_mask |= XFS_PTAG_SHUTDOWN_CORRUPT;
#endif

	if (xfs_panic_mask && (xfs_panic_mask & panic_tag)
	    && (level & CE_ALERT)) {
		level &= ~CE_ALERT;
		level |= CE_PANIC;
		cmn_err(CE_ALERT, "XFS: Transforming an alert into a BUG.");
	}
	va_start(ap, fmt);
	xfs_fs_vcmn_err(level, mp, fmt, ap);
	va_end(ap);
}

/*
 * xfs_error_report - log an internal error with its call site.
 *
 * Emits only when @level is at or below the global xfs_error_level,
 * and dumps a stack trace for post-mortem analysis.
 */
void
xfs_error_report(
	char		*tag,
	int		level,
	xfs_mount_t	*mp,
	char		*fname,
	int		linenum,
	inst_t		*ra)
{
	if (level <= xfs_error_level) {
		xfs_cmn_err(XFS_PTAG_ERROR_REPORT,
			    CE_ALERT, mp,
	"XFS internal error %s at line %d of file %s.  Caller 0x%p\n",
			    tag, linenum, fname, ra);

		xfs_stack_trace();
	}
}

/*
 * xfs_corruption_error - like xfs_error_report, plus a hex dump of the
 * first 16 bytes of the suspect buffer @p.
 */
void
xfs_corruption_error(
	char		*tag,
	int		level,
	xfs_mount_t	*mp,
	void		*p,
	char		*fname,
	int		linenum,
	inst_t		*ra)
{
	if (level <= xfs_error_level)
		xfs_hex_dump(p, 16);
	xfs_error_report(tag, level, mp, fname, linenum, ra);
}
gpl-2.0
pio-masaki/CM10.1_kernel_tostab03
arch/x86/kernel/cpu/cpufreq/sc520_freq.c
1780
4356
/*
 *	sc520_freq.c: cpufreq driver for the AMD Elan sc520
 *
 *	Copyright (C) 2005 Sean Young <sean@mess.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Based on elanfreq.c
 *
 *	2005-03-30: - initial revision
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/delay.h>
#include <linux/cpufreq.h>
#include <linux/timex.h>
#include <linux/io.h>

#include <asm/msr.h>

#define MMCR_BASE	0xfffef000	/* The default base address */
#define OFFS_CPUCTL	0x2		/* CPU Control Register */

/* ioremap'd pointer to the MMCR CPU Control Register; the low two bits
 * select the CPU clock (see sc520_freq_table). */
static __u8 __iomem *cpuctl;

#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
		"sc520_freq", msg)
#define PFX "sc520_freq: "

/* CPUCTL register value -> frequency (kHz).  Only two speeds exist. */
static struct cpufreq_frequency_table sc520_freq_table[] = {
	{0x01,	100000},
	{0x02,	133000},
	{0,	CPUFREQ_TABLE_END},
};

/*
 * sc520_freq_get_cpu_frequency - read the current speed from CPUCTL.
 *
 * Returns the frequency in kHz.  An unexpected register value is logged
 * and then deliberately falls through to the 100 MHz case.
 */
static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
{
	u8 clockspeed_reg = *cpuctl;

	switch (clockspeed_reg & 0x03) {
	default:
		printk(KERN_ERR PFX "error: cpuctl register has unexpected "
				"value %02x\n", clockspeed_reg);
		/* fall through: report, then behave as the 100 MHz setting */
	case 0x01:
		return 100000;
	case 0x02:
		return 133000;
	}
}

/*
 * sc520_freq_set_cpu_state - switch to sc520_freq_table[state].
 *
 * Sends the cpufreq PRE/POSTCHANGE notifications around a read-modify-
 * write of CPUCTL.  Interrupts are disabled across the register write so
 * the two-bit field update is not interleaved with other MMCR access.
 */
static void sc520_freq_set_cpu_state(unsigned int state)
{

	struct cpufreq_freqs	freqs;
	u8 clockspeed_reg;

	freqs.old = sc520_freq_get_cpu_frequency(0);
	freqs.new = sc520_freq_table[state].frequency;
	freqs.cpu = 0; /* AMD Elan is UP */

	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

	dprintk("attempting to set frequency to %i kHz\n",
			sc520_freq_table[state].frequency);

	local_irq_disable();

	clockspeed_reg = *cpuctl & ~0x03;
	*cpuctl = clockspeed_reg | sc520_freq_table[state].index;

	local_irq_enable();

	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
};	/* NOTE(review): stray ';' after the function body — harmless */

/* Validate a policy against the two-entry frequency table. */
static int sc520_freq_verify(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, &sc520_freq_table[0]);
}

/*
 * sc520_freq_target - cpufreq .target hook: pick the table entry that
 * satisfies (target_freq, relation) and program it.
 */
static int sc520_freq_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int newstate = 0;

	if (cpufreq_frequency_table_target(policy, sc520_freq_table,
				target_freq, relation, &newstate))
		return -EINVAL;

	sc520_freq_set_cpu_state(newstate);

	return 0;
}


/*
 * Module init and exit code
 */

/*
 * sc520_freq_cpu_init - per-policy init: verify this really is an Elan
 * SC520 (AMD, family 4, model 9), then publish the frequency table.
 */
static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
{
	struct cpuinfo_x86 *c = &cpu_data(0);
	int result;

	/* capability check */
	if (c->x86_vendor != X86_VENDOR_AMD ||
	    c->x86 != 4 || c->x86_model != 9)
		return -ENODEV;

	/* cpuinfo and default policy values */
	policy->cpuinfo.transition_latency = 1000000;	/* 1ms */
	policy->cur = sc520_freq_get_cpu_frequency(0);

	result = cpufreq_frequency_table_cpuinfo(policy, sc520_freq_table);
	if (result)
		return result;

	cpufreq_frequency_table_get_attr(sc520_freq_table, policy->cpu);

	return 0;
}


static int sc520_freq_cpu_exit(struct cpufreq_policy *policy)
{
	cpufreq_frequency_table_put_attr(policy->cpu);
	return 0;
}


static struct freq_attr *sc520_freq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};


static struct cpufreq_driver sc520_freq_driver = {
	.get	= sc520_freq_get_cpu_frequency,
	.verify	= sc520_freq_verify,
	.target	= sc520_freq_target,
	.init	= sc520_freq_cpu_init,
	.exit	= sc520_freq_cpu_exit,
	.name	= "sc520_freq",
	.owner	= THIS_MODULE,
	.attr	= sc520_freq_attr,
};


/*
 * sc520_freq_init - module entry: check for the SC520, map the CPUCTL
 * register, and register the cpufreq driver.  Unmaps on registration
 * failure.
 */
static int __init sc520_freq_init(void)
{
	struct cpuinfo_x86 *c = &cpu_data(0);
	int err;

	/* Test if we have the right hardware */
	if (c->x86_vendor != X86_VENDOR_AMD ||
	    c->x86 != 4 || c->x86_model != 9) {
		dprintk("no Elan SC520 processor found!\n");
		return -ENODEV;
	}
	cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1);
	if (!cpuctl) {
		printk(KERN_ERR "sc520_freq: error: failed to remap memory\n");
		return -ENOMEM;
	}

	err = cpufreq_register_driver(&sc520_freq_driver);
	if (err)
		iounmap(cpuctl);

	return err;
}


static void __exit sc520_freq_exit(void)
{
	cpufreq_unregister_driver(&sc520_freq_driver);
	iounmap(cpuctl);
}


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sean Young <sean@mess.org>");
MODULE_DESCRIPTION("cpufreq driver for AMD's Elan sc520 CPU");

module_init(sc520_freq_init);
module_exit(sc520_freq_exit);
gpl-2.0
ASAZING/android_kernel_lanix_l900
drivers/gpu/drm/qxl/qxl_ioctl.c
2036
12041
/* * Copyright 2013 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Dave Airlie * Alon Levy */ #include "qxl_drv.h" #include "qxl_object.h" /* * TODO: allocating a new gem(in qxl_bo) for each request. * This is wasteful since bo's are page aligned. 
*/ static int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct qxl_device *qdev = dev->dev_private; struct drm_qxl_alloc *qxl_alloc = data; int ret; struct qxl_bo *qobj; uint32_t handle; u32 domain = QXL_GEM_DOMAIN_VRAM; if (qxl_alloc->size == 0) { DRM_ERROR("invalid size %d\n", qxl_alloc->size); return -EINVAL; } ret = qxl_gem_object_create_with_handle(qdev, file_priv, domain, qxl_alloc->size, NULL, &qobj, &handle); if (ret) { DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret); return -ENOMEM; } qxl_alloc->handle = handle; return 0; } static int qxl_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct qxl_device *qdev = dev->dev_private; struct drm_qxl_map *qxl_map = data; return qxl_mode_dumb_mmap(file_priv, qdev->ddev, qxl_map->handle, &qxl_map->offset); } /* * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's * are on vram). * *(dst + dst_off) = qxl_bo_physical_address(src, src_off) */ static void apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off, struct qxl_bo *src, uint64_t src_off) { void *reloc_page; reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK); *(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev, src, src_off); qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page); } static void apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off, struct qxl_bo *src) { uint32_t id = 0; void *reloc_page; if (src && !src->is_primary) id = src->surface_id; reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK); *(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id; qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page); } /* return holding the reference to this object */ static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev, struct drm_file *file_priv, uint64_t handle, struct qxl_reloc_list *reloc_list) { struct drm_gem_object *gobj; struct qxl_bo 
*qobj; int ret; gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle); if (!gobj) { DRM_ERROR("bad bo handle %lld\n", handle); return NULL; } qobj = gem_to_qxl_bo(gobj); ret = qxl_bo_list_add(reloc_list, qobj); if (ret) return NULL; return qobj; } /* * Usage of execbuffer: * Relocations need to take into account the full QXLDrawable size. * However, the command as passed from user space must *not* contain the initial * QXLReleaseInfo struct (first XXX bytes) */ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct qxl_device *qdev = dev->dev_private; struct drm_qxl_execbuffer *execbuffer = data; struct drm_qxl_command user_cmd; int cmd_num; struct qxl_bo *reloc_src_bo; struct qxl_bo *reloc_dst_bo; struct drm_qxl_reloc reloc; void *fb_cmd; int i, ret; struct qxl_reloc_list reloc_list; int unwritten; uint32_t reloc_dst_offset; INIT_LIST_HEAD(&reloc_list.bos); for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) { struct qxl_release *release; struct qxl_bo *cmd_bo; int release_type; struct drm_qxl_command *commands = (struct drm_qxl_command *)(uintptr_t)execbuffer->commands; if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num], sizeof(user_cmd))) return -EFAULT; switch (user_cmd.type) { case QXL_CMD_DRAW: release_type = QXL_RELEASE_DRAWABLE; break; case QXL_CMD_SURFACE: case QXL_CMD_CURSOR: default: DRM_DEBUG("Only draw commands in execbuffers\n"); return -EINVAL; break; } if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info)) return -EINVAL; if (!access_ok(VERIFY_READ, (void *)(unsigned long)user_cmd.command, user_cmd.command_size)) return -EFAULT; ret = qxl_alloc_release_reserved(qdev, sizeof(union qxl_release_info) + user_cmd.command_size, release_type, &release, &cmd_bo); if (ret) return ret; /* TODO copy slow path code from i915 */ fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE)); unwritten = __copy_from_user_inatomic_nocache(fb_cmd + 
sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)user_cmd.command, user_cmd.command_size); qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd); if (unwritten) { DRM_ERROR("got unwritten %d\n", unwritten); qxl_release_unreserve(qdev, release); qxl_release_free(qdev, release); return -EFAULT; } for (i = 0 ; i < user_cmd.relocs_num; ++i) { if (DRM_COPY_FROM_USER(&reloc, &((struct drm_qxl_reloc *)(uintptr_t)user_cmd.relocs)[i], sizeof(reloc))) { qxl_bo_list_unreserve(&reloc_list, true); qxl_release_unreserve(qdev, release); qxl_release_free(qdev, release); return -EFAULT; } /* add the bos to the list of bos to validate - need to validate first then process relocs? */ if (reloc.dst_handle) { reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv, reloc.dst_handle, &reloc_list); if (!reloc_dst_bo) { qxl_bo_list_unreserve(&reloc_list, true); qxl_release_unreserve(qdev, release); qxl_release_free(qdev, release); return -EINVAL; } reloc_dst_offset = 0; } else { reloc_dst_bo = cmd_bo; reloc_dst_offset = release->release_offset; } /* reserve and validate the reloc dst bo */ if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) { reloc_src_bo = qxlhw_handle_to_bo(qdev, file_priv, reloc.src_handle, &reloc_list); if (!reloc_src_bo) { if (reloc_dst_bo != cmd_bo) drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base); qxl_bo_list_unreserve(&reloc_list, true); qxl_release_unreserve(qdev, release); qxl_release_free(qdev, release); return -EINVAL; } } else reloc_src_bo = NULL; if (reloc.reloc_type == QXL_RELOC_TYPE_BO) { apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo, reloc.src_offset); } else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) { apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo); } else { DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type); return -EINVAL; } if (reloc_src_bo && reloc_src_bo != cmd_bo) { qxl_release_add_res(qdev, release, 
reloc_src_bo); drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base); } if (reloc_dst_bo != cmd_bo) drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base); } qxl_fence_releaseable(qdev, release); ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true); if (ret == -ERESTARTSYS) { qxl_release_unreserve(qdev, release); qxl_release_free(qdev, release); qxl_bo_list_unreserve(&reloc_list, true); return ret; } qxl_release_unreserve(qdev, release); } qxl_bo_list_unreserve(&reloc_list, 0); return 0; } static int qxl_update_area_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct qxl_device *qdev = dev->dev_private; struct drm_qxl_update_area *update_area = data; struct qxl_rect area = {.left = update_area->left, .top = update_area->top, .right = update_area->right, .bottom = update_area->bottom}; int ret; struct drm_gem_object *gobj = NULL; struct qxl_bo *qobj = NULL; if (update_area->left >= update_area->right || update_area->top >= update_area->bottom) return -EINVAL; gobj = drm_gem_object_lookup(dev, file, update_area->handle); if (gobj == NULL) return -ENOENT; qobj = gem_to_qxl_bo(gobj); ret = qxl_bo_reserve(qobj, false); if (ret) goto out; if (!qobj->pin_count) { qxl_ttm_placement_from_domain(qobj, qobj->type); ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, true, false); if (unlikely(ret)) goto out; } ret = qxl_bo_check_id(qdev, qobj); if (ret) goto out2; if (!qobj->surface_id) DRM_ERROR("got update area for surface with no id %d\n", update_area->handle); ret = qxl_io_update_area(qdev, qobj, &area); out2: qxl_bo_unreserve(qobj); out: drm_gem_object_unreference_unlocked(gobj); return ret; } static int qxl_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct qxl_device *qdev = dev->dev_private; struct drm_qxl_getparam *param = data; switch (param->param) { case QXL_PARAM_NUM_SURFACES: param->value = qdev->rom->n_surfaces; break; case QXL_PARAM_MAX_RELOCS: param->value = 
QXL_MAX_RES; break; default: return -EINVAL; } return 0; } static int qxl_clientcap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct qxl_device *qdev = dev->dev_private; struct drm_qxl_clientcap *param = data; int byte, idx; byte = param->index / 8; idx = param->index % 8; if (qdev->pdev->revision < 4) return -ENOSYS; if (byte >= 58) return -ENOSYS; if (qdev->rom->client_capabilities[byte] & (1 << idx)) return 0; return -ENOSYS; } static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct qxl_device *qdev = dev->dev_private; struct drm_qxl_alloc_surf *param = data; struct qxl_bo *qobj; int handle; int ret; int size, actual_stride; struct qxl_surface surf; /* work out size allocate bo with handle */ actual_stride = param->stride < 0 ? -param->stride : param->stride; size = actual_stride * param->height + actual_stride; surf.format = param->format; surf.width = param->width; surf.height = param->height; surf.stride = param->stride; surf.data = 0; ret = qxl_gem_object_create_with_handle(qdev, file, QXL_GEM_DOMAIN_SURFACE, size, &surf, &qobj, &handle); if (ret) { DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret); return -ENOMEM; } else param->handle = handle; return ret; } struct drm_ioctl_desc qxl_ioctls[] = { DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl, DRM_AUTH|DRM_UNLOCKED), }; int qxl_max_ioctls = DRM_ARRAY_SIZE(qxl_ioctls);
gpl-2.0
jmc8/linux
drivers/hid/usbhid/usbkbd.c
2036
11775
/* * Copyright (c) 1999-2001 Vojtech Pavlik * * USB HIDBP Keyboard support */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/init.h> #include <linux/usb/input.h> #include <linux/hid.h> /* * Version Information */ #define DRIVER_VERSION "" #define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@ucw.cz>" #define DRIVER_DESC "USB HID Boot Protocol keyboard driver" #define DRIVER_LICENSE "GPL" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE(DRIVER_LICENSE); static const unsigned char usb_kbd_keycode[256] = { 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38, 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 28, 1, 14, 15, 57, 12, 13, 26, 27, 43, 43, 39, 40, 41, 51, 52, 53, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 87, 88, 99, 70,119,110,102,104,111,107,109,106, 105,108,103, 69, 98, 55, 74, 78, 96, 79, 80, 81, 75, 76, 77, 71, 72, 73, 82, 83, 86,127,116,117,183,184,185,186,187,188,189,190, 
191,192,193,194,134,138,130,132,128,129,131,137,133,135,136,113, 115,114, 0, 0, 0,121, 0, 89, 93,124, 92, 94, 95, 0, 0, 0, 122,123, 90, 91, 85, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 42, 56,125, 97, 54,100,126,164,166,165,163,161,115,114,113, 150,158,159,128,136,177,178,176,142,152,173,140 }; /** * struct usb_kbd - state of each attached keyboard * @dev: input device associated with this keyboard * @usbdev: usb device associated with this keyboard * @old: data received in the past from the @irq URB representing which * keys were pressed. By comparing with the current list of keys * that are pressed, we are able to see key releases. * @irq: URB for receiving a list of keys that are pressed when a * new key is pressed or a key that was pressed is released. * @led: URB for sending LEDs (e.g. numlock, ...) * @newleds: data that will be sent with the @led URB representing which LEDs should be on * @name: Name of the keyboard. @dev's name field points to this buffer * @phys: Physical path of the keyboard. @dev's phys field points to this * buffer * @new: Buffer for the @irq URB * @cr: Control request for @led URB * @leds: Buffer for the @led URB * @new_dma: DMA address for @irq URB * @leds_dma: DMA address for @led URB * @leds_lock: spinlock that protects @leds, @newleds, and @led_urb_submitted * @led_urb_submitted: indicates whether @led is in progress, i.e. 
it has been * submitted and its completion handler has not returned yet * without resubmitting @led */ struct usb_kbd { struct input_dev *dev; struct usb_device *usbdev; unsigned char old[8]; struct urb *irq, *led; unsigned char newleds; char name[128]; char phys[64]; unsigned char *new; struct usb_ctrlrequest *cr; unsigned char *leds; dma_addr_t new_dma; dma_addr_t leds_dma; spinlock_t leds_lock; bool led_urb_submitted; }; static void usb_kbd_irq(struct urb *urb) { struct usb_kbd *kbd = urb->context; int i; switch (urb->status) { case 0: /* success */ break; case -ECONNRESET: /* unlink */ case -ENOENT: case -ESHUTDOWN: return; /* -EPIPE: should clear the halt */ default: /* error */ goto resubmit; } for (i = 0; i < 8; i++) input_report_key(kbd->dev, usb_kbd_keycode[i + 224], (kbd->new[0] >> i) & 1); for (i = 2; i < 8; i++) { if (kbd->old[i] > 3 && memscan(kbd->new + 2, kbd->old[i], 6) == kbd->new + 8) { if (usb_kbd_keycode[kbd->old[i]]) input_report_key(kbd->dev, usb_kbd_keycode[kbd->old[i]], 0); else hid_info(urb->dev, "Unknown key (scancode %#x) released.\n", kbd->old[i]); } if (kbd->new[i] > 3 && memscan(kbd->old + 2, kbd->new[i], 6) == kbd->old + 8) { if (usb_kbd_keycode[kbd->new[i]]) input_report_key(kbd->dev, usb_kbd_keycode[kbd->new[i]], 1); else hid_info(urb->dev, "Unknown key (scancode %#x) pressed.\n", kbd->new[i]); } } input_sync(kbd->dev); memcpy(kbd->old, kbd->new, 8); resubmit: i = usb_submit_urb (urb, GFP_ATOMIC); if (i) hid_err(urb->dev, "can't resubmit intr, %s-%s/input0, status %d", kbd->usbdev->bus->bus_name, kbd->usbdev->devpath, i); } static int usb_kbd_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) { unsigned long flags; struct usb_kbd *kbd = input_get_drvdata(dev); if (type != EV_LED) return -1; spin_lock_irqsave(&kbd->leds_lock, flags); kbd->newleds = (!!test_bit(LED_KANA, dev->led) << 3) | (!!test_bit(LED_COMPOSE, dev->led) << 3) | (!!test_bit(LED_SCROLLL, dev->led) << 2) | (!!test_bit(LED_CAPSL, dev->led) << 
1) | (!!test_bit(LED_NUML, dev->led)); if (kbd->led_urb_submitted){ spin_unlock_irqrestore(&kbd->leds_lock, flags); return 0; } if (*(kbd->leds) == kbd->newleds){ spin_unlock_irqrestore(&kbd->leds_lock, flags); return 0; } *(kbd->leds) = kbd->newleds; kbd->led->dev = kbd->usbdev; if (usb_submit_urb(kbd->led, GFP_ATOMIC)) pr_err("usb_submit_urb(leds) failed\n"); else kbd->led_urb_submitted = true; spin_unlock_irqrestore(&kbd->leds_lock, flags); return 0; } static void usb_kbd_led(struct urb *urb) { unsigned long flags; struct usb_kbd *kbd = urb->context; if (urb->status) hid_warn(urb->dev, "led urb status %d received\n", urb->status); spin_lock_irqsave(&kbd->leds_lock, flags); if (*(kbd->leds) == kbd->newleds){ kbd->led_urb_submitted = false; spin_unlock_irqrestore(&kbd->leds_lock, flags); return; } *(kbd->leds) = kbd->newleds; kbd->led->dev = kbd->usbdev; if (usb_submit_urb(kbd->led, GFP_ATOMIC)){ hid_err(urb->dev, "usb_submit_urb(leds) failed\n"); kbd->led_urb_submitted = false; } spin_unlock_irqrestore(&kbd->leds_lock, flags); } static int usb_kbd_open(struct input_dev *dev) { struct usb_kbd *kbd = input_get_drvdata(dev); kbd->irq->dev = kbd->usbdev; if (usb_submit_urb(kbd->irq, GFP_KERNEL)) return -EIO; return 0; } static void usb_kbd_close(struct input_dev *dev) { struct usb_kbd *kbd = input_get_drvdata(dev); usb_kill_urb(kbd->irq); } static int usb_kbd_alloc_mem(struct usb_device *dev, struct usb_kbd *kbd) { if (!(kbd->irq = usb_alloc_urb(0, GFP_KERNEL))) return -1; if (!(kbd->led = usb_alloc_urb(0, GFP_KERNEL))) return -1; if (!(kbd->new = usb_alloc_coherent(dev, 8, GFP_ATOMIC, &kbd->new_dma))) return -1; if (!(kbd->cr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL))) return -1; if (!(kbd->leds = usb_alloc_coherent(dev, 1, GFP_ATOMIC, &kbd->leds_dma))) return -1; return 0; } static void usb_kbd_free_mem(struct usb_device *dev, struct usb_kbd *kbd) { usb_free_urb(kbd->irq); usb_free_urb(kbd->led); usb_free_coherent(dev, 8, kbd->new, kbd->new_dma); 
kfree(kbd->cr); usb_free_coherent(dev, 1, kbd->leds, kbd->leds_dma); } static int usb_kbd_probe(struct usb_interface *iface, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(iface); struct usb_host_interface *interface; struct usb_endpoint_descriptor *endpoint; struct usb_kbd *kbd; struct input_dev *input_dev; int i, pipe, maxp; int error = -ENOMEM; interface = iface->cur_altsetting; if (interface->desc.bNumEndpoints != 1) return -ENODEV; endpoint = &interface->endpoint[0].desc; if (!usb_endpoint_is_int_in(endpoint)) return -ENODEV; pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress); maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe)); kbd = kzalloc(sizeof(struct usb_kbd), GFP_KERNEL); input_dev = input_allocate_device(); if (!kbd || !input_dev) goto fail1; if (usb_kbd_alloc_mem(dev, kbd)) goto fail2; kbd->usbdev = dev; kbd->dev = input_dev; spin_lock_init(&kbd->leds_lock); if (dev->manufacturer) strlcpy(kbd->name, dev->manufacturer, sizeof(kbd->name)); if (dev->product) { if (dev->manufacturer) strlcat(kbd->name, " ", sizeof(kbd->name)); strlcat(kbd->name, dev->product, sizeof(kbd->name)); } if (!strlen(kbd->name)) snprintf(kbd->name, sizeof(kbd->name), "USB HIDBP Keyboard %04x:%04x", le16_to_cpu(dev->descriptor.idVendor), le16_to_cpu(dev->descriptor.idProduct)); usb_make_path(dev, kbd->phys, sizeof(kbd->phys)); strlcat(kbd->phys, "/input0", sizeof(kbd->phys)); input_dev->name = kbd->name; input_dev->phys = kbd->phys; usb_to_input_id(dev, &input_dev->id); input_dev->dev.parent = &iface->dev; input_set_drvdata(input_dev, kbd); input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_LED) | BIT_MASK(EV_REP); input_dev->ledbit[0] = BIT_MASK(LED_NUML) | BIT_MASK(LED_CAPSL) | BIT_MASK(LED_SCROLLL) | BIT_MASK(LED_COMPOSE) | BIT_MASK(LED_KANA); for (i = 0; i < 255; i++) set_bit(usb_kbd_keycode[i], input_dev->keybit); clear_bit(0, input_dev->keybit); input_dev->event = usb_kbd_event; input_dev->open = usb_kbd_open; input_dev->close = 
usb_kbd_close; usb_fill_int_urb(kbd->irq, dev, pipe, kbd->new, (maxp > 8 ? 8 : maxp), usb_kbd_irq, kbd, endpoint->bInterval); kbd->irq->transfer_dma = kbd->new_dma; kbd->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; kbd->cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE; kbd->cr->bRequest = 0x09; kbd->cr->wValue = cpu_to_le16(0x200); kbd->cr->wIndex = cpu_to_le16(interface->desc.bInterfaceNumber); kbd->cr->wLength = cpu_to_le16(1); usb_fill_control_urb(kbd->led, dev, usb_sndctrlpipe(dev, 0), (void *) kbd->cr, kbd->leds, 1, usb_kbd_led, kbd); kbd->led->transfer_dma = kbd->leds_dma; kbd->led->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; error = input_register_device(kbd->dev); if (error) goto fail2; usb_set_intfdata(iface, kbd); device_set_wakeup_enable(&dev->dev, 1); return 0; fail2: usb_kbd_free_mem(dev, kbd); fail1: input_free_device(input_dev); kfree(kbd); return error; } static void usb_kbd_disconnect(struct usb_interface *intf) { struct usb_kbd *kbd = usb_get_intfdata (intf); usb_set_intfdata(intf, NULL); if (kbd) { usb_kill_urb(kbd->irq); input_unregister_device(kbd->dev); usb_kill_urb(kbd->led); usb_kbd_free_mem(interface_to_usbdev(intf), kbd); kfree(kbd); } } static struct usb_device_id usb_kbd_id_table [] = { { USB_INTERFACE_INFO(USB_INTERFACE_CLASS_HID, USB_INTERFACE_SUBCLASS_BOOT, USB_INTERFACE_PROTOCOL_KEYBOARD) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE (usb, usb_kbd_id_table); static struct usb_driver usb_kbd_driver = { .name = "usbkbd", .probe = usb_kbd_probe, .disconnect = usb_kbd_disconnect, .id_table = usb_kbd_id_table, }; module_usb_driver(usb_kbd_driver);
gpl-2.0
Motorhead1991/android_kernel_dell_ventana
drivers/ata/pata_qdi.c
4596
8659
/* * pata_qdi.c - QDI VLB ATA controllers * (C) 2006 Red Hat * * This driver mostly exists as a proof of concept for non PCI devices under * libata. While the QDI6580 was 'neat' in 1993 it is no longer terribly * useful. * * Tuning code written from the documentation at * http://www.ryston.cz/petr/vlb/qd6500.html * http://www.ryston.cz/petr/vlb/qd6580.html * * Probe code based on drivers/ide/legacy/qd65xx.c * Rewritten from the work of Colten Edwards <pje120@cs.usask.ca> by * Samuel Thibault <samuel.thibault@ens-lyon.org> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #include <linux/platform_device.h> #define DRV_NAME "pata_qdi" #define DRV_VERSION "0.3.1" #define NR_HOST 4 /* Two 6580s */ struct qdi_data { unsigned long timing; u8 clock[2]; u8 last; int fast; struct platform_device *platform_dev; }; static struct ata_host *qdi_host[NR_HOST]; static struct qdi_data qdi_data[NR_HOST]; static int nr_qdi_host; #ifdef MODULE static int probe_qdi = 1; #else static int probe_qdi; #endif static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev) { struct ata_timing t; struct qdi_data *qdi = ap->host->private_data; int active, recovery; u8 timing; /* Get the timing data in cycles */ ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000); if (qdi->fast) { active = 8 - clamp_val(t.active, 1, 8); recovery = 18 - clamp_val(t.recover, 3, 18); } else { active = 9 - clamp_val(t.active, 2, 9); recovery = 15 - clamp_val(t.recover, 0, 15); } timing = (recovery << 4) | active | 0x08; qdi->clock[adev->devno] = timing; outb(timing, qdi->timing); } static void qdi6580_set_piomode(struct ata_port *ap, struct ata_device *adev) { struct ata_timing t; struct qdi_data *qdi = ap->host->private_data; int active, recovery; u8 timing; /* Get the timing data in cycles */ ata_timing_compute(adev, adev->pio_mode, &t, 
30303, 1000); if (qdi->fast) { active = 8 - clamp_val(t.active, 1, 8); recovery = 18 - clamp_val(t.recover, 3, 18); } else { active = 9 - clamp_val(t.active, 2, 9); recovery = 15 - clamp_val(t.recover, 0, 15); } timing = (recovery << 4) | active | 0x08; qdi->clock[adev->devno] = timing; outb(timing, qdi->timing); /* Clear the FIFO */ if (adev->class != ATA_DEV_ATA) outb(0x5F, (qdi->timing & 0xFFF0) + 3); } /** * qdi_qc_issue - command issue * @qc: command pending * * Called when the libata layer is about to issue a command. We wrap * this interface so that we can load the correct ATA timings. */ static unsigned int qdi_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct ata_device *adev = qc->dev; struct qdi_data *qdi = ap->host->private_data; if (qdi->clock[adev->devno] != qdi->last) { if (adev->pio_mode) { qdi->last = qdi->clock[adev->devno]; outb(qdi->clock[adev->devno], qdi->timing); } } return ata_sff_qc_issue(qc); } static unsigned int qdi_data_xfer(struct ata_device *dev, unsigned char *buf, unsigned int buflen, int rw) { if (ata_id_has_dword_io(dev->id)) { struct ata_port *ap = dev->link->ap; int slop = buflen & 3; if (rw == READ) ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); else iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); if (unlikely(slop)) { __le32 pad; if (rw == READ) { pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr)); memcpy(buf + buflen - slop, &pad, slop); } else { memcpy(&pad, buf + buflen - slop, slop); iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr); } buflen += 4 - slop; } } else buflen = ata_sff_data_xfer(dev, buf, buflen, rw); return buflen; } static struct scsi_host_template qdi_sht = { ATA_PIO_SHT(DRV_NAME), }; static struct ata_port_operations qdi6500_port_ops = { .inherits = &ata_sff_port_ops, .qc_issue = qdi_qc_issue, .sff_data_xfer = qdi_data_xfer, .cable_detect = ata_cable_40wire, .set_piomode = qdi6500_set_piomode, }; static struct ata_port_operations qdi6580_port_ops = { .inherits = 
&qdi6500_port_ops, .set_piomode = qdi6580_set_piomode, }; /** * qdi_init_one - attach a qdi interface * @type: Type to display * @io: I/O port start * @irq: interrupt line * @fast: True if on a > 33Mhz VLB * * Register an ISA bus IDE interface. Such interfaces are PIO and we * assume do not support IRQ sharing. */ static __init int qdi_init_one(unsigned long port, int type, unsigned long io, int irq, int fast) { unsigned long ctl = io + 0x206; struct platform_device *pdev; struct ata_host *host; struct ata_port *ap; void __iomem *io_addr, *ctl_addr; int ret; /* * Fill in a probe structure first of all */ pdev = platform_device_register_simple(DRV_NAME, nr_qdi_host, NULL, 0); if (IS_ERR(pdev)) return PTR_ERR(pdev); ret = -ENOMEM; io_addr = devm_ioport_map(&pdev->dev, io, 8); ctl_addr = devm_ioport_map(&pdev->dev, ctl, 1); if (!io_addr || !ctl_addr) goto fail; ret = -ENOMEM; host = ata_host_alloc(&pdev->dev, 1); if (!host) goto fail; ap = host->ports[0]; if (type == 6580) { ap->ops = &qdi6580_port_ops; ap->pio_mask = ATA_PIO4; ap->flags |= ATA_FLAG_SLAVE_POSS; } else { ap->ops = &qdi6500_port_ops; ap->pio_mask = ATA_PIO2; /* Actually PIO3 !IORDY is possible */ ap->flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_IORDY; } ap->ioaddr.cmd_addr = io_addr; ap->ioaddr.altstatus_addr = ctl_addr; ap->ioaddr.ctl_addr = ctl_addr; ata_sff_std_ports(&ap->ioaddr); ata_port_desc(ap, "cmd %lx ctl %lx", io, ctl); /* * Hook in a private data structure per channel */ ap->private_data = &qdi_data[nr_qdi_host]; qdi_data[nr_qdi_host].timing = port; qdi_data[nr_qdi_host].fast = fast; qdi_data[nr_qdi_host].platform_dev = pdev; printk(KERN_INFO DRV_NAME": qd%d at 0x%lx.\n", type, io); /* activate */ ret = ata_host_activate(host, irq, ata_sff_interrupt, 0, &qdi_sht); if (ret) goto fail; qdi_host[nr_qdi_host++] = dev_get_drvdata(&pdev->dev); return 0; fail: platform_device_unregister(pdev); return ret; } /** * qdi_init - attach qdi interfaces * * Attach qdi IDE interfaces by scanning the ports it 
may occupy. */ static __init int qdi_init(void) { unsigned long flags; static const unsigned long qd_port[2] = { 0x30, 0xB0 }; static const unsigned long ide_port[2] = { 0x170, 0x1F0 }; static const int ide_irq[2] = { 14, 15 }; int ct = 0; int i; if (probe_qdi == 0) return -ENODEV; /* * Check each possible QD65xx base address */ for (i = 0; i < 2; i++) { unsigned long port = qd_port[i]; u8 r, res; if (request_region(port, 2, "pata_qdi")) { /* Check for a card */ local_irq_save(flags); r = inb_p(port); outb_p(0x19, port); res = inb_p(port); outb_p(r, port); local_irq_restore(flags); /* Fail */ if (res == 0x19) { release_region(port, 2); continue; } /* Passes the presence test */ r = inb_p(port + 1); /* Check port agrees with port set */ if ((r & 2) >> 1 != i) { release_region(port, 2); continue; } /* Check card type */ if ((r & 0xF0) == 0xC0) { /* QD6500: single channel */ if (r & 8) { /* Disabled ? */ release_region(port, 2); continue; } if (qdi_init_one(port, 6500, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04) == 0) ct++; } if (((r & 0xF0) == 0xA0) || (r & 0xF0) == 0x50) { /* QD6580: dual channel */ if (!request_region(port + 2 , 2, "pata_qdi")) { release_region(port, 2); continue; } res = inb(port + 3); if (res & 1) { /* Single channel mode */ if (qdi_init_one(port, 6580, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04) == 0) ct++; } else { /* Dual channel mode */ if (qdi_init_one(port, 6580, 0x1F0, 14, r & 0x04) == 0) ct++; if (qdi_init_one(port + 2, 6580, 0x170, 15, r & 0x04) == 0) ct++; } } } } if (ct != 0) return 0; return -ENODEV; } static __exit void qdi_exit(void) { int i; for (i = 0; i < nr_qdi_host; i++) { ata_host_detach(qdi_host[i]); /* Free the control resource. The 6580 dual channel has the resources * claimed as a pair of 2 byte resources so we need no special cases... 
*/ release_region(qdi_data[i].timing, 2); platform_device_unregister(qdi_data[i].platform_dev); } } MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for qdi ATA"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); module_init(qdi_init); module_exit(qdi_exit); module_param(probe_qdi, int, 0);
gpl-2.0
IOKP-kitkat/kernel_samsung_hlte
drivers/acpi/acpica/evgpe.c
4852
22405
/****************************************************************************** * * Module Name: evgpe - General Purpose Event handling and dispatch * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acevents.h" #include "acnamesp.h" #define _COMPONENT ACPI_EVENTS ACPI_MODULE_NAME("evgpe") #if (!ACPI_REDUCED_HARDWARE) /* Entire module */ /* Local prototypes */ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context); static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context); /******************************************************************************* * * FUNCTION: acpi_ev_update_gpe_enable_mask * * PARAMETERS: gpe_event_info - GPE to update * * RETURN: Status * * DESCRIPTION: Updates GPE register enable mask based upon whether there are * runtime references to this GPE * ******************************************************************************/ acpi_status acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info) { struct acpi_gpe_register_info *gpe_register_info; u32 register_bit; ACPI_FUNCTION_TRACE(ev_update_gpe_enable_mask); gpe_register_info = gpe_event_info->register_info; if (!gpe_register_info) { return_ACPI_STATUS(AE_NOT_EXIST); } register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info); /* Clear the run bit up front */ ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit); /* Set the mask bit only if there are references to this GPE */ if (gpe_event_info->runtime_count) { ACPI_SET_BIT(gpe_register_info->enable_for_run, (u8)register_bit); } return_ACPI_STATUS(AE_OK); } 
/******************************************************************************* * * FUNCTION: acpi_ev_enable_gpe * * PARAMETERS: gpe_event_info - GPE to enable * * RETURN: Status * * DESCRIPTION: Clear a GPE of stale events and enable it. * ******************************************************************************/ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) { acpi_status status; ACPI_FUNCTION_TRACE(ev_enable_gpe); /* * We will only allow a GPE to be enabled if it has either an associated * method (_Lxx/_Exx) or a handler, or is using the implicit notify * feature. Otherwise, the GPE will be immediately disabled by * acpi_ev_gpe_dispatch the first time it fires. */ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == ACPI_GPE_DISPATCH_NONE) { return_ACPI_STATUS(AE_NO_HANDLER); } /* Clear the GPE (of stale events) */ status = acpi_hw_clear_gpe(gpe_event_info); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Enable the requested GPE */ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_add_gpe_reference * * PARAMETERS: gpe_event_info - Add a reference to this GPE * * RETURN: Status * * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is * hardware-enabled. 
* ******************************************************************************/ acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) { acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(ev_add_gpe_reference); if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) { return_ACPI_STATUS(AE_LIMIT); } gpe_event_info->runtime_count++; if (gpe_event_info->runtime_count == 1) { /* Enable on first reference */ status = acpi_ev_update_gpe_enable_mask(gpe_event_info); if (ACPI_SUCCESS(status)) { status = acpi_ev_enable_gpe(gpe_event_info); } if (ACPI_FAILURE(status)) { gpe_event_info->runtime_count--; } } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_remove_gpe_reference * * PARAMETERS: gpe_event_info - Remove a reference to this GPE * * RETURN: Status * * DESCRIPTION: Remove a reference to a GPE. When the last reference is * removed, the GPE is hardware-disabled. * ******************************************************************************/ acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) { acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(ev_remove_gpe_reference); if (!gpe_event_info->runtime_count) { return_ACPI_STATUS(AE_LIMIT); } gpe_event_info->runtime_count--; if (!gpe_event_info->runtime_count) { /* Disable on last reference */ status = acpi_ev_update_gpe_enable_mask(gpe_event_info); if (ACPI_SUCCESS(status)) { status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); } if (ACPI_FAILURE(status)) { gpe_event_info->runtime_count++; } } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_low_get_gpe_info * * PARAMETERS: gpe_number - Raw GPE number * gpe_block - A GPE info block * * RETURN: A GPE event_info struct. 
NULL if not a valid GPE (The gpe_number * is not within the specified GPE block) * * DESCRIPTION: Returns the event_info struct associated with this GPE. This is * the low-level implementation of ev_get_gpe_event_info. * ******************************************************************************/ struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number, struct acpi_gpe_block_info *gpe_block) { u32 gpe_index; /* * Validate that the gpe_number is within the specified gpe_block. * (Two steps) */ if (!gpe_block || (gpe_number < gpe_block->block_base_number)) { return (NULL); } gpe_index = gpe_number - gpe_block->block_base_number; if (gpe_index >= gpe_block->gpe_count) { return (NULL); } return (&gpe_block->event_info[gpe_index]); } /******************************************************************************* * * FUNCTION: acpi_ev_get_gpe_event_info * * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1 * gpe_number - Raw GPE number * * RETURN: A GPE event_info struct. NULL if not a valid GPE * * DESCRIPTION: Returns the event_info struct associated with this GPE. * Validates the gpe_block and the gpe_number * * Should be called only when the GPE lists are semaphore locked * and not subject to change. 
* ******************************************************************************/ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, u32 gpe_number) { union acpi_operand_object *obj_desc; struct acpi_gpe_event_info *gpe_info; u32 i; ACPI_FUNCTION_ENTRY(); /* A NULL gpe_device means use the FADT-defined GPE block(s) */ if (!gpe_device) { /* Examine GPE Block 0 and 1 (These blocks are permanent) */ for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) { gpe_info = acpi_ev_low_get_gpe_info(gpe_number, acpi_gbl_gpe_fadt_blocks [i]); if (gpe_info) { return (gpe_info); } } /* The gpe_number was not in the range of either FADT GPE block */ return (NULL); } /* A Non-NULL gpe_device means this is a GPE Block Device */ obj_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *) gpe_device); if (!obj_desc || !obj_desc->device.gpe_block) { return (NULL); } return (acpi_ev_low_get_gpe_info (gpe_number, obj_desc->device.gpe_block)); } /******************************************************************************* * * FUNCTION: acpi_ev_gpe_detect * * PARAMETERS: gpe_xrupt_list - Interrupt block for this interrupt. * Can have multiple GPE blocks attached. * * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED * * DESCRIPTION: Detect if any GP events have occurred. This function is * executed at interrupt level. 
* ******************************************************************************/ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) { acpi_status status; struct acpi_gpe_block_info *gpe_block; struct acpi_gpe_register_info *gpe_register_info; u32 int_status = ACPI_INTERRUPT_NOT_HANDLED; u8 enabled_status_byte; u32 status_reg; u32 enable_reg; acpi_cpu_flags flags; u32 i; u32 j; ACPI_FUNCTION_NAME(ev_gpe_detect); /* Check for the case where there are no GPEs */ if (!gpe_xrupt_list) { return (int_status); } /* * We need to obtain the GPE lock for both the data structs and registers * Note: Not necessary to obtain the hardware lock, since the GPE * registers are owned by the gpe_lock. */ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); /* Examine all GPE blocks attached to this interrupt level */ gpe_block = gpe_xrupt_list->gpe_block_list_head; while (gpe_block) { /* * Read all of the 8-bit GPE status and enable registers in this GPE * block, saving all of them. Find all currently active GP events. */ for (i = 0; i < gpe_block->register_count; i++) { /* Get the next status/enable pair */ gpe_register_info = &gpe_block->register_info[i]; /* * Optimization: If there are no GPEs enabled within this * register, we can safely ignore the entire register. 
*/ if (!(gpe_register_info->enable_for_run | gpe_register_info->enable_for_wake)) { continue; } /* Read the Status Register */ status = acpi_hw_read(&status_reg, &gpe_register_info->status_address); if (ACPI_FAILURE(status)) { goto unlock_and_exit; } /* Read the Enable Register */ status = acpi_hw_read(&enable_reg, &gpe_register_info->enable_address); if (ACPI_FAILURE(status)) { goto unlock_and_exit; } ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS, "Read GPE Register at GPE%02X: Status=%02X, Enable=%02X\n", gpe_register_info->base_gpe_number, status_reg, enable_reg)); /* Check if there is anything active at all in this register */ enabled_status_byte = (u8) (status_reg & enable_reg); if (!enabled_status_byte) { /* No active GPEs in this register, move on */ continue; } /* Now look at the individual GPEs in this byte register */ for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { /* Examine one GPE bit */ if (enabled_status_byte & (1 << j)) { /* * Found an active GPE. Dispatch the event to a handler * or method. */ int_status |= acpi_ev_gpe_dispatch(gpe_block-> node, &gpe_block-> event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number); } } } gpe_block = gpe_block->next; } unlock_and_exit: acpi_os_release_lock(acpi_gbl_gpe_lock, flags); return (int_status); } /******************************************************************************* * * FUNCTION: acpi_ev_asynch_execute_gpe_method * * PARAMETERS: Context (gpe_event_info) - Info for this GPE * * RETURN: None * * DESCRIPTION: Perform the actual execution of a GPE control method. This * function is called from an invocation of acpi_os_execute and * therefore does NOT execute at interrupt level - so that * the control method itself is not executed in the context of * an interrupt handler. 
* ******************************************************************************/ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) { struct acpi_gpe_event_info *gpe_event_info = context; acpi_status status; struct acpi_gpe_event_info *local_gpe_event_info; struct acpi_evaluate_info *info; struct acpi_gpe_notify_object *notify_object; ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); /* Allocate a local GPE block */ local_gpe_event_info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_event_info)); if (!local_gpe_event_info) { ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, "while handling a GPE")); return_VOID; } status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); if (ACPI_FAILURE(status)) { ACPI_FREE(local_gpe_event_info); return_VOID; } /* Must revalidate the gpe_number/gpe_block */ if (!acpi_ev_valid_gpe_event(gpe_event_info)) { status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); ACPI_FREE(local_gpe_event_info); return_VOID; } /* * Take a snapshot of the GPE info for this level - we copy the info to * prevent a race condition with remove_handler/remove_block. */ ACPI_MEMCPY(local_gpe_event_info, gpe_event_info, sizeof(struct acpi_gpe_event_info)); status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); if (ACPI_FAILURE(status)) { return_VOID; } /* Do the correct dispatch - normal method or implicit notify */ switch (local_gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { case ACPI_GPE_DISPATCH_NOTIFY: /* * Implicit notify. * Dispatch a DEVICE_WAKE notify to the appropriate handler. * NOTE: the request is queued for execution after this method * completes. The notify handlers are NOT invoked synchronously * from this thread -- because handlers may in turn run other * control methods. 
*/ status = acpi_ev_queue_notify_request( local_gpe_event_info->dispatch.device.node, ACPI_NOTIFY_DEVICE_WAKE); notify_object = local_gpe_event_info->dispatch.device.next; while (ACPI_SUCCESS(status) && notify_object) { status = acpi_ev_queue_notify_request( notify_object->node, ACPI_NOTIFY_DEVICE_WAKE); notify_object = notify_object->next; } break; case ACPI_GPE_DISPATCH_METHOD: /* Allocate the evaluation information block */ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info)); if (!info) { status = AE_NO_MEMORY; } else { /* * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the _Lxx/_Exx * control method that corresponds to this GPE */ info->prefix_node = local_gpe_event_info->dispatch.method_node; info->flags = ACPI_IGNORE_RETURN_VALUE; status = acpi_ns_evaluate(info); ACPI_FREE(info); } if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "while evaluating GPE method [%4.4s]", acpi_ut_get_node_name (local_gpe_event_info->dispatch. method_node))); } break; default: return_VOID; /* Should never happen */ } /* Defer enabling of GPE until all notify handlers are done */ status = acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe, local_gpe_event_info); if (ACPI_FAILURE(status)) { ACPI_FREE(local_gpe_event_info); } return_VOID; } /******************************************************************************* * * FUNCTION: acpi_ev_asynch_enable_gpe * * PARAMETERS: Context (gpe_event_info) - Info for this GPE * Callback from acpi_os_execute * * RETURN: None * * DESCRIPTION: Asynchronous clear/enable for GPE. 
This allows the GPE to * complete (i.e., finish execution of Notify) * ******************************************************************************/ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context) { struct acpi_gpe_event_info *gpe_event_info = context; (void)acpi_ev_finish_gpe(gpe_event_info); ACPI_FREE(gpe_event_info); return; } /******************************************************************************* * * FUNCTION: acpi_ev_finish_gpe * * PARAMETERS: gpe_event_info - Info for this GPE * * RETURN: Status * * DESCRIPTION: Clear/Enable a GPE. Common code that is used after execution * of a GPE method or a synchronous or asynchronous GPE handler. * ******************************************************************************/ acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info) { acpi_status status; if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == ACPI_GPE_LEVEL_TRIGGERED) { /* * GPE is level-triggered, we clear the GPE status bit after * handling the event. */ status = acpi_hw_clear_gpe(gpe_event_info); if (ACPI_FAILURE(status)) { return (status); } } /* * Enable this GPE, conditionally. This means that the GPE will * only be physically enabled if the enable_for_run bit is set * in the event_info. */ (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE); return (AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ev_gpe_dispatch * * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1 * gpe_event_info - Info for this GPE * gpe_number - Number relative to the parent GPE block * * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED * * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC) * or method (e.g. _Lxx/_Exx) handler. * * This function executes at interrupt level. 
* ******************************************************************************/ u32 acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device, struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) { acpi_status status; u32 return_value; ACPI_FUNCTION_TRACE(ev_gpe_dispatch); /* Invoke global event handler if present */ acpi_gpe_count++; if (acpi_gbl_global_event_handler) { acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE, gpe_device, gpe_number, acpi_gbl_global_event_handler_context); } /* * If edge-triggered, clear the GPE status bit now. Note that * level-triggered events are cleared after the GPE is serviced. */ if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == ACPI_GPE_EDGE_TRIGGERED) { status = acpi_hw_clear_gpe(gpe_event_info); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Unable to clear GPE%02X", gpe_number)); return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); } } /* * Always disable the GPE so that it does not keep firing before * any asynchronous activity completes (either from the execution * of a GPE method or an asynchronous GPE handler.) * * If there is no handler or method to run, just disable the * GPE and leave it disabled permanently to prevent further such * pointless events from firing. */ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Unable to disable GPE%02X", gpe_number)); return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); } /* * Dispatch the GPE to either an installed handler or the control * method associated with this GPE (_Lxx or _Exx). If a handler * exists, we invoke it and do not attempt to run the method. * If there is neither a handler nor a method, leave the GPE * disabled. 
*/ switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { case ACPI_GPE_DISPATCH_HANDLER: /* Invoke the installed handler (at interrupt level) */ return_value = gpe_event_info->dispatch.handler->address(gpe_device, gpe_number, gpe_event_info-> dispatch.handler-> context); /* If requested, clear (if level-triggered) and reenable the GPE */ if (return_value & ACPI_REENABLE_GPE) { (void)acpi_ev_finish_gpe(gpe_event_info); } break; case ACPI_GPE_DISPATCH_METHOD: case ACPI_GPE_DISPATCH_NOTIFY: /* * Execute the method associated with the GPE * NOTE: Level-triggered GPEs are cleared after the method completes. */ status = acpi_os_execute(OSL_GPE_HANDLER, acpi_ev_asynch_execute_gpe_method, gpe_event_info); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Unable to queue handler for GPE%2X - event disabled", gpe_number)); } break; default: /* * No handler or method to run! * 03/2010: This case should no longer be possible. We will not allow * a GPE to be enabled if it has no handler or method. */ ACPI_ERROR((AE_INFO, "No handler or method for GPE%02X, disabling event", gpe_number)); break; } return_UINT32(ACPI_INTERRUPT_HANDLED); } #endif /* !ACPI_REDUCED_HARDWARE */
gpl-2.0
bio4554/ker.nl
drivers/of/of_net.c
7412
3044
/* * OF helpers for network devices. * * This file is released under the GPLv2 * * Initially copied out of arch/powerpc/kernel/prom_parse.c */ #include <linux/etherdevice.h> #include <linux/kernel.h> #include <linux/of_net.h> #include <linux/phy.h> #include <linux/export.h> /** * It maps 'enum phy_interface_t' found in include/linux/phy.h * into the device tree binding of 'phy-mode', so that Ethernet * device driver can get phy interface from device tree. */ static const char *phy_modes[] = { [PHY_INTERFACE_MODE_NA] = "", [PHY_INTERFACE_MODE_MII] = "mii", [PHY_INTERFACE_MODE_GMII] = "gmii", [PHY_INTERFACE_MODE_SGMII] = "sgmii", [PHY_INTERFACE_MODE_TBI] = "tbi", [PHY_INTERFACE_MODE_RMII] = "rmii", [PHY_INTERFACE_MODE_RGMII] = "rgmii", [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id", [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid", [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid", [PHY_INTERFACE_MODE_RTBI] = "rtbi", [PHY_INTERFACE_MODE_SMII] = "smii", }; /** * of_get_phy_mode - Get phy mode for given device_node * @np: Pointer to the given device_node * * The function gets phy interface string from property 'phy-mode', * and return its index in phy_modes table, or errno in error case. */ const int of_get_phy_mode(struct device_node *np) { const char *pm; int err, i; err = of_property_read_string(np, "phy-mode", &pm); if (err < 0) return err; for (i = 0; i < ARRAY_SIZE(phy_modes); i++) if (!strcasecmp(pm, phy_modes[i])) return i; return -ENODEV; } EXPORT_SYMBOL_GPL(of_get_phy_mode); /** * Search the device tree for the best MAC address to use. 'mac-address' is * checked first, because that is supposed to contain to "most recent" MAC * address. If that isn't set, then 'local-mac-address' is checked next, * because that is the default address. If that isn't set, then the obsolete * 'address' is checked, just in case we're using an old device tree. 
* * Note that the 'address' property is supposed to contain a virtual address of * the register set, but some DTS files have redefined that property to be the * MAC address. * * All-zero MAC addresses are rejected, because those could be properties that * exist in the device tree, but were not set by U-Boot. For example, the * DTS could define 'mac-address' and 'local-mac-address', with zero MAC * addresses. Some older U-Boots only initialized 'local-mac-address'. In * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists * but is all zeros. */ const void *of_get_mac_address(struct device_node *np) { struct property *pp; pp = of_find_property(np, "mac-address", NULL); if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value)) return pp->value; pp = of_find_property(np, "local-mac-address", NULL); if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value)) return pp->value; pp = of_find_property(np, "address", NULL); if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value)) return pp->value; return NULL; } EXPORT_SYMBOL(of_get_mac_address);
gpl-2.0
jejecrdroid/kernel_samsung_espresso10
arch/microblaze/lib/memmove.c
7668
5442
/* * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> * Copyright (C) 2008-2009 PetaLogix * Copyright (C) 2007 John Williams * * Reasonably optimised generic C-code for memcpy on Microblaze * This is generic C code to do efficient, alignment-aware memmove. * * It is based on demo code originally Copyright 2001 by Intel Corp, taken from * http://www.embedded.com/showArticle.jhtml?articleID=19205567 * * Attempts were made, unsuccessfully, to contact the original * author of this code (Michael Morrow, Intel). Below is the original * copyright notice. * * This software has been developed by Intel Corporation. * Intel specifically disclaims all warranties, express or * implied, and all liability, including consequential and * other indirect damages, for the use of this program, including * liability for infringement of any proprietary rights, * and including the warranties of merchantability and fitness * for a particular purpose. Intel does not assume any * responsibility for and errors which may appear in this program * not any responsibility to update it. */ #include <linux/types.h> #include <linux/stddef.h> #include <linux/compiler.h> #include <linux/module.h> #include <linux/string.h> #ifdef __HAVE_ARCH_MEMMOVE #ifndef CONFIG_OPT_LIB_FUNCTION void *memmove(void *v_dst, const void *v_src, __kernel_size_t c) { const char *src = v_src; char *dst = v_dst; if (!c) return v_dst; /* Use memcpy when source is higher than dest */ if (v_dst <= v_src) return memcpy(v_dst, v_src, c); /* copy backwards, from end to beginning */ src += c; dst += c; /* Simple, byte oriented memmove. 
*/ while (c--) *--dst = *--src; return v_dst; } #else /* CONFIG_OPT_LIB_FUNCTION */ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c) { const char *src = v_src; char *dst = v_dst; const uint32_t *i_src; uint32_t *i_dst; if (!c) return v_dst; /* Use memcpy when source is higher than dest */ if (v_dst <= v_src) return memcpy(v_dst, v_src, c); /* The following code tries to optimize the copy by using unsigned * alignment. This will work fine if both source and destination are * aligned on the same boundary. However, if they are aligned on * different boundaries shifts will be necessary. This might result in * bad performance on MicroBlaze systems without a barrel shifter. */ /* FIXME this part needs more test */ /* Do a descending copy - this is a bit trickier! */ dst += c; src += c; if (c >= 4) { unsigned value, buf_hold; /* Align the destination to a word boundary. */ /* This is done in an endian independent manner. */ switch ((unsigned long)dst & 3) { case 3: *--dst = *--src; --c; case 2: *--dst = *--src; --c; case 1: *--dst = *--src; --c; } i_dst = (void *)dst; /* Choose a copy scheme based on the source */ /* alignment relative to dstination. 
*/ switch ((unsigned long)src & 3) { case 0x0: /* Both byte offsets are aligned */ i_src = (const void *)src; for (; c >= 4; c -= 4) *--i_dst = *--i_src; src = (const void *)i_src; break; case 0x1: /* Unaligned - Off by 1 */ /* Word align the source */ i_src = (const void *) (((unsigned)src + 4) & ~3); #ifndef __MICROBLAZEEL__ /* Load the holding buffer */ buf_hold = *--i_src >> 24; for (; c >= 4; c -= 4) { value = *--i_src; *--i_dst = buf_hold << 8 | value; buf_hold = value >> 24; } #else /* Load the holding buffer */ buf_hold = (*--i_src & 0xFF) << 24; for (; c >= 4; c -= 4) { value = *--i_src; *--i_dst = buf_hold | ((value & 0xFFFFFF00)>>8); buf_hold = (value & 0xFF) << 24; } #endif /* Realign the source */ src = (const void *)i_src; src += 1; break; case 0x2: /* Unaligned - Off by 2 */ /* Word align the source */ i_src = (const void *) (((unsigned)src + 4) & ~3); #ifndef __MICROBLAZEEL__ /* Load the holding buffer */ buf_hold = *--i_src >> 16; for (; c >= 4; c -= 4) { value = *--i_src; *--i_dst = buf_hold << 16 | value; buf_hold = value >> 16; } #else /* Load the holding buffer */ buf_hold = (*--i_src & 0xFFFF) << 16; for (; c >= 4; c -= 4) { value = *--i_src; *--i_dst = buf_hold | ((value & 0xFFFF0000)>>16); buf_hold = (value & 0xFFFF) << 16; } #endif /* Realign the source */ src = (const void *)i_src; src += 2; break; case 0x3: /* Unaligned - Off by 3 */ /* Word align the source */ i_src = (const void *) (((unsigned)src + 4) & ~3); #ifndef __MICROBLAZEEL__ /* Load the holding buffer */ buf_hold = *--i_src >> 8; for (; c >= 4; c -= 4) { value = *--i_src; *--i_dst = buf_hold << 24 | value; buf_hold = value >> 8; } #else /* Load the holding buffer */ buf_hold = (*--i_src & 0xFFFFFF) << 8; for (; c >= 4; c -= 4) { value = *--i_src; *--i_dst = buf_hold | ((value & 0xFF000000)>> 24); buf_hold = (value & 0xFFFFFF) << 8; } #endif /* Realign the source */ src = (const void *)i_src; src += 3; break; } dst = (void *)i_dst; } /* simple fast copy, ... 
unless a cache boundary is crossed */ /* Finish off any remaining bytes */ switch (c) { case 4: *--dst = *--src; case 3: *--dst = *--src; case 2: *--dst = *--src; case 1: *--dst = *--src; } return v_dst; } #endif /* CONFIG_OPT_LIB_FUNCTION */ EXPORT_SYMBOL(memmove); #endif /* __HAVE_ARCH_MEMMOVE */
gpl-2.0
ronenil/net-next
arch/alpha/oprofile/op_model_ev5.c
7924
5451
/** * @file arch/alpha/oprofile/op_model_ev5.c * * @remark Copyright 2002 OProfile authors * @remark Read the file COPYING * * @author Richard Henderson <rth@twiddle.net> */ #include <linux/oprofile.h> #include <linux/init.h> #include <linux/smp.h> #include <asm/ptrace.h> #include "op_impl.h" /* Compute all of the registers in preparation for enabling profiling. The 21164 (EV5) and 21164PC (PCA65) vary in the bit placement and meaning of the "CBOX" events. Given that we don't care about meaning at this point, arrange for the difference in bit placement to be handled by common code. */ static void common_reg_setup(struct op_register_config *reg, struct op_counter_config *ctr, struct op_system_config *sys, int cbox1_ofs, int cbox2_ofs) { int i, ctl, reset, need_reset; /* Select desired events. The event numbers are selected such that they map directly into the event selection fields: PCSEL0: 0, 1 PCSEL1: 24-39 CBOX1: 40-47 PCSEL2: 48-63 CBOX2: 64-71 There are two special cases, in that CYCLES can be measured on PCSEL[02], and SCACHE_WRITE can be measured on CBOX[12]. These event numbers are canonicalizes to their first appearance. */ ctl = 0; for (i = 0; i < 3; ++i) { unsigned long event = ctr[i].event; if (!ctr[i].enabled) continue; /* Remap the duplicate events, as described above. */ if (i == 2) { if (event == 0) event = 12+48; else if (event == 2+41) event = 4+65; } /* Convert the event numbers onto mux_select bit mask. */ if (event < 2) ctl |= event << 31; else if (event < 24) /* error */; else if (event < 40) ctl |= (event - 24) << 4; else if (event < 48) ctl |= (event - 40) << cbox1_ofs | 15 << 4; else if (event < 64) ctl |= event - 48; else if (event < 72) ctl |= (event - 64) << cbox2_ofs | 15; } reg->mux_select = ctl; /* Select processor mode. */ /* ??? Need to come up with some mechanism to trace only selected processes. For now select from pal, kernel and user mode. 
*/ ctl = 0; ctl |= !sys->enable_pal << 9; ctl |= !sys->enable_kernel << 8; ctl |= !sys->enable_user << 30; reg->proc_mode = ctl; /* Select interrupt frequencies. Take the interrupt count selected by the user, and map it onto one of the possible counter widths. If the user value is in between, compute a value to which the counter is reset at each interrupt. */ ctl = reset = need_reset = 0; for (i = 0; i < 3; ++i) { unsigned long max, hilo, count = ctr[i].count; if (!ctr[i].enabled) continue; if (count <= 256) count = 256, hilo = 3, max = 256; else { max = (i == 2 ? 16384 : 65536); hilo = 2; if (count > max) count = max; } ctr[i].count = count; ctl |= hilo << (8 - i*2); reset |= (max - count) << (48 - 16*i); if (count != max) need_reset |= 1 << i; } reg->freq = ctl; reg->reset_values = reset; reg->need_reset = need_reset; } static void ev5_reg_setup(struct op_register_config *reg, struct op_counter_config *ctr, struct op_system_config *sys) { common_reg_setup(reg, ctr, sys, 19, 22); } static void pca56_reg_setup(struct op_register_config *reg, struct op_counter_config *ctr, struct op_system_config *sys) { common_reg_setup(reg, ctr, sys, 8, 11); } /* Program all of the registers in preparation for enabling profiling. */ static void ev5_cpu_setup (void *x) { struct op_register_config *reg = x; wrperfmon(2, reg->mux_select); wrperfmon(3, reg->proc_mode); wrperfmon(4, reg->freq); wrperfmon(6, reg->reset_values); } /* CTR is a counter for which the user has requested an interrupt count in between one of the widths selectable in hardware. Reset the count for CTR to the value stored in REG->RESET_VALUES. For EV5, this means disabling profiling, reading the current values, masking in the value for the desired register, writing, then turning profiling back on. This can be streamlined if profiling is only enabled for user mode. In that case we know that the counters are not currently incrementing (due to being in kernel mode). 
*/ static void ev5_reset_ctr(struct op_register_config *reg, unsigned long ctr) { unsigned long values, mask, not_pk, reset_values; mask = (ctr == 0 ? 0xfffful << 48 : ctr == 1 ? 0xfffful << 32 : 0x3fff << 16); not_pk = 1 << 9 | 1 << 8; reset_values = reg->reset_values; if ((reg->proc_mode & not_pk) == not_pk) { values = wrperfmon(5, 0); values = (reset_values & mask) | (values & ~mask & -2); wrperfmon(6, values); } else { wrperfmon(0, -1); values = wrperfmon(5, 0); values = (reset_values & mask) | (values & ~mask & -2); wrperfmon(6, values); wrperfmon(1, reg->enable); } } static void ev5_handle_interrupt(unsigned long which, struct pt_regs *regs, struct op_counter_config *ctr) { /* Record the sample. */ oprofile_add_sample(regs, which); } struct op_axp_model op_model_ev5 = { .reg_setup = ev5_reg_setup, .cpu_setup = ev5_cpu_setup, .reset_ctr = ev5_reset_ctr, .handle_interrupt = ev5_handle_interrupt, .cpu_type = "alpha/ev5", .num_counters = 3, .can_set_proc_mode = 1, }; struct op_axp_model op_model_pca56 = { .reg_setup = pca56_reg_setup, .cpu_setup = ev5_cpu_setup, .reset_ctr = ev5_reset_ctr, .handle_interrupt = ev5_handle_interrupt, .cpu_type = "alpha/pca56", .num_counters = 3, .can_set_proc_mode = 1, };
gpl-2.0
kuailexs/Lenovo_A820_kernel_kk
kernel/drivers/w1/slaves/w1_smem.c
7924
1784
/* * w1_smem.c * * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> * * * This program is free software; you can redistribute it and/or modify * it under the smems of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <asm/types.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/device.h> #include <linux/types.h> #include "../w1.h" #include "../w1_int.h" #include "../w1_family.h" MODULE_LICENSE("GPL"); MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, 64bit memory family."); static struct w1_family w1_smem_family_01 = { .fid = W1_FAMILY_SMEM_01, }; static struct w1_family w1_smem_family_81 = { .fid = W1_FAMILY_SMEM_81, }; static int __init w1_smem_init(void) { int err; err = w1_register_family(&w1_smem_family_01); if (err) return err; err = w1_register_family(&w1_smem_family_81); if (err) { w1_unregister_family(&w1_smem_family_01); return err; } return 0; } static void __exit w1_smem_fini(void) { w1_unregister_family(&w1_smem_family_01); w1_unregister_family(&w1_smem_family_81); } module_init(w1_smem_init); module_exit(w1_smem_fini);
gpl-2.0
shankarathi07/linux_lg_lollipop
drivers/media/rc/keymaps/rc-tivo.c
9460
2899
/* rc-tivo.c - Keytable for TiVo remotes * * Copyright (c) 2011 by Jarod Wilson <jarod@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <media/rc-map.h> #include <linux/module.h> /* * Initial mapping is for the TiVo remote included in the Nero LiquidTV bundle, * which also ships with a TiVo-branded IR transceiver, supported by the mceusb * driver. Note that the remote uses an NEC-ish protocol, but instead of having * a command/not_command pair, it has a vendor ID of 0xa10c, but some keys, the * NEC extended checksums do pass, so the table presently has the intended * values and the checksum-passed versions for those keys. */ static struct rc_map_table tivo[] = { { 0xa10c900f, KEY_MEDIA }, /* TiVo Button */ { 0xa10c0807, KEY_POWER2 }, /* TV Power */ { 0xa10c8807, KEY_TV }, /* Live TV/Swap */ { 0xa10c2c03, KEY_VIDEO_NEXT }, /* TV Input */ { 0xa10cc807, KEY_INFO }, { 0xa10cfa05, KEY_CYCLEWINDOWS }, /* Window */ { 0x0085305f, KEY_CYCLEWINDOWS }, { 0xa10c6c03, KEY_EPG }, /* Guide */ { 0xa10c2807, KEY_UP }, { 0xa10c6807, KEY_DOWN }, { 0xa10ce807, KEY_LEFT }, { 0xa10ca807, KEY_RIGHT }, { 0xa10c1807, KEY_SCROLLDOWN }, /* Red Thumbs Down */ { 0xa10c9807, KEY_SELECT }, { 0xa10c5807, KEY_SCROLLUP }, /* Green Thumbs Up */ { 0xa10c3807, KEY_VOLUMEUP }, { 0xa10cb807, KEY_VOLUMEDOWN }, { 0xa10cd807, KEY_MUTE }, { 0xa10c040b, KEY_RECORD }, { 0xa10c7807, KEY_CHANNELUP }, { 0xa10cf807, KEY_CHANNELDOWN }, { 0x0085301f, KEY_CHANNELDOWN }, { 0xa10c840b, KEY_PLAY }, { 0xa10cc40b, KEY_PAUSE }, { 0xa10ca40b, KEY_SLOW }, { 0xa10c440b, KEY_REWIND }, { 0xa10c240b, KEY_FASTFORWARD }, { 0xa10c640b, KEY_PREVIOUS }, { 0xa10ce40b, KEY_NEXT }, /* ->| */ { 0xa10c220d, KEY_ZOOM }, /* Aspect */ { 0xa10c120d, KEY_STOP }, { 0xa10c520d, KEY_DVD }, /* DVD Menu */ { 0xa10c140b, 
KEY_NUMERIC_1 }, { 0xa10c940b, KEY_NUMERIC_2 }, { 0xa10c540b, KEY_NUMERIC_3 }, { 0xa10cd40b, KEY_NUMERIC_4 }, { 0xa10c340b, KEY_NUMERIC_5 }, { 0xa10cb40b, KEY_NUMERIC_6 }, { 0xa10c740b, KEY_NUMERIC_7 }, { 0xa10cf40b, KEY_NUMERIC_8 }, { 0x0085302f, KEY_NUMERIC_8 }, { 0xa10c0c03, KEY_NUMERIC_9 }, { 0xa10c8c03, KEY_NUMERIC_0 }, { 0xa10ccc03, KEY_ENTER }, { 0xa10c4c03, KEY_CLEAR }, }; static struct rc_map_list tivo_map = { .map = { .scan = tivo, .size = ARRAY_SIZE(tivo), .rc_type = RC_TYPE_NEC, .name = RC_MAP_TIVO, } }; static int __init init_rc_map_tivo(void) { return rc_map_register(&tivo_map); } static void __exit exit_rc_map_tivo(void) { rc_map_unregister(&tivo_map); } module_init(init_rc_map_tivo) module_exit(exit_rc_map_tivo) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
gpl-2.0
lawnn/android_kernel_samsung_d2dcm
drivers/misc/ibmasm/remote.c
14836
10187
/* * IBM ASM Service Processor Device Driver * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) IBM Corporation, 2004 * * Authors: Max Asböck <amax@us.ibm.com> * Vernon Mauery <vernux@us.ibm.com> * */ /* Remote mouse and keyboard event handling functions */ #include <linux/pci.h> #include "ibmasm.h" #include "remote.h" #define MOUSE_X_MAX 1600 #define MOUSE_Y_MAX 1200 static const unsigned short xlate_high[XLATE_SIZE] = { [KEY_SYM_ENTER & 0xff] = KEY_ENTER, [KEY_SYM_KPSLASH & 0xff] = KEY_KPSLASH, [KEY_SYM_KPSTAR & 0xff] = KEY_KPASTERISK, [KEY_SYM_KPMINUS & 0xff] = KEY_KPMINUS, [KEY_SYM_KPDOT & 0xff] = KEY_KPDOT, [KEY_SYM_KPPLUS & 0xff] = KEY_KPPLUS, [KEY_SYM_KP0 & 0xff] = KEY_KP0, [KEY_SYM_KP1 & 0xff] = KEY_KP1, [KEY_SYM_KP2 & 0xff] = KEY_KP2, [KEY_SYM_KPDOWN & 0xff] = KEY_KP2, [KEY_SYM_KP3 & 0xff] = KEY_KP3, [KEY_SYM_KP4 & 0xff] = KEY_KP4, [KEY_SYM_KPLEFT & 0xff] = KEY_KP4, [KEY_SYM_KP5 & 0xff] = KEY_KP5, [KEY_SYM_KP6 & 0xff] = KEY_KP6, [KEY_SYM_KPRIGHT & 0xff] = KEY_KP6, [KEY_SYM_KP7 & 0xff] = KEY_KP7, [KEY_SYM_KP8 & 0xff] = KEY_KP8, [KEY_SYM_KPUP & 0xff] = KEY_KP8, [KEY_SYM_KP9 & 0xff] = KEY_KP9, [KEY_SYM_BK_SPC & 0xff] = KEY_BACKSPACE, [KEY_SYM_TAB & 0xff] = KEY_TAB, [KEY_SYM_CTRL & 0xff] = KEY_LEFTCTRL, [KEY_SYM_ALT & 0xff] = KEY_LEFTALT, [KEY_SYM_INSERT & 0xff] = KEY_INSERT, [KEY_SYM_DELETE & 0xff] = 
KEY_DELETE, [KEY_SYM_SHIFT & 0xff] = KEY_LEFTSHIFT, [KEY_SYM_UARROW & 0xff] = KEY_UP, [KEY_SYM_DARROW & 0xff] = KEY_DOWN, [KEY_SYM_LARROW & 0xff] = KEY_LEFT, [KEY_SYM_RARROW & 0xff] = KEY_RIGHT, [KEY_SYM_ESCAPE & 0xff] = KEY_ESC, [KEY_SYM_PAGEUP & 0xff] = KEY_PAGEUP, [KEY_SYM_PAGEDOWN & 0xff] = KEY_PAGEDOWN, [KEY_SYM_HOME & 0xff] = KEY_HOME, [KEY_SYM_END & 0xff] = KEY_END, [KEY_SYM_F1 & 0xff] = KEY_F1, [KEY_SYM_F2 & 0xff] = KEY_F2, [KEY_SYM_F3 & 0xff] = KEY_F3, [KEY_SYM_F4 & 0xff] = KEY_F4, [KEY_SYM_F5 & 0xff] = KEY_F5, [KEY_SYM_F6 & 0xff] = KEY_F6, [KEY_SYM_F7 & 0xff] = KEY_F7, [KEY_SYM_F8 & 0xff] = KEY_F8, [KEY_SYM_F9 & 0xff] = KEY_F9, [KEY_SYM_F10 & 0xff] = KEY_F10, [KEY_SYM_F11 & 0xff] = KEY_F11, [KEY_SYM_F12 & 0xff] = KEY_F12, [KEY_SYM_CAP_LOCK & 0xff] = KEY_CAPSLOCK, [KEY_SYM_NUM_LOCK & 0xff] = KEY_NUMLOCK, [KEY_SYM_SCR_LOCK & 0xff] = KEY_SCROLLLOCK, }; static const unsigned short xlate[XLATE_SIZE] = { [NO_KEYCODE] = KEY_RESERVED, [KEY_SYM_SPACE] = KEY_SPACE, [KEY_SYM_TILDE] = KEY_GRAVE, [KEY_SYM_BKTIC] = KEY_GRAVE, [KEY_SYM_ONE] = KEY_1, [KEY_SYM_BANG] = KEY_1, [KEY_SYM_TWO] = KEY_2, [KEY_SYM_AT] = KEY_2, [KEY_SYM_THREE] = KEY_3, [KEY_SYM_POUND] = KEY_3, [KEY_SYM_FOUR] = KEY_4, [KEY_SYM_DOLLAR] = KEY_4, [KEY_SYM_FIVE] = KEY_5, [KEY_SYM_PERCENT] = KEY_5, [KEY_SYM_SIX] = KEY_6, [KEY_SYM_CARAT] = KEY_6, [KEY_SYM_SEVEN] = KEY_7, [KEY_SYM_AMPER] = KEY_7, [KEY_SYM_EIGHT] = KEY_8, [KEY_SYM_STAR] = KEY_8, [KEY_SYM_NINE] = KEY_9, [KEY_SYM_LPAREN] = KEY_9, [KEY_SYM_ZERO] = KEY_0, [KEY_SYM_RPAREN] = KEY_0, [KEY_SYM_MINUS] = KEY_MINUS, [KEY_SYM_USCORE] = KEY_MINUS, [KEY_SYM_EQUAL] = KEY_EQUAL, [KEY_SYM_PLUS] = KEY_EQUAL, [KEY_SYM_LBRKT] = KEY_LEFTBRACE, [KEY_SYM_LCURLY] = KEY_LEFTBRACE, [KEY_SYM_RBRKT] = KEY_RIGHTBRACE, [KEY_SYM_RCURLY] = KEY_RIGHTBRACE, [KEY_SYM_SLASH] = KEY_BACKSLASH, [KEY_SYM_PIPE] = KEY_BACKSLASH, [KEY_SYM_TIC] = KEY_APOSTROPHE, [KEY_SYM_QUOTE] = KEY_APOSTROPHE, [KEY_SYM_SEMIC] = KEY_SEMICOLON, [KEY_SYM_COLON] = KEY_SEMICOLON, [KEY_SYM_COMMA] = 
KEY_COMMA, [KEY_SYM_LT] = KEY_COMMA, [KEY_SYM_PERIOD] = KEY_DOT, [KEY_SYM_GT] = KEY_DOT, [KEY_SYM_BSLASH] = KEY_SLASH, [KEY_SYM_QMARK] = KEY_SLASH, [KEY_SYM_A] = KEY_A, [KEY_SYM_a] = KEY_A, [KEY_SYM_B] = KEY_B, [KEY_SYM_b] = KEY_B, [KEY_SYM_C] = KEY_C, [KEY_SYM_c] = KEY_C, [KEY_SYM_D] = KEY_D, [KEY_SYM_d] = KEY_D, [KEY_SYM_E] = KEY_E, [KEY_SYM_e] = KEY_E, [KEY_SYM_F] = KEY_F, [KEY_SYM_f] = KEY_F, [KEY_SYM_G] = KEY_G, [KEY_SYM_g] = KEY_G, [KEY_SYM_H] = KEY_H, [KEY_SYM_h] = KEY_H, [KEY_SYM_I] = KEY_I, [KEY_SYM_i] = KEY_I, [KEY_SYM_J] = KEY_J, [KEY_SYM_j] = KEY_J, [KEY_SYM_K] = KEY_K, [KEY_SYM_k] = KEY_K, [KEY_SYM_L] = KEY_L, [KEY_SYM_l] = KEY_L, [KEY_SYM_M] = KEY_M, [KEY_SYM_m] = KEY_M, [KEY_SYM_N] = KEY_N, [KEY_SYM_n] = KEY_N, [KEY_SYM_O] = KEY_O, [KEY_SYM_o] = KEY_O, [KEY_SYM_P] = KEY_P, [KEY_SYM_p] = KEY_P, [KEY_SYM_Q] = KEY_Q, [KEY_SYM_q] = KEY_Q, [KEY_SYM_R] = KEY_R, [KEY_SYM_r] = KEY_R, [KEY_SYM_S] = KEY_S, [KEY_SYM_s] = KEY_S, [KEY_SYM_T] = KEY_T, [KEY_SYM_t] = KEY_T, [KEY_SYM_U] = KEY_U, [KEY_SYM_u] = KEY_U, [KEY_SYM_V] = KEY_V, [KEY_SYM_v] = KEY_V, [KEY_SYM_W] = KEY_W, [KEY_SYM_w] = KEY_W, [KEY_SYM_X] = KEY_X, [KEY_SYM_x] = KEY_X, [KEY_SYM_Y] = KEY_Y, [KEY_SYM_y] = KEY_Y, [KEY_SYM_Z] = KEY_Z, [KEY_SYM_z] = KEY_Z, }; static void print_input(struct remote_input *input) { if (input->type == INPUT_TYPE_MOUSE) { unsigned char buttons = input->mouse_buttons; dbg("remote mouse movement: (x,y)=(%d,%d)%s%s%s%s\n", input->data.mouse.x, input->data.mouse.y, (buttons) ? " -- buttons:" : "", (buttons & REMOTE_BUTTON_LEFT) ? "left " : "", (buttons & REMOTE_BUTTON_MIDDLE) ? "middle " : "", (buttons & REMOTE_BUTTON_RIGHT) ? 
"right" : "" ); } else { dbg("remote keypress (code, flag, down):" "%d (0x%x) [0x%x] [0x%x]\n", input->data.keyboard.key_code, input->data.keyboard.key_code, input->data.keyboard.key_flag, input->data.keyboard.key_down ); } } static void send_mouse_event(struct input_dev *dev, struct remote_input *input) { unsigned char buttons = input->mouse_buttons; input_report_abs(dev, ABS_X, input->data.mouse.x); input_report_abs(dev, ABS_Y, input->data.mouse.y); input_report_key(dev, BTN_LEFT, buttons & REMOTE_BUTTON_LEFT); input_report_key(dev, BTN_MIDDLE, buttons & REMOTE_BUTTON_MIDDLE); input_report_key(dev, BTN_RIGHT, buttons & REMOTE_BUTTON_RIGHT); input_sync(dev); } static void send_keyboard_event(struct input_dev *dev, struct remote_input *input) { unsigned int key; unsigned short code = input->data.keyboard.key_code; if (code & 0xff00) key = xlate_high[code & 0xff]; else key = xlate[code]; input_report_key(dev, key, input->data.keyboard.key_down); input_sync(dev); } void ibmasm_handle_mouse_interrupt(struct service_processor *sp) { unsigned long reader; unsigned long writer; struct remote_input input; reader = get_queue_reader(sp); writer = get_queue_writer(sp); while (reader != writer) { memcpy_fromio(&input, get_queue_entry(sp, reader), sizeof(struct remote_input)); print_input(&input); if (input.type == INPUT_TYPE_MOUSE) { send_mouse_event(sp->remote.mouse_dev, &input); } else if (input.type == INPUT_TYPE_KEYBOARD) { send_keyboard_event(sp->remote.keybd_dev, &input); } else break; reader = advance_queue_reader(sp, reader); writer = get_queue_writer(sp); } } int ibmasm_init_remote_input_dev(struct service_processor *sp) { /* set up the mouse input device */ struct input_dev *mouse_dev, *keybd_dev; struct pci_dev *pdev = to_pci_dev(sp->dev); int error = -ENOMEM; int i; sp->remote.mouse_dev = mouse_dev = input_allocate_device(); sp->remote.keybd_dev = keybd_dev = input_allocate_device(); if (!mouse_dev || !keybd_dev) goto err_free_devices; mouse_dev->id.bustype = 
BUS_PCI; mouse_dev->id.vendor = pdev->vendor; mouse_dev->id.product = pdev->device; mouse_dev->id.version = 1; mouse_dev->dev.parent = sp->dev; mouse_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); mouse_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE); set_bit(BTN_TOUCH, mouse_dev->keybit); mouse_dev->name = "ibmasm RSA I remote mouse"; input_set_abs_params(mouse_dev, ABS_X, 0, MOUSE_X_MAX, 0, 0); input_set_abs_params(mouse_dev, ABS_Y, 0, MOUSE_Y_MAX, 0, 0); keybd_dev->id.bustype = BUS_PCI; keybd_dev->id.vendor = pdev->vendor; keybd_dev->id.product = pdev->device; keybd_dev->id.version = 2; keybd_dev->dev.parent = sp->dev; keybd_dev->evbit[0] = BIT_MASK(EV_KEY); keybd_dev->name = "ibmasm RSA I remote keyboard"; for (i = 0; i < XLATE_SIZE; i++) { if (xlate_high[i]) set_bit(xlate_high[i], keybd_dev->keybit); if (xlate[i]) set_bit(xlate[i], keybd_dev->keybit); } error = input_register_device(mouse_dev); if (error) goto err_free_devices; error = input_register_device(keybd_dev); if (error) goto err_unregister_mouse_dev; enable_mouse_interrupts(sp); printk(KERN_INFO "ibmasm remote responding to events on RSA card %d\n", sp->number); return 0; err_unregister_mouse_dev: input_unregister_device(mouse_dev); mouse_dev = NULL; /* so we don't try to free it again below */ err_free_devices: input_free_device(mouse_dev); input_free_device(keybd_dev); return error; } void ibmasm_free_remote_input_dev(struct service_processor *sp) { disable_mouse_interrupts(sp); input_unregister_device(sp->remote.mouse_dev); input_unregister_device(sp->remote.keybd_dev); }
gpl-2.0
cosino/openwrt
target/linux/ar71xx/files/arch/mips/ath79/mach-rb2011.c
245
8404
/* * MikroTik RouterBOARD 2011 support * * Copyright (C) 2012 Stijn Tintel <stijn@linux-ipv6.be> * Copyright (C) 2012 Gabor Juhos <juhosg@openwrt.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #define pr_fmt(fmt) "rb2011: " fmt #include <linux/phy.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/ath9k_platform.h> #include <linux/ar8216_platform.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <linux/spi/spi.h> #include <linux/spi/flash.h> #include <linux/routerboot.h> #include <linux/gpio.h> #include <asm/prom.h> #include <asm/mach-ath79/ath79.h> #include <asm/mach-ath79/ar71xx_regs.h> #include "common.h" #include "dev-eth.h" #include "dev-m25p80.h" #include "dev-nfc.h" #include "dev-usb.h" #include "dev-wmac.h" #include "machtypes.h" #include "routerboot.h" #define RB2011_GPIO_NAND_NCE 14 #define RB2011_GPIO_SFP_LOS 21 #define RB_ROUTERBOOT_OFFSET 0x0000 #define RB_ROUTERBOOT_MIN_SIZE 0xb000 #define RB_HARD_CFG_SIZE 0x1000 #define RB_BIOS_OFFSET 0xd000 #define RB_BIOS_SIZE 0x1000 #define RB_SOFT_CFG_OFFSET 0xf000 #define RB_SOFT_CFG_SIZE 0x1000 #define RB_ART_SIZE 0x10000 #define RB2011_FLAG_SFP BIT(0) #define RB2011_FLAG_USB BIT(1) #define RB2011_FLAG_WLAN BIT(2) static struct mtd_partition rb2011_spi_partitions[] = { { .name = "routerboot", .offset = RB_ROUTERBOOT_OFFSET, .mask_flags = MTD_WRITEABLE, }, { .name = "hard_config", .size = RB_HARD_CFG_SIZE, .mask_flags = MTD_WRITEABLE, }, { .name = "bios", .offset = RB_BIOS_OFFSET, .size = RB_BIOS_SIZE, .mask_flags = MTD_WRITEABLE, }, { .name = "soft_config", .size = RB_SOFT_CFG_SIZE, } }; static void __init rb2011_init_partitions(const struct rb_info *info) { rb2011_spi_partitions[0].size = info->hard_cfg_offs; rb2011_spi_partitions[1].offset = info->hard_cfg_offs; 
rb2011_spi_partitions[3].offset = info->soft_cfg_offs; } static struct mtd_partition rb2011_nand_partitions[] = { { .name = "booter", .offset = 0, .size = (256 * 1024), .mask_flags = MTD_WRITEABLE, }, { .name = "kernel", .offset = (256 * 1024), .size = (4 * 1024 * 1024) - (256 * 1024), }, { .name = "rootfs", .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL, }, }; static struct flash_platform_data rb2011_spi_flash_data = { .parts = rb2011_spi_partitions, .nr_parts = ARRAY_SIZE(rb2011_spi_partitions), }; static struct ar8327_pad_cfg rb2011_ar8327_pad0_cfg = { .mode = AR8327_PAD_MAC_RGMII, .txclk_delay_en = true, .rxclk_delay_en = true, .txclk_delay_sel = AR8327_CLK_DELAY_SEL3, .rxclk_delay_sel = AR8327_CLK_DELAY_SEL0, }; static struct ar8327_pad_cfg rb2011_ar8327_pad6_cfg; static struct ar8327_sgmii_cfg rb2011_ar8327_sgmii_cfg; static struct ar8327_led_cfg rb2011_ar8327_led_cfg = { .led_ctrl0 = 0xc731c731, .led_ctrl1 = 0x00000000, .led_ctrl2 = 0x00000000, .led_ctrl3 = 0x0030c300, .open_drain = false, }; static const struct ar8327_led_info rb2011_ar8327_leds[] __initconst = { AR8327_LED_INFO(PHY0_0, HW, "rb:green:eth1"), AR8327_LED_INFO(PHY1_0, HW, "rb:green:eth2"), AR8327_LED_INFO(PHY2_0, HW, "rb:green:eth3"), AR8327_LED_INFO(PHY3_0, HW, "rb:green:eth4"), AR8327_LED_INFO(PHY4_0, HW, "rb:green:eth5"), AR8327_LED_INFO(PHY0_1, SW, "rb:green:eth6"), AR8327_LED_INFO(PHY1_1, SW, "rb:green:eth7"), AR8327_LED_INFO(PHY2_1, SW, "rb:green:eth8"), AR8327_LED_INFO(PHY3_1, SW, "rb:green:eth9"), AR8327_LED_INFO(PHY4_1, SW, "rb:green:eth10"), AR8327_LED_INFO(PHY4_2, SW, "rb:green:usr"), }; static struct ar8327_platform_data rb2011_ar8327_data = { .pad0_cfg = &rb2011_ar8327_pad0_cfg, .port0_cfg = { .force_link = 1, .speed = AR8327_PORT_SPEED_1000, .duplex = 1, .txpause = 1, .rxpause = 1, }, .led_cfg = &rb2011_ar8327_led_cfg, .num_leds = ARRAY_SIZE(rb2011_ar8327_leds), .leds = rb2011_ar8327_leds, }; static struct mdio_board_info rb2011_mdio0_info[] = { { .bus_id = 
"ag71xx-mdio.0", .phy_addr = 0, .platform_data = &rb2011_ar8327_data, }, }; static void __init rb2011_wlan_init(void) { char *art_buf; u8 wlan_mac[ETH_ALEN]; art_buf = rb_get_wlan_data(); if (art_buf == NULL) return; ath79_init_mac(wlan_mac, ath79_mac_base, 11); ath79_register_wmac(art_buf + 0x1000, wlan_mac); kfree(art_buf); } static void rb2011_nand_select_chip(int chip_no) { switch (chip_no) { case 0: gpio_set_value(RB2011_GPIO_NAND_NCE, 0); break; default: gpio_set_value(RB2011_GPIO_NAND_NCE, 1); break; } ndelay(500); } static struct nand_ecclayout rb2011_nand_ecclayout = { .eccbytes = 6, .eccpos = { 8, 9, 10, 13, 14, 15 }, .oobavail = 9, .oobfree = { { 0, 4 }, { 6, 2 }, { 11, 2 }, { 4, 1 } } }; static int rb2011_nand_scan_fixup(struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; if (mtd->writesize == 512) { /* * Use the OLD Yaffs-1 OOB layout, otherwise RouterBoot * will not be able to find the kernel that we load. */ chip->ecc.layout = &rb2011_nand_ecclayout; } return 0; } static void __init rb2011_nand_init(void) { gpio_request_one(RB2011_GPIO_NAND_NCE, GPIOF_OUT_INIT_HIGH, "NAND nCE"); ath79_nfc_set_scan_fixup(rb2011_nand_scan_fixup); ath79_nfc_set_parts(rb2011_nand_partitions, ARRAY_SIZE(rb2011_nand_partitions)); ath79_nfc_set_select_chip(rb2011_nand_select_chip); ath79_nfc_set_swap_dma(true); ath79_register_nfc(); } static int rb2011_get_port_link(unsigned port) { if (port != 6) return -EINVAL; /* The Loss of signal line is active low */ return !gpio_get_value(RB2011_GPIO_SFP_LOS); } static void __init rb2011_sfp_init(void) { gpio_request_one(RB2011_GPIO_SFP_LOS, GPIOF_IN, "SFP LOS"); rb2011_ar8327_pad6_cfg.mode = AR8327_PAD_MAC_SGMII; rb2011_ar8327_data.pad6_cfg = &rb2011_ar8327_pad6_cfg; rb2011_ar8327_sgmii_cfg.sgmii_ctrl = 0xc70167d0; rb2011_ar8327_sgmii_cfg.serdes_aen = true; rb2011_ar8327_data.sgmii_cfg = &rb2011_ar8327_sgmii_cfg; rb2011_ar8327_data.port6_cfg.force_link = 1; rb2011_ar8327_data.port6_cfg.speed = AR8327_PORT_SPEED_1000; 
rb2011_ar8327_data.port6_cfg.duplex = 1; rb2011_ar8327_data.get_port_link = rb2011_get_port_link; } static int __init rb2011_setup(u32 flags) { const struct rb_info *info; char buf[64]; info = rb_init_info((void *) KSEG1ADDR(0x1f000000), 0x10000); if (!info) return -ENODEV; scnprintf(buf, sizeof(buf), "Mikrotik RouterBOARD %s", (info->board_name) ? info->board_name : ""); mips_set_machine_name(buf); rb2011_init_partitions(info); ath79_register_m25p80(&rb2011_spi_flash_data); rb2011_nand_init(); ath79_setup_ar934x_eth_cfg(AR934X_ETH_CFG_RGMII_GMAC0 | AR934X_ETH_CFG_SW_ONLY_MODE); ath79_register_mdio(1, 0x0); ath79_register_mdio(0, 0x0); mdiobus_register_board_info(rb2011_mdio0_info, ARRAY_SIZE(rb2011_mdio0_info)); /* GMAC0 is connected to an ar8327 switch */ ath79_init_mac(ath79_eth0_data.mac_addr, ath79_mac_base, 0); ath79_eth0_data.phy_if_mode = PHY_INTERFACE_MODE_RGMII; ath79_eth0_data.phy_mask = BIT(0); ath79_eth0_data.mii_bus_dev = &ath79_mdio0_device.dev; ath79_eth0_pll_data.pll_1000 = 0x06000000; ath79_register_eth(0); /* GMAC1 is connected to the internal switch */ ath79_init_mac(ath79_eth1_data.mac_addr, ath79_mac_base, 5); ath79_eth1_data.phy_if_mode = PHY_INTERFACE_MODE_GMII; ath79_eth1_data.speed = SPEED_1000; ath79_eth1_data.duplex = DUPLEX_FULL; ath79_register_eth(1); if (flags & RB2011_FLAG_SFP) rb2011_sfp_init(); if (flags & RB2011_FLAG_WLAN) rb2011_wlan_init(); if (flags & RB2011_FLAG_USB) ath79_register_usb(); return 0; } static void __init rb2011l_setup(void) { rb2011_setup(0); } MIPS_MACHINE_NONAME(ATH79_MACH_RB_2011L, "2011L", rb2011l_setup); static void __init rb2011us_setup(void) { rb2011_setup(RB2011_FLAG_SFP | RB2011_FLAG_USB); } MIPS_MACHINE_NONAME(ATH79_MACH_RB_2011US, "2011US", rb2011us_setup); static void __init rb2011r5_setup(void) { rb2011_setup(RB2011_FLAG_SFP | RB2011_FLAG_USB | RB2011_FLAG_WLAN); } MIPS_MACHINE_NONAME(ATH79_MACH_RB_2011R5, "2011r5", rb2011r5_setup); static void __init rb2011g_setup(void) { 
rb2011_setup(RB2011_FLAG_SFP | RB2011_FLAG_USB | RB2011_FLAG_WLAN); } MIPS_MACHINE_NONAME(ATH79_MACH_RB_2011G, "2011G", rb2011g_setup);
gpl-2.0
Motorhead1991/android_kernel_samsung_amazing
arch/arm/mach-msm/msm_bus/msm_bus_dbg.c
245
18644
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/seq_file.h> #include <linux/debugfs.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/string.h> #include <linux/uaccess.h> #include <linux/hrtimer.h> #include <mach/msm_bus_board.h> #include <mach/msm_bus.h> #include "msm_bus_core.h" #define MAX_BUFF_SIZE 4096 #define FILL_LIMIT 128 static struct dentry *clients; static struct dentry *dir; static DEFINE_MUTEX(msm_bus_dbg_fablist_lock); struct msm_bus_dbg_state { uint32_t cl; uint8_t enable; uint8_t current_index; } clstate; struct msm_bus_cldata { const struct msm_bus_scale_pdata *pdata; int index; uint32_t clid; int size; struct dentry *file; struct list_head list; char buffer[MAX_BUFF_SIZE]; }; struct msm_bus_fab_list { const char *name; int size; struct dentry *file; struct list_head list; char buffer[MAX_BUFF_SIZE]; }; LIST_HEAD(fabdata_list); LIST_HEAD(cl_list); /** * The following structures and funtions are used for * the test-client which can be created at run-time. 
*/ static struct msm_bus_vectors init_vectors[1]; static struct msm_bus_vectors current_vectors[1]; static struct msm_bus_vectors requested_vectors[1]; static struct msm_bus_paths shell_client_usecases[] = { { .num_paths = ARRAY_SIZE(init_vectors), .vectors = init_vectors, }, { .num_paths = ARRAY_SIZE(current_vectors), .vectors = current_vectors, }, { .num_paths = ARRAY_SIZE(requested_vectors), .vectors = requested_vectors, }, }; static struct msm_bus_scale_pdata shell_client = { .usecase = shell_client_usecases, .num_usecases = ARRAY_SIZE(shell_client_usecases), .name = "test-client", }; static void msm_bus_dbg_init_vectors(void) { init_vectors[0].src = -1; init_vectors[0].dst = -1; init_vectors[0].ab = 0; init_vectors[0].ib = 0; current_vectors[0].src = -1; current_vectors[0].dst = -1; current_vectors[0].ab = 0; current_vectors[0].ib = 0; requested_vectors[0].src = -1; requested_vectors[0].dst = -1; requested_vectors[0].ab = 0; requested_vectors[0].ib = 0; clstate.enable = 0; clstate.current_index = 0; } static int msm_bus_dbg_update_cl_request(uint32_t cl) { int ret = 0; if (clstate.current_index < 2) clstate.current_index = 2; else { clstate.current_index = 1; current_vectors[0].ab = requested_vectors[0].ab; current_vectors[0].ib = requested_vectors[0].ib; } if (clstate.enable) { MSM_BUS_DBG("Updating request for shell client, index: %d\n", clstate.current_index); ret = msm_bus_scale_client_update_request(clstate.cl, clstate.current_index); } else MSM_BUS_DBG("Enable bit not set. Skipping update request\n"); return ret; } static void msm_bus_dbg_unregister_client(uint32_t cl) { MSM_BUS_DBG("Unregistering shell client\n"); msm_bus_scale_unregister_client(clstate.cl); clstate.cl = 0; } static uint32_t msm_bus_dbg_register_client(void) { int ret = 0; if (init_vectors[0].src != requested_vectors[0].src) { MSM_BUS_DBG("Shell client master changed. 
Unregistering\n"); msm_bus_dbg_unregister_client(clstate.cl); } if (init_vectors[0].dst != requested_vectors[0].dst) { MSM_BUS_DBG("Shell client slave changed. Unregistering\n"); msm_bus_dbg_unregister_client(clstate.cl); } if (!clstate.enable) { MSM_BUS_DBG("Enable bit not set, skipping registration: cl " "%d\n", clstate.cl); return 0; } if (clstate.cl) { MSM_BUS_DBG("Client registered, skipping registration\n"); return 0; } current_vectors[0].src = init_vectors[0].src; requested_vectors[0].src = init_vectors[0].src; current_vectors[0].dst = init_vectors[0].dst; requested_vectors[0].dst = init_vectors[0].dst; MSM_BUS_DBG("Registering shell client\n"); ret = msm_bus_scale_register_client(&shell_client); return ret; } static int msm_bus_dbg_mas_get(void *data, u64 *val) { *val = init_vectors[0].src; MSM_BUS_DBG("Get master: %llu\n", *val); return 0; } static int msm_bus_dbg_mas_set(void *data, u64 val) { init_vectors[0].src = val; MSM_BUS_DBG("Set master: %llu\n", val); clstate.cl = msm_bus_dbg_register_client(); return 0; } DEFINE_SIMPLE_ATTRIBUTE(shell_client_mas_fops, msm_bus_dbg_mas_get, msm_bus_dbg_mas_set, "%llu\n"); static int msm_bus_dbg_slv_get(void *data, u64 *val) { *val = init_vectors[0].dst; MSM_BUS_DBG("Get slave: %llu\n", *val); return 0; } static int msm_bus_dbg_slv_set(void *data, u64 val) { init_vectors[0].dst = val; MSM_BUS_DBG("Set slave: %llu\n", val); clstate.cl = msm_bus_dbg_register_client(); return 0; } DEFINE_SIMPLE_ATTRIBUTE(shell_client_slv_fops, msm_bus_dbg_slv_get, msm_bus_dbg_slv_set, "%llu\n"); static int msm_bus_dbg_ab_get(void *data, u64 *val) { *val = requested_vectors[0].ab; MSM_BUS_DBG("Get ab: %llu\n", *val); return 0; } static int msm_bus_dbg_ab_set(void *data, u64 val) { requested_vectors[0].ab = val; MSM_BUS_DBG("Set ab: %llu\n", val); return 0; } DEFINE_SIMPLE_ATTRIBUTE(shell_client_ab_fops, msm_bus_dbg_ab_get, msm_bus_dbg_ab_set, "%llu\n"); static int msm_bus_dbg_ib_get(void *data, u64 *val) { *val = 
requested_vectors[0].ib; MSM_BUS_DBG("Get ib: %llu\n", *val); return 0; } static int msm_bus_dbg_ib_set(void *data, u64 val) { requested_vectors[0].ib = val; MSM_BUS_DBG("Set ib: %llu\n", val); return 0; } DEFINE_SIMPLE_ATTRIBUTE(shell_client_ib_fops, msm_bus_dbg_ib_get, msm_bus_dbg_ib_set, "%llu\n"); static int msm_bus_dbg_en_get(void *data, u64 *val) { *val = clstate.enable; MSM_BUS_DBG("Get enable: %llu\n", *val); return 0; } static int msm_bus_dbg_en_set(void *data, u64 val) { int ret = 0; clstate.enable = val; if (clstate.enable) { if (!clstate.cl) { MSM_BUS_DBG("client: %u\n", clstate.cl); clstate.cl = msm_bus_dbg_register_client(); if (clstate.cl) ret = msm_bus_dbg_update_cl_request(clstate.cl); } else { MSM_BUS_DBG("update request for cl: %u\n", clstate.cl); ret = msm_bus_dbg_update_cl_request(clstate.cl); } } MSM_BUS_DBG("Set enable: %llu\n", val); return ret; } DEFINE_SIMPLE_ATTRIBUTE(shell_client_en_fops, msm_bus_dbg_en_get, msm_bus_dbg_en_set, "%llu\n"); /** * The following funtions are used for viewing the client data * and changing the client request at run-time */ static ssize_t client_data_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { int bsize = 0; uint32_t cl = (uint32_t)file->private_data; struct msm_bus_cldata *cldata = NULL; list_for_each_entry(cldata, &cl_list, list) { if (cldata->clid == cl) break; } bsize = cldata->size; return simple_read_from_buffer(buf, count, ppos, cldata->buffer, bsize); } static int client_data_open(struct inode *inode, struct file *file) { file->private_data = inode->i_private; return 0; } static const struct file_operations client_data_fops = { .open = client_data_open, .read = client_data_read, }; struct dentry *msm_bus_dbg_create(const char *name, mode_t mode, struct dentry *dent, uint32_t clid) { if (dent == NULL) { MSM_BUS_DBG("debugfs not ready yet\n"); return NULL; } return debugfs_create_file(name, mode, dent, (void *)clid, &client_data_fops); } static int 
msm_bus_dbg_record_client(const struct msm_bus_scale_pdata *pdata, int index, uint32_t clid, struct dentry *file) { struct msm_bus_cldata *cldata; cldata = kmalloc(sizeof(struct msm_bus_cldata), GFP_KERNEL); if (!cldata) { MSM_BUS_DBG("Failed to allocate memory for client data\n"); return -ENOMEM; } cldata->pdata = pdata; cldata->index = index; cldata->clid = clid; cldata->file = file; cldata->size = 0; list_add_tail(&cldata->list, &cl_list); return 0; } static void msm_bus_dbg_free_client(uint32_t clid) { struct msm_bus_cldata *cldata = NULL; list_for_each_entry(cldata, &cl_list, list) { if (cldata->clid == clid) { debugfs_remove(cldata->file); list_del(&cldata->list); kfree(cldata); break; } } } static int msm_bus_dbg_fill_cl_buffer(const struct msm_bus_scale_pdata *pdata, int index, uint32_t clid) { int i = 0, j; char *buf = NULL; struct msm_bus_cldata *cldata = NULL; struct timespec ts; list_for_each_entry(cldata, &cl_list, list) { if (cldata->clid == clid) break; } if (cldata->file == NULL) { if (pdata->name == NULL) { MSM_BUS_DBG("Client doesn't have a name\n"); return -EINVAL; } cldata->file = msm_bus_dbg_create(pdata->name, S_IRUGO, clients, clid); } if (cldata->size < (MAX_BUFF_SIZE - FILL_LIMIT)) i = cldata->size; else { i = 0; cldata->size = 0; } buf = cldata->buffer; ts = ktime_to_timespec(ktime_get()); i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%d.%d\n", (int)ts.tv_sec, (int)ts.tv_nsec); i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "curr : %d\n", index); i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "masters: "); for (j = 0; j < pdata->usecase->num_paths; j++) i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d ", pdata->usecase[index].vectors[j].src); i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nslaves : "); for (j = 0; j < pdata->usecase->num_paths; j++) i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d ", pdata->usecase[index].vectors[j].dst); i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nab : "); for (j = 0; j < pdata->usecase->num_paths; j++) i += 
scnprintf(buf + i, MAX_BUFF_SIZE - i, "%u ", pdata->usecase[index].vectors[j].ab); i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nib : "); for (j = 0; j < pdata->usecase->num_paths; j++) i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%u ", pdata->usecase[index].vectors[j].ib); i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n"); cldata->size = i; return i; } static int msm_bus_dbg_update_request(struct msm_bus_cldata *cldata, int index) { int ret = 0; if ((index < 0) || (index > cldata->pdata->num_usecases)) { MSM_BUS_DBG("Invalid index!\n"); return -EINVAL; } ret = msm_bus_scale_client_update_request(cldata->clid, index); return ret; } static ssize_t msm_bus_dbg_update_request_write(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct msm_bus_cldata *cldata; unsigned long index = 0; int ret = 0; char *chid; char *buf = kmalloc((sizeof(char) * (cnt + 1)), GFP_KERNEL); if (!buf || IS_ERR(buf)) { MSM_BUS_ERR("Memory allocation for buffer failed\n"); return -ENOMEM; } if (cnt == 0) return 0; if (copy_from_user(buf, ubuf, cnt)) return -EFAULT; buf[cnt] = '\0'; chid = buf; MSM_BUS_DBG("buffer: %s\n size: %d\n", buf, sizeof(ubuf)); list_for_each_entry(cldata, &cl_list, list) { if (strstr(chid, cldata->pdata->name)) { cldata = cldata; strsep(&chid, " "); if (chid) { ret = strict_strtoul(chid, 10, &index); if (ret) { MSM_BUS_DBG("Index conversion" " failed\n"); return -EFAULT; } } else MSM_BUS_DBG("Error parsing input. 
Index not" " found\n"); break; } } msm_bus_dbg_update_request(cldata, index); kfree(buf); return cnt; } /** * The following funtions are used for viewing the commit data * for each fabric */ static ssize_t fabric_data_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct msm_bus_fab_list *fablist = NULL; int bsize = 0; ssize_t ret; const char *name = file->private_data; mutex_lock(&msm_bus_dbg_fablist_lock); list_for_each_entry(fablist, &fabdata_list, list) { if (strcmp(fablist->name, name) == 0) break; } bsize = fablist->size; ret = simple_read_from_buffer(buf, count, ppos, fablist->buffer, bsize); mutex_unlock(&msm_bus_dbg_fablist_lock); return ret; } static const struct file_operations fabric_data_fops = { .open = client_data_open, .read = fabric_data_read, }; static int msm_bus_dbg_record_fabric(const char *fabname, struct dentry *file) { struct msm_bus_fab_list *fablist; int ret = 0; mutex_lock(&msm_bus_dbg_fablist_lock); fablist = kmalloc(sizeof(struct msm_bus_fab_list), GFP_KERNEL); if (!fablist) { MSM_BUS_DBG("Failed to allocate memory for commit data\n"); ret = -ENOMEM; goto err; } fablist->name = fabname; fablist->size = 0; list_add_tail(&fablist->list, &fabdata_list); err: mutex_unlock(&msm_bus_dbg_fablist_lock); return ret; } static void msm_bus_dbg_free_fabric(const char *fabname) { struct msm_bus_fab_list *fablist = NULL; mutex_lock(&msm_bus_dbg_fablist_lock); list_for_each_entry(fablist, &fabdata_list, list) { if (strcmp(fablist->name, fabname) == 0) { debugfs_remove(fablist->file); list_del(&fablist->list); kfree(fablist); break; } } mutex_unlock(&msm_bus_dbg_fablist_lock); } static int msm_bus_dbg_fill_fab_buffer(const char *fabname, void *cdata, int nmasters, int nslaves, int ntslaves) { int i; char *buf = NULL; struct msm_bus_fab_list *fablist = NULL; struct timespec ts; mutex_lock(&msm_bus_dbg_fablist_lock); list_for_each_entry(fablist, &fabdata_list, list) { if (strcmp(fablist->name, fabname) == 0) break; } if 
(fablist->file == NULL) { MSM_BUS_DBG("Fabric dbg entry does not exist\n"); mutex_unlock(&msm_bus_dbg_fablist_lock); return -EFAULT; } if (fablist->size < MAX_BUFF_SIZE - 256) i = fablist->size; else { i = 0; fablist->size = 0; } buf = fablist->buffer; mutex_unlock(&msm_bus_dbg_fablist_lock); ts = ktime_to_timespec(ktime_get()); i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%d.%d\n", (int)ts.tv_sec, (int)ts.tv_nsec); msm_bus_rpm_fill_cdata_buffer(&i, buf, MAX_BUFF_SIZE, cdata, nmasters, nslaves, ntslaves); i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n"); mutex_lock(&msm_bus_dbg_fablist_lock); fablist->size = i; mutex_unlock(&msm_bus_dbg_fablist_lock); return 0; } static const struct file_operations msm_bus_dbg_update_request_fops = { .open = client_data_open, .write = msm_bus_dbg_update_request_write, }; /** * msm_bus_dbg_client_data() - Add debug data for clients * @pdata: Platform data of the client * @index: The current index or operation to be performed * @clid: Client handle obtained during registration */ void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata, int index, uint32_t clid) { struct dentry *file = NULL; if (index == MSM_BUS_DBG_REGISTER) { msm_bus_dbg_record_client(pdata, index, clid, file); if (!pdata->name) { MSM_BUS_DBG("Cannot create debugfs entry. 
Null name\n"); return; } } else if (index == MSM_BUS_DBG_UNREGISTER) { msm_bus_dbg_free_client(clid); MSM_BUS_DBG("Client %d unregistered\n", clid); } else msm_bus_dbg_fill_cl_buffer(pdata, index, clid); } EXPORT_SYMBOL(msm_bus_dbg_client_data); /** * msm_bus_dbg_commit_data() - Add commit data from fabrics * @fabname: Fabric name specified in platform data * @cdata: Commit Data * @nmasters: Number of masters attached to fabric * @nslaves: Number of slaves attached to fabric * @ntslaves: Number of tiered slaves attached to fabric * @op: Operation to be performed */ void msm_bus_dbg_commit_data(const char *fabname, void *cdata, int nmasters, int nslaves, int ntslaves, int op) { struct dentry *file = NULL; if (op == MSM_BUS_DBG_REGISTER) msm_bus_dbg_record_fabric(fabname, file); else if (op == MSM_BUS_DBG_UNREGISTER) msm_bus_dbg_free_fabric(fabname); else msm_bus_dbg_fill_fab_buffer(fabname, cdata, nmasters, nslaves, ntslaves); } EXPORT_SYMBOL(msm_bus_dbg_commit_data); static int __init msm_bus_debugfs_init(void) { struct dentry *commit, *shell_client; struct msm_bus_fab_list *fablist; struct msm_bus_cldata *cldata = NULL; uint64_t val = 0; dir = debugfs_create_dir("msm-bus-dbg", NULL); if ((!dir) || IS_ERR(dir)) { MSM_BUS_ERR("Couldn't create msm-bus-dbg\n"); goto err; } clients = debugfs_create_dir("client-data", dir); if ((!dir) || IS_ERR(dir)) { MSM_BUS_ERR("Couldn't create clients\n"); goto err; } shell_client = debugfs_create_dir("shell-client", dir); if ((!dir) || IS_ERR(dir)) { MSM_BUS_ERR("Couldn't create clients\n"); goto err; } commit = debugfs_create_dir("commit-data", dir); if ((!dir) || IS_ERR(dir)) { MSM_BUS_ERR("Couldn't create commit\n"); goto err; } if (debugfs_create_file("update_request", S_IRUGO | S_IWUSR, shell_client, &val, &shell_client_en_fops) == NULL) goto err; if (debugfs_create_file("ib", S_IRUGO | S_IWUSR, shell_client, &val, &shell_client_ib_fops) == NULL) goto err; if (debugfs_create_file("ab", S_IRUGO | S_IWUSR, shell_client, &val, 
&shell_client_ab_fops) == NULL) goto err; if (debugfs_create_file("slv", S_IRUGO | S_IWUSR, shell_client, &val, &shell_client_slv_fops) == NULL) goto err; if (debugfs_create_file("mas", S_IRUGO | S_IWUSR, shell_client, &val, &shell_client_mas_fops) == NULL) goto err; if (debugfs_create_file("update-request", S_IRUGO | S_IWUSR, clients, NULL, &msm_bus_dbg_update_request_fops) == NULL) goto err; list_for_each_entry(cldata, &cl_list, list) { if (cldata->pdata->name == NULL) { MSM_BUS_DBG("Client name not found\n"); continue; } cldata->file = msm_bus_dbg_create(cldata-> pdata->name, S_IRUGO, clients, cldata->clid); } mutex_lock(&msm_bus_dbg_fablist_lock); list_for_each_entry(fablist, &fabdata_list, list) { fablist->file = debugfs_create_file(fablist->name, S_IRUGO, commit, (void *)fablist->name, &fabric_data_fops); if (fablist->file == NULL) { MSM_BUS_DBG("Cannot create files for commit data\n"); goto err; } } mutex_unlock(&msm_bus_dbg_fablist_lock); msm_bus_dbg_init_vectors(); return 0; err: debugfs_remove_recursive(dir); return -ENODEV; } late_initcall(msm_bus_debugfs_init); static void __exit msm_bus_dbg_teardown(void) { struct msm_bus_fab_list *fablist = NULL, *fablist_temp; struct msm_bus_cldata *cldata = NULL, *cldata_temp; debugfs_remove_recursive(dir); list_for_each_entry_safe(cldata, cldata_temp, &cl_list, list) { list_del(&cldata->list); kfree(cldata); } mutex_lock(&msm_bus_dbg_fablist_lock); list_for_each_entry_safe(fablist, fablist_temp, &fabdata_list, list) { list_del(&fablist->list); kfree(fablist); } mutex_unlock(&msm_bus_dbg_fablist_lock); } module_exit(msm_bus_dbg_teardown); MODULE_DESCRIPTION("Debugfs for msm bus scaling client"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Gagan Mac <gmac@codeaurora.org>");
gpl-2.0
drod2169/drodspeed-bfs
drivers/media/video/davinci/dm644x_ccdc.c
501
25002
/* * Copyright (C) 2006-2009 Texas Instruments Inc * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * CCDC hardware module for DM6446 * ------------------------------ * * This module is for configuring CCD controller of DM6446 VPFE to capture * Raw yuv or Bayer RGB data from a decoder. CCDC has several modules * such as Defect Pixel Correction, Color Space Conversion etc to * pre-process the Raw Bayer RGB data, before writing it to SDRAM. This * module also allows application to configure individual * module parameters through VPFE_CMD_S_CCDC_RAW_PARAMS IOCTL. * To do so, application includes dm644x_ccdc.h and vpfe_capture.h header * files. The setparams() API is called by vpfe_capture driver * to configure module parameters. This file is named DM644x so that other * variants such DM6443 may be supported using the same module. 
* * TODO: Test Raw bayer parameter settings and bayer capture * Split module parameter structure to module specific ioctl structs * investigate if enum used for user space type definition * to be replaced by #defines or integer */ #include <linux/platform_device.h> #include <linux/uaccess.h> #include <linux/videodev2.h> #include <media/davinci/dm644x_ccdc.h> #include <media/davinci/vpss.h> #include "dm644x_ccdc_regs.h" #include "ccdc_hw_device.h" MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CCDC Driver for DM6446"); MODULE_AUTHOR("Texas Instruments"); static struct device *dev; /* Object for CCDC raw mode */ static struct ccdc_params_raw ccdc_hw_params_raw = { .pix_fmt = CCDC_PIXFMT_RAW, .frm_fmt = CCDC_FRMFMT_PROGRESSIVE, .win = CCDC_WIN_VGA, .fid_pol = VPFE_PINPOL_POSITIVE, .vd_pol = VPFE_PINPOL_POSITIVE, .hd_pol = VPFE_PINPOL_POSITIVE, .config_params = { .data_sz = CCDC_DATA_10BITS, }, }; /* Object for CCDC ycbcr mode */ static struct ccdc_params_ycbcr ccdc_hw_params_ycbcr = { .pix_fmt = CCDC_PIXFMT_YCBCR_8BIT, .frm_fmt = CCDC_FRMFMT_INTERLACED, .win = CCDC_WIN_PAL, .fid_pol = VPFE_PINPOL_POSITIVE, .vd_pol = VPFE_PINPOL_POSITIVE, .hd_pol = VPFE_PINPOL_POSITIVE, .bt656_enable = 1, .pix_order = CCDC_PIXORDER_CBYCRY, .buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED }; #define CCDC_MAX_RAW_YUV_FORMATS 2 /* Raw Bayer formats */ static u32 ccdc_raw_bayer_pix_formats[] = {V4L2_PIX_FMT_SBGGR8, V4L2_PIX_FMT_SBGGR16}; /* Raw YUV formats */ static u32 ccdc_raw_yuv_pix_formats[] = {V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_YUYV}; static void *__iomem ccdc_base_addr; static int ccdc_addr_size; static enum vpfe_hw_if_type ccdc_if_type; /* register access routines */ static inline u32 regr(u32 offset) { return __raw_readl(ccdc_base_addr + offset); } static inline void regw(u32 val, u32 offset) { __raw_writel(val, ccdc_base_addr + offset); } static void ccdc_set_ccdc_base(void *addr, int size) { ccdc_base_addr = addr; ccdc_addr_size = size; } static void ccdc_enable(int flag) { regw(flag, 
CCDC_PCR); } static void ccdc_enable_vport(int flag) { if (flag) /* enable video port */ regw(CCDC_ENABLE_VIDEO_PORT, CCDC_FMTCFG); else regw(CCDC_DISABLE_VIDEO_PORT, CCDC_FMTCFG); } /* * ccdc_setwin() * This function will configure the window size * to be capture in CCDC reg */ void ccdc_setwin(struct v4l2_rect *image_win, enum ccdc_frmfmt frm_fmt, int ppc) { int horz_start, horz_nr_pixels; int vert_start, vert_nr_lines; int val = 0, mid_img = 0; dev_dbg(dev, "\nStarting ccdc_setwin..."); /* * ppc - per pixel count. indicates how many pixels per cell * output to SDRAM. example, for ycbcr, it is one y and one c, so 2. * raw capture this is 1 */ horz_start = image_win->left << (ppc - 1); horz_nr_pixels = (image_win->width << (ppc - 1)) - 1; regw((horz_start << CCDC_HORZ_INFO_SPH_SHIFT) | horz_nr_pixels, CCDC_HORZ_INFO); vert_start = image_win->top; if (frm_fmt == CCDC_FRMFMT_INTERLACED) { vert_nr_lines = (image_win->height >> 1) - 1; vert_start >>= 1; /* Since first line doesn't have any data */ vert_start += 1; /* configure VDINT0 */ val = (vert_start << CCDC_VDINT_VDINT0_SHIFT); regw(val, CCDC_VDINT); } else { /* Since first line doesn't have any data */ vert_start += 1; vert_nr_lines = image_win->height - 1; /* * configure VDINT0 and VDINT1. 
VDINT1 will be at half * of image height */ mid_img = vert_start + (image_win->height / 2); val = (vert_start << CCDC_VDINT_VDINT0_SHIFT) | (mid_img & CCDC_VDINT_VDINT1_MASK); regw(val, CCDC_VDINT); } regw((vert_start << CCDC_VERT_START_SLV0_SHIFT) | vert_start, CCDC_VERT_START); regw(vert_nr_lines, CCDC_VERT_LINES); dev_dbg(dev, "\nEnd of ccdc_setwin..."); } static void ccdc_readregs(void) { unsigned int val = 0; val = regr(CCDC_ALAW); dev_notice(dev, "\nReading 0x%x to ALAW...\n", val); val = regr(CCDC_CLAMP); dev_notice(dev, "\nReading 0x%x to CLAMP...\n", val); val = regr(CCDC_DCSUB); dev_notice(dev, "\nReading 0x%x to DCSUB...\n", val); val = regr(CCDC_BLKCMP); dev_notice(dev, "\nReading 0x%x to BLKCMP...\n", val); val = regr(CCDC_FPC_ADDR); dev_notice(dev, "\nReading 0x%x to FPC_ADDR...\n", val); val = regr(CCDC_FPC); dev_notice(dev, "\nReading 0x%x to FPC...\n", val); val = regr(CCDC_FMTCFG); dev_notice(dev, "\nReading 0x%x to FMTCFG...\n", val); val = regr(CCDC_COLPTN); dev_notice(dev, "\nReading 0x%x to COLPTN...\n", val); val = regr(CCDC_FMT_HORZ); dev_notice(dev, "\nReading 0x%x to FMT_HORZ...\n", val); val = regr(CCDC_FMT_VERT); dev_notice(dev, "\nReading 0x%x to FMT_VERT...\n", val); val = regr(CCDC_HSIZE_OFF); dev_notice(dev, "\nReading 0x%x to HSIZE_OFF...\n", val); val = regr(CCDC_SDOFST); dev_notice(dev, "\nReading 0x%x to SDOFST...\n", val); val = regr(CCDC_VP_OUT); dev_notice(dev, "\nReading 0x%x to VP_OUT...\n", val); val = regr(CCDC_SYN_MODE); dev_notice(dev, "\nReading 0x%x to SYN_MODE...\n", val); val = regr(CCDC_HORZ_INFO); dev_notice(dev, "\nReading 0x%x to HORZ_INFO...\n", val); val = regr(CCDC_VERT_START); dev_notice(dev, "\nReading 0x%x to VERT_START...\n", val); val = regr(CCDC_VERT_LINES); dev_notice(dev, "\nReading 0x%x to VERT_LINES...\n", val); } static int validate_ccdc_param(struct ccdc_config_params_raw *ccdcparam) { if (ccdcparam->alaw.enable) { if ((ccdcparam->alaw.gama_wd > CCDC_GAMMA_BITS_09_0) || (ccdcparam->alaw.gama_wd < 
CCDC_GAMMA_BITS_15_6) || (ccdcparam->alaw.gama_wd < ccdcparam->data_sz)) { dev_dbg(dev, "\nInvalid data line select"); return -1; } } return 0; } static int ccdc_update_raw_params(struct ccdc_config_params_raw *raw_params) { struct ccdc_config_params_raw *config_params = &ccdc_hw_params_raw.config_params; unsigned int *fpc_virtaddr = NULL; unsigned int *fpc_physaddr = NULL; memcpy(config_params, raw_params, sizeof(*raw_params)); /* * allocate memory for fault pixel table and copy the user * values to the table */ if (!config_params->fault_pxl.enable) return 0; fpc_physaddr = (unsigned int *)config_params->fault_pxl.fpc_table_addr; fpc_virtaddr = (unsigned int *)phys_to_virt( (unsigned long)fpc_physaddr); /* * Allocate memory for FPC table if current * FPC table buffer is not big enough to * accomodate FPC Number requested */ if (raw_params->fault_pxl.fp_num != config_params->fault_pxl.fp_num) { if (fpc_physaddr != NULL) { free_pages((unsigned long)fpc_physaddr, get_order (config_params->fault_pxl.fp_num * FP_NUM_BYTES)); } /* Allocate memory for FPC table */ fpc_virtaddr = (unsigned int *)__get_free_pages(GFP_KERNEL | GFP_DMA, get_order(raw_params-> fault_pxl.fp_num * FP_NUM_BYTES)); if (fpc_virtaddr == NULL) { dev_dbg(dev, "\nUnable to allocate memory for FPC"); return -EFAULT; } fpc_physaddr = (unsigned int *)virt_to_phys((void *)fpc_virtaddr); } /* Copy number of fault pixels and FPC table */ config_params->fault_pxl.fp_num = raw_params->fault_pxl.fp_num; if (copy_from_user(fpc_virtaddr, (void __user *)raw_params->fault_pxl.fpc_table_addr, config_params->fault_pxl.fp_num * FP_NUM_BYTES)) { dev_dbg(dev, "\n copy_from_user failed"); return -EFAULT; } config_params->fault_pxl.fpc_table_addr = (unsigned int)fpc_physaddr; return 0; } static int ccdc_close(struct device *dev) { struct ccdc_config_params_raw *config_params = &ccdc_hw_params_raw.config_params; unsigned int *fpc_physaddr = NULL, *fpc_virtaddr = NULL; fpc_physaddr = (unsigned int 
*)config_params->fault_pxl.fpc_table_addr; if (fpc_physaddr != NULL) { fpc_virtaddr = (unsigned int *) phys_to_virt((unsigned long)fpc_physaddr); free_pages((unsigned long)fpc_virtaddr, get_order(config_params->fault_pxl.fp_num * FP_NUM_BYTES)); } return 0; } /* * ccdc_restore_defaults() * This function will write defaults to all CCDC registers */ static void ccdc_restore_defaults(void) { int i; /* disable CCDC */ ccdc_enable(0); /* set all registers to default value */ for (i = 4; i <= 0x94; i += 4) regw(0, i); regw(CCDC_NO_CULLING, CCDC_CULLING); regw(CCDC_GAMMA_BITS_11_2, CCDC_ALAW); } static int ccdc_open(struct device *device) { dev = device; ccdc_restore_defaults(); if (ccdc_if_type == VPFE_RAW_BAYER) ccdc_enable_vport(1); return 0; } static void ccdc_sbl_reset(void) { vpss_clear_wbl_overflow(VPSS_PCR_CCDC_WBL_O); } /* Parameter operations */ static int ccdc_set_params(void __user *params) { struct ccdc_config_params_raw ccdc_raw_params; int x; if (ccdc_if_type != VPFE_RAW_BAYER) return -EINVAL; x = copy_from_user(&ccdc_raw_params, params, sizeof(ccdc_raw_params)); if (x) { dev_dbg(dev, "ccdc_set_params: error in copying" "ccdc params, %d\n", x); return -EFAULT; } if (!validate_ccdc_param(&ccdc_raw_params)) { if (!ccdc_update_raw_params(&ccdc_raw_params)) return 0; } return -EINVAL; } /* * ccdc_config_ycbcr() * This function will configure CCDC for YCbCr video capture */ void ccdc_config_ycbcr(void) { struct ccdc_params_ycbcr *params = &ccdc_hw_params_ycbcr; u32 syn_mode; dev_dbg(dev, "\nStarting ccdc_config_ycbcr..."); /* * first restore the CCDC registers to default values * This is important since we assume default values to be set in * a lot of registers that we didn't touch */ ccdc_restore_defaults(); /* * configure pixel format, frame format, configure video frame * format, enable output to SDRAM, enable internal timing generator * and 8bit pack mode */ syn_mode = (((params->pix_fmt & CCDC_SYN_MODE_INPMOD_MASK) << CCDC_SYN_MODE_INPMOD_SHIFT) | 
((params->frm_fmt & CCDC_SYN_FLDMODE_MASK) << CCDC_SYN_FLDMODE_SHIFT) | CCDC_VDHDEN_ENABLE | CCDC_WEN_ENABLE | CCDC_DATA_PACK_ENABLE); /* setup BT.656 sync mode */ if (params->bt656_enable) { regw(CCDC_REC656IF_BT656_EN, CCDC_REC656IF); /* * configure the FID, VD, HD pin polarity, * fld,hd pol positive, vd negative, 8-bit data */ syn_mode |= CCDC_SYN_MODE_VD_POL_NEGATIVE | CCDC_SYN_MODE_8BITS; } else { /* y/c external sync mode */ syn_mode |= (((params->fid_pol & CCDC_FID_POL_MASK) << CCDC_FID_POL_SHIFT) | ((params->hd_pol & CCDC_HD_POL_MASK) << CCDC_HD_POL_SHIFT) | ((params->vd_pol & CCDC_VD_POL_MASK) << CCDC_VD_POL_SHIFT)); } regw(syn_mode, CCDC_SYN_MODE); /* configure video window */ ccdc_setwin(&params->win, params->frm_fmt, 2); /* * configure the order of y cb cr in SDRAM, and disable latch * internal register on vsync */ regw((params->pix_order << CCDC_CCDCFG_Y8POS_SHIFT) | CCDC_LATCH_ON_VSYNC_DISABLE, CCDC_CCDCFG); /* * configure the horizontal line offset. This should be a * on 32 byte bondary. 
So clear LSB 5 bits */ regw(((params->win.width * 2 + 31) & ~0x1f), CCDC_HSIZE_OFF); /* configure the memory line offset */ if (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED) /* two fields are interleaved in memory */ regw(CCDC_SDOFST_FIELD_INTERLEAVED, CCDC_SDOFST); ccdc_sbl_reset(); dev_dbg(dev, "\nEnd of ccdc_config_ycbcr...\n"); ccdc_readregs(); } static void ccdc_config_black_clamp(struct ccdc_black_clamp *bclamp) { u32 val; if (!bclamp->enable) { /* configure DCSub */ val = (bclamp->dc_sub) & CCDC_BLK_DC_SUB_MASK; regw(val, CCDC_DCSUB); dev_dbg(dev, "\nWriting 0x%x to DCSUB...\n", val); regw(CCDC_CLAMP_DEFAULT_VAL, CCDC_CLAMP); dev_dbg(dev, "\nWriting 0x0000 to CLAMP...\n"); return; } /* * Configure gain, Start pixel, No of line to be avg, * No of pixel/line to be avg, & Enable the Black clamping */ val = ((bclamp->sgain & CCDC_BLK_SGAIN_MASK) | ((bclamp->start_pixel & CCDC_BLK_ST_PXL_MASK) << CCDC_BLK_ST_PXL_SHIFT) | ((bclamp->sample_ln & CCDC_BLK_SAMPLE_LINE_MASK) << CCDC_BLK_SAMPLE_LINE_SHIFT) | ((bclamp->sample_pixel & CCDC_BLK_SAMPLE_LN_MASK) << CCDC_BLK_SAMPLE_LN_SHIFT) | CCDC_BLK_CLAMP_ENABLE); regw(val, CCDC_CLAMP); dev_dbg(dev, "\nWriting 0x%x to CLAMP...\n", val); /* If Black clamping is enable then make dcsub 0 */ regw(CCDC_DCSUB_DEFAULT_VAL, CCDC_DCSUB); dev_dbg(dev, "\nWriting 0x00000000 to DCSUB...\n"); } static void ccdc_config_black_compense(struct ccdc_black_compensation *bcomp) { u32 val; val = ((bcomp->b & CCDC_BLK_COMP_MASK) | ((bcomp->gb & CCDC_BLK_COMP_MASK) << CCDC_BLK_COMP_GB_COMP_SHIFT) | ((bcomp->gr & CCDC_BLK_COMP_MASK) << CCDC_BLK_COMP_GR_COMP_SHIFT) | ((bcomp->r & CCDC_BLK_COMP_MASK) << CCDC_BLK_COMP_R_COMP_SHIFT)); regw(val, CCDC_BLKCMP); } static void ccdc_config_fpc(struct ccdc_fault_pixel *fpc) { u32 val; /* Initially disable FPC */ val = CCDC_FPC_DISABLE; regw(val, CCDC_FPC); if (!fpc->enable) return; /* Configure Fault pixel if needed */ regw(fpc->fpc_table_addr, CCDC_FPC_ADDR); dev_dbg(dev, "\nWriting 0x%x to 
FPC_ADDR...\n", (fpc->fpc_table_addr)); /* Write the FPC params with FPC disable */ val = fpc->fp_num & CCDC_FPC_FPC_NUM_MASK; regw(val, CCDC_FPC); dev_dbg(dev, "\nWriting 0x%x to FPC...\n", val); /* read the FPC register */ val = regr(CCDC_FPC) | CCDC_FPC_ENABLE; regw(val, CCDC_FPC); dev_dbg(dev, "\nWriting 0x%x to FPC...\n", val); } /* * ccdc_config_raw() * This function will configure CCDC for Raw capture mode */ void ccdc_config_raw(void) { struct ccdc_params_raw *params = &ccdc_hw_params_raw; struct ccdc_config_params_raw *config_params = &ccdc_hw_params_raw.config_params; unsigned int syn_mode = 0; unsigned int val; dev_dbg(dev, "\nStarting ccdc_config_raw..."); /* Reset CCDC */ ccdc_restore_defaults(); /* Disable latching function registers on VSYNC */ regw(CCDC_LATCH_ON_VSYNC_DISABLE, CCDC_CCDCFG); /* * Configure the vertical sync polarity(SYN_MODE.VDPOL), * horizontal sync polarity (SYN_MODE.HDPOL), frame id polarity * (SYN_MODE.FLDPOL), frame format(progressive or interlace), * data size(SYNMODE.DATSIZ), &pixel format (Input mode), output * SDRAM, enable internal timing generator */ syn_mode = (((params->vd_pol & CCDC_VD_POL_MASK) << CCDC_VD_POL_SHIFT) | ((params->hd_pol & CCDC_HD_POL_MASK) << CCDC_HD_POL_SHIFT) | ((params->fid_pol & CCDC_FID_POL_MASK) << CCDC_FID_POL_SHIFT) | ((params->frm_fmt & CCDC_FRM_FMT_MASK) << CCDC_FRM_FMT_SHIFT) | ((config_params->data_sz & CCDC_DATA_SZ_MASK) << CCDC_DATA_SZ_SHIFT) | ((params->pix_fmt & CCDC_PIX_FMT_MASK) << CCDC_PIX_FMT_SHIFT) | CCDC_WEN_ENABLE | CCDC_VDHDEN_ENABLE); /* Enable and configure aLaw register if needed */ if (config_params->alaw.enable) { val = ((config_params->alaw.gama_wd & CCDC_ALAW_GAMA_WD_MASK) | CCDC_ALAW_ENABLE); regw(val, CCDC_ALAW); dev_dbg(dev, "\nWriting 0x%x to ALAW...\n", val); } /* Configure video window */ ccdc_setwin(&params->win, params->frm_fmt, CCDC_PPC_RAW); /* Configure Black Clamp */ ccdc_config_black_clamp(&config_params->blk_clamp); /* Configure Black level compensation */ 
ccdc_config_black_compense(&config_params->blk_comp); /* Configure Fault Pixel Correction */ ccdc_config_fpc(&config_params->fault_pxl); /* If data size is 8 bit then pack the data */ if ((config_params->data_sz == CCDC_DATA_8BITS) || config_params->alaw.enable) syn_mode |= CCDC_DATA_PACK_ENABLE; #ifdef CONFIG_DM644X_VIDEO_PORT_ENABLE /* enable video port */ val = CCDC_ENABLE_VIDEO_PORT; #else /* disable video port */ val = CCDC_DISABLE_VIDEO_PORT; #endif if (config_params->data_sz == CCDC_DATA_8BITS) val |= (CCDC_DATA_10BITS & CCDC_FMTCFG_VPIN_MASK) << CCDC_FMTCFG_VPIN_SHIFT; else val |= (config_params->data_sz & CCDC_FMTCFG_VPIN_MASK) << CCDC_FMTCFG_VPIN_SHIFT; /* Write value in FMTCFG */ regw(val, CCDC_FMTCFG); dev_dbg(dev, "\nWriting 0x%x to FMTCFG...\n", val); /* Configure the color pattern according to mt9t001 sensor */ regw(CCDC_COLPTN_VAL, CCDC_COLPTN); dev_dbg(dev, "\nWriting 0xBB11BB11 to COLPTN...\n"); /* * Configure Data formatter(Video port) pixel selection * (FMT_HORZ, FMT_VERT) */ val = ((params->win.left & CCDC_FMT_HORZ_FMTSPH_MASK) << CCDC_FMT_HORZ_FMTSPH_SHIFT) | (params->win.width & CCDC_FMT_HORZ_FMTLNH_MASK); regw(val, CCDC_FMT_HORZ); dev_dbg(dev, "\nWriting 0x%x to FMT_HORZ...\n", val); val = (params->win.top & CCDC_FMT_VERT_FMTSLV_MASK) << CCDC_FMT_VERT_FMTSLV_SHIFT; if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) val |= (params->win.height) & CCDC_FMT_VERT_FMTLNV_MASK; else val |= (params->win.height >> 1) & CCDC_FMT_VERT_FMTLNV_MASK; dev_dbg(dev, "\nparams->win.height 0x%x ...\n", params->win.height); regw(val, CCDC_FMT_VERT); dev_dbg(dev, "\nWriting 0x%x to FMT_VERT...\n", val); dev_dbg(dev, "\nbelow regw(val, FMT_VERT)..."); /* * Configure Horizontal offset register. 
If pack 8 is enabled then * 1 pixel will take 1 byte */ if ((config_params->data_sz == CCDC_DATA_8BITS) || config_params->alaw.enable) regw((params->win.width + CCDC_32BYTE_ALIGN_VAL) & CCDC_HSIZE_OFF_MASK, CCDC_HSIZE_OFF); else /* else one pixel will take 2 byte */ regw(((params->win.width * CCDC_TWO_BYTES_PER_PIXEL) + CCDC_32BYTE_ALIGN_VAL) & CCDC_HSIZE_OFF_MASK, CCDC_HSIZE_OFF); /* Set value for SDOFST */ if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) { if (params->image_invert_enable) { /* For intelace inverse mode */ regw(CCDC_INTERLACED_IMAGE_INVERT, CCDC_SDOFST); dev_dbg(dev, "\nWriting 0x4B6D to SDOFST...\n"); } else { /* For intelace non inverse mode */ regw(CCDC_INTERLACED_NO_IMAGE_INVERT, CCDC_SDOFST); dev_dbg(dev, "\nWriting 0x0249 to SDOFST...\n"); } } else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) { regw(CCDC_PROGRESSIVE_NO_IMAGE_INVERT, CCDC_SDOFST); dev_dbg(dev, "\nWriting 0x0000 to SDOFST...\n"); } /* * Configure video port pixel selection (VPOUT) * Here -1 is to make the height value less than FMT_VERT.FMTLNV */ if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) val = (((params->win.height - 1) & CCDC_VP_OUT_VERT_NUM_MASK)) << CCDC_VP_OUT_VERT_NUM_SHIFT; else val = ((((params->win.height >> CCDC_INTERLACED_HEIGHT_SHIFT) - 1) & CCDC_VP_OUT_VERT_NUM_MASK)) << CCDC_VP_OUT_VERT_NUM_SHIFT; val |= ((((params->win.width))) & CCDC_VP_OUT_HORZ_NUM_MASK) << CCDC_VP_OUT_HORZ_NUM_SHIFT; val |= (params->win.left) & CCDC_VP_OUT_HORZ_ST_MASK; regw(val, CCDC_VP_OUT); dev_dbg(dev, "\nWriting 0x%x to VP_OUT...\n", val); regw(syn_mode, CCDC_SYN_MODE); dev_dbg(dev, "\nWriting 0x%x to SYN_MODE...\n", syn_mode); ccdc_sbl_reset(); dev_dbg(dev, "\nend of ccdc_config_raw..."); ccdc_readregs(); } static int ccdc_configure(void) { if (ccdc_if_type == VPFE_RAW_BAYER) ccdc_config_raw(); else ccdc_config_ycbcr(); return 0; } static int ccdc_set_buftype(enum ccdc_buftype buf_type) { if (ccdc_if_type == VPFE_RAW_BAYER) ccdc_hw_params_raw.buf_type = buf_type; else 
ccdc_hw_params_ycbcr.buf_type = buf_type; return 0; } static enum ccdc_buftype ccdc_get_buftype(void) { if (ccdc_if_type == VPFE_RAW_BAYER) return ccdc_hw_params_raw.buf_type; return ccdc_hw_params_ycbcr.buf_type; } static int ccdc_enum_pix(u32 *pix, int i) { int ret = -EINVAL; if (ccdc_if_type == VPFE_RAW_BAYER) { if (i < ARRAY_SIZE(ccdc_raw_bayer_pix_formats)) { *pix = ccdc_raw_bayer_pix_formats[i]; ret = 0; } } else { if (i < ARRAY_SIZE(ccdc_raw_yuv_pix_formats)) { *pix = ccdc_raw_yuv_pix_formats[i]; ret = 0; } } return ret; } static int ccdc_set_pixel_format(u32 pixfmt) { if (ccdc_if_type == VPFE_RAW_BAYER) { ccdc_hw_params_raw.pix_fmt = CCDC_PIXFMT_RAW; if (pixfmt == V4L2_PIX_FMT_SBGGR8) ccdc_hw_params_raw.config_params.alaw.enable = 1; else if (pixfmt != V4L2_PIX_FMT_SBGGR16) return -EINVAL; } else { if (pixfmt == V4L2_PIX_FMT_YUYV) ccdc_hw_params_ycbcr.pix_order = CCDC_PIXORDER_YCBYCR; else if (pixfmt == V4L2_PIX_FMT_UYVY) ccdc_hw_params_ycbcr.pix_order = CCDC_PIXORDER_CBYCRY; else return -EINVAL; } return 0; } static u32 ccdc_get_pixel_format(void) { struct ccdc_a_law *alaw = &ccdc_hw_params_raw.config_params.alaw; u32 pixfmt; if (ccdc_if_type == VPFE_RAW_BAYER) if (alaw->enable) pixfmt = V4L2_PIX_FMT_SBGGR8; else pixfmt = V4L2_PIX_FMT_SBGGR16; else { if (ccdc_hw_params_ycbcr.pix_order == CCDC_PIXORDER_YCBYCR) pixfmt = V4L2_PIX_FMT_YUYV; else pixfmt = V4L2_PIX_FMT_UYVY; } return pixfmt; } static int ccdc_set_image_window(struct v4l2_rect *win) { if (ccdc_if_type == VPFE_RAW_BAYER) ccdc_hw_params_raw.win = *win; else ccdc_hw_params_ycbcr.win = *win; return 0; } static void ccdc_get_image_window(struct v4l2_rect *win) { if (ccdc_if_type == VPFE_RAW_BAYER) *win = ccdc_hw_params_raw.win; else *win = ccdc_hw_params_ycbcr.win; } static unsigned int ccdc_get_line_length(void) { struct ccdc_config_params_raw *config_params = &ccdc_hw_params_raw.config_params; unsigned int len; if (ccdc_if_type == VPFE_RAW_BAYER) { if ((config_params->alaw.enable) || 
(config_params->data_sz == CCDC_DATA_8BITS)) len = ccdc_hw_params_raw.win.width; else len = ccdc_hw_params_raw.win.width * 2; } else len = ccdc_hw_params_ycbcr.win.width * 2; return ALIGN(len, 32); } static int ccdc_set_frame_format(enum ccdc_frmfmt frm_fmt) { if (ccdc_if_type == VPFE_RAW_BAYER) ccdc_hw_params_raw.frm_fmt = frm_fmt; else ccdc_hw_params_ycbcr.frm_fmt = frm_fmt; return 0; } static enum ccdc_frmfmt ccdc_get_frame_format(void) { if (ccdc_if_type == VPFE_RAW_BAYER) return ccdc_hw_params_raw.frm_fmt; else return ccdc_hw_params_ycbcr.frm_fmt; } static int ccdc_getfid(void) { return (regr(CCDC_SYN_MODE) >> 15) & 1; } /* misc operations */ static inline void ccdc_setfbaddr(unsigned long addr) { regw(addr & 0xffffffe0, CCDC_SDR_ADDR); } static int ccdc_set_hw_if_params(struct vpfe_hw_if_param *params) { ccdc_if_type = params->if_type; switch (params->if_type) { case VPFE_BT656: case VPFE_YCBCR_SYNC_16: case VPFE_YCBCR_SYNC_8: ccdc_hw_params_ycbcr.vd_pol = params->vdpol; ccdc_hw_params_ycbcr.hd_pol = params->hdpol; break; default: /* TODO add support for raw bayer here */ return -EINVAL; } return 0; } static struct ccdc_hw_device ccdc_hw_dev = { .name = "DM6446 CCDC", .owner = THIS_MODULE, .hw_ops = { .open = ccdc_open, .close = ccdc_close, .set_ccdc_base = ccdc_set_ccdc_base, .reset = ccdc_sbl_reset, .enable = ccdc_enable, .set_hw_if_params = ccdc_set_hw_if_params, .set_params = ccdc_set_params, .configure = ccdc_configure, .set_buftype = ccdc_set_buftype, .get_buftype = ccdc_get_buftype, .enum_pix = ccdc_enum_pix, .set_pixel_format = ccdc_set_pixel_format, .get_pixel_format = ccdc_get_pixel_format, .set_frame_format = ccdc_set_frame_format, .get_frame_format = ccdc_get_frame_format, .set_image_window = ccdc_set_image_window, .get_image_window = ccdc_get_image_window, .get_line_length = ccdc_get_line_length, .setfbaddr = ccdc_setfbaddr, .getfid = ccdc_getfid, }, }; static int dm644x_ccdc_init(void) { printk(KERN_NOTICE "dm644x_ccdc_init\n"); if 
(vpfe_register_ccdc_device(&ccdc_hw_dev) < 0) return -1; printk(KERN_NOTICE "%s is registered with vpfe.\n", ccdc_hw_dev.name); return 0; } static void dm644x_ccdc_exit(void) { vpfe_unregister_ccdc_device(&ccdc_hw_dev); } module_init(dm644x_ccdc_init); module_exit(dm644x_ccdc_exit);
gpl-2.0
ownhere/samsung-kernel-sgs2-ownhere
drivers/acpi/acpica/evxfevnt.c
757
27820
/****************************************************************************** * * Module Name: evxfevnt - External Interfaces, ACPI event disable/enable * *****************************************************************************/ /* * Copyright (C) 2000 - 2010, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acevents.h" #include "acnamesp.h" #include "actables.h" #define _COMPONENT ACPI_EVENTS ACPI_MODULE_NAME("evxfevnt") /* Local prototypes */ static acpi_status acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, struct acpi_gpe_block_info *gpe_block, void *context); /******************************************************************************* * * FUNCTION: acpi_enable * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Transfers the system into ACPI mode. 
* ******************************************************************************/ acpi_status acpi_enable(void) { acpi_status status; int retry; ACPI_FUNCTION_TRACE(acpi_enable); /* ACPI tables must be present */ if (!acpi_tb_tables_loaded()) { return_ACPI_STATUS(AE_NO_ACPI_TABLES); } /* Check current mode */ if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) { ACPI_DEBUG_PRINT((ACPI_DB_INIT, "System is already in ACPI mode\n")); return_ACPI_STATUS(AE_OK); } /* Transition to ACPI mode */ status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI); if (ACPI_FAILURE(status)) { ACPI_ERROR((AE_INFO, "Could not transition to ACPI mode")); return_ACPI_STATUS(status); } /* Sanity check that transition succeeded */ for (retry = 0; retry < 30000; ++retry) { if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) { if (retry != 0) ACPI_WARNING((AE_INFO, "Platform took > %d00 usec to enter ACPI mode", retry)); return_ACPI_STATUS(AE_OK); } acpi_os_stall(100); /* 100 usec */ } ACPI_ERROR((AE_INFO, "Hardware did not enter ACPI mode")); return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE); } ACPI_EXPORT_SYMBOL(acpi_enable) /******************************************************************************* * * FUNCTION: acpi_disable * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Transfers the system into LEGACY (non-ACPI) mode. 
* ******************************************************************************/ acpi_status acpi_disable(void) { acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(acpi_disable); if (acpi_hw_get_mode() == ACPI_SYS_MODE_LEGACY) { ACPI_DEBUG_PRINT((ACPI_DB_INIT, "System is already in legacy (non-ACPI) mode\n")); } else { /* Transition to LEGACY mode */ status = acpi_hw_set_mode(ACPI_SYS_MODE_LEGACY); if (ACPI_FAILURE(status)) { ACPI_ERROR((AE_INFO, "Could not exit ACPI mode to legacy mode")); return_ACPI_STATUS(status); } ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI mode disabled\n")); } return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_disable) /******************************************************************************* * * FUNCTION: acpi_enable_event * * PARAMETERS: Event - The fixed eventto be enabled * Flags - Reserved * * RETURN: Status * * DESCRIPTION: Enable an ACPI event (fixed) * ******************************************************************************/ acpi_status acpi_enable_event(u32 event, u32 flags) { acpi_status status = AE_OK; u32 value; ACPI_FUNCTION_TRACE(acpi_enable_event); /* Decode the Fixed Event */ if (event > ACPI_EVENT_MAX) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* * Enable the requested fixed event (by writing a one to the enable * register bit) */ status = acpi_write_bit_register(acpi_gbl_fixed_event_info[event]. enable_register_id, ACPI_ENABLE_EVENT); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Make sure that the hardware responded */ status = acpi_read_bit_register(acpi_gbl_fixed_event_info[event]. 
enable_register_id, &value); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } if (value != 1) { ACPI_ERROR((AE_INFO, "Could not enable %s event", acpi_ut_get_event_name(event))); return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE); } return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_enable_event) /******************************************************************************* * * FUNCTION: acpi_clear_and_enable_gpe * * PARAMETERS: gpe_event_info - GPE to enable * * RETURN: Status * * DESCRIPTION: Clear the given GPE from stale events and enable it. * ******************************************************************************/ static acpi_status acpi_clear_and_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) { acpi_status status; /* * We will only allow a GPE to be enabled if it has either an * associated method (_Lxx/_Exx) or a handler. Otherwise, the * GPE will be immediately disabled by acpi_ev_gpe_dispatch the * first time it fires. */ if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) { return_ACPI_STATUS(AE_NO_HANDLER); } /* Clear the GPE (of stale events) */ status = acpi_hw_clear_gpe(gpe_event_info); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Enable the requested GPE */ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_set_gpe * * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 * gpe_number - GPE level within the GPE block * action - ACPI_GPE_ENABLE or ACPI_GPE_DISABLE * * RETURN: Status * * DESCRIPTION: Enable or disable an individual GPE. This function bypasses * the reference count mechanism used in the acpi_enable_gpe and * acpi_disable_gpe interfaces -- and should be used with care. * * Note: Typically used to disable a runtime GPE for short period of time, * then re-enable it, without disturbing the existing reference counts. 
This * is useful, for example, in the Embedded Controller (EC) driver. * ******************************************************************************/ acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action) { struct acpi_gpe_event_info *gpe_event_info; acpi_status status; acpi_cpu_flags flags; ACPI_FUNCTION_TRACE(acpi_set_gpe); flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); /* Ensure that we have a valid GPE number */ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); if (!gpe_event_info) { status = AE_BAD_PARAMETER; goto unlock_and_exit; } /* Perform the action */ switch (action) { case ACPI_GPE_ENABLE: status = acpi_clear_and_enable_gpe(gpe_event_info); break; case ACPI_GPE_DISABLE: status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); break; default: status = AE_BAD_PARAMETER; break; } unlock_and_exit: acpi_os_release_lock(acpi_gbl_gpe_lock, flags); return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_set_gpe) /******************************************************************************* * * FUNCTION: acpi_enable_gpe * * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 * gpe_number - GPE level within the GPE block * gpe_type - ACPI_GPE_TYPE_RUNTIME or ACPI_GPE_TYPE_WAKE * or both * * RETURN: Status * * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is * hardware-enabled (for runtime GPEs), or the GPE register mask * is updated (for wake GPEs). 
* ******************************************************************************/ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type) { acpi_status status = AE_OK; struct acpi_gpe_event_info *gpe_event_info; acpi_cpu_flags flags; ACPI_FUNCTION_TRACE(acpi_enable_gpe); /* Parameter validation */ if (!gpe_type || (gpe_type & ~ACPI_GPE_TYPE_WAKE_RUN)) { return_ACPI_STATUS(AE_BAD_PARAMETER); } flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); /* Ensure that we have a valid GPE number */ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); if (!gpe_event_info) { status = AE_BAD_PARAMETER; goto unlock_and_exit; } if (gpe_type & ACPI_GPE_TYPE_RUNTIME) { if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) { status = AE_LIMIT; /* Too many references */ goto unlock_and_exit; } gpe_event_info->runtime_count++; if (gpe_event_info->runtime_count == 1) { status = acpi_ev_update_gpe_enable_masks(gpe_event_info); if (ACPI_SUCCESS(status)) { status = acpi_clear_and_enable_gpe(gpe_event_info); } if (ACPI_FAILURE(status)) { gpe_event_info->runtime_count--; goto unlock_and_exit; } } } if (gpe_type & ACPI_GPE_TYPE_WAKE) { /* The GPE must have the ability to wake the system */ if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { status = AE_TYPE; goto unlock_and_exit; } if (gpe_event_info->wakeup_count == ACPI_UINT8_MAX) { status = AE_LIMIT; /* Too many references */ goto unlock_and_exit; } /* * Update the enable mask on the first wakeup reference. Wake GPEs * are only hardware-enabled just before sleeping. */ gpe_event_info->wakeup_count++; if (gpe_event_info->wakeup_count == 1) { status = acpi_ev_update_gpe_enable_masks(gpe_event_info); } } unlock_and_exit: acpi_os_release_lock(acpi_gbl_gpe_lock, flags); return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_enable_gpe) /******************************************************************************* * * FUNCTION: acpi_disable_gpe * * PARAMETERS: gpe_device - Parent GPE Device. 
NULL for GPE0/GPE1 * gpe_number - GPE level within the GPE block * gpe_type - ACPI_GPE_TYPE_RUNTIME or ACPI_GPE_TYPE_WAKE * or both * * RETURN: Status * * DESCRIPTION: Remove a reference to a GPE. When the last reference is * removed, only then is the GPE disabled (for runtime GPEs), or * the GPE mask bit disabled (for wake GPEs) * ******************************************************************************/ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type) { acpi_status status = AE_OK; struct acpi_gpe_event_info *gpe_event_info; acpi_cpu_flags flags; ACPI_FUNCTION_TRACE(acpi_disable_gpe); /* Parameter validation */ if (!gpe_type || (gpe_type & ~ACPI_GPE_TYPE_WAKE_RUN)) { return_ACPI_STATUS(AE_BAD_PARAMETER); } flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); /* Ensure that we have a valid GPE number */ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); if (!gpe_event_info) { status = AE_BAD_PARAMETER; goto unlock_and_exit; } /* Hardware-disable a runtime GPE on removal of the last reference */ if (gpe_type & ACPI_GPE_TYPE_RUNTIME) { if (!gpe_event_info->runtime_count) { status = AE_LIMIT; /* There are no references to remove */ goto unlock_and_exit; } gpe_event_info->runtime_count--; if (!gpe_event_info->runtime_count) { status = acpi_ev_update_gpe_enable_masks(gpe_event_info); if (ACPI_SUCCESS(status)) { status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); } if (ACPI_FAILURE(status)) { gpe_event_info->runtime_count++; goto unlock_and_exit; } } } /* * Update masks for wake GPE on removal of the last reference. * No need to hardware-disable wake GPEs here, they are not currently * enabled. 
*/ if (gpe_type & ACPI_GPE_TYPE_WAKE) { if (!gpe_event_info->wakeup_count) { status = AE_LIMIT; /* There are no references to remove */ goto unlock_and_exit; } gpe_event_info->wakeup_count--; if (!gpe_event_info->wakeup_count) { status = acpi_ev_update_gpe_enable_masks(gpe_event_info); } } unlock_and_exit: acpi_os_release_lock(acpi_gbl_gpe_lock, flags); return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_disable_gpe) /******************************************************************************* * * FUNCTION: acpi_disable_event * * PARAMETERS: Event - The fixed eventto be enabled * Flags - Reserved * * RETURN: Status * * DESCRIPTION: Disable an ACPI event (fixed) * ******************************************************************************/ acpi_status acpi_disable_event(u32 event, u32 flags) { acpi_status status = AE_OK; u32 value; ACPI_FUNCTION_TRACE(acpi_disable_event); /* Decode the Fixed Event */ if (event > ACPI_EVENT_MAX) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* * Disable the requested fixed event (by writing a zero to the enable * register bit) */ status = acpi_write_bit_register(acpi_gbl_fixed_event_info[event]. enable_register_id, ACPI_DISABLE_EVENT); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } status = acpi_read_bit_register(acpi_gbl_fixed_event_info[event]. 
enable_register_id, &value); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } if (value != 0) { ACPI_ERROR((AE_INFO, "Could not disable %s events", acpi_ut_get_event_name(event))); return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE); } return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_disable_event) /******************************************************************************* * * FUNCTION: acpi_clear_event * * PARAMETERS: Event - The fixed event to be cleared * * RETURN: Status * * DESCRIPTION: Clear an ACPI event (fixed) * ******************************************************************************/ acpi_status acpi_clear_event(u32 event) { acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(acpi_clear_event); /* Decode the Fixed Event */ if (event > ACPI_EVENT_MAX) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* * Clear the requested fixed event (By writing a one to the status * register bit) */ status = acpi_write_bit_register(acpi_gbl_fixed_event_info[event]. status_register_id, ACPI_CLEAR_STATUS); return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_clear_event) /******************************************************************************* * * FUNCTION: acpi_clear_gpe * * PARAMETERS: gpe_device - Parent GPE Device. 
NULL for GPE0/GPE1 * gpe_number - GPE level within the GPE block * * RETURN: Status * * DESCRIPTION: Clear an ACPI event (general purpose) * ******************************************************************************/ acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number) { acpi_status status = AE_OK; struct acpi_gpe_event_info *gpe_event_info; acpi_cpu_flags flags; ACPI_FUNCTION_TRACE(acpi_clear_gpe); flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); /* Ensure that we have a valid GPE number */ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); if (!gpe_event_info) { status = AE_BAD_PARAMETER; goto unlock_and_exit; } status = acpi_hw_clear_gpe(gpe_event_info); unlock_and_exit: acpi_os_release_lock(acpi_gbl_gpe_lock, flags); return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_clear_gpe) /******************************************************************************* * * FUNCTION: acpi_get_event_status * * PARAMETERS: Event - The fixed event * event_status - Where the current status of the event will * be returned * * RETURN: Status * * DESCRIPTION: Obtains and returns the current status of the event * ******************************************************************************/ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status) { acpi_status status = AE_OK; u32 value; ACPI_FUNCTION_TRACE(acpi_get_event_status); if (!event_status) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* Decode the Fixed Event */ if (event > ACPI_EVENT_MAX) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* Get the status of the requested fixed event */ status = acpi_read_bit_register(acpi_gbl_fixed_event_info[event]. enable_register_id, &value); if (ACPI_FAILURE(status)) return_ACPI_STATUS(status); *event_status = value; status = acpi_read_bit_register(acpi_gbl_fixed_event_info[event]. 
status_register_id, &value); if (ACPI_FAILURE(status)) return_ACPI_STATUS(status); if (value) *event_status |= ACPI_EVENT_FLAG_SET; if (acpi_gbl_fixed_event_handlers[event].handler) *event_status |= ACPI_EVENT_FLAG_HANDLE; return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_get_event_status) /******************************************************************************* * * FUNCTION: acpi_get_gpe_status * * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 * gpe_number - GPE level within the GPE block * event_status - Where the current status of the event will * be returned * * RETURN: Status * * DESCRIPTION: Get status of an event (general purpose) * ******************************************************************************/ acpi_status acpi_get_gpe_status(acpi_handle gpe_device, u32 gpe_number, acpi_event_status *event_status) { acpi_status status = AE_OK; struct acpi_gpe_event_info *gpe_event_info; acpi_cpu_flags flags; ACPI_FUNCTION_TRACE(acpi_get_gpe_status); flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); /* Ensure that we have a valid GPE number */ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); if (!gpe_event_info) { status = AE_BAD_PARAMETER; goto unlock_and_exit; } /* Obtain status on the requested GPE number */ status = acpi_hw_get_gpe_status(gpe_event_info, event_status); if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) *event_status |= ACPI_EVENT_FLAG_HANDLE; unlock_and_exit: acpi_os_release_lock(acpi_gbl_gpe_lock, flags); return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_get_gpe_status) /******************************************************************************* * * FUNCTION: acpi_install_gpe_block * * PARAMETERS: gpe_device - Handle to the parent GPE Block Device * gpe_block_address - Address and space_iD * register_count - Number of GPE register pairs in the block * interrupt_number - H/W interrupt for the block * * RETURN: Status * * DESCRIPTION: Create and Install a block of GPE registers * 
******************************************************************************/ acpi_status acpi_install_gpe_block(acpi_handle gpe_device, struct acpi_generic_address *gpe_block_address, u32 register_count, u32 interrupt_number) { acpi_status status; union acpi_operand_object *obj_desc; struct acpi_namespace_node *node; struct acpi_gpe_block_info *gpe_block; ACPI_FUNCTION_TRACE(acpi_install_gpe_block); if ((!gpe_device) || (!gpe_block_address) || (!register_count)) { return_ACPI_STATUS(AE_BAD_PARAMETER); } status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { return (status); } node = acpi_ns_validate_handle(gpe_device); if (!node) { status = AE_BAD_PARAMETER; goto unlock_and_exit; } /* * For user-installed GPE Block Devices, the gpe_block_base_number * is always zero */ status = acpi_ev_create_gpe_block(node, gpe_block_address, register_count, 0, interrupt_number, &gpe_block); if (ACPI_FAILURE(status)) { goto unlock_and_exit; } /* Install block in the device_object attached to the node */ obj_desc = acpi_ns_get_attached_object(node); if (!obj_desc) { /* * No object, create a new one (Device nodes do not always have * an attached object) */ obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE); if (!obj_desc) { status = AE_NO_MEMORY; goto unlock_and_exit; } status = acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE); /* Remove local reference to the object */ acpi_ut_remove_reference(obj_desc); if (ACPI_FAILURE(status)) { goto unlock_and_exit; } } /* Now install the GPE block in the device_object */ obj_desc->device.gpe_block = gpe_block; /* Run the _PRW methods and enable the runtime GPEs in the new block */ status = acpi_ev_initialize_gpe_block(node, gpe_block); unlock_and_exit: (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_install_gpe_block) /******************************************************************************* * * FUNCTION: acpi_remove_gpe_block * * 
PARAMETERS: gpe_device - Handle to the parent GPE Block Device * * RETURN: Status * * DESCRIPTION: Remove a previously installed block of GPE registers * ******************************************************************************/ acpi_status acpi_remove_gpe_block(acpi_handle gpe_device) { union acpi_operand_object *obj_desc; acpi_status status; struct acpi_namespace_node *node; ACPI_FUNCTION_TRACE(acpi_remove_gpe_block); if (!gpe_device) { return_ACPI_STATUS(AE_BAD_PARAMETER); } status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { return (status); } node = acpi_ns_validate_handle(gpe_device); if (!node) { status = AE_BAD_PARAMETER; goto unlock_and_exit; } /* Get the device_object attached to the node */ obj_desc = acpi_ns_get_attached_object(node); if (!obj_desc || !obj_desc->device.gpe_block) { return_ACPI_STATUS(AE_NULL_OBJECT); } /* Delete the GPE block (but not the device_object) */ status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block); if (ACPI_SUCCESS(status)) { obj_desc->device.gpe_block = NULL; } unlock_and_exit: (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block) /******************************************************************************* * * FUNCTION: acpi_get_gpe_device * * PARAMETERS: Index - System GPE index (0-current_gpe_count) * gpe_device - Where the parent GPE Device is returned * * RETURN: Status * * DESCRIPTION: Obtain the GPE device associated with the input index. A NULL * gpe device indicates that the gpe number is contained in one of * the FADT-defined gpe blocks. Otherwise, the GPE block device. 
* ******************************************************************************/ acpi_status acpi_get_gpe_device(u32 index, acpi_handle *gpe_device) { struct acpi_gpe_device_info info; acpi_status status; ACPI_FUNCTION_TRACE(acpi_get_gpe_device); if (!gpe_device) { return_ACPI_STATUS(AE_BAD_PARAMETER); } if (index >= acpi_current_gpe_count) { return_ACPI_STATUS(AE_NOT_EXIST); } /* Setup and walk the GPE list */ info.index = index; info.status = AE_NOT_EXIST; info.gpe_device = NULL; info.next_block_base_index = 0; status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } *gpe_device = info.gpe_device; return_ACPI_STATUS(info.status); } ACPI_EXPORT_SYMBOL(acpi_get_gpe_device) /******************************************************************************* * * FUNCTION: acpi_ev_get_gpe_device * * PARAMETERS: GPE_WALK_CALLBACK * * RETURN: Status * * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE * block device. NULL if the GPE is one of the FADT-defined GPEs. * ******************************************************************************/ static acpi_status acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, struct acpi_gpe_block_info *gpe_block, void *context) { struct acpi_gpe_device_info *info = context; /* Increment Index by the number of GPEs in this block */ info->next_block_base_index += gpe_block->gpe_count; if (info->index < info->next_block_base_index) { /* * The GPE index is within this block, get the node. 
Leave the node * NULL for the FADT-defined GPEs */ if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) { info->gpe_device = gpe_block->node; } info->status = AE_OK; return (AE_CTRL_END); } return (AE_OK); } /****************************************************************************** * * FUNCTION: acpi_disable_all_gpes * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Disable and clear all GPEs in all GPE blocks * ******************************************************************************/ acpi_status acpi_disable_all_gpes(void) { acpi_status status; ACPI_FUNCTION_TRACE(acpi_disable_all_gpes); status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } status = acpi_hw_disable_all_gpes(); (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); return_ACPI_STATUS(status); } /****************************************************************************** * * FUNCTION: acpi_enable_all_runtime_gpes * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks * ******************************************************************************/ acpi_status acpi_enable_all_runtime_gpes(void) { acpi_status status; ACPI_FUNCTION_TRACE(acpi_enable_all_runtime_gpes); status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } status = acpi_hw_enable_all_runtime_gpes(); (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); return_ACPI_STATUS(status); }
gpl-2.0
kerneldevs/fusX-univa-kernel
drivers/net/wireless/adm8211.c
757
55683
/* * Linux device driver for ADMtek ADM8211 (IEEE 802.11b MAC/BBP) * * Copyright (c) 2003, Jouni Malinen <j@w1.fi> * Copyright (c) 2004-2007, Michael Wu <flamingice@sourmilk.net> * Some parts copyright (c) 2003 by David Young <dyoung@pobox.com> * and used with permission. * * Much thanks to Infineon-ADMtek for their support of this driver. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. See README and COPYING for * more details. */ #include <linux/init.h> #include <linux/if.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/etherdevice.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/crc32.h> #include <linux/eeprom_93cx6.h> #include <net/mac80211.h> #include "adm8211.h" MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>"); MODULE_AUTHOR("Jouni Malinen <j@w1.fi>"); MODULE_DESCRIPTION("Driver for IEEE 802.11b wireless cards based on ADMtek ADM8211"); MODULE_SUPPORTED_DEVICE("ADM8211"); MODULE_LICENSE("GPL"); static unsigned int tx_ring_size __read_mostly = 16; static unsigned int rx_ring_size __read_mostly = 16; module_param(tx_ring_size, uint, 0); module_param(rx_ring_size, uint, 0); static DEFINE_PCI_DEVICE_TABLE(adm8211_pci_id_table) = { /* ADMtek ADM8211 */ { PCI_DEVICE(0x10B7, 0x6000) }, /* 3Com 3CRSHPW796 */ { PCI_DEVICE(0x1200, 0x8201) }, /* ? */ { PCI_DEVICE(0x1317, 0x8201) }, /* ADM8211A */ { PCI_DEVICE(0x1317, 0x8211) }, /* ADM8211B/C */ { 0 } }; static struct ieee80211_rate adm8211_rates[] = { { .bitrate = 10, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 220, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, /* XX ?? 
*/ }; static const struct ieee80211_channel adm8211_channels[] = { { .center_freq = 2412}, { .center_freq = 2417}, { .center_freq = 2422}, { .center_freq = 2427}, { .center_freq = 2432}, { .center_freq = 2437}, { .center_freq = 2442}, { .center_freq = 2447}, { .center_freq = 2452}, { .center_freq = 2457}, { .center_freq = 2462}, { .center_freq = 2467}, { .center_freq = 2472}, { .center_freq = 2484}, }; static void adm8211_eeprom_register_read(struct eeprom_93cx6 *eeprom) { struct adm8211_priv *priv = eeprom->data; u32 reg = ADM8211_CSR_READ(SPR); eeprom->reg_data_in = reg & ADM8211_SPR_SDI; eeprom->reg_data_out = reg & ADM8211_SPR_SDO; eeprom->reg_data_clock = reg & ADM8211_SPR_SCLK; eeprom->reg_chip_select = reg & ADM8211_SPR_SCS; } static void adm8211_eeprom_register_write(struct eeprom_93cx6 *eeprom) { struct adm8211_priv *priv = eeprom->data; u32 reg = 0x4000 | ADM8211_SPR_SRS; if (eeprom->reg_data_in) reg |= ADM8211_SPR_SDI; if (eeprom->reg_data_out) reg |= ADM8211_SPR_SDO; if (eeprom->reg_data_clock) reg |= ADM8211_SPR_SCLK; if (eeprom->reg_chip_select) reg |= ADM8211_SPR_SCS; ADM8211_CSR_WRITE(SPR, reg); ADM8211_CSR_READ(SPR); /* eeprom_delay */ } static int adm8211_read_eeprom(struct ieee80211_hw *dev) { struct adm8211_priv *priv = dev->priv; unsigned int words, i; struct ieee80211_chan_range chan_range; u16 cr49; struct eeprom_93cx6 eeprom = { .data = priv, .register_read = adm8211_eeprom_register_read, .register_write = adm8211_eeprom_register_write }; if (ADM8211_CSR_READ(CSR_TEST0) & ADM8211_CSR_TEST0_EPTYP) { /* 256 * 16-bit = 512 bytes */ eeprom.width = PCI_EEPROM_WIDTH_93C66; words = 256; } else { /* 64 * 16-bit = 128 bytes */ eeprom.width = PCI_EEPROM_WIDTH_93C46; words = 64; } priv->eeprom_len = words * 2; priv->eeprom = kmalloc(priv->eeprom_len, GFP_KERNEL); if (!priv->eeprom) return -ENOMEM; eeprom_93cx6_multiread(&eeprom, 0, (__le16 *)priv->eeprom, words); cr49 = le16_to_cpu(priv->eeprom->cr49); priv->rf_type = (cr49 >> 3) & 0x7; switch 
(priv->rf_type) { case ADM8211_TYPE_INTERSIL: case ADM8211_TYPE_RFMD: case ADM8211_TYPE_MARVEL: case ADM8211_TYPE_AIROHA: case ADM8211_TYPE_ADMTEK: break; default: if (priv->pdev->revision < ADM8211_REV_CA) priv->rf_type = ADM8211_TYPE_RFMD; else priv->rf_type = ADM8211_TYPE_AIROHA; printk(KERN_WARNING "%s (adm8211): Unknown RFtype %d\n", pci_name(priv->pdev), (cr49 >> 3) & 0x7); } priv->bbp_type = cr49 & 0x7; switch (priv->bbp_type) { case ADM8211_TYPE_INTERSIL: case ADM8211_TYPE_RFMD: case ADM8211_TYPE_MARVEL: case ADM8211_TYPE_AIROHA: case ADM8211_TYPE_ADMTEK: break; default: if (priv->pdev->revision < ADM8211_REV_CA) priv->bbp_type = ADM8211_TYPE_RFMD; else priv->bbp_type = ADM8211_TYPE_ADMTEK; printk(KERN_WARNING "%s (adm8211): Unknown BBPtype: %d\n", pci_name(priv->pdev), cr49 >> 3); } if (priv->eeprom->country_code >= ARRAY_SIZE(cranges)) { printk(KERN_WARNING "%s (adm8211): Invalid country code (%d)\n", pci_name(priv->pdev), priv->eeprom->country_code); chan_range = cranges[2]; } else chan_range = cranges[priv->eeprom->country_code]; printk(KERN_DEBUG "%s (adm8211): Channel range: %d - %d\n", pci_name(priv->pdev), (int)chan_range.min, (int)chan_range.max); BUILD_BUG_ON(sizeof(priv->channels) != sizeof(adm8211_channels)); memcpy(priv->channels, adm8211_channels, sizeof(priv->channels)); priv->band.channels = priv->channels; priv->band.n_channels = ARRAY_SIZE(adm8211_channels); priv->band.bitrates = adm8211_rates; priv->band.n_bitrates = ARRAY_SIZE(adm8211_rates); for (i = 1; i <= ARRAY_SIZE(adm8211_channels); i++) if (i < chan_range.min || i > chan_range.max) priv->channels[i - 1].flags |= IEEE80211_CHAN_DISABLED; switch (priv->eeprom->specific_bbptype) { case ADM8211_BBP_RFMD3000: case ADM8211_BBP_RFMD3002: case ADM8211_BBP_ADM8011: priv->specific_bbptype = priv->eeprom->specific_bbptype; break; default: if (priv->pdev->revision < ADM8211_REV_CA) priv->specific_bbptype = ADM8211_BBP_RFMD3000; else priv->specific_bbptype = ADM8211_BBP_ADM8011; 
/* NOTE(review): the code below up to the first "return 0; }" is the tail of
 * the EEPROM/RF-type probing helper whose beginning lies above this chunk;
 * it is reproduced unchanged. */
		printk(KERN_WARNING "%s (adm8211): Unknown specific BBP: %d\n",
		       pci_name(priv->pdev), priv->eeprom->specific_bbptype);
	}

	/* Select the transceiver (synthesizer) type; when the EEPROM value is
	 * not one of the known codes, fall back on a guess keyed off the PCI
	 * revision of the chip. */
	switch (priv->eeprom->specific_rftype) {
	case ADM8211_RFMD2948:
	case ADM8211_RFMD2958:
	case ADM8211_RFMD2958_RF3000_CONTROL_POWER:
	case ADM8211_MAX2820:
	case ADM8211_AL2210L:
		priv->transceiver_type = priv->eeprom->specific_rftype;
		break;

	default:
		if (priv->pdev->revision == ADM8211_REV_BA)
			priv->transceiver_type = ADM8211_RFMD2958_RF3000_CONTROL_POWER;
		else if (priv->pdev->revision == ADM8211_REV_CA)
			priv->transceiver_type = ADM8211_AL2210L;
		else if (priv->pdev->revision == ADM8211_REV_AB)
			priv->transceiver_type = ADM8211_RFMD2948;

		printk(KERN_WARNING "%s (adm8211): Unknown transceiver: %d\n",
		       pci_name(priv->pdev), priv->eeprom->specific_rftype);
		break;
	}

	printk(KERN_DEBUG "%s (adm8211): RFtype=%d BBPtype=%d Specific BBP=%d "
	       "Transceiver=%d\n", pci_name(priv->pdev), priv->rf_type,
	       priv->bbp_type, priv->specific_bbptype, priv->transceiver_type);

	return 0;
}

/* Write one word into the chip's on-board SRAM (WEP key / rate tables).
 * Pre-BA revisions address 16-bit words; BA and later address 32-bit words
 * and additionally need the WEPTABLE select bit set in WEPCTL. */
static inline void adm8211_write_sram(struct ieee80211_hw *dev,
				      u32 addr, u32 data)
{
	struct adm8211_priv *priv = dev->priv;

	/* latch the SRAM address together with the table-write strobe */
	ADM8211_CSR_WRITE(WEPCTL, addr | ADM8211_WEPCTL_TABLE_WR |
			  (priv->pdev->revision < ADM8211_REV_BA ?
			   0 : ADM8211_WEPCTL_SEL_WEPTABLE ));
	ADM8211_CSR_READ(WEPCTL);	/* read back to flush the posted write */
	msleep(1);

	ADM8211_CSR_WRITE(WESK, data);
	ADM8211_CSR_READ(WESK);		/* flush */
	msleep(1);
}

/* Copy a byte buffer into SRAM, packing bytes little-endian into the
 * per-revision word size.  The caller's WEPCTL contents are restored
 * afterwards.
 * NOTE(review): this assumes @len is a multiple of 2 (pre-BA) or 4 (BA+);
 * an odd length would read past the end of @buf — confirm all callers
 * pass padded buffers. */
static void adm8211_write_sram_bytes(struct ieee80211_hw *dev,
				     unsigned int addr, u8 *buf,
				     unsigned int len)
{
	struct adm8211_priv *priv = dev->priv;
	u32 reg = ADM8211_CSR_READ(WEPCTL);	/* saved and restored below */
	unsigned int i;

	if (priv->pdev->revision < ADM8211_REV_BA) {
		/* 16-bit SRAM words */
		for (i = 0; i < len; i += 2) {
			u16 val = buf[i] | (buf[i + 1] << 8);
			adm8211_write_sram(dev, addr + i / 2, val);
		}
	} else {
		/* 32-bit SRAM words */
		for (i = 0; i < len; i += 4) {
			u32 val = (buf[i + 0] << 0 ) |
				  (buf[i + 1] << 8 ) |
				  (buf[i + 2] << 16) |
				  (buf[i + 3] << 24);
			adm8211_write_sram(dev, addr + i / 4, val);
		}
	}

	ADM8211_CSR_WRITE(WEPCTL, reg);
}

/* Zero the entire on-board SRAM, preserving the caller's WEPCTL value. */
static void adm8211_clear_sram(struct ieee80211_hw *dev)
{
	struct adm8211_priv *priv = dev->priv;
	u32 reg = ADM8211_CSR_READ(WEPCTL);
	unsigned int addr;

	for (addr = 0; addr < ADM8211_SRAM_SIZE; addr++)
		adm8211_write_sram(dev, addr, 0);

	ADM8211_CSR_WRITE(WEPCTL, reg);
}

/* mac80211 get_stats callback: hand back a snapshot of the driver's
 * accumulated low-level statistics. */
static int adm8211_get_stats(struct ieee80211_hw *dev,
			     struct ieee80211_low_level_stats *stats)
{
	struct adm8211_priv *priv = dev->priv;

	memcpy(stats, &priv->stats, sizeof(*stats));

	return 0;
}

/* TX-complete interrupt: reap finished descriptors from the TX ring,
 * unmap and hand each skb back to mac80211 with its TX status.
 * Runs in hard-IRQ context under priv->lock. */
static void adm8211_interrupt_tci(struct ieee80211_hw *dev)
{
	struct adm8211_priv *priv = dev->priv;
	unsigned int dirty_tx;

	spin_lock(&priv->lock);
	for (dirty_tx = priv->dirty_tx; priv->cur_tx - dirty_tx; dirty_tx++) {
		unsigned int entry = dirty_tx % priv->tx_ring_size;
		u32 status = le32_to_cpu(priv->tx_ring[entry].status);
		struct ieee80211_tx_info *txi;
		struct adm8211_tx_ring_info *info;
		struct sk_buff *skb;

		/* stop at the first descriptor the hardware still owns or
		 * has not finished transmitting */
		if (status & TDES0_CONTROL_OWN ||
		    !(status & TDES0_CONTROL_DONE))
			break;

		info = &priv->tx_buffers[entry];
		skb = info->skb;
		txi = IEEE80211_SKB_CB(skb);

		/* TODO: check TDES0_STATUS_TUF and TDES0_STATUS_TRO */

		pci_unmap_single(priv->pdev, info->mapping,
				 info->skb->len, PCI_DMA_TODEVICE);

		ieee80211_tx_info_clear_status(txi);

		/* strip the hardware TX header and restore the 802.11
		 * header that adm8211_tx() stashed in skb->cb */
		skb_pull(skb, sizeof(struct adm8211_tx_hdr));
		memcpy(skb_push(skb, info->hdrlen), skb->cb, info->hdrlen);
		if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) &&
		    !(status & TDES0_STATUS_ES))
			txi->flags |= IEEE80211_TX_STAT_ACK;

		ieee80211_tx_status_irqsafe(dev, skb);

		info->skb = NULL;
	}

	/* re-open the queue once enough ring slots are free again */
	if (priv->cur_tx - dirty_tx < priv->tx_ring_size - 2)
		ieee80211_wake_queue(dev, 0);

	priv->dirty_tx = dirty_tx;
	spin_unlock(&priv->lock);
}

/* RX-complete interrupt: walk the RX ring while descriptors are CPU-owned,
 * copy small frames (< RX_COPY_BREAK) into a fresh skb, or swap the full
 * buffer out for large ones, then pass the frame to mac80211.
 * Bounded by one full trip around the ring per invocation. */
static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
{
	struct adm8211_priv *priv = dev->priv;
	unsigned int entry = priv->cur_rx % priv->rx_ring_size;
	u32 status;
	unsigned int pktlen;
	struct sk_buff *skb, *newskb;
	unsigned int limit = priv->rx_ring_size;
	u8 rssi, rate;

	while (!(priv->rx_ring[entry].status & cpu_to_le32(RDES0_STATUS_OWN))) {
		if (!limit--)
			break;

		status = le32_to_cpu(priv->rx_ring[entry].status);
		rate = (status & RDES0_STATUS_RXDR) >> 12;
		rssi = le32_to_cpu(priv->rx_ring[entry].length) &
			RDES1_STATUS_RSSI;

		pktlen = status & RDES0_STATUS_FL;
		if (pktlen > RX_PKT_SIZE) {
			if (net_ratelimit())
				printk(KERN_DEBUG "%s: frame too long (%d)\n",
				       wiphy_name(dev->wiphy), pktlen);
			pktlen = RX_PKT_SIZE;
		}

		if (!priv->soft_rx_crc && status & RDES0_STATUS_ES) {
			skb = NULL; /* old buffer will be reused */
			/* TODO: update RX error stats */
			/* TODO: check RDES0_STATUS_CRC*E */
		} else if (pktlen < RX_COPY_BREAK) {
			/* small frame: copy out, keep the DMA buffer */
			skb = dev_alloc_skb(pktlen);
			if (skb) {
				pci_dma_sync_single_for_cpu(
					priv->pdev,
					priv->rx_buffers[entry].mapping,
					pktlen, PCI_DMA_FROMDEVICE);
				memcpy(skb_put(skb, pktlen),
				       skb_tail_pointer(priv->rx_buffers[entry].skb),
				       pktlen);
				pci_dma_sync_single_for_device(
					priv->pdev,
					priv->rx_buffers[entry].mapping,
					RX_PKT_SIZE, PCI_DMA_FROMDEVICE);
			}
		} else {
			/* large frame: hand the DMA buffer up and replace it */
			newskb = dev_alloc_skb(RX_PKT_SIZE);
			if (newskb) {
				skb = priv->rx_buffers[entry].skb;
				skb_put(skb, pktlen);
				pci_unmap_single(
					priv->pdev,
					priv->rx_buffers[entry].mapping,
					RX_PKT_SIZE, PCI_DMA_FROMDEVICE);
				priv->rx_buffers[entry].skb = newskb;
				priv->rx_buffers[entry].mapping =
					pci_map_single(priv->pdev,
						       skb_tail_pointer(newskb),
						       RX_PKT_SIZE,
						       PCI_DMA_FROMDEVICE);
			} else {
				skb = NULL;
				/* TODO: update rx dropped stats */
			}

			priv->rx_ring[entry].buffer1 =
				cpu_to_le32(priv->rx_buffers[entry].mapping);
		}

		/* give the descriptor back to the hardware */
		priv->rx_ring[entry].status = cpu_to_le32(RDES0_STATUS_OWN |
							  RDES0_STATUS_SQL);
		priv->rx_ring[entry].length =
			cpu_to_le32(RX_PKT_SIZE |
				    (entry == priv->rx_ring_size - 1 ?
				     RDES1_CONTROL_RER : 0));

		if (skb) {
			struct ieee80211_rx_status rx_status = {0};

			/* CA and later report an inverted signal scale */
			if (priv->pdev->revision < ADM8211_REV_CA)
				rx_status.signal = rssi;
			else
				rx_status.signal = 100 - rssi;

			rx_status.rate_idx = rate;

			rx_status.freq = adm8211_channels[priv->channel - 1].center_freq;
			rx_status.band = IEEE80211_BAND_2GHZ;

			memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
			ieee80211_rx_irqsafe(dev, skb);
		}

		entry = (++priv->cur_rx) % priv->rx_ring_size;
	}

	/* TODO: check LPC and update stats? */
}

/* Shared IRQ handler: ack all pending status bits up front, dispatch the
 * RX/TX completion work, and log the remaining (unhandled) conditions. */
static irqreturn_t adm8211_interrupt(int irq, void *dev_id)
{
#define ADM8211_INT(x)						\
do {								\
	if (unlikely(stsr & ADM8211_STSR_ ## x))		\
		printk(KERN_DEBUG "%s: " #x "\n",		\
		       wiphy_name(dev->wiphy));			\
} while (0)

	struct ieee80211_hw *dev = dev_id;
	struct adm8211_priv *priv = dev->priv;
	u32 stsr = ADM8211_CSR_READ(STSR);
	ADM8211_CSR_WRITE(STSR, stsr);	/* ack everything we just read */

	/* all-ones read: device gone (e.g. hot-unplug) */
	if (stsr == 0xffffffff)
		return IRQ_HANDLED;

	/* neither normal nor abnormal summary bit set: not our interrupt */
	if (!(stsr & (ADM8211_STSR_NISS | ADM8211_STSR_AISS)))
		return IRQ_HANDLED;

	if (stsr & ADM8211_STSR_RCI)
		adm8211_interrupt_rci(dev);
	if (stsr & ADM8211_STSR_TCI)
		adm8211_interrupt_tci(dev);

	ADM8211_INT(PCF);
	ADM8211_INT(BCNTC);
	ADM8211_INT(GPINT);
	ADM8211_INT(ATIMTC);
	ADM8211_INT(TSFTF);
	ADM8211_INT(TSCZ);
	ADM8211_INT(SQL);
	ADM8211_INT(WEPTD);
	ADM8211_INT(ATIME);
	ADM8211_INT(TEIS);
	ADM8211_INT(FBE);
	ADM8211_INT(REIS);
	ADM8211_INT(GPTT);
	ADM8211_INT(RPS);
	ADM8211_INT(RDU);
	ADM8211_INT(TUF);
	ADM8211_INT(TPS);

	return IRQ_HANDLED;

#undef ADM8211_INT
}

/* Template for the four bit-banged synthesizer write routines.  Each
 * expansion shifts (value,addr) into a single bit-buffer and clocks it
 * out MSB-first over the SYNRF serial interface. */
#define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev,	\
					   u16 addr, u32 value) {	\
	struct adm8211_priv *priv = dev->priv;				\
	unsigned int i;							\
	u32 reg, \
bitbuf;	\
									\
	value &= v_mask;						\
	addr &= a_mask;							\
	bitbuf = (value << v_shift) | (addr << a_shift);		\
									\
	ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1);		\
	ADM8211_CSR_READ(SYNRF);					\
	ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0);		\
	ADM8211_CSR_READ(SYNRF);					\
									\
	if (prewrite) {							\
		ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
		ADM8211_CSR_READ(SYNRF);				\
	}								\
									\
	for (i = 0; i <= bits; i++) {					\
		if (bitbuf & (1 << (bits - i)))				\
			reg = ADM8211_SYNRF_WRITE_SYNDATA_1;		\
		else							\
			reg = ADM8211_SYNRF_WRITE_SYNDATA_0;		\
									\
		ADM8211_CSR_WRITE(SYNRF, reg);				\
		ADM8211_CSR_READ(SYNRF);				\
									\
		ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
		ADM8211_CSR_READ(SYNRF);				\
		ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
		ADM8211_CSR_READ(SYNRF);				\
	}								\
									\
	if (postwrite == 1) {						\
		ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
		ADM8211_CSR_READ(SYNRF);				\
	}								\
	if (postwrite == 2) {						\
		ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
		ADM8211_CSR_READ(SYNRF);				\
	}								\
									\
	ADM8211_CSR_WRITE(SYNRF, 0);					\
	ADM8211_CSR_READ(SYNRF);					\
}

/* Per-transceiver serial-write helpers; arguments are
 * (value mask, value shift, addr mask, addr shift, bit count,
 *  prewrite pulse, postwrite select). */
WRITE_SYN(max2820,  0x00FFF, 0, 0x0F, 12, 15, 1, 1)
WRITE_SYN(al2210l,  0xFFFFF, 4, 0x0F, 0,  23, 1, 1)
WRITE_SYN(rfmd2958, 0x3FFFF, 0, 0x1F, 18, 23, 0, 1)
WRITE_SYN(rfmd2948, 0x0FFFF, 4, 0x0F, 0,  21, 0, 2)

#undef WRITE_SYN

/* Write one baseband-processor register over the BBPCTL interface.
 * Polls for the interface to go idle before and after the write.
 * Returns 0 on success, -ETIMEDOUT if the interface stays busy. */
static int adm8211_write_bbp(struct ieee80211_hw *dev, u8 addr, u8 data)
{
	struct adm8211_priv *priv = dev->priv;
	unsigned int timeout;
	u32 reg;

	/* wait for any previous BBP read/write to finish (up to ~20 ms) */
	timeout = 10;
	while (timeout > 0) {
		reg = ADM8211_CSR_READ(BBPCTL);
		if (!(reg & (ADM8211_BBPCTL_WR | ADM8211_BBPCTL_RD)))
			break;
		timeout--;
		msleep(2);
	}

	if (timeout == 0) {
		printk(KERN_DEBUG "%s: adm8211_write_bbp(%d,%d) failed"
		       " prewrite (reg=0x%08x)\n",
		       wiphy_name(dev->wiphy), addr, data, reg);
		return -ETIMEDOUT;
	}

	/* select the BBP interface mode/address bits for this BBP type.
	 * NOTE(review): there is no default case — for any bbp_type other
	 * than the three below (e.g. MARVEL/AIROHA, which the probe code
	 * accepts), the stale BBPCTL value read above is reused as the base
	 * of the write.  Verify whether those types should be rejected here. */
	switch (priv->bbp_type) {
	case ADM8211_TYPE_INTERSIL:
		reg = ADM8211_BBPCTL_MMISEL;	/* three wire interface */
		break;
	case ADM8211_TYPE_RFMD:
		reg = (0x20 << 24) | ADM8211_BBPCTL_TXCE | ADM8211_BBPCTL_CCAP |
		      (0x01 << 18);
		break;
	case ADM8211_TYPE_ADMTEK:
		reg = (0x20 << 24) | ADM8211_BBPCTL_TXCE | ADM8211_BBPCTL_CCAP |
		      (0x05 << 18);
		break;
	}

	reg |= ADM8211_BBPCTL_WR | (addr << 8) | data;
	ADM8211_CSR_WRITE(BBPCTL, reg);

	/* wait for the write strobe to clear */
	timeout = 10;
	while (timeout > 0) {
		reg = ADM8211_CSR_READ(BBPCTL);
		if (!(reg & ADM8211_BBPCTL_WR))
			break;
		timeout--;
		msleep(2);
	}

	if (timeout == 0) {
		ADM8211_CSR_WRITE(BBPCTL, ADM8211_CSR_READ(BBPCTL) &
				  ~ADM8211_BBPCTL_WR);
		printk(KERN_DEBUG "%s: adm8211_write_bbp(%d,%d) failed"
		       " postwrite (reg=0x%08x)\n",
		       wiphy_name(dev->wiphy), addr, data, reg);
		return -ETIMEDOUT;
	}

	return 0;
}

/* Tune the RF synthesizer and baseband to 2.4 GHz channel @chan (1-14),
 * applying per-channel power/calibration values from the EEPROM unless
 * the user overrode them.  Always returns 0. */
static int adm8211_rf_set_channel(struct ieee80211_hw *dev, unsigned int chan)
{
	/* per-channel RFMD2958 synthesizer register values (channels 1-14) */
	static const u32 adm8211_rfmd2958_reg5[] =
		{0x22BD, 0x22D2, 0x22E8, 0x22FE, 0x2314, 0x232A, 0x2340,
		 0x2355, 0x236B, 0x2381, 0x2397, 0x23AD, 0x23C2, 0x23F7};
	static const u32 adm8211_rfmd2958_reg6[] =
		{0x05D17, 0x3A2E8, 0x2E8BA, 0x22E8B, 0x1745D, 0x0BA2E, 0x00000,
		 0x345D1, 0x28BA2, 0x1D174, 0x11745, 0x05D17, 0x3A2E8, 0x11745};

	struct adm8211_priv *priv = dev->priv;
	/* values > 0x3F / == 0xFF mean "not overridden": use the EEPROM's
	 * per-channel calibration instead */
	u8 ant_power = priv->ant_power > 0x3F ?
		priv->eeprom->antenna_power[chan - 1] : priv->ant_power;
	u8 tx_power = priv->tx_power > 0x3F ?
		priv->eeprom->tx_power[chan - 1] : priv->tx_power;
	u8 lpf_cutoff = priv->lpf_cutoff == 0xFF ?
		priv->eeprom->lpf_cutoff[chan - 1] : priv->lpf_cutoff;
	u8 lnags_thresh = priv->lnags_threshold == 0xFF ?
		priv->eeprom->lnags_threshold[chan - 1] : priv->lnags_threshold;
	u32 reg;

	ADM8211_IDLE();

	/* Program synthesizer to new channel */
	switch (priv->transceiver_type) {
	case ADM8211_RFMD2958:
	case ADM8211_RFMD2958_RF3000_CONTROL_POWER:
		adm8211_rf_write_syn_rfmd2958(dev, 0x00, 0x04007);
		adm8211_rf_write_syn_rfmd2958(dev, 0x02, 0x00033);

		adm8211_rf_write_syn_rfmd2958(dev, 0x05,
			adm8211_rfmd2958_reg5[chan - 1]);
		adm8211_rf_write_syn_rfmd2958(dev, 0x06,
			adm8211_rfmd2958_reg6[chan - 1]);
		break;

	case ADM8211_RFMD2948:
		adm8211_rf_write_syn_rfmd2948(dev, SI4126_MAIN_CONF,
					      SI4126_MAIN_XINDIV2);
		adm8211_rf_write_syn_rfmd2948(dev, SI4126_POWERDOWN,
					      SI4126_POWERDOWN_PDIB |
					      SI4126_POWERDOWN_PDRB);
		adm8211_rf_write_syn_rfmd2948(dev, SI4126_PHASE_DET_GAIN, 0);
		adm8211_rf_write_syn_rfmd2948(dev, SI4126_RF2_N_DIV,
					      (chan == 14 ?
					       2110 : (2033 + (chan * 5))));
		adm8211_rf_write_syn_rfmd2948(dev, SI4126_IF_N_DIV, 1496);
		adm8211_rf_write_syn_rfmd2948(dev, SI4126_RF2_R_DIV, 44);
		adm8211_rf_write_syn_rfmd2948(dev, SI4126_IF_R_DIV, 44);
		break;

	case ADM8211_MAX2820:
		adm8211_rf_write_syn_max2820(dev, 0x3,
			(chan == 14 ? 0x054 : (0x7 + (chan * 5))));
		break;

	case ADM8211_AL2210L:
		adm8211_rf_write_syn_al2210l(dev, 0x0,
			(chan == 14 ? 0x229B4 : (0x22967 + (chan * 5))));
		break;

	default:
		printk(KERN_DEBUG "%s: unsupported transceiver type %d\n",
		       wiphy_name(dev->wiphy), priv->transceiver_type);
		break;
	}

	/* write BBP regs */
	if (priv->bbp_type == ADM8211_TYPE_RFMD) {

	/* SMC 2635W specific? adm8211b doesn't use the 2948 though.. */
	/* TODO: remove if SMC 2635W doesn't need this */
		if (priv->transceiver_type == ADM8211_RFMD2948) {
			reg = ADM8211_CSR_READ(GPIO);
			reg &= 0xfffc0000;
			reg |= ADM8211_CSR_GPIO_EN0;
			if (chan != 14)
				reg |= ADM8211_CSR_GPIO_O0;
			ADM8211_CSR_WRITE(GPIO, reg);
		}

		if (priv->transceiver_type == ADM8211_RFMD2958) {
			/* set PCNT2 */
			adm8211_rf_write_syn_rfmd2958(dev, 0x0B, 0x07100);
			/* set PCNT1 P_DESIRED/MID_BIAS */
			reg = le16_to_cpu(priv->eeprom->cr49);
			reg >>= 13;
			reg <<= 15;
			reg |= ant_power << 9;
			adm8211_rf_write_syn_rfmd2958(dev, 0x0A, reg);
			/* set TXRX TX_GAIN */
			adm8211_rf_write_syn_rfmd2958(dev, 0x09, 0x00050 |
				(priv->pdev->revision < ADM8211_REV_CA ?
				 tx_power : 0));
		} else {
			reg = ADM8211_CSR_READ(PLCPHD);
			reg &= 0xff00ffff;
			reg |= tx_power << 18;
			ADM8211_CSR_WRITE(PLCPHD, reg);
		}

		/* release the BBP from reset and let it settle */
		ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_SELRF |
				  ADM8211_SYNRF_PE1 | ADM8211_SYNRF_PHYRST);
		ADM8211_CSR_READ(SYNRF);
		msleep(30);

		/* RF3000 BBP */
		if (priv->transceiver_type != ADM8211_RFMD2958)
			adm8211_write_bbp(dev, RF3000_TX_VAR_GAIN__TX_LEN_EXT,
					  tx_power<<2);
		adm8211_write_bbp(dev, RF3000_LOW_GAIN_CALIB, lpf_cutoff);
		adm8211_write_bbp(dev, RF3000_HIGH_GAIN_CALIB, lnags_thresh);
		adm8211_write_bbp(dev, 0x1c, priv->pdev->revision == ADM8211_REV_BA ?
					     priv->eeprom->cr28 : 0);
		adm8211_write_bbp(dev, 0x1d, priv->eeprom->cr29);

		ADM8211_CSR_WRITE(SYNRF, 0);

		/* Nothing to do for ADMtek BBP */
	} else if (priv->bbp_type != ADM8211_TYPE_ADMTEK)
		printk(KERN_DEBUG "%s: unsupported BBP type %d\n",
		       wiphy_name(dev->wiphy), priv->bbp_type);

	ADM8211_RESTORE();

	/* update current channel for adhoc (and maybe AP mode) */
	reg = ADM8211_CSR_READ(CAP0);
	reg &= ~0xF;
	reg |= chan;
	ADM8211_CSR_WRITE(CAP0, reg);

	return 0;
}

/* Reprogram the NAR (network access register) bits for the current
 * interface mode (STA / ad-hoc / monitor) and decide whether RX CRC
 * errors must be checked in software. */
static void adm8211_update_mode(struct ieee80211_hw *dev)
{
	struct adm8211_priv *priv = dev->priv;

	ADM8211_IDLE();

	priv->soft_rx_crc = 0;
	switch (priv->mode) {
	case NL80211_IFTYPE_STATION:
		priv->nar &= ~(ADM8211_NAR_PR | ADM8211_NAR_EA);
		priv->nar |= ADM8211_NAR_ST | ADM8211_NAR_SR;
		break;
	case NL80211_IFTYPE_ADHOC:
		priv->nar &= ~ADM8211_NAR_PR;
		priv->nar |= ADM8211_NAR_EA | ADM8211_NAR_ST | ADM8211_NAR_SR;

		/* don't trust the error bits on rev 0x20 and up in adhoc */
		if (priv->pdev->revision >= ADM8211_REV_BA)
			priv->soft_rx_crc = 1;
		break;
	case NL80211_IFTYPE_MONITOR:
		priv->nar &= ~(ADM8211_NAR_EA | ADM8211_NAR_ST);
		priv->nar |= ADM8211_NAR_PR | ADM8211_NAR_SR;
		break;
	}

	ADM8211_RESTORE();
}

/* One-time power-on initialization of the RF synthesizer, per
 * transceiver type.  Register values come from the vendor driver. */
static void adm8211_hw_init_syn(struct ieee80211_hw *dev)
{
	struct adm8211_priv *priv = dev->priv;

	switch (priv->transceiver_type) {
	case ADM8211_RFMD2958:
	case ADM8211_RFMD2958_RF3000_CONTROL_POWER:
		/* comments taken from ADMtek vendor driver */

		/* Reset RF2958 after power on */
		adm8211_rf_write_syn_rfmd2958(dev, 0x1F, 0x00000);
		/* Initialize RF VCO Core Bias to maximum */
		adm8211_rf_write_syn_rfmd2958(dev, 0x0C, 0x3001F);
		/* Initialize IF PLL */
		adm8211_rf_write_syn_rfmd2958(dev, 0x01, 0x29C03);
		/* Initialize IF PLL Coarse Tuning */
		adm8211_rf_write_syn_rfmd2958(dev, 0x03, 0x1FF6F);
		/* Initialize RF PLL */
		adm8211_rf_write_syn_rfmd2958(dev, 0x04, 0x29403);
		/* Initialize RF PLL Coarse Tuning */
		adm8211_rf_write_syn_rfmd2958(dev, 0x07, 0x1456F);
		/* Initialize TX gain and filter BW (R9) */
		adm8211_rf_write_syn_rfmd2958(dev, 0x09,
			(priv->transceiver_type == ADM8211_RFMD2958 ?
			 0x10050 : 0x00050));
		/* Initialize CAL register */
		adm8211_rf_write_syn_rfmd2958(dev, 0x08, 0x3FFF8);
		break;

	case ADM8211_MAX2820:
		adm8211_rf_write_syn_max2820(dev, 0x1, 0x01E);
		adm8211_rf_write_syn_max2820(dev, 0x2, 0x001);
		adm8211_rf_write_syn_max2820(dev, 0x3, 0x054);
		adm8211_rf_write_syn_max2820(dev, 0x4, 0x310);
		adm8211_rf_write_syn_max2820(dev, 0x5, 0x000);
		break;

	case ADM8211_AL2210L:
		adm8211_rf_write_syn_al2210l(dev, 0x0, 0x0196C);
		adm8211_rf_write_syn_al2210l(dev, 0x1, 0x007CB);
		adm8211_rf_write_syn_al2210l(dev, 0x2, 0x3582F);
		adm8211_rf_write_syn_al2210l(dev, 0x3, 0x010A9);
		adm8211_rf_write_syn_al2210l(dev, 0x4, 0x77280);
		adm8211_rf_write_syn_al2210l(dev, 0x5, 0x45641);
		adm8211_rf_write_syn_al2210l(dev, 0x6, 0xEA130);
		adm8211_rf_write_syn_al2210l(dev, 0x7, 0x80000);
		adm8211_rf_write_syn_al2210l(dev, 0x8, 0x7850F);
		adm8211_rf_write_syn_al2210l(dev, 0x9, 0xF900C);
		adm8211_rf_write_syn_al2210l(dev, 0xA, 0x00000);
		adm8211_rf_write_syn_al2210l(dev, 0xB, 0x00000);
		break;

	case ADM8211_RFMD2948:
	default:
		/* RFMD2948 needs no power-on init here */
		break;
	}
}

/* One-time initialization of the baseband processor: program the MMI
 * address registers for the detected BBP, run the synthesizer init, then
 * load the per-transceiver BBP register tables (vendor-provided values).
 * Always returns 0. */
static int adm8211_hw_init_bbp(struct ieee80211_hw *dev)
{
	struct adm8211_priv *priv = dev->priv;
	u32 reg;

	/* write addresses */
	if (priv->bbp_type == ADM8211_TYPE_INTERSIL) {
		ADM8211_CSR_WRITE(MMIWA,  0x100E0C0A);
		ADM8211_CSR_WRITE(MMIRD0, 0x00007C7E);
		ADM8211_CSR_WRITE(MMIRD1, 0x00100000);
	} else if (priv->bbp_type == ADM8211_TYPE_RFMD ||
		   priv->bbp_type == ADM8211_TYPE_ADMTEK) {
		/* check specific BBP type */
		switch (priv->specific_bbptype) {
		case ADM8211_BBP_RFMD3000:
		case ADM8211_BBP_RFMD3002:
			ADM8211_CSR_WRITE(MMIWA,  0x00009101);
			ADM8211_CSR_WRITE(MMIRD0, 0x00000301);
			break;

		case ADM8211_BBP_ADM8011:
			ADM8211_CSR_WRITE(MMIWA,  0x00008903);
			ADM8211_CSR_WRITE(MMIRD0, 0x00001716);

			reg = ADM8211_CSR_READ(BBPCTL);
			reg &= ~ADM8211_BBPCTL_TYPE;
			reg |= 0x5 << 18;
			ADM8211_CSR_WRITE(BBPCTL, reg);
			break;
		}

		/* revision-specific quirks */
		switch (priv->pdev->revision) {
		case ADM8211_REV_CA:
			if (priv->transceiver_type == ADM8211_RFMD2958 ||
			    priv->transceiver_type == ADM8211_RFMD2958_RF3000_CONTROL_POWER ||
			    priv->transceiver_type == ADM8211_RFMD2948)
				ADM8211_CSR_WRITE(SYNCTL, 0x1 << 22);
			else if (priv->transceiver_type == ADM8211_MAX2820 ||
				 priv->transceiver_type == ADM8211_AL2210L)
				ADM8211_CSR_WRITE(SYNCTL, 0x3 << 22);
			break;

		case ADM8211_REV_BA:
			reg = ADM8211_CSR_READ(MMIRD1);
			reg &= 0x0000FFFF;
			reg |= 0x7e100000;
			ADM8211_CSR_WRITE(MMIRD1, reg);
			break;

		case ADM8211_REV_AB:
		case ADM8211_REV_AF:
		default:
			ADM8211_CSR_WRITE(MMIRD1, 0x7e100000);
			break;
		}

		/* For RFMD */
		ADM8211_CSR_WRITE(MACTEST, 0x800);
	}

	adm8211_hw_init_syn(dev);

	/* Set RF Power control IF pin to PE1+PHYRST# */
	ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_SELRF | ADM8211_SYNRF_PE1 |
			  ADM8211_SYNRF_PHYRST);
	ADM8211_CSR_READ(SYNRF);
	msleep(20);

	/* write BBP regs */
	if (priv->bbp_type == ADM8211_TYPE_RFMD) {
		/* RF3000 BBP */
		/* another set:
		 * 11: c8
		 * 14: 14
		 * 15: 50 (chan 1..13; chan 14: d0)
		 * 1c: 00
		 * 1d: 84
		 */
		adm8211_write_bbp(dev, RF3000_CCA_CTRL, 0x80);
		/* antenna selection: diversity */
		adm8211_write_bbp(dev, RF3000_DIVERSITY__RSSI, 0x80);
		adm8211_write_bbp(dev, RF3000_TX_VAR_GAIN__TX_LEN_EXT, 0x74);
		adm8211_write_bbp(dev, RF3000_LOW_GAIN_CALIB, 0x38);
		adm8211_write_bbp(dev, RF3000_HIGH_GAIN_CALIB, 0x40);

		if (priv->eeprom->major_version < 2) {
			adm8211_write_bbp(dev, 0x1c, 0x00);
			adm8211_write_bbp(dev, 0x1d, 0x80);
		} else {
			if (priv->pdev->revision == ADM8211_REV_BA)
				adm8211_write_bbp(dev, 0x1c, priv->eeprom->cr28);
			else
				adm8211_write_bbp(dev, 0x1c, 0x00);

			adm8211_write_bbp(dev, 0x1d, priv->eeprom->cr29);
		}
	} else if (priv->bbp_type == ADM8211_TYPE_ADMTEK) {
		/* reset baseband */
		adm8211_write_bbp(dev, 0x00, 0xFF);
		/* antenna selection: diversity */
		adm8211_write_bbp(dev, 0x07, 0x0A);

		/* TODO: find documentation for this */
		switch (priv->transceiver_type) {
		case ADM8211_RFMD2958:
		case ADM8211_RFMD2958_RF3000_CONTROL_POWER:
			adm8211_write_bbp(dev, 0x00, 0x00);
			adm8211_write_bbp(dev, 0x01, 0x00);
			adm8211_write_bbp(dev, 0x02, 0x00);
			adm8211_write_bbp(dev, 0x03, 0x00);
			adm8211_write_bbp(dev, 0x06, 0x0f);
			adm8211_write_bbp(dev, 0x09, 0x00);
			adm8211_write_bbp(dev, 0x0a, 0x00);
			adm8211_write_bbp(dev, 0x0b, 0x00);
			adm8211_write_bbp(dev, 0x0c, 0x00);
			adm8211_write_bbp(dev, 0x0f, 0xAA);
			adm8211_write_bbp(dev, 0x10, 0x8c);
			adm8211_write_bbp(dev, 0x11, 0x43);
			adm8211_write_bbp(dev, 0x18, 0x40);
			adm8211_write_bbp(dev, 0x20, 0x23);
			adm8211_write_bbp(dev, 0x21, 0x02);
			adm8211_write_bbp(dev, 0x22, 0x28);
			adm8211_write_bbp(dev, 0x23, 0x30);
			adm8211_write_bbp(dev, 0x24, 0x2d);
			adm8211_write_bbp(dev, 0x28, 0x35);
			adm8211_write_bbp(dev, 0x2a, 0x8c);
			adm8211_write_bbp(dev, 0x2b, 0x81);
			adm8211_write_bbp(dev, 0x2c, 0x44);
			adm8211_write_bbp(dev, 0x2d, 0x0A);
			adm8211_write_bbp(dev, 0x29, 0x40);
			adm8211_write_bbp(dev, 0x60, 0x08);
			adm8211_write_bbp(dev, 0x64, 0x01);
			break;

		case ADM8211_MAX2820:
			adm8211_write_bbp(dev, 0x00, 0x00);
			adm8211_write_bbp(dev, 0x01, 0x00);
			adm8211_write_bbp(dev, 0x02, 0x00);
			adm8211_write_bbp(dev, 0x03, 0x00);
			adm8211_write_bbp(dev, 0x06, 0x0f);
			adm8211_write_bbp(dev, 0x09, 0x05);
			adm8211_write_bbp(dev, 0x0a, 0x02);
			adm8211_write_bbp(dev, 0x0b, 0x00);
			adm8211_write_bbp(dev, 0x0c, 0x0f);
			adm8211_write_bbp(dev, 0x0f, 0x55);
			adm8211_write_bbp(dev, 0x10, 0x8d);
			adm8211_write_bbp(dev, 0x11, 0x43);
			adm8211_write_bbp(dev, 0x18, 0x4a);
			adm8211_write_bbp(dev, 0x20, 0x20);
			adm8211_write_bbp(dev, 0x21, 0x02);
			adm8211_write_bbp(dev, 0x22, 0x23);
			adm8211_write_bbp(dev, 0x23, 0x30);
			adm8211_write_bbp(dev, 0x24, 0x2d);
			adm8211_write_bbp(dev, 0x2a, 0x8c);
			adm8211_write_bbp(dev, 0x2b, 0x81);
			adm8211_write_bbp(dev, 0x2c, 0x44);
			adm8211_write_bbp(dev, 0x29, 0x4a);
			adm8211_write_bbp(dev, 0x60, 0x2b);
			adm8211_write_bbp(dev, 0x64, 0x01);
			break;

		case ADM8211_AL2210L:
			adm8211_write_bbp(dev, 0x00, 0x00);
			adm8211_write_bbp(dev, 0x01, 0x00);
			adm8211_write_bbp(dev, 0x02, 0x00);
			adm8211_write_bbp(dev, 0x03, 0x00);
			adm8211_write_bbp(dev, 0x06, 0x0f);
			adm8211_write_bbp(dev, 0x07, 0x05);
			adm8211_write_bbp(dev, 0x08, 0x03);
			adm8211_write_bbp(dev, 0x09, 0x00);
			adm8211_write_bbp(dev, 0x0a, 0x00);
			adm8211_write_bbp(dev, 0x0b, 0x00);
			adm8211_write_bbp(dev, 0x0c, 0x10);
			adm8211_write_bbp(dev, 0x0f, 0x55);
			adm8211_write_bbp(dev, 0x10, 0x8d);
			adm8211_write_bbp(dev, 0x11, 0x43);
			adm8211_write_bbp(dev, 0x18, 0x4a);
			adm8211_write_bbp(dev, 0x20, 0x20);
			adm8211_write_bbp(dev, 0x21, 0x02);
			adm8211_write_bbp(dev, 0x22, 0x23);
			adm8211_write_bbp(dev, 0x23, 0x30);
			adm8211_write_bbp(dev, 0x24, 0x2d);
			adm8211_write_bbp(dev, 0x2a, 0xaa);
			adm8211_write_bbp(dev, 0x2b, 0x81);
			adm8211_write_bbp(dev, 0x2c, 0x44);
			adm8211_write_bbp(dev, 0x29, 0xfa);
			adm8211_write_bbp(dev, 0x60, 0x2d);
			adm8211_write_bbp(dev, 0x64, 0x01);
			break;

		case ADM8211_RFMD2948:
			break;

		default:
			printk(KERN_DEBUG "%s: unsupported transceiver %d\n",
			       wiphy_name(dev->wiphy), priv->transceiver_type);
			break;
		}
	} else
		printk(KERN_DEBUG "%s: unsupported BBP %d\n",
		       wiphy_name(dev->wiphy), priv->bbp_type);

	ADM8211_CSR_WRITE(SYNRF, 0);

	/* Set RF CAL control source to MAC control */
	reg = ADM8211_CSR_READ(SYNCTL);
	reg |= ADM8211_SYNCTL_SELCAL;
	ADM8211_CSR_WRITE(SYNCTL, reg);

	return 0;
}

/* configures hw beacons/probe responses */
/* Load the supported-rates table into SRAM and program the PLCP header
 * and TX limit registers.  Rev BA uses a fixed 4-rate table to work
 * around a hardware bug.  Always returns 0. */
static int adm8211_set_rate(struct ieee80211_hw *dev)
{
	struct adm8211_priv *priv = dev->priv;
	u32 reg;
	int i = 0;
	u8 rate_buf[12] = {0};

	/* write supported rates */
	if (priv->pdev->revision != ADM8211_REV_BA) {
		rate_buf[0] = ARRAY_SIZE(adm8211_rates);
		for (i = 0; i < ARRAY_SIZE(adm8211_rates); i++)
			rate_buf[i + 1] = (adm8211_rates[i].bitrate / 5) | 0x80;
	} else {
		/* workaround for rev BA specific bug */
		rate_buf[0] = 0x04;
		rate_buf[1] = 0x82;
		rate_buf[2] = 0x04;
		rate_buf[3] = 0x0b;
		rate_buf[4] = 0x16;
	}

	adm8211_write_sram_bytes(dev, ADM8211_SRAM_SUPP_RATE, rate_buf,
				 ARRAY_SIZE(adm8211_rates) + 1);

	reg = ADM8211_CSR_READ(PLCPHD) & 0x00FFFFFF;	/* keep bits 0-23 */
	reg |= 1 << 15;					/* short preamble */
	reg |= 110 << 24;
	ADM8211_CSR_WRITE(PLCPHD, reg);

	/* MTMLT = 512 TU (max TX MSDU lifetime)
	 * BCNTSIG = plcp_signal
(beacon, probe resp, and atim TX rate) * SRTYLIM = 224 (short retry limit, TX header value is default) */ ADM8211_CSR_WRITE(TXLMT, (512 << 16) | (110 << 8) | (224 << 0)); return 0; } static void adm8211_hw_init(struct ieee80211_hw *dev) { struct adm8211_priv *priv = dev->priv; u32 reg; u8 cline; reg = ADM8211_CSR_READ(PAR); reg |= ADM8211_PAR_MRLE | ADM8211_PAR_MRME; reg &= ~(ADM8211_PAR_BAR | ADM8211_PAR_CAL); if (!pci_set_mwi(priv->pdev)) { reg |= 0x1 << 24; pci_read_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, &cline); switch (cline) { case 0x8: reg |= (0x1 << 14); break; case 0x16: reg |= (0x2 << 14); break; case 0x32: reg |= (0x3 << 14); break; default: reg |= (0x0 << 14); break; } } ADM8211_CSR_WRITE(PAR, reg); reg = ADM8211_CSR_READ(CSR_TEST1); reg &= ~(0xF << 28); reg |= (1 << 28) | (1 << 31); ADM8211_CSR_WRITE(CSR_TEST1, reg); /* lose link after 4 lost beacons */ reg = (0x04 << 21) | ADM8211_WCSR_TSFTWE | ADM8211_WCSR_LSOE; ADM8211_CSR_WRITE(WCSR, reg); /* Disable APM, enable receive FIFO threshold, and set drain receive * threshold to store-and-forward */ reg = ADM8211_CSR_READ(CMDR); reg &= ~(ADM8211_CMDR_APM | ADM8211_CMDR_DRT); reg |= ADM8211_CMDR_RTE | ADM8211_CMDR_DRT_SF; ADM8211_CSR_WRITE(CMDR, reg); adm8211_set_rate(dev); /* 4-bit values: * PWR1UP = 8 * 2 ms * PWR0PAPE = 8 us or 5 us * PWR1PAPE = 1 us or 3 us * PWR0TRSW = 5 us * PWR1TRSW = 12 us * PWR0PE2 = 13 us * PWR1PE2 = 1 us * PWR0TXPE = 8 or 6 */ if (priv->pdev->revision < ADM8211_REV_CA) ADM8211_CSR_WRITE(TOFS2, 0x8815cd18); else ADM8211_CSR_WRITE(TOFS2, 0x8535cd16); /* Enable store and forward for transmit */ priv->nar = ADM8211_NAR_SF | ADM8211_NAR_PB; ADM8211_CSR_WRITE(NAR, priv->nar); /* Reset RF */ ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_RADIO); ADM8211_CSR_READ(SYNRF); msleep(10); ADM8211_CSR_WRITE(SYNRF, 0); ADM8211_CSR_READ(SYNRF); msleep(5); /* Set CFP Max Duration to 0x10 TU */ reg = ADM8211_CSR_READ(CFPP); reg &= ~(0xffff << 8); reg |= 0x0010 << 8; ADM8211_CSR_WRITE(CFPP, reg); /* 
USCNT = 0x16 (number of system clocks, 22 MHz, in 1us * TUCNT = 0x3ff - Tu counter 1024 us */ ADM8211_CSR_WRITE(TOFS0, (0x16 << 24) | 0x3ff); /* SLOT=20 us, SIFS=110 cycles of 22 MHz (5 us), * DIFS=50 us, EIFS=100 us */ if (priv->pdev->revision < ADM8211_REV_CA) ADM8211_CSR_WRITE(IFST, (20 << 23) | (110 << 15) | (50 << 9) | 100); else ADM8211_CSR_WRITE(IFST, (20 << 23) | (24 << 15) | (50 << 9) | 100); /* PCNT = 1 (MAC idle time awake/sleep, unit S) * RMRD = 2346 * 8 + 1 us (max RX duration) */ ADM8211_CSR_WRITE(RMD, (1 << 16) | 18769); /* MART=65535 us, MIRT=256 us, TSFTOFST=0 us */ ADM8211_CSR_WRITE(RSPT, 0xffffff00); /* Initialize BBP (and SYN) */ adm8211_hw_init_bbp(dev); /* make sure interrupts are off */ ADM8211_CSR_WRITE(IER, 0); /* ACK interrupts */ ADM8211_CSR_WRITE(STSR, ADM8211_CSR_READ(STSR)); /* Setup WEP (turns it off for now) */ reg = ADM8211_CSR_READ(MACTEST); reg &= ~(7 << 20); ADM8211_CSR_WRITE(MACTEST, reg); reg = ADM8211_CSR_READ(WEPCTL); reg &= ~ADM8211_WEPCTL_WEPENABLE; reg |= ADM8211_WEPCTL_WEPRXBYP; ADM8211_CSR_WRITE(WEPCTL, reg); /* Clear the missed-packet counter. 
*/ ADM8211_CSR_READ(LPC); } static int adm8211_hw_reset(struct ieee80211_hw *dev) { struct adm8211_priv *priv = dev->priv; u32 reg, tmp; int timeout = 100; /* Power-on issue */ /* TODO: check if this is necessary */ ADM8211_CSR_WRITE(FRCTL, 0); /* Reset the chip */ tmp = ADM8211_CSR_READ(PAR); ADM8211_CSR_WRITE(PAR, ADM8211_PAR_SWR); while ((ADM8211_CSR_READ(PAR) & ADM8211_PAR_SWR) && timeout--) msleep(50); if (timeout <= 0) return -ETIMEDOUT; ADM8211_CSR_WRITE(PAR, tmp); if (priv->pdev->revision == ADM8211_REV_BA && (priv->transceiver_type == ADM8211_RFMD2958_RF3000_CONTROL_POWER || priv->transceiver_type == ADM8211_RFMD2958)) { reg = ADM8211_CSR_READ(CSR_TEST1); reg |= (1 << 4) | (1 << 5); ADM8211_CSR_WRITE(CSR_TEST1, reg); } else if (priv->pdev->revision == ADM8211_REV_CA) { reg = ADM8211_CSR_READ(CSR_TEST1); reg &= ~((1 << 4) | (1 << 5)); ADM8211_CSR_WRITE(CSR_TEST1, reg); } ADM8211_CSR_WRITE(FRCTL, 0); reg = ADM8211_CSR_READ(CSR_TEST0); reg |= ADM8211_CSR_TEST0_EPRLD; /* EEPROM Recall */ ADM8211_CSR_WRITE(CSR_TEST0, reg); adm8211_clear_sram(dev); return 0; } static u64 adm8211_get_tsft(struct ieee80211_hw *dev) { struct adm8211_priv *priv = dev->priv; u32 tsftl; u64 tsft; tsftl = ADM8211_CSR_READ(TSFTL); tsft = ADM8211_CSR_READ(TSFTH); tsft <<= 32; tsft |= tsftl; return tsft; } static void adm8211_set_interval(struct ieee80211_hw *dev, unsigned short bi, unsigned short li) { struct adm8211_priv *priv = dev->priv; u32 reg; /* BP (beacon interval) = data->beacon_interval * LI (listen interval) = data->listen_interval (in beacon intervals) */ reg = (bi << 16) | li; ADM8211_CSR_WRITE(BPLI, reg); } static void adm8211_set_bssid(struct ieee80211_hw *dev, const u8 *bssid) { struct adm8211_priv *priv = dev->priv; u32 reg; ADM8211_CSR_WRITE(BSSID0, le32_to_cpu(*(__le32 *)bssid)); reg = ADM8211_CSR_READ(ABDA1); reg &= 0x0000ffff; reg |= (bssid[4] << 16) | (bssid[5] << 24); ADM8211_CSR_WRITE(ABDA1, reg); } static int adm8211_config(struct ieee80211_hw *dev, u32 changed) 
{
	/* mac80211 config callback: currently only reacts to channel
	 * changes, retuning the RF when the operating channel moves. */
	struct adm8211_priv *priv = dev->priv;
	struct ieee80211_conf *conf = &dev->conf;
	int channel =
		ieee80211_frequency_to_channel(conf->channel->center_freq);

	if (channel != priv->channel) {
		priv->channel = channel;
		adm8211_rf_set_channel(dev, priv->channel);
	}

	return 0;
}

/* mac80211 bss_info_changed callback: only the BSSID is acted upon;
 * reprogram the hardware filter when it actually changes. */
static void adm8211_bss_info_changed(struct ieee80211_hw *dev,
				     struct ieee80211_vif *vif,
				     struct ieee80211_bss_conf *conf,
				     u32 changes)
{
	struct adm8211_priv *priv = dev->priv;

	if (!(changes & BSS_CHANGED_BSSID))
		return;

	if (memcmp(conf->bssid, priv->bssid, ETH_ALEN)) {
		adm8211_set_bssid(dev, conf->bssid);
		memcpy(priv->bssid, conf->bssid, ETH_ALEN);
	}
}

/* Build the 64-bit multicast hash filter: each address selects one bit
 * via the top 6 bits of its CRC-32, matching the MAR0/MAR1 layout. */
static u64 adm8211_prepare_multicast(struct ieee80211_hw *hw,
				     struct netdev_hw_addr_list *mc_list)
{
	unsigned int bit_nr;
	u32 mc_filter[2];
	struct netdev_hw_addr *ha;

	mc_filter[1] = mc_filter[0] = 0;

	netdev_hw_addr_list_for_each(ha, mc_list) {
		bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

		bit_nr &= 0x3F;
		mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
	}

	return mc_filter[0] | ((u64)(mc_filter[1]) << 32);
}

/* mac80211 configure_filter callback: translate FIF_* filter flags into
 * NAR promiscuous/multicast bits and the MAR0/MAR1 hash registers, and
 * switch BSSID matching off for beacon/probe-response promiscuity. */
static void adm8211_configure_filter(struct ieee80211_hw *dev,
				     unsigned int changed_flags,
				     unsigned int *total_flags,
				     u64 multicast)
{
	static const u8 bcast[ETH_ALEN] =
		{ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	struct adm8211_priv *priv = dev->priv;
	unsigned int new_flags;
	u32 mc_filter[2];

	mc_filter[0] = multicast;
	mc_filter[1] = multicast >> 32;

	new_flags = 0;

	if (*total_flags & FIF_PROMISC_IN_BSS) {
		new_flags |= FIF_PROMISC_IN_BSS;
		priv->nar |= ADM8211_NAR_PR;
		priv->nar &= ~ADM8211_NAR_MM;
		mc_filter[1] = mc_filter[0] = ~0;
	} else if (*total_flags & FIF_ALLMULTI || multicast == ~(0ULL)) {
		new_flags |= FIF_ALLMULTI;
		priv->nar &= ~ADM8211_NAR_PR;
		priv->nar |= ADM8211_NAR_MM;
		mc_filter[1] = mc_filter[0] = ~0;
	} else {
		priv->nar &= ~(ADM8211_NAR_MM | ADM8211_NAR_PR);
	}

	ADM8211_IDLE_RX();

	ADM8211_CSR_WRITE(MAR0, mc_filter[0]);
	ADM8211_CSR_WRITE(MAR1, mc_filter[1]);
	ADM8211_CSR_READ(NAR);	/* flush */

	/* in promiscuous mode the hardware leaves the FCS on frames */
	if (priv->nar & ADM8211_NAR_PR)
		dev->flags |= IEEE80211_HW_RX_INCLUDES_FCS;
	else
		dev->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS;

	if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
		adm8211_set_bssid(dev, bcast);
	else
		adm8211_set_bssid(dev, priv->bssid);

	ADM8211_RESTORE();

	*total_flags = new_flags;
}

/* mac80211 add_interface callback: only a single STA interface is
 * supported (and only when currently idle/monitor); programs the MAC
 * address into PAR0/PAR1. */
static int adm8211_add_interface(struct ieee80211_hw *dev,
				 struct ieee80211_vif *vif)
{
	struct adm8211_priv *priv = dev->priv;
	if (priv->mode != NL80211_IFTYPE_MONITOR)
		return -EOPNOTSUPP;

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		priv->mode = vif->type;
		break;
	default:
		return -EOPNOTSUPP;
	}

	ADM8211_IDLE();

	ADM8211_CSR_WRITE(PAR0, le32_to_cpu(*(__le32 *)vif->addr));
	ADM8211_CSR_WRITE(PAR1, le16_to_cpu(*(__le16 *)(vif->addr + 4)));

	adm8211_update_mode(dev);

	ADM8211_RESTORE();

	return 0;
}

/* mac80211 remove_interface callback: drop back to monitor mode. */
static void adm8211_remove_interface(struct ieee80211_hw *dev,
				     struct ieee80211_vif *vif)
{
	struct adm8211_priv *priv = dev->priv;
	priv->mode = NL80211_IFTYPE_MONITOR;
}

/* Initialize the RX and TX descriptor rings, allocate and DMA-map one
 * RX skb per slot, and point the hardware at both ring bases. */
static int adm8211_init_rings(struct ieee80211_hw *dev)
{
	struct adm8211_priv *priv = dev->priv;
	struct adm8211_desc *desc = NULL;
	struct adm8211_rx_ring_info *rx_info;
	struct adm8211_tx_ring_info *tx_info;
	unsigned int i;

	for (i = 0; i < priv->rx_ring_size; i++) {
		desc = &priv->rx_ring[i];
		desc->status = 0;
		desc->length = cpu_to_le32(RX_PKT_SIZE);
		priv->rx_buffers[i].skb = NULL;
	}

	/* Mark the end of RX ring; hw returns to base address after this
	 * descriptor */
	desc->length |= cpu_to_le32(RDES1_CONTROL_RER);

	for (i = 0; i < priv->rx_ring_size; i++) {
		desc = &priv->rx_ring[i];
		rx_info = &priv->rx_buffers[i];

		rx_info->skb = dev_alloc_skb(RX_PKT_SIZE);
		/* NOTE(review): on allocation failure the loop stops but the
		 * function still returns 0, leaving the remaining slots
		 * without buffers (never handed to the hardware) — confirm
		 * callers tolerate a short ring. */
		if (rx_info->skb == NULL)
			break;
		rx_info->mapping = pci_map_single(priv->pdev,
						  skb_tail_pointer(rx_info->skb),
						  RX_PKT_SIZE,
						  PCI_DMA_FROMDEVICE);
		desc->buffer1 = cpu_to_le32(rx_info->mapping);
		desc->status = cpu_to_le32(RDES0_STATUS_OWN | RDES0_STATUS_SQL);
	}

	/* Setup TX ring.
TX buffers descriptors will be filled in as needed */ for (i = 0; i < priv->tx_ring_size; i++) { desc = &priv->tx_ring[i]; tx_info = &priv->tx_buffers[i]; tx_info->skb = NULL; tx_info->mapping = 0; desc->status = 0; } desc->length = cpu_to_le32(TDES1_CONTROL_TER); priv->cur_rx = priv->cur_tx = priv->dirty_tx = 0; ADM8211_CSR_WRITE(RDB, priv->rx_ring_dma); ADM8211_CSR_WRITE(TDBD, priv->tx_ring_dma); return 0; } static void adm8211_free_rings(struct ieee80211_hw *dev) { struct adm8211_priv *priv = dev->priv; unsigned int i; for (i = 0; i < priv->rx_ring_size; i++) { if (!priv->rx_buffers[i].skb) continue; pci_unmap_single( priv->pdev, priv->rx_buffers[i].mapping, RX_PKT_SIZE, PCI_DMA_FROMDEVICE); dev_kfree_skb(priv->rx_buffers[i].skb); } for (i = 0; i < priv->tx_ring_size; i++) { if (!priv->tx_buffers[i].skb) continue; pci_unmap_single(priv->pdev, priv->tx_buffers[i].mapping, priv->tx_buffers[i].skb->len, PCI_DMA_TODEVICE); dev_kfree_skb(priv->tx_buffers[i].skb); } } static int adm8211_start(struct ieee80211_hw *dev) { struct adm8211_priv *priv = dev->priv; int retval; /* Power up MAC and RF chips */ retval = adm8211_hw_reset(dev); if (retval) { printk(KERN_ERR "%s: hardware reset failed\n", wiphy_name(dev->wiphy)); goto fail; } retval = adm8211_init_rings(dev); if (retval) { printk(KERN_ERR "%s: failed to initialize rings\n", wiphy_name(dev->wiphy)); goto fail; } /* Init hardware */ adm8211_hw_init(dev); adm8211_rf_set_channel(dev, priv->channel); retval = request_irq(priv->pdev->irq, adm8211_interrupt, IRQF_SHARED, "adm8211", dev); if (retval) { printk(KERN_ERR "%s: failed to register IRQ handler\n", wiphy_name(dev->wiphy)); goto fail; } ADM8211_CSR_WRITE(IER, ADM8211_IER_NIE | ADM8211_IER_AIE | ADM8211_IER_RCIE | ADM8211_IER_TCIE | ADM8211_IER_TDUIE | ADM8211_IER_GPTIE); priv->mode = NL80211_IFTYPE_MONITOR; adm8211_update_mode(dev); ADM8211_CSR_WRITE(RDR, 0); adm8211_set_interval(dev, 100, 10); return 0; fail: return retval; } static void adm8211_stop(struct 
ieee80211_hw *dev) { struct adm8211_priv *priv = dev->priv; priv->mode = NL80211_IFTYPE_UNSPECIFIED; priv->nar = 0; ADM8211_CSR_WRITE(NAR, 0); ADM8211_CSR_WRITE(IER, 0); ADM8211_CSR_READ(NAR); free_irq(priv->pdev->irq, dev); adm8211_free_rings(dev); } static void adm8211_calc_durations(int *dur, int *plcp, size_t payload_len, int len, int plcp_signal, int short_preamble) { /* Alternative calculation from NetBSD: */ /* IEEE 802.11b durations for DSSS PHY in microseconds */ #define IEEE80211_DUR_DS_LONG_PREAMBLE 144 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48 #define IEEE80211_DUR_DS_SLOW_ACK 112 #define IEEE80211_DUR_DS_FAST_ACK 56 #define IEEE80211_DUR_DS_SLOW_CTS 112 #define IEEE80211_DUR_DS_FAST_CTS 56 #define IEEE80211_DUR_DS_SLOT 20 #define IEEE80211_DUR_DS_SIFS 10 int remainder; *dur = (80 * (24 + payload_len) + plcp_signal - 1) / plcp_signal; if (plcp_signal <= PLCP_SIGNAL_2M) /* 1-2Mbps WLAN: send ACK/CTS at 1Mbps */ *dur += 3 * (IEEE80211_DUR_DS_SIFS + IEEE80211_DUR_DS_SHORT_PREAMBLE + IEEE80211_DUR_DS_FAST_PLCPHDR) + IEEE80211_DUR_DS_SLOW_CTS + IEEE80211_DUR_DS_SLOW_ACK; else /* 5-11Mbps WLAN: send ACK/CTS at 2Mbps */ *dur += 3 * (IEEE80211_DUR_DS_SIFS + IEEE80211_DUR_DS_SHORT_PREAMBLE + IEEE80211_DUR_DS_FAST_PLCPHDR) + IEEE80211_DUR_DS_FAST_CTS + IEEE80211_DUR_DS_FAST_ACK; /* lengthen duration if long preamble */ if (!short_preamble) *dur += 3 * (IEEE80211_DUR_DS_LONG_PREAMBLE - IEEE80211_DUR_DS_SHORT_PREAMBLE) + 3 * (IEEE80211_DUR_DS_SLOW_PLCPHDR - IEEE80211_DUR_DS_FAST_PLCPHDR); *plcp = (80 * len) / plcp_signal; remainder = (80 * len) % plcp_signal; if (plcp_signal == PLCP_SIGNAL_11M && remainder <= 30 && remainder > 0) *plcp = (*plcp | 0x8000) + 1; else if (remainder) (*plcp)++; } /* Transmit skb w/adm8211_tx_hdr (802.11 header created by hardware) */ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb, u16 plcp_signal, size_t hdrlen) { struct 
adm8211_priv *priv = dev->priv; unsigned long flags; dma_addr_t mapping; unsigned int entry; u32 flag; mapping = pci_map_single(priv->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); spin_lock_irqsave(&priv->lock, flags); if (priv->cur_tx - priv->dirty_tx == priv->tx_ring_size / 2) flag = TDES1_CONTROL_IC | TDES1_CONTROL_LS | TDES1_CONTROL_FS; else flag = TDES1_CONTROL_LS | TDES1_CONTROL_FS; if (priv->cur_tx - priv->dirty_tx == priv->tx_ring_size - 2) ieee80211_stop_queue(dev, 0); entry = priv->cur_tx % priv->tx_ring_size; priv->tx_buffers[entry].skb = skb; priv->tx_buffers[entry].mapping = mapping; priv->tx_buffers[entry].hdrlen = hdrlen; priv->tx_ring[entry].buffer1 = cpu_to_le32(mapping); if (entry == priv->tx_ring_size - 1) flag |= TDES1_CONTROL_TER; priv->tx_ring[entry].length = cpu_to_le32(flag | skb->len); /* Set TX rate (SIGNAL field in PLCP PPDU format) */ flag = TDES0_CONTROL_OWN | (plcp_signal << 20) | 8 /* ? */; priv->tx_ring[entry].status = cpu_to_le32(flag); priv->cur_tx++; spin_unlock_irqrestore(&priv->lock, flags); /* Trigger transmit poll */ ADM8211_CSR_WRITE(TDR, 0); } /* Put adm8211_tx_hdr on skb and transmit */ static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb) { struct adm8211_tx_hdr *txhdr; size_t payload_len, hdrlen; int plcp, dur, len, plcp_signal, short_preamble; struct ieee80211_hdr *hdr; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_rate *txrate = ieee80211_get_tx_rate(dev, info); u8 rc_flags; rc_flags = info->control.rates[0].flags; short_preamble = !!(rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE); plcp_signal = txrate->bitrate; hdr = (struct ieee80211_hdr *)skb->data; hdrlen = ieee80211_hdrlen(hdr->frame_control); memcpy(skb->cb, skb->data, hdrlen); hdr = (struct ieee80211_hdr *)skb->cb; skb_pull(skb, hdrlen); payload_len = skb->len; txhdr = (struct adm8211_tx_hdr *) skb_push(skb, sizeof(*txhdr)); memset(txhdr, 0, sizeof(*txhdr)); memcpy(txhdr->da, ieee80211_get_DA(hdr), ETH_ALEN); 
txhdr->signal = plcp_signal; txhdr->frame_body_size = cpu_to_le16(payload_len); txhdr->frame_control = hdr->frame_control; len = hdrlen + payload_len + FCS_LEN; txhdr->frag = cpu_to_le16(0x0FFF); adm8211_calc_durations(&dur, &plcp, payload_len, len, plcp_signal, short_preamble); txhdr->plcp_frag_head_len = cpu_to_le16(plcp); txhdr->plcp_frag_tail_len = cpu_to_le16(plcp); txhdr->dur_frag_head = cpu_to_le16(dur); txhdr->dur_frag_tail = cpu_to_le16(dur); txhdr->header_control = cpu_to_le16(ADM8211_TXHDRCTL_ENABLE_EXTEND_HEADER); if (short_preamble) txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_SHORT_PREAMBLE); if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_ENABLE_RTS); txhdr->retry_limit = info->control.rates[0].count; adm8211_tx_raw(dev, skb, plcp_signal, hdrlen); return NETDEV_TX_OK; } static int adm8211_alloc_rings(struct ieee80211_hw *dev) { struct adm8211_priv *priv = dev->priv; unsigned int ring_size; priv->rx_buffers = kmalloc(sizeof(*priv->rx_buffers) * priv->rx_ring_size + sizeof(*priv->tx_buffers) * priv->tx_ring_size, GFP_KERNEL); if (!priv->rx_buffers) return -ENOMEM; priv->tx_buffers = (void *)priv->rx_buffers + sizeof(*priv->rx_buffers) * priv->rx_ring_size; /* Allocate TX/RX descriptors */ ring_size = sizeof(struct adm8211_desc) * priv->rx_ring_size + sizeof(struct adm8211_desc) * priv->tx_ring_size; priv->rx_ring = pci_alloc_consistent(priv->pdev, ring_size, &priv->rx_ring_dma); if (!priv->rx_ring) { kfree(priv->rx_buffers); priv->rx_buffers = NULL; priv->tx_buffers = NULL; return -ENOMEM; } priv->tx_ring = (struct adm8211_desc *)(priv->rx_ring + priv->rx_ring_size); priv->tx_ring_dma = priv->rx_ring_dma + sizeof(struct adm8211_desc) * priv->rx_ring_size; return 0; } static const struct ieee80211_ops adm8211_ops = { .tx = adm8211_tx, .start = adm8211_start, .stop = adm8211_stop, .add_interface = adm8211_add_interface, .remove_interface = adm8211_remove_interface, .config = adm8211_config, 
.bss_info_changed = adm8211_bss_info_changed, .prepare_multicast = adm8211_prepare_multicast, .configure_filter = adm8211_configure_filter, .get_stats = adm8211_get_stats, .get_tsf = adm8211_get_tsft }; static int __devinit adm8211_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct ieee80211_hw *dev; struct adm8211_priv *priv; unsigned long mem_addr, mem_len; unsigned int io_addr, io_len; int err; u32 reg; u8 perm_addr[ETH_ALEN]; err = pci_enable_device(pdev); if (err) { printk(KERN_ERR "%s (adm8211): Cannot enable new PCI device\n", pci_name(pdev)); return err; } io_addr = pci_resource_start(pdev, 0); io_len = pci_resource_len(pdev, 0); mem_addr = pci_resource_start(pdev, 1); mem_len = pci_resource_len(pdev, 1); if (io_len < 256 || mem_len < 1024) { printk(KERN_ERR "%s (adm8211): Too short PCI resources\n", pci_name(pdev)); goto err_disable_pdev; } /* check signature */ pci_read_config_dword(pdev, 0x80 /* CR32 */, &reg); if (reg != ADM8211_SIG1 && reg != ADM8211_SIG2) { printk(KERN_ERR "%s (adm8211): Invalid signature (0x%x)\n", pci_name(pdev), reg); goto err_disable_pdev; } err = pci_request_regions(pdev, "adm8211"); if (err) { printk(KERN_ERR "%s (adm8211): Cannot obtain PCI resources\n", pci_name(pdev)); return err; /* someone else grabbed it? 
don't disable it */ } if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { printk(KERN_ERR "%s (adm8211): No suitable DMA available\n", pci_name(pdev)); goto err_free_reg; } pci_set_master(pdev); dev = ieee80211_alloc_hw(sizeof(*priv), &adm8211_ops); if (!dev) { printk(KERN_ERR "%s (adm8211): ieee80211 alloc failed\n", pci_name(pdev)); err = -ENOMEM; goto err_free_reg; } priv = dev->priv; priv->pdev = pdev; spin_lock_init(&priv->lock); SET_IEEE80211_DEV(dev, &pdev->dev); pci_set_drvdata(pdev, dev); priv->map = pci_iomap(pdev, 1, mem_len); if (!priv->map) priv->map = pci_iomap(pdev, 0, io_len); if (!priv->map) { printk(KERN_ERR "%s (adm8211): Cannot map device memory\n", pci_name(pdev)); goto err_free_dev; } priv->rx_ring_size = rx_ring_size; priv->tx_ring_size = tx_ring_size; if (adm8211_alloc_rings(dev)) { printk(KERN_ERR "%s (adm8211): Cannot allocate TX/RX ring\n", pci_name(pdev)); goto err_iounmap; } *(__le32 *)perm_addr = cpu_to_le32(ADM8211_CSR_READ(PAR0)); *(__le16 *)&perm_addr[4] = cpu_to_le16(ADM8211_CSR_READ(PAR1) & 0xFFFF); if (!is_valid_ether_addr(perm_addr)) { printk(KERN_WARNING "%s (adm8211): Invalid hwaddr in EEPROM!\n", pci_name(pdev)); random_ether_addr(perm_addr); } SET_IEEE80211_PERM_ADDR(dev, perm_addr); dev->extra_tx_headroom = sizeof(struct adm8211_tx_hdr); /* dev->flags = IEEE80211_HW_RX_INCLUDES_FCS in promisc mode */ dev->flags = IEEE80211_HW_SIGNAL_UNSPEC; dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); dev->channel_change_time = 1000; dev->max_signal = 100; /* FIXME: find better value */ dev->queues = 1; /* ADM8211C supports more, maybe ADM8211B too */ priv->retry_limit = 3; priv->ant_power = 0x40; priv->tx_power = 0x40; priv->lpf_cutoff = 0xFF; priv->lnags_threshold = 0xFF; priv->mode = NL80211_IFTYPE_UNSPECIFIED; /* Power-on issue. 
EEPROM won't read correctly without */ if (pdev->revision >= ADM8211_REV_BA) { ADM8211_CSR_WRITE(FRCTL, 0); ADM8211_CSR_READ(FRCTL); ADM8211_CSR_WRITE(FRCTL, 1); ADM8211_CSR_READ(FRCTL); msleep(100); } err = adm8211_read_eeprom(dev); if (err) { printk(KERN_ERR "%s (adm8211): Can't alloc eeprom buffer\n", pci_name(pdev)); goto err_free_desc; } priv->channel = 1; dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; err = ieee80211_register_hw(dev); if (err) { printk(KERN_ERR "%s (adm8211): Cannot register device\n", pci_name(pdev)); goto err_free_desc; } printk(KERN_INFO "%s: hwaddr %pM, Rev 0x%02x\n", wiphy_name(dev->wiphy), dev->wiphy->perm_addr, pdev->revision); return 0; err_free_desc: pci_free_consistent(pdev, sizeof(struct adm8211_desc) * priv->rx_ring_size + sizeof(struct adm8211_desc) * priv->tx_ring_size, priv->rx_ring, priv->rx_ring_dma); kfree(priv->rx_buffers); err_iounmap: pci_iounmap(pdev, priv->map); err_free_dev: pci_set_drvdata(pdev, NULL); ieee80211_free_hw(dev); err_free_reg: pci_release_regions(pdev); err_disable_pdev: pci_disable_device(pdev); return err; } static void __devexit adm8211_remove(struct pci_dev *pdev) { struct ieee80211_hw *dev = pci_get_drvdata(pdev); struct adm8211_priv *priv; if (!dev) return; ieee80211_unregister_hw(dev); priv = dev->priv; pci_free_consistent(pdev, sizeof(struct adm8211_desc) * priv->rx_ring_size + sizeof(struct adm8211_desc) * priv->tx_ring_size, priv->rx_ring, priv->rx_ring_dma); kfree(priv->rx_buffers); kfree(priv->eeprom); pci_iounmap(pdev, priv->map); pci_release_regions(pdev); pci_disable_device(pdev); ieee80211_free_hw(dev); } #ifdef CONFIG_PM static int adm8211_suspend(struct pci_dev *pdev, pm_message_t state) { pci_save_state(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } static int adm8211_resume(struct pci_dev *pdev) { pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); return 0; } #endif /* CONFIG_PM */ MODULE_DEVICE_TABLE(pci, adm8211_pci_id_table); /* TODO: 
implement enable_wake */ static struct pci_driver adm8211_driver = { .name = "adm8211", .id_table = adm8211_pci_id_table, .probe = adm8211_probe, .remove = __devexit_p(adm8211_remove), #ifdef CONFIG_PM .suspend = adm8211_suspend, .resume = adm8211_resume, #endif /* CONFIG_PM */ }; static int __init adm8211_init(void) { return pci_register_driver(&adm8211_driver); } static void __exit adm8211_exit(void) { pci_unregister_driver(&adm8211_driver); } module_init(adm8211_init); module_exit(adm8211_exit);
gpl-2.0
LorDClockaN/DesireC-eclair
arch/sh/math-emu/math.c
757
13215
/* * arch/sh/math-emu/math.c * * Copyright (C) 2006 Takashi YOSHII <takasi-y@ops.dti.ne.jp> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/signal.h> #include <asm/system.h> #include <asm/uaccess.h> #include <asm/processor.h> #include <asm/io.h> #include "sfp-util.h" #include <math-emu/soft-fp.h> #include <math-emu/single.h> #include <math-emu/double.h> #define FPUL (fregs->fpul) #define FPSCR (fregs->fpscr) #define FPSCR_RM (FPSCR&3) #define FPSCR_DN ((FPSCR>>18)&1) #define FPSCR_PR ((FPSCR>>19)&1) #define FPSCR_SZ ((FPSCR>>20)&1) #define FPSCR_FR ((FPSCR>>21)&1) #define FPSCR_MASK 0x003fffffUL #define BANK(n) (n^(FPSCR_FR?16:0)) #define FR ((unsigned long*)(fregs->fp_regs)) #define FR0 (FR[BANK(0)]) #define FRn (FR[BANK(n)]) #define FRm (FR[BANK(m)]) #define DR ((unsigned long long*)(fregs->fp_regs)) #define DRn (DR[BANK(n)/2]) #define DRm (DR[BANK(m)/2]) #define XREG(n) (n^16) #define XFn (FR[BANK(XREG(n))]) #define XFm (FR[BANK(XREG(m))]) #define XDn (DR[BANK(XREG(n))/2]) #define XDm (DR[BANK(XREG(m))/2]) #define R0 (regs->regs[0]) #define Rn (regs->regs[n]) #define Rm (regs->regs[m]) #define WRITE(d,a) ({if(put_user(d, (typeof (d)*)a)) return -EFAULT;}) #define READ(d,a) ({if(get_user(d, (typeof (d)*)a)) return -EFAULT;}) #define PACK_S(r,f) FP_PACK_SP(&r,f) #define UNPACK_S(f,r) FP_UNPACK_SP(f,&r) #define PACK_D(r,f) \ {u32 t[2]; FP_PACK_DP(t,f); ((u32*)&r)[0]=t[1]; ((u32*)&r)[1]=t[0];} #define UNPACK_D(f,r) \ {u32 t[2]; t[0]=((u32*)&r)[1]; t[1]=((u32*)&r)[0]; FP_UNPACK_DP(f,t);} // 2 args instructions. 
#define BOTH_PRmn(op,x) \ FP_DECL_EX; if(FPSCR_PR) op(D,x,DRm,DRn); else op(S,x,FRm,FRn); #define CMP_X(SZ,R,M,N) do{ \ FP_DECL_##SZ(Fm); FP_DECL_##SZ(Fn); \ UNPACK_##SZ(Fm, M); UNPACK_##SZ(Fn, N); \ FP_CMP_##SZ(R, Fn, Fm, 2); }while(0) #define EQ_X(SZ,R,M,N) do{ \ FP_DECL_##SZ(Fm); FP_DECL_##SZ(Fn); \ UNPACK_##SZ(Fm, M); UNPACK_##SZ(Fn, N); \ FP_CMP_EQ_##SZ(R, Fn, Fm); }while(0) #define CMP(OP) ({ int r; BOTH_PRmn(OP##_X,r); r; }) static int fcmp_gt(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { if (CMP(CMP) > 0) regs->sr |= 1; else regs->sr &= ~1; return 0; } static int fcmp_eq(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { if (CMP(CMP /*EQ*/) == 0) regs->sr |= 1; else regs->sr &= ~1; return 0; } #define ARITH_X(SZ,OP,M,N) do{ \ FP_DECL_##SZ(Fm); FP_DECL_##SZ(Fn); FP_DECL_##SZ(Fr); \ UNPACK_##SZ(Fm, M); UNPACK_##SZ(Fn, N); \ FP_##OP##_##SZ(Fr, Fn, Fm); \ PACK_##SZ(N, Fr); }while(0) static int fadd(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { BOTH_PRmn(ARITH_X, ADD); return 0; } static int fsub(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { BOTH_PRmn(ARITH_X, SUB); return 0; } static int fmul(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { BOTH_PRmn(ARITH_X, MUL); return 0; } static int fdiv(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { BOTH_PRmn(ARITH_X, DIV); return 0; } static int fmac(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { FP_DECL_EX; FP_DECL_S(Fr); FP_DECL_S(Ft); FP_DECL_S(F0); FP_DECL_S(Fm); FP_DECL_S(Fn); UNPACK_S(F0, FR0); UNPACK_S(Fm, FRm); UNPACK_S(Fn, FRn); FP_MUL_S(Ft, Fm, F0); FP_ADD_S(Fr, Fn, Ft); PACK_S(FRn, Fr); return 0; } // to process fmov's extension (odd n for DR access XD). 
#define FMOV_EXT(x) if(x&1) x+=16-1 static int fmov_idx_reg(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { if (FPSCR_SZ) { FMOV_EXT(n); READ(FRn, Rm + R0 + 4); n++; READ(FRn, Rm + R0); } else { READ(FRn, Rm + R0); } return 0; } static int fmov_mem_reg(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { if (FPSCR_SZ) { FMOV_EXT(n); READ(FRn, Rm + 4); n++; READ(FRn, Rm); } else { READ(FRn, Rm); } return 0; } static int fmov_inc_reg(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { if (FPSCR_SZ) { FMOV_EXT(n); READ(FRn, Rm + 4); n++; READ(FRn, Rm); Rm += 8; } else { READ(FRn, Rm); Rm += 4; } return 0; } static int fmov_reg_idx(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { if (FPSCR_SZ) { FMOV_EXT(m); WRITE(FRm, Rn + R0 + 4); m++; WRITE(FRm, Rn + R0); } else { WRITE(FRm, Rn + R0); } return 0; } static int fmov_reg_mem(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { if (FPSCR_SZ) { FMOV_EXT(m); WRITE(FRm, Rn + 4); m++; WRITE(FRm, Rn); } else { WRITE(FRm, Rn); } return 0; } static int fmov_reg_dec(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { if (FPSCR_SZ) { FMOV_EXT(m); Rn -= 8; WRITE(FRm, Rn + 4); m++; WRITE(FRm, Rn); } else { Rn -= 4; WRITE(FRm, Rn); } return 0; } static int fmov_reg_reg(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { if (FPSCR_SZ) { FMOV_EXT(m); FMOV_EXT(n); DRn = DRm; } else { FRn = FRm; } return 0; } static int fnop_mn(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { return -EINVAL; } // 1 arg instructions. 
#define NOTYETn(i) static int i(struct sh_fpu_soft_struct *fregs, int n) \ { printk( #i " not yet done.\n"); return 0; } NOTYETn(ftrv) NOTYETn(fsqrt) NOTYETn(fipr) NOTYETn(fsca) NOTYETn(fsrra) #define EMU_FLOAT_X(SZ,N) do { \ FP_DECL_##SZ(Fn); \ FP_FROM_INT_##SZ(Fn, FPUL, 32, int); \ PACK_##SZ(N, Fn); }while(0) static int ffloat(struct sh_fpu_soft_struct *fregs, int n) { FP_DECL_EX; if (FPSCR_PR) EMU_FLOAT_X(D, DRn); else EMU_FLOAT_X(S, FRn); return 0; } #define EMU_FTRC_X(SZ,N) do { \ FP_DECL_##SZ(Fn); \ UNPACK_##SZ(Fn, N); \ FP_TO_INT_##SZ(FPUL, Fn, 32, 1); }while(0) static int ftrc(struct sh_fpu_soft_struct *fregs, int n) { FP_DECL_EX; if (FPSCR_PR) EMU_FTRC_X(D, DRn); else EMU_FTRC_X(S, FRn); return 0; } static int fcnvsd(struct sh_fpu_soft_struct *fregs, int n) { FP_DECL_EX; FP_DECL_S(Fn); FP_DECL_D(Fr); UNPACK_S(Fn, FPUL); FP_CONV(D, S, 2, 1, Fr, Fn); PACK_D(DRn, Fr); return 0; } static int fcnvds(struct sh_fpu_soft_struct *fregs, int n) { FP_DECL_EX; FP_DECL_D(Fn); FP_DECL_S(Fr); UNPACK_D(Fn, DRn); FP_CONV(S, D, 1, 2, Fr, Fn); PACK_S(FPUL, Fr); return 0; } static int fxchg(struct sh_fpu_soft_struct *fregs, int flag) { FPSCR ^= flag; return 0; } static int fsts(struct sh_fpu_soft_struct *fregs, int n) { FRn = FPUL; return 0; } static int flds(struct sh_fpu_soft_struct *fregs, int n) { FPUL = FRn; return 0; } static int fneg(struct sh_fpu_soft_struct *fregs, int n) { FRn ^= (1 << (_FP_W_TYPE_SIZE - 1)); return 0; } static int fabs(struct sh_fpu_soft_struct *fregs, int n) { FRn &= ~(1 << (_FP_W_TYPE_SIZE - 1)); return 0; } static int fld0(struct sh_fpu_soft_struct *fregs, int n) { FRn = 0; return 0; } static int fld1(struct sh_fpu_soft_struct *fregs, int n) { FRn = (_FP_EXPBIAS_S << (_FP_FRACBITS_S - 1)); return 0; } static int fnop_n(struct sh_fpu_soft_struct *fregs, int n) { return -EINVAL; } /// Instruction decoders. 
static int id_fxfd(struct sh_fpu_soft_struct *, int); static int id_fnxd(struct sh_fpu_soft_struct *, struct pt_regs *, int, int); static int (*fnxd[])(struct sh_fpu_soft_struct *, int) = { fsts, flds, ffloat, ftrc, fneg, fabs, fsqrt, fsrra, fld0, fld1, fcnvsd, fcnvds, fnop_n, fnop_n, fipr, id_fxfd }; static int (*fnmx[])(struct sh_fpu_soft_struct *, struct pt_regs *, int, int) = { fadd, fsub, fmul, fdiv, fcmp_eq, fcmp_gt, fmov_idx_reg, fmov_reg_idx, fmov_mem_reg, fmov_inc_reg, fmov_reg_mem, fmov_reg_dec, fmov_reg_reg, id_fnxd, fmac, fnop_mn}; static int id_fxfd(struct sh_fpu_soft_struct *fregs, int x) { const int flag[] = { FPSCR_SZ, FPSCR_PR, FPSCR_FR, 0 }; switch (x & 3) { case 3: fxchg(fregs, flag[x >> 2]); break; case 1: ftrv(fregs, x - 1); break; default: fsca(fregs, x); } return 0; } static int id_fnxd(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int x, int n) { return (fnxd[x])(fregs, n); } static int id_fnmx(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, u16 code) { int n = (code >> 8) & 0xf, m = (code >> 4) & 0xf, x = code & 0xf; return (fnmx[x])(fregs, regs, m, n); } static int id_sys(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, u16 code) { int n = ((code >> 8) & 0xf); unsigned long *reg = (code & 0x0010) ? 
&FPUL : &FPSCR; switch (code & 0xf0ff) { case 0x005a: case 0x006a: Rn = *reg; break; case 0x405a: case 0x406a: *reg = Rn; break; case 0x4052: case 0x4062: Rn -= 4; WRITE(*reg, Rn); break; case 0x4056: case 0x4066: READ(*reg, Rn); Rn += 4; break; default: return -EINVAL; } return 0; } static int fpu_emulate(u16 code, struct sh_fpu_soft_struct *fregs, struct pt_regs *regs) { if ((code & 0xf000) == 0xf000) return id_fnmx(fregs, regs, code); else return id_sys(fregs, regs, code); } /** * denormal_to_double - Given denormalized float number, * store double float * * @fpu: Pointer to sh_fpu_hard structure * @n: Index to FP register */ static void denormal_to_double(struct sh_fpu_hard_struct *fpu, int n) { unsigned long du, dl; unsigned long x = fpu->fpul; int exp = 1023 - 126; if (x != 0 && (x & 0x7f800000) == 0) { du = (x & 0x80000000); while ((x & 0x00800000) == 0) { x <<= 1; exp--; } x &= 0x007fffff; du |= (exp << 20) | (x >> 3); dl = x << 29; fpu->fp_regs[n] = du; fpu->fp_regs[n+1] = dl; } } /** * ieee_fpe_handler - Handle denormalized number exception * * @regs: Pointer to register structure * * Returns 1 when it's handled (should not cause exception). 
*/ static int ieee_fpe_handler(struct pt_regs *regs) { unsigned short insn = *(unsigned short *)regs->pc; unsigned short finsn; unsigned long nextpc; siginfo_t info; int nib[4] = { (insn >> 12) & 0xf, (insn >> 8) & 0xf, (insn >> 4) & 0xf, insn & 0xf}; if (nib[0] == 0xb || (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) /* bsr & jsr */ regs->pr = regs->pc + 4; if (nib[0] == 0xa || nib[0] == 0xb) { /* bra & bsr */ nextpc = regs->pc + 4 + ((short) ((insn & 0xfff) << 4) >> 3); finsn = *(unsigned short *) (regs->pc + 2); } else if (nib[0] == 0x8 && nib[1] == 0xd) { /* bt/s */ if (regs->sr & 1) nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1); else nextpc = regs->pc + 4; finsn = *(unsigned short *) (regs->pc + 2); } else if (nib[0] == 0x8 && nib[1] == 0xf) { /* bf/s */ if (regs->sr & 1) nextpc = regs->pc + 4; else nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1); finsn = *(unsigned short *) (regs->pc + 2); } else if (nib[0] == 0x4 && nib[3] == 0xb && (nib[2] == 0x0 || nib[2] == 0x2)) { /* jmp & jsr */ nextpc = regs->regs[nib[1]]; finsn = *(unsigned short *) (regs->pc + 2); } else if (nib[0] == 0x0 && nib[3] == 0x3 && (nib[2] == 0x0 || nib[2] == 0x2)) { /* braf & bsrf */ nextpc = regs->pc + 4 + regs->regs[nib[1]]; finsn = *(unsigned short *) (regs->pc + 2); } else if (insn == 0x000b) { /* rts */ nextpc = regs->pr; finsn = *(unsigned short *) (regs->pc + 2); } else { nextpc = regs->pc + 2; finsn = insn; } if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */ struct task_struct *tsk = current; if ((tsk->thread.fpu.hard.fpscr & (1 << 17))) { /* FPU error */ denormal_to_double (&tsk->thread.fpu.hard, (finsn >> 8) & 0xf); tsk->thread.fpu.hard.fpscr &= ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK); set_tsk_thread_flag(tsk, TIF_USEDFPU); } else { info.si_signo = SIGFPE; info.si_errno = 0; info.si_code = FPE_FLTINV; info.si_addr = (void __user *)regs->pc; force_sig_info(SIGFPE, &info, tsk); } regs->pc = nextpc; return 1; } return 0; } asmlinkage void do_fpu_error(unsigned long r4, 
unsigned long r5, unsigned long r6, unsigned long r7, struct pt_regs regs) { struct task_struct *tsk = current; siginfo_t info; if (ieee_fpe_handler (&regs)) return; regs.pc += 2; info.si_signo = SIGFPE; info.si_errno = 0; info.si_code = FPE_FLTINV; info.si_addr = (void __user *)regs.pc; force_sig_info(SIGFPE, &info, tsk); } /** * fpu_init - Initialize FPU registers * @fpu: Pointer to software emulated FPU registers. */ static void fpu_init(struct sh_fpu_soft_struct *fpu) { int i; fpu->fpscr = FPSCR_INIT; fpu->fpul = 0; for (i = 0; i < 16; i++) { fpu->fp_regs[i] = 0; fpu->xfp_regs[i]= 0; } } /** * do_fpu_inst - Handle reserved instructions for FPU emulation * @inst: instruction code. * @regs: registers on stack. */ int do_fpu_inst(unsigned short inst, struct pt_regs *regs) { struct task_struct *tsk = current; struct sh_fpu_soft_struct *fpu = &(tsk->thread.fpu.soft); if (!test_tsk_thread_flag(tsk, TIF_USEDFPU)) { /* initialize once. */ fpu_init(fpu); set_tsk_thread_flag(tsk, TIF_USEDFPU); } return fpu_emulate(inst, fpu, regs); }
gpl-2.0
buglabs/bug20-2.6.35-linaro
drivers/ata/pata_cs5520.c
1013
8367
/* * IDE tuning and bus mastering support for the CS5510/CS5520 * chipsets * * The CS5510/CS5520 are slightly unusual devices. Unlike the * typical IDE controllers they do bus mastering with the drive in * PIO mode and smarter silicon. * * The practical upshot of this is that we must always tune the * drive for the right PIO mode. We must also ignore all the blacklists * and the drive bus mastering DMA information. Also to confuse matters * further we can do DMA on PIO only drives. * * DMA on the 5510 also requires we disable_hlt() during DMA on early * revisions. * * *** This driver is strictly experimental *** * * (c) Copyright Red Hat Inc 2002 * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * Documentation: * Not publically available. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_cs5520" #define DRV_VERSION "0.6.6" struct pio_clocks { int address; int assert; int recovery; }; static const struct pio_clocks cs5520_pio_clocks[]={ {3, 6, 11}, {2, 5, 6}, {1, 4, 3}, {1, 3, 2}, {1, 2, 1} }; /** * cs5520_set_timings - program PIO timings * @ap: ATA port * @adev: ATA device * * Program the PIO mode timings for the controller according to the pio * clocking table. 
*/ static void cs5520_set_timings(struct ata_port *ap, struct ata_device *adev, int pio) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); int slave = adev->devno; pio -= XFER_PIO_0; /* Channel command timing */ pci_write_config_byte(pdev, 0x62 + ap->port_no, (cs5520_pio_clocks[pio].recovery << 4) | (cs5520_pio_clocks[pio].assert)); /* FIXME: should these use address ? */ /* Read command timing */ pci_write_config_byte(pdev, 0x64 + 4*ap->port_no + slave, (cs5520_pio_clocks[pio].recovery << 4) | (cs5520_pio_clocks[pio].assert)); /* Write command timing */ pci_write_config_byte(pdev, 0x66 + 4*ap->port_no + slave, (cs5520_pio_clocks[pio].recovery << 4) | (cs5520_pio_clocks[pio].assert)); } /** * cs5520_set_piomode - program PIO timings * @ap: ATA port * @adev: ATA device * * Program the PIO mode timings for the controller according to the pio * clocking table. */ static void cs5520_set_piomode(struct ata_port *ap, struct ata_device *adev) { cs5520_set_timings(ap, adev, adev->pio_mode); } static struct scsi_host_template cs5520_sht = { ATA_BMDMA_SHT(DRV_NAME), .sg_tablesize = LIBATA_DUMB_MAX_PRD, }; static struct ata_port_operations cs5520_port_ops = { .inherits = &ata_bmdma_port_ops, .qc_prep = ata_bmdma_dumb_qc_prep, .cable_detect = ata_cable_40wire, .set_piomode = cs5520_set_piomode, }; static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { static const unsigned int cmd_port[] = { 0x1F0, 0x170 }; static const unsigned int ctl_port[] = { 0x3F6, 0x376 }; struct ata_port_info pi = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .port_ops = &cs5520_port_ops, }; const struct ata_port_info *ppi[2]; u8 pcicfg; void __iomem *iomap[5]; struct ata_host *host; struct ata_ioports *ioaddr; int i, rc; rc = pcim_enable_device(pdev); if (rc) return rc; /* IDE port enable bits */ pci_read_config_byte(pdev, 0x60, &pcicfg); /* Check if the ATA ports are enabled */ if ((pcicfg & 3) == 0) return -ENODEV; ppi[0] = ppi[1] = 
&ata_dummy_port_info; if (pcicfg & 1) ppi[0] = &pi; if (pcicfg & 2) ppi[1] = &pi; if ((pcicfg & 0x40) == 0) { dev_printk(KERN_WARNING, &pdev->dev, "DMA mode disabled. Enabling.\n"); pci_write_config_byte(pdev, 0x60, pcicfg | 0x40); } pi.mwdma_mask = id->driver_data; host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2); if (!host) return -ENOMEM; /* Perform set up for DMA */ if (pci_enable_device_io(pdev)) { printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n"); return -ENODEV; } if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n"); return -ENODEV; } if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n"); return -ENODEV; } /* Map IO ports and initialize host accordingly */ iomap[0] = devm_ioport_map(&pdev->dev, cmd_port[0], 8); iomap[1] = devm_ioport_map(&pdev->dev, ctl_port[0], 1); iomap[2] = devm_ioport_map(&pdev->dev, cmd_port[1], 8); iomap[3] = devm_ioport_map(&pdev->dev, ctl_port[1], 1); iomap[4] = pcim_iomap(pdev, 2, 0); if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4]) return -ENOMEM; ioaddr = &host->ports[0]->ioaddr; ioaddr->cmd_addr = iomap[0]; ioaddr->ctl_addr = iomap[1]; ioaddr->altstatus_addr = iomap[1]; ioaddr->bmdma_addr = iomap[4]; ata_sff_std_ports(ioaddr); ata_port_desc(host->ports[0], "cmd 0x%x ctl 0x%x", cmd_port[0], ctl_port[0]); ata_port_pbar_desc(host->ports[0], 4, 0, "bmdma"); ioaddr = &host->ports[1]->ioaddr; ioaddr->cmd_addr = iomap[2]; ioaddr->ctl_addr = iomap[3]; ioaddr->altstatus_addr = iomap[3]; ioaddr->bmdma_addr = iomap[4] + 8; ata_sff_std_ports(ioaddr); ata_port_desc(host->ports[1], "cmd 0x%x ctl 0x%x", cmd_port[1], ctl_port[1]); ata_port_pbar_desc(host->ports[1], 4, 8, "bmdma"); /* activate the host */ pci_set_master(pdev); rc = ata_host_start(host); if (rc) return rc; for (i = 0; i < 2; i++) { static const int irq[] = { 14, 15 }; struct ata_port *ap = host->ports[i]; if 
(ata_port_is_dummy(ap)) continue; rc = devm_request_irq(&pdev->dev, irq[ap->port_no], ata_bmdma_interrupt, 0, DRV_NAME, host); if (rc) return rc; ata_port_desc(ap, "irq %d", irq[i]); } return ata_host_register(host, &cs5520_sht); } #ifdef CONFIG_PM /** * cs5520_reinit_one - device resume * @pdev: PCI device * * Do any reconfiguration work needed by a resume from RAM. We need * to restore DMA mode support on BIOSen which disabled it */ static int cs5520_reinit_one(struct pci_dev *pdev) { struct ata_host *host = dev_get_drvdata(&pdev->dev); u8 pcicfg; int rc; rc = ata_pci_device_do_resume(pdev); if (rc) return rc; pci_read_config_byte(pdev, 0x60, &pcicfg); if ((pcicfg & 0x40) == 0) pci_write_config_byte(pdev, 0x60, pcicfg | 0x40); ata_host_resume(host); return 0; } /** * cs5520_pci_device_suspend - device suspend * @pdev: PCI device * * We have to cut and waste bits from the standard method because * the 5520 is a bit odd and not just a pure ATA device. As a result * we must not disable it. The needed code is short and this avoids * chip specific mess in the core code. */ static int cs5520_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) { struct ata_host *host = dev_get_drvdata(&pdev->dev); int rc = 0; rc = ata_host_suspend(host, mesg); if (rc) return rc; pci_save_state(pdev); return 0; } #endif /* CONFIG_PM */ /* For now keep DMA off. 
We can set it for all but A rev CS5510 once the core ATA code can handle it */ static const struct pci_device_id pata_cs5520[] = { { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5510), }, { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5520), }, { }, }; static struct pci_driver cs5520_pci_driver = { .name = DRV_NAME, .id_table = pata_cs5520, .probe = cs5520_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = cs5520_pci_device_suspend, .resume = cs5520_reinit_one, #endif }; static int __init cs5520_init(void) { return pci_register_driver(&cs5520_pci_driver); } static void __exit cs5520_exit(void) { pci_unregister_driver(&cs5520_pci_driver); } MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for Cyrix CS5510/5520"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, pata_cs5520); MODULE_VERSION(DRV_VERSION); module_init(cs5520_init); module_exit(cs5520_exit);
gpl-2.0
cfriedt/bluetooth-next
drivers/media/pci/cx18/cx18-gpio.c
1781
9373
/* * cx18 gpio functions * * Derived from ivtv-gpio.c * * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA * 02111-1307 USA */ #include "cx18-driver.h" #include "cx18-io.h" #include "cx18-cards.h" #include "cx18-gpio.h" #include "tuner-xc2028.h" /********************* GPIO stuffs *********************/ /* GPIO registers */ #define CX18_REG_GPIO_IN 0xc72010 #define CX18_REG_GPIO_OUT1 0xc78100 #define CX18_REG_GPIO_DIR1 0xc78108 #define CX18_REG_GPIO_OUT2 0xc78104 #define CX18_REG_GPIO_DIR2 0xc7810c /* * HVR-1600 GPIO pins, courtesy of Hauppauge: * * gpio0: zilog ir process reset pin * gpio1: zilog programming pin (you should never use this) * gpio12: cx24227 reset pin * gpio13: cs5345 reset pin */ /* * File scope utility functions */ static void gpio_write(struct cx18 *cx) { u32 dir_lo = cx->gpio_dir & 0xffff; u32 val_lo = cx->gpio_val & 0xffff; u32 dir_hi = cx->gpio_dir >> 16; u32 val_hi = cx->gpio_val >> 16; cx18_write_reg_expect(cx, dir_lo << 16, CX18_REG_GPIO_DIR1, ~dir_lo, dir_lo); cx18_write_reg_expect(cx, (dir_lo << 16) | val_lo, CX18_REG_GPIO_OUT1, val_lo, dir_lo); cx18_write_reg_expect(cx, dir_hi << 16, CX18_REG_GPIO_DIR2, ~dir_hi, dir_hi); cx18_write_reg_expect(cx, (dir_hi << 16) | val_hi, CX18_REG_GPIO_OUT2, val_hi, 
dir_hi); } static void gpio_update(struct cx18 *cx, u32 mask, u32 data) { if (mask == 0) return; mutex_lock(&cx->gpio_lock); cx->gpio_val = (cx->gpio_val & ~mask) | (data & mask); gpio_write(cx); mutex_unlock(&cx->gpio_lock); } static void gpio_reset_seq(struct cx18 *cx, u32 active_lo, u32 active_hi, unsigned int assert_msecs, unsigned int recovery_msecs) { u32 mask; mask = active_lo | active_hi; if (mask == 0) return; /* * Assuming that active_hi and active_lo are a subsets of the bits in * gpio_dir. Also assumes that active_lo and active_hi don't overlap * in any bit position */ /* Assert */ gpio_update(cx, mask, ~active_lo); schedule_timeout_uninterruptible(msecs_to_jiffies(assert_msecs)); /* Deassert */ gpio_update(cx, mask, ~active_hi); schedule_timeout_uninterruptible(msecs_to_jiffies(recovery_msecs)); } /* * GPIO Multiplexer - logical device */ static int gpiomux_log_status(struct v4l2_subdev *sd) { struct cx18 *cx = v4l2_get_subdevdata(sd); mutex_lock(&cx->gpio_lock); CX18_INFO_DEV(sd, "GPIO: direction 0x%08x, value 0x%08x\n", cx->gpio_dir, cx->gpio_val); mutex_unlock(&cx->gpio_lock); return 0; } static int gpiomux_s_radio(struct v4l2_subdev *sd) { struct cx18 *cx = v4l2_get_subdevdata(sd); /* * FIXME - work out the cx->active/audio_input mess - this is * intended to handle the switch to radio mode and set the * audio routing, but we need to update the state in cx */ gpio_update(cx, cx->card->gpio_audio_input.mask, cx->card->gpio_audio_input.radio); return 0; } static int gpiomux_s_std(struct v4l2_subdev *sd, v4l2_std_id norm) { struct cx18 *cx = v4l2_get_subdevdata(sd); u32 data; switch (cx->card->audio_inputs[cx->audio_input].muxer_input) { case 1: data = cx->card->gpio_audio_input.linein; break; case 0: data = cx->card->gpio_audio_input.tuner; break; default: /* * FIXME - work out the cx->active/audio_input mess - this is * intended to handle the switch from radio mode and set the * audio routing, but we need to update the state in cx */ data = 
cx->card->gpio_audio_input.tuner; break; } gpio_update(cx, cx->card->gpio_audio_input.mask, data); return 0; } static int gpiomux_s_audio_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct cx18 *cx = v4l2_get_subdevdata(sd); u32 data; switch (input) { case 0: data = cx->card->gpio_audio_input.tuner; break; case 1: data = cx->card->gpio_audio_input.linein; break; case 2: data = cx->card->gpio_audio_input.radio; break; default: return -EINVAL; } gpio_update(cx, cx->card->gpio_audio_input.mask, data); return 0; } static const struct v4l2_subdev_core_ops gpiomux_core_ops = { .log_status = gpiomux_log_status, }; static const struct v4l2_subdev_tuner_ops gpiomux_tuner_ops = { .s_radio = gpiomux_s_radio, }; static const struct v4l2_subdev_audio_ops gpiomux_audio_ops = { .s_routing = gpiomux_s_audio_routing, }; static const struct v4l2_subdev_video_ops gpiomux_video_ops = { .s_std = gpiomux_s_std, }; static const struct v4l2_subdev_ops gpiomux_ops = { .core = &gpiomux_core_ops, .tuner = &gpiomux_tuner_ops, .audio = &gpiomux_audio_ops, .video = &gpiomux_video_ops, }; /* * GPIO Reset Controller - logical device */ static int resetctrl_log_status(struct v4l2_subdev *sd) { struct cx18 *cx = v4l2_get_subdevdata(sd); mutex_lock(&cx->gpio_lock); CX18_INFO_DEV(sd, "GPIO: direction 0x%08x, value 0x%08x\n", cx->gpio_dir, cx->gpio_val); mutex_unlock(&cx->gpio_lock); return 0; } static int resetctrl_reset(struct v4l2_subdev *sd, u32 val) { struct cx18 *cx = v4l2_get_subdevdata(sd); const struct cx18_gpio_i2c_slave_reset *p; p = &cx->card->gpio_i2c_slave_reset; switch (val) { case CX18_GPIO_RESET_I2C: gpio_reset_seq(cx, p->active_lo_mask, p->active_hi_mask, p->msecs_asserted, p->msecs_recovery); break; case CX18_GPIO_RESET_Z8F0811: /* * Assert timing for the Z8F0811 on HVR-1600 boards: * 1. Assert RESET for min of 4 clock cycles at 18.432 MHz to * initiate * 2. 
Reset then takes 66 WDT cycles at 10 kHz + 16 xtal clock * cycles (6,601,085 nanoseconds ~= 7 milliseconds) * 3. DBG pin must be high before chip exits reset for normal * operation. DBG is open drain and hopefully pulled high * since we don't normally drive it (GPIO 1?) for the * HVR-1600 * 4. Z8F0811 won't exit reset until RESET is deasserted * 5. Zilog comes out of reset, loads reset vector address and * executes from there. Required recovery delay unknown. */ gpio_reset_seq(cx, p->ir_reset_mask, 0, p->msecs_asserted, p->msecs_recovery); break; case CX18_GPIO_RESET_XC2028: if (cx->card->tuners[0].tuner == TUNER_XC2028) gpio_reset_seq(cx, (1 << cx->card->xceive_pin), 0, 1, 1); break; } return 0; } static const struct v4l2_subdev_core_ops resetctrl_core_ops = { .log_status = resetctrl_log_status, .reset = resetctrl_reset, }; static const struct v4l2_subdev_ops resetctrl_ops = { .core = &resetctrl_core_ops, }; /* * External entry points */ void cx18_gpio_init(struct cx18 *cx) { mutex_lock(&cx->gpio_lock); cx->gpio_dir = cx->card->gpio_init.direction; cx->gpio_val = cx->card->gpio_init.initial_value; if (cx->card->tuners[0].tuner == TUNER_XC2028) { cx->gpio_dir |= 1 << cx->card->xceive_pin; cx->gpio_val |= 1 << cx->card->xceive_pin; } if (cx->gpio_dir == 0) { mutex_unlock(&cx->gpio_lock); return; } CX18_DEBUG_INFO("GPIO initial dir: %08x/%08x out: %08x/%08x\n", cx18_read_reg(cx, CX18_REG_GPIO_DIR1), cx18_read_reg(cx, CX18_REG_GPIO_DIR2), cx18_read_reg(cx, CX18_REG_GPIO_OUT1), cx18_read_reg(cx, CX18_REG_GPIO_OUT2)); gpio_write(cx); mutex_unlock(&cx->gpio_lock); } int cx18_gpio_register(struct cx18 *cx, u32 hw) { struct v4l2_subdev *sd; const struct v4l2_subdev_ops *ops; char *str; switch (hw) { case CX18_HW_GPIO_MUX: sd = &cx->sd_gpiomux; ops = &gpiomux_ops; str = "gpio-mux"; break; case CX18_HW_GPIO_RESET_CTRL: sd = &cx->sd_resetctrl; ops = &resetctrl_ops; str = "gpio-reset-ctrl"; break; default: return -EINVAL; } v4l2_subdev_init(sd, ops); v4l2_set_subdevdata(sd, 
cx); snprintf(sd->name, sizeof(sd->name), "%s %s", cx->v4l2_dev.name, str); sd->grp_id = hw; return v4l2_device_register_subdev(&cx->v4l2_dev, sd); } void cx18_reset_ir_gpio(void *data) { struct cx18 *cx = to_cx18((struct v4l2_device *)data); if (cx->card->gpio_i2c_slave_reset.ir_reset_mask == 0) return; CX18_DEBUG_INFO("Resetting IR microcontroller\n"); v4l2_subdev_call(&cx->sd_resetctrl, core, reset, CX18_GPIO_RESET_Z8F0811); } EXPORT_SYMBOL(cx18_reset_ir_gpio); /* This symbol is exported for use by lirc_pvr150 for the IR-blaster */ /* Xceive tuner reset function */ int cx18_reset_tuner_gpio(void *dev, int component, int cmd, int value) { struct i2c_algo_bit_data *algo = dev; struct cx18_i2c_algo_callback_data *cb_data = algo->data; struct cx18 *cx = cb_data->cx; if (cmd != XC2028_TUNER_RESET || cx->card->tuners[0].tuner != TUNER_XC2028) return 0; CX18_DEBUG_INFO("Resetting XCeive tuner\n"); return v4l2_subdev_call(&cx->sd_resetctrl, core, reset, CX18_GPIO_RESET_XC2028); }
gpl-2.0
TripNRaVeR/tripndroid-endeavoru-3.1.10
drivers/char/mem.c
2037
20615
/* * linux/drivers/char/mem.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Added devfs support. * Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu> * Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com> */ #include <linux/mm.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/mman.h> #include <linux/random.h> #include <linux/init.h> #include <linux/raw.h> #include <linux/tty.h> #include <linux/capability.h> #include <linux/ptrace.h> #include <linux/device.h> #include <linux/highmem.h> #include <linux/crash_dump.h> #include <linux/backing-dev.h> #include <linux/bootmem.h> #include <linux/splice.h> #include <linux/pfn.h> #include <asm/uaccess.h> #include <asm/io.h> #ifdef CONFIG_IA64 # include <linux/efi.h> #endif static inline unsigned long size_inside_page(unsigned long start, unsigned long size) { unsigned long sz; sz = PAGE_SIZE - (start & (PAGE_SIZE - 1)); return min(sz, size); } #ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE static inline int valid_phys_addr_range(unsigned long addr, size_t count) { return addr + count <= __pa(high_memory); } static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) { return 1; } #endif #if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) #ifdef CONFIG_STRICT_DEVMEM static inline int range_is_allowed(unsigned long pfn, unsigned long size) { u64 from = ((u64)pfn) << PAGE_SHIFT; u64 to = from + size; u64 cursor = from; while (cursor < to) { if (!devmem_is_allowed(pfn)) { printk(KERN_INFO "Program %s tried to access /dev/mem between %Lx->%Lx.\n", current->comm, from, to); return 0; } cursor += PAGE_SIZE; pfn++; } return 1; } #else static inline int range_is_allowed(unsigned long pfn, unsigned long size) { return 1; } #endif #endif #ifdef CONFIG_DEVMEM void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr) { } /* * This funcion reads the *physical* memory. The f_pos points directly to the * memory location. 
*/ static ssize_t read_mem(struct file *file, char __user *buf, size_t count, loff_t *ppos) { unsigned long p = *ppos; ssize_t read, sz; char *ptr; if (!valid_phys_addr_range(p, count)) return -EFAULT; read = 0; #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED /* we don't have page 0 mapped on sparc and m68k.. */ if (p < PAGE_SIZE) { sz = size_inside_page(p, count); if (sz > 0) { if (clear_user(buf, sz)) return -EFAULT; buf += sz; p += sz; count -= sz; read += sz; } } #endif while (count > 0) { unsigned long remaining; sz = size_inside_page(p, count); if (!range_is_allowed(p >> PAGE_SHIFT, count)) return -EPERM; /* * On ia64 if a page has been mapped somewhere as uncached, then * it must also be accessed uncached by the kernel or data * corruption may occur. */ ptr = xlate_dev_mem_ptr(p); if (!ptr) return -EFAULT; remaining = copy_to_user(buf, ptr, sz); unxlate_dev_mem_ptr(p, ptr); if (remaining) return -EFAULT; buf += sz; p += sz; count -= sz; read += sz; } *ppos += read; return read; } static ssize_t write_mem(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { unsigned long p = *ppos; ssize_t written, sz; unsigned long copied; void *ptr; if (!valid_phys_addr_range(p, count)) return -EFAULT; written = 0; #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED /* we don't have page 0 mapped on sparc and m68k.. */ if (p < PAGE_SIZE) { sz = size_inside_page(p, count); /* Hmm. Do something? */ buf += sz; p += sz; count -= sz; written += sz; } #endif while (count > 0) { sz = size_inside_page(p, count); if (!range_is_allowed(p >> PAGE_SHIFT, sz)) return -EPERM; /* * On ia64 if a page has been mapped somewhere as uncached, then * it must also be accessed uncached by the kernel or data * corruption may occur. 
*/ ptr = xlate_dev_mem_ptr(p); if (!ptr) { if (written) break; return -EFAULT; } copied = copy_from_user(ptr, buf, sz); unxlate_dev_mem_ptr(p, ptr); if (copied) { written += sz - copied; if (written) break; return -EFAULT; } buf += sz; p += sz; count -= sz; written += sz; } *ppos += written; return written; } #endif /* CONFIG_DEVMEM */ #if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) int __weak phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, unsigned long size, pgprot_t *vma_prot) { return 1; } #ifndef __HAVE_PHYS_MEM_ACCESS_PROT /* * Architectures vary in how they handle caching for addresses * outside of main memory. * */ #ifdef pgprot_noncached static int uncached_access(struct file *file, unsigned long addr) { #if defined(CONFIG_IA64) /* * On ia64, we ignore O_DSYNC because we cannot tolerate memory * attribute aliases. */ return !(efi_mem_attributes(addr) & EFI_MEMORY_WB); #elif defined(CONFIG_MIPS) { extern int __uncached_access(struct file *file, unsigned long addr); return __uncached_access(file, addr); } #else /* * Accessing memory above the top the kernel knows about or through a * file pointer * that was marked O_DSYNC will be done non-cached. 
*/ if (file->f_flags & O_DSYNC) return 1; return addr >= __pa(high_memory); #endif } #endif static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t vma_prot) { #ifdef pgprot_noncached unsigned long offset = pfn << PAGE_SHIFT; if (uncached_access(file, offset)) return pgprot_noncached(vma_prot); #endif return vma_prot; } #endif #ifndef CONFIG_MMU static unsigned long get_unmapped_area_mem(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { if (!valid_mmap_phys_addr_range(pgoff, len)) return (unsigned long) -EINVAL; return pgoff << PAGE_SHIFT; } /* can't do an in-place private mapping if there's no MMU */ static inline int private_mapping_ok(struct vm_area_struct *vma) { return vma->vm_flags & VM_MAYSHARE; } #else #define get_unmapped_area_mem NULL static inline int private_mapping_ok(struct vm_area_struct *vma) { return 1; } #endif static const struct vm_operations_struct mmap_mem_ops = { #ifdef CONFIG_HAVE_IOREMAP_PROT .access = generic_access_phys #endif }; static int mmap_mem(struct file *file, struct vm_area_struct *vma) { size_t size = vma->vm_end - vma->vm_start; if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) return -EINVAL; if (!private_mapping_ok(vma)) return -ENOSYS; if (!range_is_allowed(vma->vm_pgoff, size)) return -EPERM; if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size, &vma->vm_page_prot)) return -EINVAL; vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff, size, vma->vm_page_prot); vma->vm_ops = &mmap_mem_ops; /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size, vma->vm_page_prot)) { return -EAGAIN; } return 0; } #endif /* CONFIG_DEVMEM */ #ifdef CONFIG_DEVKMEM static int mmap_kmem(struct file *file, struct vm_area_struct *vma) { unsigned long pfn; /* Turn a kernel-virtual address into a physical page frame */ pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> 
PAGE_SHIFT; /* * RED-PEN: on some architectures there is more mapped memory than * available in mem_map which pfn_valid checks for. Perhaps should add a * new macro here. * * RED-PEN: vmalloc is not supported right now. */ if (!pfn_valid(pfn)) return -EIO; vma->vm_pgoff = pfn; return mmap_mem(file, vma); } #endif #ifdef CONFIG_CRASH_DUMP /* * Read memory corresponding to the old kernel. */ static ssize_t read_oldmem(struct file *file, char __user *buf, size_t count, loff_t *ppos) { unsigned long pfn, offset; size_t read = 0, csize; int rc = 0; while (count) { pfn = *ppos / PAGE_SIZE; if (pfn > saved_max_pfn) return read; offset = (unsigned long)(*ppos % PAGE_SIZE); if (count > PAGE_SIZE - offset) csize = PAGE_SIZE - offset; else csize = count; rc = copy_oldmem_page(pfn, buf, csize, offset, 1); if (rc < 0) return rc; buf += csize; *ppos += csize; read += csize; count -= csize; } return read; } #endif #ifdef CONFIG_DEVKMEM /* * This function reads the *virtual* memory as seen by the kernel. */ static ssize_t read_kmem(struct file *file, char __user *buf, size_t count, loff_t *ppos) { unsigned long p = *ppos; ssize_t low_count, read, sz; char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */ int err = 0; read = 0; if (p < (unsigned long) high_memory) { low_count = count; if (count > (unsigned long)high_memory - p) low_count = (unsigned long)high_memory - p; #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED /* we don't have page 0 mapped on sparc and m68k.. 
*/ if (p < PAGE_SIZE && low_count > 0) { sz = size_inside_page(p, low_count); if (clear_user(buf, sz)) return -EFAULT; buf += sz; p += sz; read += sz; low_count -= sz; count -= sz; } #endif while (low_count > 0) { sz = size_inside_page(p, low_count); /* * On ia64 if a page has been mapped somewhere as * uncached, then it must also be accessed uncached * by the kernel or data corruption may occur */ kbuf = xlate_dev_kmem_ptr((char *)p); if (copy_to_user(buf, kbuf, sz)) return -EFAULT; buf += sz; p += sz; read += sz; low_count -= sz; count -= sz; } } if (count > 0) { kbuf = (char *)__get_free_page(GFP_KERNEL); if (!kbuf) return -ENOMEM; while (count > 0) { sz = size_inside_page(p, count); if (!is_vmalloc_or_module_addr((void *)p)) { err = -ENXIO; break; } sz = vread(kbuf, (char *)p, sz); if (!sz) break; if (copy_to_user(buf, kbuf, sz)) { err = -EFAULT; break; } count -= sz; buf += sz; read += sz; p += sz; } free_page((unsigned long)kbuf); } *ppos = p; return read ? read : err; } static ssize_t do_write_kmem(unsigned long p, const char __user *buf, size_t count, loff_t *ppos) { ssize_t written, sz; unsigned long copied; written = 0; #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED /* we don't have page 0 mapped on sparc and m68k.. */ if (p < PAGE_SIZE) { sz = size_inside_page(p, count); /* Hmm. Do something? */ buf += sz; p += sz; count -= sz; written += sz; } #endif while (count > 0) { char *ptr; sz = size_inside_page(p, count); /* * On ia64 if a page has been mapped somewhere as uncached, then * it must also be accessed uncached by the kernel or data * corruption may occur. */ ptr = xlate_dev_kmem_ptr((char *)p); copied = copy_from_user(ptr, buf, sz); if (copied) { written += sz - copied; if (written) break; return -EFAULT; } buf += sz; p += sz; count -= sz; written += sz; } *ppos += written; return written; } /* * This function writes to the *virtual* memory as seen by the kernel. 
*/ static ssize_t write_kmem(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { unsigned long p = *ppos; ssize_t wrote = 0; ssize_t virtr = 0; char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */ int err = 0; if (p < (unsigned long) high_memory) { unsigned long to_write = min_t(unsigned long, count, (unsigned long)high_memory - p); wrote = do_write_kmem(p, buf, to_write, ppos); if (wrote != to_write) return wrote; p += wrote; buf += wrote; count -= wrote; } if (count > 0) { kbuf = (char *)__get_free_page(GFP_KERNEL); if (!kbuf) return wrote ? wrote : -ENOMEM; while (count > 0) { unsigned long sz = size_inside_page(p, count); unsigned long n; if (!is_vmalloc_or_module_addr((void *)p)) { err = -ENXIO; break; } n = copy_from_user(kbuf, buf, sz); if (n) { err = -EFAULT; break; } vwrite(kbuf, (char *)p, sz); count -= sz; buf += sz; virtr += sz; p += sz; } free_page((unsigned long)kbuf); } *ppos = p; return virtr + wrote ? : err; } #endif #ifdef CONFIG_DEVPORT static ssize_t read_port(struct file *file, char __user *buf, size_t count, loff_t *ppos) { unsigned long i = *ppos; char __user *tmp = buf; if (!access_ok(VERIFY_WRITE, buf, count)) return -EFAULT; while (count-- > 0 && i < 65536) { if (__put_user(inb(i), tmp) < 0) return -EFAULT; i++; tmp++; } *ppos = i; return tmp-buf; } static ssize_t write_port(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { unsigned long i = *ppos; const char __user * tmp = buf; if (!access_ok(VERIFY_READ, buf, count)) return -EFAULT; while (count-- > 0 && i < 65536) { char c; if (__get_user(c, tmp)) { if (tmp > buf) break; return -EFAULT; } outb(c, i); i++; tmp++; } *ppos = i; return tmp-buf; } #endif static ssize_t read_null(struct file *file, char __user *buf, size_t count, loff_t *ppos) { return 0; } static ssize_t write_null(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { return count; } static int pipe_to_null(struct pipe_inode_info *info, struct 
pipe_buffer *buf, struct splice_desc *sd) { return sd->len; } static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null); } static ssize_t read_zero(struct file *file, char __user *buf, size_t count, loff_t *ppos) { size_t written; if (!count) return 0; if (!access_ok(VERIFY_WRITE, buf, count)) return -EFAULT; written = 0; while (count) { unsigned long unwritten; size_t chunk = count; if (chunk > PAGE_SIZE) chunk = PAGE_SIZE; /* Just for latency reasons */ unwritten = __clear_user(buf, chunk); written += chunk - unwritten; if (unwritten) break; if (signal_pending(current)) return written ? written : -ERESTARTSYS; buf += chunk; count -= chunk; cond_resched(); } return written ? written : -EFAULT; } static int mmap_zero(struct file *file, struct vm_area_struct *vma) { #ifndef CONFIG_MMU return -ENOSYS; #endif if (vma->vm_flags & VM_SHARED) return shmem_zero_setup(vma); return 0; } static ssize_t write_full(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { return -ENOSPC; } /* * Special lseek() function for /dev/null and /dev/zero. Most notably, you * can fopen() both devices with "a" now. This was previously impossible. * -- SRB. */ static loff_t null_lseek(struct file *file, loff_t offset, int orig) { return file->f_pos = 0; } #if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT) /* * The memory devices use the full 32/64 bits of the offset, and so we cannot * check against negative addresses: they are ok. The return value is weird, * though, in that case (0). * * also note that seeking relative to the "end of file" isn't supported: * it has no meaning, so it returns -EINVAL. 
*/ static loff_t memory_lseek(struct file *file, loff_t offset, int orig) { loff_t ret; mutex_lock(&file->f_path.dentry->d_inode->i_mutex); switch (orig) { case SEEK_CUR: offset += file->f_pos; case SEEK_SET: /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */ if ((unsigned long long)offset >= ~0xFFFULL) { ret = -EOVERFLOW; break; } file->f_pos = offset; ret = file->f_pos; force_successful_syscall_return(); break; default: ret = -EINVAL; } mutex_unlock(&file->f_path.dentry->d_inode->i_mutex); return ret; } #endif #if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT) static int open_port(struct inode * inode, struct file * filp) { return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; } #endif #define zero_lseek null_lseek #define full_lseek null_lseek #define write_zero write_null #define read_full read_zero #define open_mem open_port #define open_kmem open_mem #define open_oldmem open_mem #ifdef CONFIG_DEVMEM static const struct file_operations mem_fops = { .llseek = memory_lseek, .read = read_mem, .write = write_mem, .mmap = mmap_mem, .open = open_mem, .get_unmapped_area = get_unmapped_area_mem, }; #endif #ifdef CONFIG_DEVKMEM static const struct file_operations kmem_fops = { .llseek = memory_lseek, .read = read_kmem, .write = write_kmem, .mmap = mmap_kmem, .open = open_kmem, .get_unmapped_area = get_unmapped_area_mem, }; #endif static const struct file_operations null_fops = { .llseek = null_lseek, .read = read_null, .write = write_null, .splice_write = splice_write_null, }; #ifdef CONFIG_DEVPORT static const struct file_operations port_fops = { .llseek = memory_lseek, .read = read_port, .write = write_port, .open = open_port, }; #endif static const struct file_operations zero_fops = { .llseek = zero_lseek, .read = read_zero, .write = write_zero, .mmap = mmap_zero, }; /* * capabilities for /dev/zero * - permits private mappings, "copies" are taken of the source of zeros * - no writeback happens */ static struct backing_dev_info zero_bdi = { 
.name = "char/mem", .capabilities = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK, }; static const struct file_operations full_fops = { .llseek = full_lseek, .read = read_full, .write = write_full, }; #ifdef CONFIG_CRASH_DUMP static const struct file_operations oldmem_fops = { .read = read_oldmem, .open = open_oldmem, .llseek = default_llseek, }; #endif static ssize_t kmsg_writev(struct kiocb *iocb, const struct iovec *iv, unsigned long count, loff_t pos) { char *line, *p; int i; ssize_t ret = -EFAULT; size_t len = iov_length(iv, count); line = kmalloc(len + 1, GFP_KERNEL); if (line == NULL) return -ENOMEM; /* * copy all vectors into a single string, to ensure we do * not interleave our log line with other printk calls */ p = line; for (i = 0; i < count; i++) { if (copy_from_user(p, iv[i].iov_base, iv[i].iov_len)) goto out; p += iv[i].iov_len; } p[0] = '\0'; ret = printk("%s", line); /* printk can add a prefix */ if (ret > len) ret = len; out: kfree(line); return ret; } static const struct file_operations kmsg_fops = { .aio_write = kmsg_writev, .llseek = noop_llseek, }; static const struct memdev { const char *name; mode_t mode; const struct file_operations *fops; struct backing_dev_info *dev_info; } devlist[] = { #ifdef CONFIG_DEVMEM [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi }, #endif #ifdef CONFIG_DEVKMEM [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi }, #endif [3] = { "null", 0666, &null_fops, NULL }, #ifdef CONFIG_DEVPORT [4] = { "port", 0, &port_fops, NULL }, #endif [5] = { "zero", 0666, &zero_fops, &zero_bdi }, [7] = { "full", 0666, &full_fops, NULL }, [8] = { "random", 0666, &random_fops, NULL }, [9] = { "urandom", 0666, &urandom_fops, NULL }, [11] = { "kmsg", 0, &kmsg_fops, NULL }, #ifdef CONFIG_CRASH_DUMP [12] = { "oldmem", 0, &oldmem_fops, NULL }, #endif }; static int memory_open(struct inode *inode, struct file *filp) { int minor; const struct memdev *dev; minor = iminor(inode); if (minor >= ARRAY_SIZE(devlist)) return 
-ENXIO; dev = &devlist[minor]; if (!dev->fops) return -ENXIO; filp->f_op = dev->fops; if (dev->dev_info) filp->f_mapping->backing_dev_info = dev->dev_info; /* Is /dev/mem or /dev/kmem ? */ if (dev->dev_info == &directly_mappable_cdev_bdi) filp->f_mode |= FMODE_UNSIGNED_OFFSET; if (dev->fops->open) return dev->fops->open(inode, filp); return 0; } static const struct file_operations memory_fops = { .open = memory_open, .llseek = noop_llseek, }; static char *mem_devnode(struct device *dev, mode_t *mode) { if (mode && devlist[MINOR(dev->devt)].mode) *mode = devlist[MINOR(dev->devt)].mode; return NULL; } static struct class *mem_class; static int __init chr_dev_init(void) { int minor; int err; err = bdi_init(&zero_bdi); if (err) return err; if (register_chrdev(MEM_MAJOR, "mem", &memory_fops)) printk("unable to get major %d for memory devs\n", MEM_MAJOR); mem_class = class_create(THIS_MODULE, "mem"); if (IS_ERR(mem_class)) return PTR_ERR(mem_class); mem_class->devnode = mem_devnode; for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) { if (!devlist[minor].name) continue; device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor), NULL, devlist[minor].name); } return tty_init(); } fs_initcall(chr_dev_init);
gpl-2.0
Renesas-EMEV2/Kernel
drivers/scsi/nsp32_debug.c
2037
7494
/* * Workbit NinjaSCSI-32Bi/UDE PCI/CardBus SCSI Host Bus Adapter driver * Debug routine * * This software may be used and distributed according to the terms of * the GNU General Public License. */ /* * Show the command data of a command */ static const char unknown[] = "UNKNOWN"; static const char * group_0_commands[] = { /* 00-03 */ "Test Unit Ready", "Rezero Unit", unknown, "Request Sense", /* 04-07 */ "Format Unit", "Read Block Limits", unknown, "Reasssign Blocks", /* 08-0d */ "Read (6)", unknown, "Write (6)", "Seek (6)", unknown, unknown, /* 0e-12 */ unknown, "Read Reverse", "Write Filemarks", "Space", "Inquiry", /* 13-16 */ unknown, "Recover Buffered Data", "Mode Select", "Reserve", /* 17-1b */ "Release", "Copy", "Erase", "Mode Sense", "Start/Stop Unit", /* 1c-1d */ "Receive Diagnostic", "Send Diagnostic", /* 1e-1f */ "Prevent/Allow Medium Removal", unknown, }; static const char *group_1_commands[] = { /* 20-22 */ unknown, unknown, unknown, /* 23-28 */ unknown, unknown, "Read Capacity", unknown, unknown, "Read (10)", /* 29-2d */ unknown, "Write (10)", "Seek (10)", unknown, unknown, /* 2e-31 */ "Write Verify","Verify", "Search High", "Search Equal", /* 32-34 */ "Search Low", "Set Limits", "Prefetch or Read Position", /* 35-37 */ "Synchronize Cache","Lock/Unlock Cache", "Read Defect Data", /* 38-3c */ "Medium Scan", "Compare","Copy Verify", "Write Buffer", "Read Buffer", /* 3d-3f */ "Update Block", "Read Long", "Write Long", }; static const char *group_2_commands[] = { /* 40-41 */ "Change Definition", "Write Same", /* 42-48 */ "Read Sub-Ch(cd)", "Read TOC", "Read Header(cd)", "Play Audio(cd)", unknown, "Play Audio MSF(cd)", "Play Audio Track/Index(cd)", /* 49-4f */ "Play Track Relative(10)(cd)", unknown, "Pause/Resume(cd)", "Log Select", "Log Sense", unknown, unknown, /* 50-55 */ unknown, unknown, unknown, unknown, unknown, "Mode Select (10)", /* 56-5b */ unknown, unknown, unknown, unknown, "Mode Sense (10)", unknown, /* 5c-5f */ unknown, unknown, unknown, }; 
/*
 * Opcode group decode: bits 7:5 of a SCSI opcode select the command
 * group, bits 4:0 index into that group's 32-entry name table.
 */
#define group(opcode) (((opcode) >> 5) & 7)

/*
 * Sentinel values stored *in place of* table pointers in commands[]
 * for groups that have no name table.  print_opcodek() compares the
 * fetched pointer against these small integers before dereferencing.
 */
#define RESERVED_GROUP  0
#define VENDOR_GROUP    1
#define NOTEXT_GROUP    2

/* One entry per opcode group 0-7; groups 3-7 carry sentinels, not tables. */
static const char **commands[] = {
	group_0_commands, group_1_commands, group_2_commands,
	(const char **) RESERVED_GROUP,
	(const char **) RESERVED_GROUP,
	(const char **) NOTEXT_GROUP,
	(const char **) VENDOR_GROUP,
	(const char **) VENDOR_GROUP
};

static const char reserved[] = "RESERVED";
static const char vendor[] = "VENDOR SPECIFIC";

/* Print the symbolic name (or a fallback tag) of one SCSI opcode byte. */
static void print_opcodek(unsigned char opcode)
{
	const char **table = commands[ group(opcode) ];

	switch ((unsigned long) table) {
	case RESERVED_GROUP:
		printk("%s[%02x] ", reserved, opcode);
		break;
	case NOTEXT_GROUP:
		printk("%s(notext)[%02x] ", unknown, opcode);
		break;
	case VENDOR_GROUP:
		printk("%s[%02x] ", vendor, opcode);
		break;
	default:
		/* real table: 5-bit index selects the name */
		if (table[opcode & 0x1f] != unknown)
			printk("%s[%02x] ", table[opcode & 0x1f], opcode);
		else
			printk("%s[%02x] ", unknown, opcode);
		break;
	}
}

/*
 * Dump a whole CDB: opcode name, the remaining raw bytes in hex, and a
 * decoded LBA/length for the 6-, 10- and 12-byte command formats.
 */
static void print_commandk (unsigned char *command)
{
	int i,s;
//	printk(KERN_DEBUG);
	print_opcodek(command[0]);
	/*printk(KERN_DEBUG "%s ", __func__);*/
	if ((command[0] >> 5) == 6 ||
	    (command[0] >> 5) == 7 ) {
		s = 12;	/* vendor specific: no standard size, dump 12 bytes */
	} else {
		s = COMMAND_SIZE(command[0]);
	}
	/* raw CDB bytes after the opcode */
	for ( i = 1; i < s; ++i) {
		printk("%02x ", command[i]);
	}

	switch (s) {
	case 6:
		/* 6-byte CDB: 21-bit LBA in bytes 1-3, length in byte 4 */
		printk("LBA=%d len=%d",
		       (((unsigned int)command[1] & 0x0f) << 16) |
		       ( (unsigned int)command[2]         <<  8) |
		       ( (unsigned int)command[3]              ),
		       (unsigned int)command[4]
			);
		break;
	case 10:
		/* 10-byte CDB: 32-bit LBA in bytes 2-5, 16-bit len in 7-8 */
		printk("LBA=%d len=%d",
		       ((unsigned int)command[2] << 24) |
		       ((unsigned int)command[3] << 16) |
		       ((unsigned int)command[4] <<  8) |
		       ((unsigned int)command[5]      ),
		       ((unsigned int)command[7] <<  8) |
		       ((unsigned int)command[8]      ) );
		break;
	case 12:
		/* 12-byte CDB: 32-bit LBA in bytes 2-5, 32-bit len in 6-9 */
		printk("LBA=%d len=%d",
		       ((unsigned int)command[2] << 24) |
		       ((unsigned int)command[3] << 16) |
		       ((unsigned int)command[4] <<  8) |
		       ((unsigned int)command[5]      ),
		       ((unsigned int)command[6] << 24) |
		       ((unsigned int)command[7] << 16) |
		       ((unsigned int)command[8] <<  8) |
		       ((unsigned int)command[9]      )
			);
		break;
	default:
		break;
	}
	printk("\n");
}

/* Convenience wrapper: dump the CDB attached to a queued SCSI command. */
static void show_command(Scsi_Cmnd *SCpnt)
{
	print_commandk(SCpnt->cmnd);
}

/* Print the symbolic name of a SCSI bus phase value. */
static void show_busphase(unsigned char stat)
{
	switch(stat) {
	case BUSPHASE_COMMAND:
		printk( "BUSPHASE_COMMAND\n");
		break;
	case BUSPHASE_MESSAGE_IN:
		printk( "BUSPHASE_MESSAGE_IN\n");
		break;
	case BUSPHASE_MESSAGE_OUT:
		printk( "BUSPHASE_MESSAGE_OUT\n");
		break;
	case BUSPHASE_DATA_IN:
		printk( "BUSPHASE_DATA_IN\n");
		break;
	case BUSPHASE_DATA_OUT:
		printk( "BUSPHASE_DATA_OUT\n");
		break;
	case BUSPHASE_STATUS:
		printk( "BUSPHASE_STATUS\n");
		break;
	case BUSPHASE_SELECT:
		printk( "BUSPHASE_SELECT\n");
		break;
	default:
		printk( "BUSPHASE_other: 0x%x\n", stat);
		break;
	}
}

/* Decode an AUTOSCSI status word, one short tag per set flag. */
static void show_autophase(unsigned short i)
{
	printk("auto: 0x%x,", i);

	if(i & COMMAND_PHASE) {
		printk(" cmd");
	}
	if(i & DATA_IN_PHASE) {
		printk(" din");
	}
	if(i & DATA_OUT_PHASE) {
		printk(" dout");
	}
	if(i & MSGOUT_PHASE) {
		printk(" mout");
	}
	if(i & STATUS_PHASE) {
		printk(" stat");
	}
	if(i & ILLEGAL_PHASE) {
		printk(" ill");
	}
	if(i & BUS_FREE_OCCUER) {
		printk(" bfree-o");
	}
	if(i & MSG_IN_OCCUER) {
		printk(" min-o");
	}
	if(i & MSG_OUT_OCCUER) {
		printk(" mout-o");
	}
	if(i & SELECTION_TIMEOUT) {
		printk(" sel");
	}
	if(i & MSGIN_00_VALID) {
		printk(" m0");
	}
	if(i & MSGIN_02_VALID) {
		printk(" m2");
	}
	if(i & MSGIN_03_VALID) {
		printk(" m3");
	}
	if(i & MSGIN_04_VALID) {
		printk(" m4");
	}
	if(i & AUTOSCSI_BUSY) {
		printk(" busy");
	}

	printk("\n");
}

/*
 * Dump the chip's register file for debugging.  Gated by
 * NSP32_SPECIAL_PRINT_REGISTER so it costs nothing in normal builds.
 */
static void nsp32_print_register(int base)
{
	if (!(NSP32_DEBUG_MASK & NSP32_SPECIAL_PRINT_REGISTER))
		return;

	printk("Phase=0x%x, ",          nsp32_read1(base, SCSI_BUS_MONITOR));
	printk("OldPhase=0x%x, ",       nsp32_index_read1(base, OLD_SCSI_PHASE));
	printk("syncreg=0x%x, ",        nsp32_read1(base, SYNC_REG));
	printk("ackwidth=0x%x, ",       nsp32_read1(base, ACK_WIDTH));
	printk("sgtpaddr=0x%lx, ",      nsp32_read4(base, SGT_ADR));
	printk("scsioutlatch=0x%x, ",   nsp32_read1(base, SCSI_OUT_LATCH_TARGET_ID));
	printk("msgout=0x%lx, ",        nsp32_read4(base, SCSI_MSG_OUT));
	/* NOTE(review): label says "miscrd" but the index read uses MISC_WR;
	 * verify against nsp32.h whether MISC_WR/MISC_RD share an index. */
	printk("miscrd=0x%x, ",         nsp32_index_read2(base, MISC_WR));
	printk("seltimeout=0x%x, ",     nsp32_read2(base, SEL_TIME_OUT));
	printk("sreqrate=0x%x, ",       nsp32_read1(base, SREQ_SMPL_RATE));
	printk("transStatus=0x%x, ",    nsp32_read2(base, TRANSFER_STATUS));
	/* NOTE(review): "reselectid" label reads COMMAND_CONTROL — confirm */
	printk("reselectid=0x%x, ",     nsp32_read2(base, COMMAND_CONTROL));
	printk("arbit=0x%x, ",          nsp32_read1(base, ARBIT_STATUS));
	printk("BmStart=0x%lx, ",       nsp32_read4(base, BM_START_ADR));
	printk("BmCount=0x%lx, ",       nsp32_read4(base, BM_CNT));
	printk("SackCnt=0x%lx, ",       nsp32_read4(base, SACK_CNT));
	printk("SReqCnt=0x%lx, ",       nsp32_read4(base, SREQ_CNT));
	printk("SavedSackCnt=0x%lx, ",  nsp32_read4(base, SAVED_SACK_CNT));
	printk("ScsiBusControl=0x%x, ", nsp32_read1(base, SCSI_BUS_CONTROL));
	printk("FifoRestCnt=0x%x, ",    nsp32_read2(base, FIFO_REST_CNT));
	printk("CdbIn=0x%x, ",          nsp32_read1(base, SCSI_CSB_IN));
	printk("\n");

	if (0) {
		/* dead code kept for manual debugging: extra status regs */
		printk("execph=0x%x, ",    nsp32_read2(base, SCSI_EXECUTE_PHASE));
		printk("IrqStatus=0x%x, ", nsp32_read2(base, IRQ_STATUS));
		printk("\n");
	}
}

/* end */
gpl-2.0
EPDCenter/android_kernel_archos_97b_Titan
arch/arm/mach-at91/board-eco920.c
2293
3619
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mtd/physmap.h>
#include <linux/gpio.h>

#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include <mach/board.h>
#include <mach/at91rm9200_mc.h>
#include <mach.h>
#include <mach/cpu.h>

#include "generic.h"

/* Early SoC setup: package type, clocks, LEDs and the debug console. */
static void __init eco920_init_early(void)
{
	/* Set cpu type: PQFP */
	at91rm9200_set_type(ARCH_REVISON_9200_PQFP);

	/* 18.432 MHz main crystal */
	at91rm9200_initialize(18432000);

	/* Setup the LEDs */
	at91_init_leds(AT91_PIN_PB0, AT91_PIN_PB1);

	/* DBGU on ttyS0. (Rx & Tx only) */
	at91_register_uart(0, 0, 0);

	/* set serial console to ttyS0 (ie, DBGU) */
	at91_set_serial_console(0);
}

/* Interrupt controller init with the default IRQ priority table. */
static void __init eco920_init_irq(void)
{
	at91rm9200_init_interrupts(NULL);
}

/* Ethernet: RMII PHY with its interrupt on PC2. */
static struct at91_eth_data __initdata eco920_eth_data = {
	.phy_irq_pin	= AT91_PIN_PC2,
	.is_rmii	= 1,
};

/* One USB host port. */
static struct at91_usbh_data __initdata eco920_usbh_data = {
	.ports		= 1,
};

/* USB device port: VBUS sense on PB12, pull-up control on PB13. */
static struct at91_udc_data __initdata eco920_udc_data = {
	.vbus_pin	= AT91_PIN_PB12,
	.pullup_pin	= AT91_PIN_PB13,
};

/* MMC in slot A, 1-bit bus. */
static struct at91_mmc_data __initdata eco920_mmc_data = {
	.slot_b		= 0,
	.wire4		= 0,
};

/* 16 MiB of 16-bit-wide NOR flash mapped at 0x11000000. */
static struct physmap_flash_data eco920_flash_data = {
	.width  = 2,
};

static struct resource eco920_flash_resource = {
	.start		= 0x11000000,
	.end		= 0x11ffffff,
	.flags		= IORESOURCE_MEM,
};

static struct platform_device eco920_flash = {
	.name		= "physmap-flash",
	.id		= 0,
	.dev		= {
		.platform_data	= &eco920_flash_data,
	},
	.resource	= &eco920_flash_resource,
	.num_resources	= 1,
};

static struct spi_board_info eco920_spi_devices[] = {
	{	/* CAN controller */
		/* NOTE(review): modalias "tlv5638" is a TI DAC, but the
		 * original comment said "CAN controller" — verify which
		 * device actually sits on chip select 3. */
		.modalias	= "tlv5638",
		.chip_select	= 3,
		.max_speed_hz	= 20 * 1000 * 1000,
		.mode		= SPI_CPHA,
	},
};

/*
 * Register all on-board devices.  The SMC chip-select timing writes
 * must happen before the devices that use those chip selects are
 * brought up, so the ordering below is significant.
 */
static void __init eco920_board_init(void)
{
	at91_add_device_serial();
	at91_add_device_eth(&eco920_eth_data);
	at91_add_device_usbh(&eco920_usbh_data);
	at91_add_device_udc(&eco920_udc_data);
	at91_add_device_mmc(0, &eco920_mmc_data);
	platform_device_register(&eco920_flash);

	/* Chip select 7: 8-bit peripheral, 15 wait states */
	at91_sys_write(AT91_SMC_CSR(7),	AT91_SMC_RWHOLD_(1)
				| AT91_SMC_RWSETUP_(1)
				| AT91_SMC_DBW_8
				| AT91_SMC_WSEN
				| AT91_SMC_NWS_(15));

	at91_set_A_periph(AT91_PIN_PC6, 1);

	at91_set_gpio_input(AT91_PIN_PA23, 0);
	at91_set_deglitch(AT91_PIN_PA23, 1);

/* Initialization of the Static Memory Controller for Chip Select 3 */
	at91_sys_write(AT91_SMC_CSR(3),
		AT91_SMC_DBW_16  |	/* 16 bit */
		AT91_SMC_WSEN    |
		AT91_SMC_NWS_(5) |	/* wait states */
		AT91_SMC_TDF_(1)	/* float time */
	);

	at91_add_device_spi(eco920_spi_devices, ARRAY_SIZE(eco920_spi_devices));
}

MACHINE_START(ECO920, "eco920")
	/* Maintainer: Sascha Hauer */
	.timer		= &at91rm9200_timer,
	.map_io		= at91rm9200_map_io,
	.init_early	= eco920_init_early,
	.init_irq	= eco920_init_irq,
	.init_machine	= eco920_board_init,
MACHINE_END
gpl-2.0
XileForce/Vindicator-S6-Uni-Old
drivers/net/phy/mdio_bus.c
2293
10842
/*
 * drivers/net/phy/mdio_bus.c
 *
 * MDIO Bus interface
 *
 * Author: Andy Fleming
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/**
 * mdiobus_alloc_size - allocate a mii_bus structure
 * @size: extra amount of memory to allocate for private storage.
 * If non-zero, then bus->priv is points to that memory.
 *
 * Description: called by a bus driver to allocate an mii_bus
 * structure to fill in.
 */
struct mii_bus *mdiobus_alloc_size(size_t size)
{
	struct mii_bus *bus;
	size_t aligned_size = ALIGN(sizeof(*bus), NETDEV_ALIGN);
	size_t alloc_size;

	/* If we alloc extra space, it should be aligned */
	if (size)
		alloc_size = aligned_size + size;
	else
		alloc_size = sizeof(*bus);

	bus = kzalloc(alloc_size, GFP_KERNEL);
	if (bus) {
		bus->state = MDIOBUS_ALLOCATED;
		if (size)
			/* priv lives in the same allocation, right after
			 * the (NETDEV_ALIGN-aligned) mii_bus itself */
			bus->priv = (void *)bus + aligned_size;
	}

	return bus;
}
EXPORT_SYMBOL(mdiobus_alloc_size);

/**
 * mdiobus_release - mii_bus device release callback
 * @d: the target struct device that contains the mii_bus
 *
 * Description: called when the last reference to an mii_bus is
 * dropped, to free the underlying memory.
 */
static void mdiobus_release(struct device *d)
{
	struct mii_bus *bus = to_mii_bus(d);
	BUG_ON(bus->state != MDIOBUS_RELEASED &&
	       /* for compatibility with error handling in drivers */
	       bus->state != MDIOBUS_ALLOCATED);
	kfree(bus);
}

static struct class mdio_bus_class = {
	.name		= "mdio_bus",
	.dev_release	= mdiobus_release,
};

#if IS_ENABLED(CONFIG_OF_MDIO)
/* Helper function for of_mdio_find_bus */
static int of_mdio_bus_match(struct device *dev, const void *mdio_bus_np)
{
	return dev->of_node == mdio_bus_np;
}
/**
 * of_mdio_find_bus - Given an mii_bus node, find the mii_bus.
 * @mdio_bus_np: Pointer to the mii_bus.
 *
 * Returns a pointer to the mii_bus, or NULL if none found.
 *
 * Because the association of a device_node and mii_bus is made via
 * of_mdiobus_register(), the mii_bus cannot be found before it is
 * registered with of_mdiobus_register().
 *
 */
struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np)
{
	struct device *d;

	if (!mdio_bus_np)
		return NULL;

	d = class_find_device(&mdio_bus_class, NULL,  mdio_bus_np,
			      of_mdio_bus_match);

	return d ? to_mii_bus(d) : NULL;
}
EXPORT_SYMBOL(of_mdio_find_bus);
#endif

/**
 * mdiobus_register - bring up all the PHYs on a given bus and attach them to bus
 * @bus: target mii_bus
 *
 * Description: Called by a bus driver to bring up all the PHYs
 *   on a given bus, and attach them to the bus.
 *
 * Returns 0 on success or < 0 on error.
 */
int mdiobus_register(struct mii_bus *bus)
{
	int i, err;

	if (NULL == bus || NULL == bus->name ||
			NULL == bus->read ||
			NULL == bus->write)
		return -EINVAL;

	BUG_ON(bus->state != MDIOBUS_ALLOCATED &&
	       bus->state != MDIOBUS_UNREGISTERED);

	bus->dev.parent = bus->parent;
	bus->dev.class = &mdio_bus_class;
	bus->dev.groups = NULL;
	dev_set_name(&bus->dev, "%s", bus->id);

	err = device_register(&bus->dev);
	if (err) {
		pr_err("mii_bus %s failed to register\n", bus->id);
		return -EINVAL;
	}

	mutex_init(&bus->mdio_lock);

	if (bus->reset)
		bus->reset(bus);

	/* probe every address not masked out by phy_mask */
	for (i = 0; i < PHY_MAX_ADDR; i++) {
		if ((bus->phy_mask & (1 << i)) == 0) {
			struct phy_device *phydev;

			phydev = mdiobus_scan(bus, i);
			if (IS_ERR(phydev)) {
				err = PTR_ERR(phydev);
				goto error;
			}
		}
	}

	bus->state = MDIOBUS_REGISTERED;
	pr_info("%s: probed\n", bus->name);
	return 0;

error:
	/* tear down every PHY registered before the failure */
	while (--i >= 0) {
		if (bus->phy_map[i])
			device_unregister(&bus->phy_map[i]->dev);
	}
	device_del(&bus->dev);
	return err;
}
EXPORT_SYMBOL(mdiobus_register);

/**
 * mdiobus_unregister - bring down all PHYs and unregister the bus device
 * @bus: target mii_bus
 *
 * Description: Reverses mdiobus_register(): unregisters each PHY
 * device found during the scan, then the bus device itself.  The
 * mii_bus memory is not freed here; call mdiobus_free() for that.
 */
void mdiobus_unregister(struct mii_bus *bus)
{
	int i;

	BUG_ON(bus->state != MDIOBUS_REGISTERED);
	bus->state = MDIOBUS_UNREGISTERED;

	device_del(&bus->dev);
	for (i = 0; i < PHY_MAX_ADDR; i++) {
		if (bus->phy_map[i])
			device_unregister(&bus->phy_map[i]->dev);
		bus->phy_map[i] = NULL;
	}
}
EXPORT_SYMBOL(mdiobus_unregister);

/**
 * mdiobus_free - free a struct mii_bus
 * @bus: mii_bus to free
 *
 * This function releases the reference to the underlying device
 * object in the mii_bus.  If this is the last reference, the mii_bus
 * will be freed.
 */
void mdiobus_free(struct mii_bus *bus)
{
	/*
	 * For compatibility with error handling in drivers.
	 */
	if (bus->state == MDIOBUS_ALLOCATED) {
		kfree(bus);
		return;
	}

	BUG_ON(bus->state != MDIOBUS_UNREGISTERED);
	bus->state = MDIOBUS_RELEASED;

	put_device(&bus->dev);
}
EXPORT_SYMBOL(mdiobus_free);

/**
 * mdiobus_scan - scan one bus address for a PHY and register it
 * @bus: mii_bus to scan
 * @addr: bus address to scan
 *
 * Returns the new phy_device on success, NULL when either no device
 * was found or registering a found device failed, or an ERR_PTR from
 * get_phy_device() on probe error.
 */
struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
{
	struct phy_device *phydev;
	int err;

	phydev = get_phy_device(bus, addr, false);
	if (IS_ERR(phydev) || phydev == NULL)
		return phydev;

	err = phy_device_register(phydev);
	if (err) {
		phy_device_free(phydev);
		return NULL;
	}

	return phydev;
}
EXPORT_SYMBOL(mdiobus_scan);

/**
 * mdiobus_read - Convenience function for reading a given MII mgmt register
 * @bus: the mii_bus struct
 * @addr: the phy address
 * @regnum: register number to read
 *
 * NOTE: MUST NOT be called from interrupt context,
 * because the bus read/write functions may wait for an interrupt
 * to conclude the operation.
 */
int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
{
	int retval;

	BUG_ON(in_interrupt());

	mutex_lock(&bus->mdio_lock);
	retval = bus->read(bus, addr, regnum);
	mutex_unlock(&bus->mdio_lock);

	return retval;
}
EXPORT_SYMBOL(mdiobus_read);

/**
 * mdiobus_write - Convenience function for writing a given MII mgmt register
 * @bus: the mii_bus struct
 * @addr: the phy address
 * @regnum: register number to write
 * @val: value to write to @regnum
 *
 * NOTE: MUST NOT be called from interrupt context,
 * because the bus read/write functions may wait for an interrupt
 * to conclude the operation.
 */
int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
{
	int err;

	BUG_ON(in_interrupt());

	mutex_lock(&bus->mdio_lock);
	err = bus->write(bus, addr, regnum, val);
	mutex_unlock(&bus->mdio_lock);

	return err;
}
EXPORT_SYMBOL(mdiobus_write);

/**
 * mdio_bus_match - determine if given PHY driver supports the given PHY device
 * @dev: target PHY device
 * @drv: given PHY driver
 *
 * Description: Given a PHY device, and a PHY driver, return 1 if
 *   the driver supports the device.  Otherwise, return 0.
 */
static int mdio_bus_match(struct device *dev, struct device_driver *drv)
{
	struct phy_device *phydev = to_phy_device(dev);
	struct phy_driver *phydrv = to_phy_driver(drv);

	if (of_driver_match_device(dev, drv))
		return 1;

	if (phydrv->match_phy_device)
		return phydrv->match_phy_device(phydev);

	return ((phydrv->phy_id & phydrv->phy_id_mask) ==
		(phydev->phy_id & phydrv->phy_id_mask));
}

#ifdef CONFIG_PM

/* Decide whether the PHY may be powered down across a system suspend. */
static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
{
	struct device_driver *drv = phydev->dev.driver;
	struct phy_driver *phydrv = to_phy_driver(drv);
	struct net_device *netdev = phydev->attached_dev;

	if (!drv || !phydrv->suspend)
		return false;

	/* PHY not attached? May suspend. */
	if (!netdev)
		return true;

	/*
	 * Don't suspend PHY if the attched netdev parent may wakeup.
	 * The parent may point to a PCI device, as in tg3 driver.
	 */
	if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent))
		return false;

	/*
	 * Also don't suspend PHY if the netdev itself may wakeup. This
	 * is the case for devices w/o underlaying pwr. mgmt. aware bus,
	 * e.g. SoC devices.
	 */
	if (device_may_wakeup(&netdev->dev))
		return false;

	return true;
}

/* System-suspend callback: stop the PHY state machine, then power down. */
static int mdio_bus_suspend(struct device *dev)
{
	struct phy_driver *phydrv = to_phy_driver(dev->driver);
	struct phy_device *phydev = to_phy_device(dev);

	/*
	 * We must stop the state machine manually, otherwise it stops out of
	 * control, possibly with the phydev->lock held. Upon resume, netdev
	 * may call phy routines that try to grab the same lock, and that may
	 * lead to a deadlock.
	 */
	if (phydev->attached_dev && phydev->adjust_link)
		phy_stop_machine(phydev);

	if (!mdio_bus_phy_may_suspend(phydev))
		return 0;

	return phydrv->suspend(phydev);
}

/* System-resume callback: power the PHY up and restart the state machine. */
static int mdio_bus_resume(struct device *dev)
{
	struct phy_driver *phydrv = to_phy_driver(dev->driver);
	struct phy_device *phydev = to_phy_device(dev);
	int ret;

	if (!mdio_bus_phy_may_suspend(phydev))
		goto no_resume;

	ret = phydrv->resume(phydev);
	if (ret < 0)
		return ret;

no_resume:
	if (phydev->attached_dev && phydev->adjust_link)
		phy_start_machine(phydev, NULL);

	return 0;
}

/* Hibernation-restore callback: re-init hardware and force renegotiation. */
static int mdio_bus_restore(struct device *dev)
{
	struct phy_device *phydev = to_phy_device(dev);
	struct net_device *netdev = phydev->attached_dev;
	int ret;

	if (!netdev)
		return 0;

	ret = phy_init_hw(phydev);
	if (ret < 0)
		return ret;

	/* The PHY needs to renegotiate. */
	phydev->link = 0;
	phydev->state = PHY_UP;

	phy_start_machine(phydev, NULL);

	return 0;
}

static struct dev_pm_ops mdio_bus_pm_ops = {
	.suspend = mdio_bus_suspend,
	.resume = mdio_bus_resume,
	.freeze = mdio_bus_suspend,
	.thaw = mdio_bus_resume,
	.restore = mdio_bus_restore,
};

#define MDIO_BUS_PM_OPS (&mdio_bus_pm_ops)

#else

#define MDIO_BUS_PM_OPS NULL

#endif /* CONFIG_PM */

/* sysfs "phy_id" attribute: the PHY's 32-bit ID in hex. */
static ssize_t
phy_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct phy_device *phydev = to_phy_device(dev);

	return sprintf(buf, "0x%.8lx\n", (unsigned long)phydev->phy_id);
}

static struct device_attribute mdio_dev_attrs[] = {
	__ATTR_RO(phy_id),
	__ATTR_NULL
};

struct bus_type mdio_bus_type = {
	.name		= "mdio_bus",
	.match		= mdio_bus_match,
	.pm		= MDIO_BUS_PM_OPS,
	.dev_attrs	= mdio_dev_attrs,
};
EXPORT_SYMBOL(mdio_bus_type);

/* Register the mdio_bus class and bus type; undo the class on failure. */
int __init mdio_bus_init(void)
{
	int ret;

	ret = class_register(&mdio_bus_class);
	if (!ret) {
		ret = bus_register(&mdio_bus_type);
		if (ret)
			class_unregister(&mdio_bus_class);
	}

	return ret;
}

/* Tear down what mdio_bus_init() registered. */
void mdio_bus_exit(void)
{
	class_unregister(&mdio_bus_class);
	bus_unregister(&mdio_bus_type);
}
gpl-2.0
RenderBroken/msm8974_G2-CAF_render_kernel
drivers/scsi/lpfc/lpfc_attr.c
4853
151251
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/aer.h>
#include <linux/gfp.h>
#include <linux/kernel.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_version.h"
#include "lpfc_compat.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

/* devloss timeout bounds (seconds) */
#define LPFC_DEF_DEVLOSS_TMO 30
#define LPFC_MIN_DEVLOSS_TMO 1
#define LPFC_MAX_DEVLOSS_TMO 255

/*
 * Write key size should be multiple of 4. If write key is changed
 * make sure that library write key is also changed.
 */
#define LPFC_REG_WRITE_KEY_SIZE	4
#define LPFC_REG_WRITE_KEY	"EMLX"

/**
 * lpfc_jedec_to_ascii - Hex to ascii convertor according to JEDEC rules
 * @incr: integer to convert.
 * @hdw: ascii string holding converted integer plus a string terminator.
 *
 * Description:
 * JEDEC Joint Electron Device Engineering Council.
 * Convert a 32 bit integer composed of 8 nibbles into an 8 byte ascii
 * character string. The string is then terminated with a NULL in byte 9.
 * Hex 0-9 becomes ascii '0' to '9'.
 * Hex a-f becomes ascii 'a' to 'f' (0x61 + nibble - 10); the previous
 * comment's claim of "'=' to 'B'" did not match the code.
 *
 * Notes:
 * Coded for 32 bit integers only.
 **/
static void
lpfc_jedec_to_ascii(int incr, char hdw[])
{
	int i, j;
	/* consume the integer low-nibble first, filling hdw right-to-left */
	for (i = 0; i < 8; i++) {
		j = (incr & 0xf);
		if (j <= 9)
			hdw[7 - i] = 0x30 +  j;
		 else
			hdw[7 - i] = 0x61 + j - 10;
		incr = (incr >> 4);
	}
	hdw[8] = 0;
	return;
}

/**
 * lpfc_drvr_version_show - Return the Emulex driver string with version number
 * @dev: class unused variable.
 * @attr: device attribute, not used.
 * @buf: on return contains the module description text.
 *
 * Returns: size of formatted string.
 **/
static ssize_t
lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
}

/**
 * lpfc_enable_fip_show - Return the fip mode of the HBA
 * @dev: class unused variable.
 * @attr: device attribute, not used.
 * @buf: on return contains the module description text.
 *
 * Returns: size of formatted string.
**/ static ssize_t lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; if (phba->hba_flag & HBA_FIP_SUPPORT) return snprintf(buf, PAGE_SIZE, "1\n"); else return snprintf(buf, PAGE_SIZE, "0\n"); } static ssize_t lpfc_bg_info_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; if (phba->cfg_enable_bg) if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) return snprintf(buf, PAGE_SIZE, "BlockGuard Enabled\n"); else return snprintf(buf, PAGE_SIZE, "BlockGuard Not Supported\n"); else return snprintf(buf, PAGE_SIZE, "BlockGuard Disabled\n"); } static ssize_t lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%llu\n", (unsigned long long)phba->bg_guard_err_cnt); } static ssize_t lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%llu\n", (unsigned long long)phba->bg_apptag_err_cnt); } static ssize_t lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%llu\n", (unsigned long long)phba->bg_reftag_err_cnt); } /** * lpfc_info_show - Return some pci info about the host in ascii * @dev: class 
converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the formatted text from lpfc_info(). * * Returns: size of formatted string. **/ static ssize_t lpfc_info_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *host = class_to_shost(dev); return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host)); } /** * lpfc_serialnum_show - Return the hba serial number in ascii * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the formatted text serial number. * * Returns: size of formatted string. **/ static ssize_t lpfc_serialnum_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber); } /** * lpfc_temp_sensor_show - Return the temperature sensor level * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the formatted support level. * * Description: * Returns a number indicating the temperature sensor level currently * supported, zero or one in ascii. * * Returns: size of formatted string. **/ static ssize_t lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%d\n",phba->temp_sensor_support); } /** * lpfc_modeldesc_show - Return the model description of the hba * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the scsi vpd model description. * * Returns: size of formatted string. 
**/ static ssize_t lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc); } /** * lpfc_modelname_show - Return the model name of the hba * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the scsi vpd model name. * * Returns: size of formatted string. **/ static ssize_t lpfc_modelname_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName); } /** * lpfc_programtype_show - Return the program type of the hba * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the scsi vpd program type. * * Returns: size of formatted string. **/ static ssize_t lpfc_programtype_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType); } /** * lpfc_mlomgmt_show - Return the Menlo Maintenance sli flag * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the Menlo Maintenance sli flag. * * Returns: size of formatted string. 
**/ static ssize_t lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%d\n", (phba->sli.sli_flag & LPFC_MENLO_MAINT)); } /** * lpfc_vportnum_show - Return the port number in ascii of the hba * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains scsi vpd program type. * * Returns: size of formatted string. **/ static ssize_t lpfc_vportnum_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port); } /** * lpfc_fwrev_show - Return the firmware rev running in the hba * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the scsi vpd program type. * * Returns: size of formatted string. **/ static ssize_t lpfc_fwrev_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; uint32_t if_type; uint8_t sli_family; char fwrev[FW_REV_STR_SIZE]; int len; lpfc_decode_firmware_rev(phba, fwrev, 1); if_type = phba->sli4_hba.pc_sli4_params.if_type; sli_family = phba->sli4_hba.pc_sli4_params.sli_family; if (phba->sli_rev < LPFC_SLI_REV4) len = snprintf(buf, PAGE_SIZE, "%s, sli-%d\n", fwrev, phba->sli_rev); else len = snprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n", fwrev, phba->sli_rev, if_type, sli_family); return len; } /** * lpfc_hdw_show - Return the jedec information about the hba * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. 
* @buf: on return contains the scsi vpd program type. * * Returns: size of formatted string. **/ static ssize_t lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf) { char hdw[9]; struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; lpfc_vpd_t *vp = &phba->vpd; lpfc_jedec_to_ascii(vp->rev.biuRev, hdw); return snprintf(buf, PAGE_SIZE, "%s\n", hdw); } /** * lpfc_option_rom_version_show - Return the adapter ROM FCode version * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the ROM and FCode ascii strings. * * Returns: size of formatted string. **/ static ssize_t lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion); } /** * lpfc_state_show - Return the link state of the port * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains text describing the state of the link. * * Notes: * The switch statement has no default so zero will be returned. * * Returns: size of formatted string. 
**/ static ssize_t lpfc_link_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; int len = 0; switch (phba->link_state) { case LPFC_LINK_UNKNOWN: case LPFC_WARM_START: case LPFC_INIT_START: case LPFC_INIT_MBX_CMDS: case LPFC_LINK_DOWN: case LPFC_HBA_ERROR: if (phba->hba_flag & LINK_DISABLED) len += snprintf(buf + len, PAGE_SIZE-len, "Link Down - User disabled\n"); else len += snprintf(buf + len, PAGE_SIZE-len, "Link Down\n"); break; case LPFC_LINK_UP: case LPFC_CLEAR_LA: case LPFC_HBA_READY: len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - "); switch (vport->port_state) { case LPFC_LOCAL_CFG_LINK: len += snprintf(buf + len, PAGE_SIZE-len, "Configuring Link\n"); break; case LPFC_FDISC: case LPFC_FLOGI: case LPFC_FABRIC_CFG_LINK: case LPFC_NS_REG: case LPFC_NS_QRY: case LPFC_BUILD_DISC_LIST: case LPFC_DISC_AUTH: len += snprintf(buf + len, PAGE_SIZE - len, "Discovery\n"); break; case LPFC_VPORT_READY: len += snprintf(buf + len, PAGE_SIZE - len, "Ready\n"); break; case LPFC_VPORT_FAILED: len += snprintf(buf + len, PAGE_SIZE - len, "Failed\n"); break; case LPFC_VPORT_UNKNOWN: len += snprintf(buf + len, PAGE_SIZE - len, "Unknown\n"); break; } if (phba->sli.sli_flag & LPFC_MENLO_MAINT) len += snprintf(buf + len, PAGE_SIZE-len, " Menlo Maint Mode\n"); else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { if (vport->fc_flag & FC_PUBLIC_LOOP) len += snprintf(buf + len, PAGE_SIZE-len, " Public Loop\n"); else len += snprintf(buf + len, PAGE_SIZE-len, " Private Loop\n"); } else { if (vport->fc_flag & FC_FABRIC) len += snprintf(buf + len, PAGE_SIZE-len, " Fabric\n"); else len += snprintf(buf + len, PAGE_SIZE-len, " Point-2-Point\n"); } } return len; } /** * lpfc_sli4_protocol_show - Return the fip mode of the HBA * @dev: class unused variable. * @attr: device attribute, not used. 
* @buf: on return contains the module description text. * * Returns: size of formatted string. **/ static ssize_t lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; if (phba->sli_rev < LPFC_SLI_REV4) return snprintf(buf, PAGE_SIZE, "fc\n"); if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) { if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE) return snprintf(buf, PAGE_SIZE, "fcoe\n"); if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) return snprintf(buf, PAGE_SIZE, "fc\n"); } return snprintf(buf, PAGE_SIZE, "unknown\n"); } /** * lpfc_link_state_store - Transition the link_state on an HBA port * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: one or more lpfc_polling_flags values. * @count: not used. * * Returns: * -EINVAL if the buffer is not "up" or "down" * return from link state change function if non-zero * length of the buf on success **/ static ssize_t lpfc_link_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; int status = -EINVAL; if ((strncmp(buf, "up", sizeof("up") - 1) == 0) && (phba->link_state == LPFC_LINK_DOWN)) status = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) && (phba->link_state >= LPFC_LINK_UP)) status = phba->lpfc_hba_down_link(phba, MBX_NOWAIT); if (status == 0) return strlen(buf); else return status; } /** * lpfc_num_discovered_ports_show - Return sum of mapped and unmapped vports * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the sum of fc mapped and unmapped. 
 *
 * Description:
 * Returns the ascii text number of the sum of the fc mapped and unmapped
 * vport counts.
 *
 * Returns: size of formatted string.
 **/
static ssize_t
lpfc_num_discovered_ports_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			vport->fc_map_cnt + vport->fc_unmap_cnt);
}

/**
 * lpfc_issue_lip - Misnomer, name carried over from long ago
 * @shost: Scsi_Host pointer.
 *
 * Description:
 * Bring the link down gracefully then re-init the link. The firmware will
 * re-init the fiber channel interface as required. Does not issue a LIP.
 *
 * Returns:
 * -EPERM port offline or management commands are being blocked
 * -ENOMEM cannot allocate memory for the mailbox command
 * -EIO error sending the mailbox command
 * zero for success
 **/
static int
lpfc_issue_lip(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *pmboxq;
	int mbxstatus = MBXERR_ERROR;

	/* Refuse while the port is offline or management I/O is blocked. */
	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
	    (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO))
		return -EPERM;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!pmboxq)
		return -ENOMEM;

	memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
	pmboxq->u.mb.mbxOwner = OWN_HOST;

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);

	/* Only re-init the link if the DOWN_LINK mailbox succeeded (or the
	 * link was already down).
	 */
	if ((mbxstatus == MBX_SUCCESS) &&
	    (pmboxq->u.mb.mbxStatus == 0 ||
	     pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) {
		memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
		lpfc_init_link(phba, pmboxq, phba->cfg_topology,
			       phba->cfg_link_speed);
		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
						     phba->fc_ratov * 2);
		if ((mbxstatus == MBX_SUCCESS) &&
		    (pmboxq->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"2859 SLI authentication is required "
					"for INIT_LINK but has not done yet\n");
	}

	lpfc_set_loopback_flag(phba);
	/* On MBX_TIMEOUT the firmware still owns the mailbox buffer; freeing
	 * it here would risk a use-after-free when the command completes.
	 */
	if (mbxstatus != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);

	if (mbxstatus == MBXERR_ERROR)
		return -EIO;

	return 0;
}

/**
 * lpfc_do_offline - Issues a mailbox command to bring the link down
 * @phba: lpfc_hba pointer.
 * @type: LPFC_EVT_OFFLINE, LPFC_EVT_WARM_START, LPFC_EVT_KILL.
 *
 * Notes:
 * Assumes any error from lpfc_do_offline() will be negative.
 * Can wait up to 5 seconds for the port ring buffers count
 * to reach zero, prints a warning if it is not zero and continues.
 * lpfc_workq_post_event() returns zero when it fails to post the event
 * (hence the -ENOMEM paths below); non-zero means the event was posted.
 *
 * Returns:
 * -ENOMEM could not post the offline event
 * -EIO error posting the event
 * zero for success
 **/
static int
lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
{
	struct completion online_compl;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli;
	int status = 0;
	int cnt = 0;
	int i;
	int rc;

	init_completion(&online_compl);
	rc = lpfc_workq_post_event(phba, &status, &online_compl,
				   LPFC_EVT_OFFLINE_PREP);
	if (rc == 0)
		return -ENOMEM;

	wait_for_completion(&online_compl);

	if (status != 0)
		return -EIO;

	psli = &phba->sli;

	/* Wait a little for things to settle down, but not
	 * long enough for dev loss timeout to expire.
	 */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		while (pring->txcmplq_cnt) {
			msleep(10);
			if (cnt++ > 500) {	/* 5 secs */
				lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
						"0466 Outstanding IO when "
						"bringing Adapter offline\n");
				break;
			}
		}
	}

	init_completion(&online_compl);
	rc = lpfc_workq_post_event(phba, &status, &online_compl, type);
	if (rc == 0)
		return -ENOMEM;

	wait_for_completion(&online_compl);

	if (status != 0)
		return -EIO;

	return 0;
}

/**
 * lpfc_selective_reset - Offline then onlines the port
 * @phba: lpfc_hba pointer.
 *
 * Description:
 * If the port is configured to allow a reset then the hba is brought
 * offline then online.
 *
 * Notes:
 * Assumes any error from lpfc_do_offline() will be negative.
 * Do not make this function static.
* * Returns: * lpfc_do_offline() return code if not zero * -EIO reset not configured or error posting the event * zero for success **/ int lpfc_selective_reset(struct lpfc_hba *phba) { struct completion online_compl; int status = 0; int rc; if (!phba->cfg_enable_hba_reset) return -EACCES; status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); if (status != 0) return status; init_completion(&online_compl); rc = lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_ONLINE); if (rc == 0) return -ENOMEM; wait_for_completion(&online_compl); if (status != 0) return -EIO; return 0; } /** * lpfc_issue_reset - Selectively resets an adapter * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: containing the string "selective". * @count: unused variable. * * Description: * If the buf contains the string "selective" then lpfc_selective_reset() * is called to perform the reset. * * Notes: * Assumes any error from lpfc_selective_reset() will be negative. * If lpfc_selective_reset() returns zero then the length of the buffer * is returned which indicates success * * Returns: * -EINVAL if the buffer does not contain the string "selective" * length of buf if lpfc-selective_reset() if the call succeeds * return value of lpfc_selective_reset() if the call fails **/ static ssize_t lpfc_issue_reset(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; int status = -EINVAL; if (!phba->cfg_enable_hba_reset) return -EACCES; if (strncmp(buf, "selective", sizeof("selective") - 1) == 0) status = phba->lpfc_selective_reset(phba); if (status == 0) return strlen(buf); else return status; } /** * lpfc_sli4_pdev_status_reg_wait - Wait for pdev status register for readyness * @phba: lpfc_hba pointer. 
* * Description: * SLI4 interface type-2 device to wait on the sliport status register for * the readyness after performing a firmware reset. * * Returns: * zero for success, -EPERM when port does not have privilage to perform the * reset, -EIO when port timeout from recovering from the reset. * * Note: * As the caller will interpret the return code by value, be careful in making * change or addition to return codes. **/ int lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba) { struct lpfc_register portstat_reg = {0}; int i; msleep(100); lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, &portstat_reg.word0); /* verify if privilaged for the request operation */ if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) && !bf_get(lpfc_sliport_status_err, &portstat_reg)) return -EPERM; /* wait for the SLI port firmware ready after firmware reset */ for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) { msleep(10); lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, &portstat_reg.word0); if (!bf_get(lpfc_sliport_status_err, &portstat_reg)) continue; if (!bf_get(lpfc_sliport_status_rn, &portstat_reg)) continue; if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg)) continue; break; } if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT) return 0; else return -EIO; } /** * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc * @phba: lpfc_hba pointer. * * Description: * Request SLI4 interface type-2 device to perform a physical register set * access. 
 *
 * Returns:
 * zero for success, negative errno on failure
 **/
static ssize_t
lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
{
	struct completion online_compl;
	struct pci_dev *pdev = phba->pcidev;
	uint32_t before_fc_flag;
	uint32_t sriov_nr_virtfn;
	uint32_t reg_val;
	int status = 0, rc = 0;
	int job_posted = 1, sriov_err;

	if (!phba->cfg_enable_hba_reset)
		return -EACCES;

	/* Only SLI4 interface type-2 devices support this register access. */
	if ((phba->sli_rev < LPFC_SLI_REV4) ||
	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	     LPFC_SLI_INTF_IF_TYPE_2))
		return -EPERM;

	/* Keep state if we need to restore back */
	before_fc_flag = phba->pport->fc_flag;
	sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;

	/* Disable SR-IOV virtual functions if enabled */
	if (phba->cfg_sriov_nr_virtfn) {
		pci_disable_sriov(pdev);
		phba->cfg_sriov_nr_virtfn = 0;
	}
	status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);

	if (status != 0)
		return status;

	/* wait for the device to be quiesced before firmware reset */
	msleep(100);

	/* Read-modify-write the control register with the requested op. */
	reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
			LPFC_CTL_PDEV_CTL_OFFSET);

	if (opcode == LPFC_FW_DUMP)
		reg_val |= LPFC_FW_DUMP_REQUEST;
	else if (opcode == LPFC_FW_RESET)
		reg_val |= LPFC_CTL_PDEV_CTL_FRST;
	else if (opcode == LPFC_DV_RESET)
		reg_val |= LPFC_CTL_PDEV_CTL_DRST;

	writel(reg_val, phba->sli4_hba.conf_regs_memmap_p +
	       LPFC_CTL_PDEV_CTL_OFFSET);
	/* flush */
	readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);

	/* delay driver action following IF_TYPE_2 reset */
	rc = lpfc_sli4_pdev_status_reg_wait(phba);

	if (rc == -EPERM) {
		/* no privilege for reset; log it but still try to go online */
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3150 No privilage to perform the requested "
				"access: x%x\n", reg_val);
	} else if (rc == -EIO) {
		/* reset failed, there is nothing more we can do */
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3153 Fail to perform the requested "
				"access: x%x\n", reg_val);
		return rc;
	}

	/* keep the original port state */
	if (before_fc_flag & FC_OFFLINE_MODE)
		goto out;

	init_completion(&online_compl);
	job_posted = lpfc_workq_post_event(phba, &status,
					   &online_compl, LPFC_EVT_ONLINE);
	if (!job_posted)
		goto out;

	wait_for_completion(&online_compl);

out:
	/* in any case, restore the virtual functions enabled as before */
	if (sriov_nr_virtfn) {
		sriov_err =
			lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn);
		if (!sriov_err)
			phba->cfg_sriov_nr_virtfn = sriov_nr_virtfn;
	}

	/* return proper error code */
	if (!rc) {
		if (!job_posted)
			rc = -ENOMEM;
		else if (status)
			rc = -EIO;
	}
	return rc;
}

/**
 * lpfc_nport_evt_cnt_show - Return the number of nport events
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: on return contains the ascii number of nport events.
 *
 * Returns: size of formatted string.
 **/
static ssize_t
lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
}

/**
 * lpfc_board_mode_show - Return the state of the board
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: on return contains the state of the adapter.
 *
 * Returns: size of formatted string.
 **/
static ssize_t
lpfc_board_mode_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	char * state;

	if (phba->link_state == LPFC_HBA_ERROR)
		state = "error";
	else if (phba->link_state == LPFC_WARM_START)
		state = "warm start";
	else if (phba->link_state == LPFC_INIT_START)
		state = "offline";
	else
		state = "online";

	return snprintf(buf, PAGE_SIZE, "%s\n", state);
}

/**
 * lpfc_board_mode_store - Puts the hba in online, offline, warm or error state
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: containing one of the strings "online", "offline", "warm" or "error".
 * @count: unused variable.
 *
 * Returns:
 * -EACCES if enable hba reset not enabled
 * -EINVAL if the buffer does not contain a valid string (see above)
 * -EIO if lpfc_workq_post_event() or lpfc_do_offline() fails
 * buf length greater than zero indicates success
 **/
static ssize_t
lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct completion online_compl;
	char *board_mode_str = NULL;
	int status = 0;
	int rc;

	if (!phba->cfg_enable_hba_reset) {
		status = -EACCES;
		goto board_mode_out;
	}

	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
			 "3050 lpfc_board_mode set to %s\n", buf);

	init_completion(&online_compl);

	if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
		rc = lpfc_workq_post_event(phba, &status, &online_compl,
					   LPFC_EVT_ONLINE);
		/* lpfc_workq_post_event() returns 0 when posting fails. */
		if (rc == 0) {
			status = -ENOMEM;
			goto board_mode_out;
		}
		wait_for_completion(&online_compl);
	} else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
		status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
	else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
		/* "warm" and "error" are not supported on SLI4 parts. */
		if (phba->sli_rev == LPFC_SLI_REV4)
			status = -EINVAL;
		else
			status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
	else if (strncmp(buf, "error", sizeof("error") - 1) == 0)
		if (phba->sli_rev == LPFC_SLI_REV4)
			status = -EINVAL;
		else
			status = lpfc_do_offline(phba, LPFC_EVT_KILL);
	else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
		status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP);
	else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0)
		status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
	else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
		status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
	else
		status = -EINVAL;

board_mode_out:
	if (!status)
		return strlen(buf);
	else {
		/* Strip the trailing newline so the failure log is clean. */
		board_mode_str = strchr(buf, '\n');
		if (board_mode_str)
			*board_mode_str = '\0';
		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
				 "3097 Failed \"%s\", status(%d), "
				 "fc_flag(x%x)\n",
				 buf, status, phba->pport->fc_flag);
		return status;
	}
}

/**
 * lpfc_get_hba_info - Return various bits of information about the adapter
 * @phba: pointer to the adapter structure.
 * @mxri: max xri count.
 * @axri: available xri count.
 * @mrpi: max rpi count.
 * @arpi: available rpi count.
 * @mvpi: max vpi count.
 * @avpi: available vpi count.
 *
 * Description:
 * If an integer pointer for an count is not null then the value for the
 * count is returned.  Issues a READ_CONFIG mailbox command to fetch the
 * current resource limits from the firmware.
 *
 * Returns:
 * zero on error
 * one for success
 **/
static int
lpfc_get_hba_info(struct lpfc_hba *phba,
		  uint32_t *mxri, uint32_t *axri,
		  uint32_t *mrpi, uint32_t *arpi,
		  uint32_t *mvpi, uint32_t *avpi)
{
	struct lpfc_mbx_read_config *rd_config;
	LPFC_MBOXQ_t *pmboxq;
	MAILBOX_t *pmb;
	int rc = 0;
	uint32_t max_vpi;

	/*
	 * prevent udev from issuing mailbox commands until the port is
	 * configured.
	 */
	if (phba->link_state < LPFC_LINK_DOWN ||
	    !phba->mbox_mem_pool ||
	    (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
		return 0;

	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
		return 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return 0;
	memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));

	pmb = &pmboxq->u.mb;
	pmb->mbxCommand = MBX_READ_CONFIG;
	pmb->mbxOwner = OWN_HOST;
	pmboxq->context1 = NULL;

	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
		rc = MBX_NOT_FINISHED;
	else
		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);

	if (rc != MBX_SUCCESS) {
		/* On MBX_TIMEOUT the firmware still owns the mailbox; it
		 * must not be freed here.
		 */
		if (rc != MBX_TIMEOUT)
			mempool_free(pmboxq, phba->mbox_mem_pool);
		return 0;
	}

	if (phba->sli_rev == LPFC_SLI_REV4) {
		rd_config = &pmboxq->u.mqe.un.rd_config;
		if (mrpi)
			*mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
		if (arpi)
			*arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
					phba->sli4_hba.max_cfg_param.rpi_used;
		if (mxri)
			*mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
		if (axri)
			*axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
					phba->sli4_hba.max_cfg_param.xri_used;

		/* Account for differences with SLI-3. Get vpi count from
		 * mailbox data and subtract one for max vpi value.
		 */
		max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ?
			(bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0;

		if (mvpi)
			*mvpi = max_vpi;
		if (avpi)
			*avpi = max_vpi - phba->sli4_hba.max_cfg_param.vpi_used;
	} else {
		if (mrpi)
			*mrpi = pmb->un.varRdConfig.max_rpi;
		if (arpi)
			*arpi = pmb->un.varRdConfig.avail_rpi;
		if (mxri)
			*mxri = pmb->un.varRdConfig.max_xri;
		if (axri)
			*axri = pmb->un.varRdConfig.avail_xri;
		if (mvpi)
			*mvpi = pmb->un.varRdConfig.max_vpi;
		if (avpi)
			*avpi = pmb->un.varRdConfig.avail_vpi;
	}

	mempool_free(pmboxq, phba->mbox_mem_pool);
	return 1;
}

/**
 * lpfc_max_rpi_show - Return maximum rpi
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: on return contains the maximum rpi count in decimal or "Unknown".
* * Description: * Calls lpfc_get_hba_info() asking for just the mrpi count. * If lpfc_get_hba_info() returns zero (failure) the buffer text is set * to "Unknown" and the buffer length is returned, therefore the caller * must check for "Unknown" in the buffer to detect a failure. * * Returns: size of formatted string. **/ static ssize_t lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; uint32_t cnt; if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL)) return snprintf(buf, PAGE_SIZE, "%d\n", cnt); return snprintf(buf, PAGE_SIZE, "Unknown\n"); } /** * lpfc_used_rpi_show - Return maximum rpi minus available rpi * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: containing the used rpi count in decimal or "Unknown". * * Description: * Calls lpfc_get_hba_info() asking for just the mrpi and arpi counts. * If lpfc_get_hba_info() returns zero (failure) the buffer text is set * to "Unknown" and the buffer length is returned, therefore the caller * must check for "Unknown" in the buffer to detect a failure. * * Returns: size of formatted string. **/ static ssize_t lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; uint32_t cnt, acnt; if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL)) return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); return snprintf(buf, PAGE_SIZE, "Unknown\n"); } /** * lpfc_max_xri_show - Return maximum xri * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the maximum xri count in decimal or "Unknown". 
* * Description: * Calls lpfc_get_hba_info() asking for just the mrpi count. * If lpfc_get_hba_info() returns zero (failure) the buffer text is set * to "Unknown" and the buffer length is returned, therefore the caller * must check for "Unknown" in the buffer to detect a failure. * * Returns: size of formatted string. **/ static ssize_t lpfc_max_xri_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; uint32_t cnt; if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL)) return snprintf(buf, PAGE_SIZE, "%d\n", cnt); return snprintf(buf, PAGE_SIZE, "Unknown\n"); } /** * lpfc_used_xri_show - Return maximum xpi minus the available xpi * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the used xri count in decimal or "Unknown". * * Description: * Calls lpfc_get_hba_info() asking for just the mxri and axri counts. * If lpfc_get_hba_info() returns zero (failure) the buffer text is set * to "Unknown" and the buffer length is returned, therefore the caller * must check for "Unknown" in the buffer to detect a failure. * * Returns: size of formatted string. **/ static ssize_t lpfc_used_xri_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; uint32_t cnt, acnt; if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL)) return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); return snprintf(buf, PAGE_SIZE, "Unknown\n"); } /** * lpfc_max_vpi_show - Return maximum vpi * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the maximum vpi count in decimal or "Unknown". 
* * Description: * Calls lpfc_get_hba_info() asking for just the mvpi count. * If lpfc_get_hba_info() returns zero (failure) the buffer text is set * to "Unknown" and the buffer length is returned, therefore the caller * must check for "Unknown" in the buffer to detect a failure. * * Returns: size of formatted string. **/ static ssize_t lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; uint32_t cnt; if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL)) return snprintf(buf, PAGE_SIZE, "%d\n", cnt); return snprintf(buf, PAGE_SIZE, "Unknown\n"); } /** * lpfc_used_vpi_show - Return maximum vpi minus the available vpi * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the used vpi count in decimal or "Unknown". * * Description: * Calls lpfc_get_hba_info() asking for just the mvpi and avpi counts. * If lpfc_get_hba_info() returns zero (failure) the buffer text is set * to "Unknown" and the buffer length is returned, therefore the caller * must check for "Unknown" in the buffer to detect a failure. * * Returns: size of formatted string. **/ static ssize_t lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; uint32_t cnt, acnt; if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt)) return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); return snprintf(buf, PAGE_SIZE, "Unknown\n"); } /** * lpfc_npiv_info_show - Return text about NPIV support for the adapter * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: text that must be interpreted to determine if npiv is supported. 
* * Description: * Buffer will contain text indicating npiv is not suppoerted on the port, * the port is an NPIV physical port, or it is an npiv virtual port with * the id of the vport. * * Returns: size of formatted string. **/ static ssize_t lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; if (!(phba->max_vpi)) return snprintf(buf, PAGE_SIZE, "NPIV Not Supported\n"); if (vport->port_type == LPFC_PHYSICAL_PORT) return snprintf(buf, PAGE_SIZE, "NPIV Physical\n"); return snprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi); } /** * lpfc_poll_show - Return text about poll support for the adapter * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the cfg_poll in hex. * * Notes: * cfg_poll should be a lpfc_polling_flags type. * * Returns: size of formatted string. **/ static ssize_t lpfc_poll_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll); } /** * lpfc_poll_store - Set the value of cfg_poll for the adapter * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: one or more lpfc_polling_flags values. * @count: not used. * * Notes: * buf contents converted to integer and checked for a valid value. 
* * Returns: * -EINVAL if the buffer connot be converted or is out of range * length of the buf on success **/ static ssize_t lpfc_poll_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; uint32_t creg_val; uint32_t old_val; int val=0; if (!isdigit(buf[0])) return -EINVAL; if (sscanf(buf, "%i", &val) != 1) return -EINVAL; if ((val & 0x3) != val) return -EINVAL; if (phba->sli_rev == LPFC_SLI_REV4) val = 0; lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "3051 lpfc_poll changed from %d to %d\n", phba->cfg_poll, val); spin_lock_irq(&phba->hbalock); old_val = phba->cfg_poll; if (val & ENABLE_FCP_RING_POLLING) { if ((val & DISABLE_FCP_RING_INT) && !(old_val & DISABLE_FCP_RING_INT)) { if (lpfc_readl(phba->HCregaddr, &creg_val)) { spin_unlock_irq(&phba->hbalock); return -EINVAL; } creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ lpfc_poll_start_timer(phba); } } else if (val != 0x0) { spin_unlock_irq(&phba->hbalock); return -EINVAL; } if (!(val & DISABLE_FCP_RING_INT) && (old_val & DISABLE_FCP_RING_INT)) { spin_unlock_irq(&phba->hbalock); del_timer(&phba->fcp_poll_timer); spin_lock_irq(&phba->hbalock); if (lpfc_readl(phba->HCregaddr, &creg_val)) { spin_unlock_irq(&phba->hbalock); return -EINVAL; } creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ } phba->cfg_poll = val; spin_unlock_irq(&phba->hbalock); return strlen(buf); } /** * lpfc_fips_level_show - Return the current FIPS level for the HBA * @dev: class unused variable. * @attr: device attribute, not used. * @buf: on return contains the module description text. * * Returns: size of formatted string. 
**/ static ssize_t lpfc_fips_level_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level); } /** * lpfc_fips_rev_show - Return the FIPS Spec revision for the HBA * @dev: class unused variable. * @attr: device attribute, not used. * @buf: on return contains the module description text. * * Returns: size of formatted string. **/ static ssize_t lpfc_fips_rev_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev); } /** * lpfc_dss_show - Return the current state of dss and the configured state * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the formatted text. * * Returns: size of formatted string. **/ static ssize_t lpfc_dss_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%s - %sOperational\n", (phba->cfg_enable_dss) ? "Enabled" : "Disabled", (phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ? "" : "Not "); } /** * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the formatted support level. * * Description: * Returns the maximum number of virtual functions a physical function can * support, 0 will be returned if called on virtual function. * * Returns: size of formatted string. 
**/ static ssize_t lpfc_sriov_hw_max_virtfn_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; uint16_t max_nr_virtfn; max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba); return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn); } /** * lpfc_param_show - Return a cfg attribute value in decimal * * Description: * Macro that given an attr e.g. hba_queue_depth expands * into a function with the name lpfc_hba_queue_depth_show. * * lpfc_##attr##_show: Return the decimal value of an adapters cfg_xxx field. * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the attribute value in decimal. * * Returns: size of formatted string. **/ #define lpfc_param_show(attr) \ static ssize_t \ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct Scsi_Host *shost = class_to_shost(dev);\ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ struct lpfc_hba *phba = vport->phba;\ uint val = 0;\ val = phba->cfg_##attr;\ return snprintf(buf, PAGE_SIZE, "%d\n",\ phba->cfg_##attr);\ } /** * lpfc_param_hex_show - Return a cfg attribute value in hex * * Description: * Macro that given an attr e.g. hba_queue_depth expands * into a function with the name lpfc_hba_queue_depth_show * * lpfc_##attr##_show: Return the hex value of an adapters cfg_xxx field. * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the attribute value in hexadecimal. * * Returns: size of formatted string. 
**/ #define lpfc_param_hex_show(attr) \ static ssize_t \ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct Scsi_Host *shost = class_to_shost(dev);\ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ struct lpfc_hba *phba = vport->phba;\ uint val = 0;\ val = phba->cfg_##attr;\ return snprintf(buf, PAGE_SIZE, "%#x\n",\ phba->cfg_##attr);\ } /** * lpfc_param_init - Initializes a cfg attribute * * Description: * Macro that given an attr e.g. hba_queue_depth expands * into a function with the name lpfc_hba_queue_depth_init. The macro also * takes a default argument, a minimum and maximum argument. * * lpfc_##attr##_init: Initializes an attribute. * @phba: pointer the the adapter structure. * @val: integer attribute value. * * Validates the min and max values then sets the adapter config field * accordingly, or uses the default if out of range and prints an error message. * * Returns: * zero on success * -EINVAL if default used **/ #define lpfc_param_init(attr, default, minval, maxval) \ static int \ lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \ { \ if (val >= minval && val <= maxval) {\ phba->cfg_##attr = val;\ return 0;\ }\ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \ "0449 lpfc_"#attr" attribute cannot be set to %d, "\ "allowed range is ["#minval", "#maxval"]\n", val); \ phba->cfg_##attr = default;\ return -EINVAL;\ } /** * lpfc_param_set - Set a cfg attribute value * * Description: * Macro that given an attr e.g. hba_queue_depth expands * into a function with the name lpfc_hba_queue_depth_set * * lpfc_##attr##_set: Sets an attribute value. * @phba: pointer the the adapter structure. * @val: integer attribute value. * * Description: * Validates the min and max values then sets the * adapter config field if in the valid range. prints error message * and does not set the parameter if invalid. 
 *
 * Returns:
 * zero on success
 * -EINVAL if val is invalid
 **/
#define lpfc_param_set(attr, default, minval, maxval)	\
static int \
lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
{ \
	if (val >= minval && val <= maxval) {\
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
			"3052 lpfc_" #attr " changed from %d to %d\n", \
			phba->cfg_##attr, val); \
		phba->cfg_##attr = val;\
		return 0;\
	}\
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
		"0450 lpfc_"#attr" attribute cannot be set to %d, "\
		"allowed range is ["#minval", "#maxval"]\n", val); \
	return -EINVAL;\
}

/**
 * lpfc_param_store - Set a vport attribute value
 *
 * Description:
 * Macro that given an attr e.g. hba_queue_depth expands
 * into a function with the name lpfc_hba_queue_depth_store.
 *
 * lpfc_##attr##_store: Set an attribute value.
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: contains the attribute value in ascii.
 * @count: not used.
 *
 * Description:
 * Convert the ascii text number to an integer, then
 * use the lpfc_##attr##_set function to set the value.
 *
 * Returns:
 * -EINVAL if val is invalid or lpfc_##attr##_set() fails
 * length of buffer upon success.
 **/
/* NOTE(review): on success this returns strlen(buf) rather than count;
 * for sysfs writes the two normally match, but a write containing an
 * embedded NUL would be reported short — confirm this is intended. */
#define lpfc_param_store(attr)	\
static ssize_t \
lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
		    const char *buf, size_t count) \
{ \
	struct Scsi_Host  *shost = class_to_shost(dev);\
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
	struct lpfc_hba   *phba = vport->phba;\
	uint val = 0;\
	if (!isdigit(buf[0]))\
		return -EINVAL;\
	if (sscanf(buf, "%i", &val) != 1)\
		return -EINVAL;\
	if (lpfc_##attr##_set(phba, val) == 0) \
		return strlen(buf);\
	else \
		return -EINVAL;\
}

/**
 * lpfc_vport_param_show - Return decimal formatted cfg attribute value
 *
 * Description:
 * Macro that given an attr e.g. hba_queue_depth expands
 * into a function with the name lpfc_hba_queue_depth_show
 *
 * lpfc_##attr##_show: prints the attribute value in decimal.
* @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the attribute value in decimal. * * Returns: length of formatted string. **/ #define lpfc_vport_param_show(attr) \ static ssize_t \ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct Scsi_Host *shost = class_to_shost(dev);\ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ uint val = 0;\ val = vport->cfg_##attr;\ return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\ } /** * lpfc_vport_param_hex_show - Return hex formatted attribute value * * Description: * Macro that given an attr e.g. * hba_queue_depth expands into a function with the name * lpfc_hba_queue_depth_show * * lpfc_##attr##_show: prints the attribute value in hexadecimal. * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the attribute value in hexadecimal. * * Returns: length of formatted string. **/ #define lpfc_vport_param_hex_show(attr) \ static ssize_t \ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct Scsi_Host *shost = class_to_shost(dev);\ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ uint val = 0;\ val = vport->cfg_##attr;\ return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\ } /** * lpfc_vport_param_init - Initialize a vport cfg attribute * * Description: * Macro that given an attr e.g. hba_queue_depth expands * into a function with the name lpfc_hba_queue_depth_init. The macro also * takes a default argument, a minimum and maximum argument. * * lpfc_##attr##_init: validates the min and max values then sets the * adapter config field accordingly, or uses the default if out of range * and prints an error message. * @phba: pointer the the adapter structure. * @val: integer attribute value. 
 *
 * Returns:
 * zero on success
 * -EINVAL if default used
 **/
#define lpfc_vport_param_init(attr, default, minval, maxval)	\
static int \
lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \
{ \
	if (val >= minval && val <= maxval) {\
		vport->cfg_##attr = val;\
		return 0;\
	}\
	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
			 "0423 lpfc_"#attr" attribute cannot be set to %d, "\
			 "allowed range is ["#minval", "#maxval"]\n", val); \
	vport->cfg_##attr = default;\
	return -EINVAL;\
}

/**
 * lpfc_vport_param_set - Set a vport cfg attribute
 *
 * Description:
 * Macro that given an attr e.g. hba_queue_depth expands
 * into a function with the name lpfc_hba_queue_depth_set
 *
 * lpfc_##attr##_set: validates the min and max values then sets the
 * adapter config field if in the valid range. prints error message
 * and does not set the parameter if invalid.
 * @phba: pointer to the adapter structure.
 * @val: integer attribute value.
 *
 * Returns:
 * zero on success
 * -EINVAL if val is invalid
 **/
#define lpfc_vport_param_set(attr, default, minval, maxval)	\
static int \
lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
{ \
	if (val >= minval && val <= maxval) {\
		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
			"3053 lpfc_" #attr " changed from %d to %d\n", \
			vport->cfg_##attr, val); \
		vport->cfg_##attr = val;\
		return 0;\
	}\
	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
			 "0424 lpfc_"#attr" attribute cannot be set to %d, "\
			 "allowed range is ["#minval", "#maxval"]\n", val); \
	return -EINVAL;\
}

/**
 * lpfc_vport_param_store - Set a vport attribute
 *
 * Description:
 * Macro that given an attr e.g. hba_queue_depth
 * expands into a function with the name lpfc_hba_queue_depth_store
 *
 * lpfc_##attr##_store: convert the ascii text number to an integer, then
 * use the lpfc_##attr##_set function to set the value.
 * @cdev: class device that is converted into a Scsi_host.
 * @buf: contains the attribute value in decimal.
 * @count: not used.
 *
 * Returns:
 * -EINVAL if val is invalid or lpfc_##attr##_set() fails
 * length of buffer upon success.
 **/
#define lpfc_vport_param_store(attr)	\
static ssize_t \
lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
		    const char *buf, size_t count) \
{ \
	struct Scsi_Host  *shost = class_to_shost(dev);\
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
	uint val = 0;\
	if (!isdigit(buf[0]))\
		return -EINVAL;\
	if (sscanf(buf, "%i", &val) != 1)\
		return -EINVAL;\
	if (lpfc_##attr##_set(vport, val) == 0) \
		return strlen(buf);\
	else \
		return -EINVAL;\
}

/*
 * Attribute generator macros.  Each expands to a module parameter plus the
 * per-attribute init/show/set/store helpers defined above, and (except for
 * the plain LPFC_ATTR / LPFC_VPORT_ATTR) a DEVICE_ATTR sysfs file:
 *   _R   = read-only sysfs file, _RW = read-write,
 *   _HEX = value formatted/shown in hexadecimal,
 *   VPORT = per-vport config field instead of per-HBA.
 */
/* Module parameter + range-checked init only (no sysfs file). */
#define LPFC_ATTR(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_init(name, defval, minval, maxval)

/* Module parameter + read-only decimal sysfs file. */
#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)

/* Module parameter + read-write decimal sysfs file. */
#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
lpfc_param_set(name, defval, minval, maxval)\
lpfc_param_store(name)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
		   lpfc_##name##_show, lpfc_##name##_store)

/* Module parameter + read-only hexadecimal sysfs file. */
#define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_hex_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)

/* Module parameter + read-write hexadecimal sysfs file. */
#define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_hex_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
lpfc_param_set(name, defval, minval, maxval)\
lpfc_param_store(name)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
		   lpfc_##name##_show, lpfc_##name##_store)

/* Per-vport variants of the generators above. */
#define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_init(name, defval, minval, maxval)

#define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)

#define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
lpfc_vport_param_set(name, defval, minval, maxval)\
lpfc_vport_param_store(name)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
		   lpfc_##name##_show, lpfc_##name##_store)

#define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_hex_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)

#define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_hex_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
lpfc_vport_param_set(name, defval, minval, maxval)\
lpfc_vport_param_store(name)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
		   lpfc_##name##_show, lpfc_##name##_store)

/* Read-only informational sysfs files backed by show handlers defined
 * elsewhere in this file. */
static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL);
static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL);
static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL);
static DEVICE_ATTR(bg_reftag_err, S_IRUGO, lpfc_bg_reftag_err_show, NULL);
static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL);
static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
static DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL);
static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
static DEVICE_ATTR(link_state, S_IRUGO | S_IWUSR, lpfc_link_state_show,
		   lpfc_link_state_store);
static DEVICE_ATTR(option_rom_version, S_IRUGO,
		   lpfc_option_rom_version_show, NULL);
static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
		   lpfc_num_discovered_ports_show, NULL);
static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
static DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, NULL);
static DEVICE_ATTR(lpfc_enable_fip, S_IRUGO, lpfc_enable_fip_show, NULL);
static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
		   lpfc_board_mode_show, lpfc_board_mode_store);
static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
static DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL);
static DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL);
static DEVICE_ATTR(max_rpi, S_IRUGO, lpfc_max_rpi_show, NULL);
static DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL);
static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
static DEVICE_ATTR(npiv_info,
S_IRUGO, lpfc_npiv_info_show, NULL); static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL); static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL); static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL); static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL); static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO, lpfc_sriov_hw_max_virtfn_show, NULL); static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL); static char *lpfc_soft_wwn_key = "C99G71SL8032A"; /** * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: containing the string lpfc_soft_wwn_key. * @count: must be size of lpfc_soft_wwn_key. * * Returns: * -EINVAL if the buffer does not contain lpfc_soft_wwn_key * length of buf indicates success **/ static ssize_t lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; unsigned int cnt = count; /* * We're doing a simple sanity check for soft_wwpn setting. * We require that the user write a specific key to enable * the soft_wwpn attribute to be settable. Once the attribute * is written, the enable key resets. If further updates are * desired, the key must be written again to re-enable the * attribute. * * The "key" is not secret - it is a hardcoded string shown * here. The intent is to protect against the random user or * application that is just writing attributes. 
*/ /* count may include a LF at end of string */ if (buf[cnt-1] == '\n') cnt--; if ((cnt != strlen(lpfc_soft_wwn_key)) || (strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0)) return -EINVAL; phba->soft_wwn_enable = 1; return count; } static DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL, lpfc_soft_wwn_enable_store); /** * lpfc_soft_wwpn_show - Return the cfg soft ww port name of the adapter * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the wwpn in hexadecimal. * * Returns: size of formatted string. **/ static ssize_t lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)phba->cfg_soft_wwpn); } /** * lpfc_soft_wwpn_store - Set the ww port name of the adapter * @dev class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: contains the wwpn in hexadecimal. 
* @count: number of wwpn bytes in buf * * Returns: * -EACCES hba reset not enabled, adapter over temp * -EINVAL soft wwn not enabled, count is invalid, invalid wwpn byte invalid * -EIO error taking adapter offline or online * value of count on success **/ static ssize_t lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; struct completion online_compl; int stat1=0, stat2=0; unsigned int i, j, cnt=count; u8 wwpn[8]; int rc; if (!phba->cfg_enable_hba_reset) return -EACCES; spin_lock_irq(&phba->hbalock); if (phba->over_temp_state == HBA_OVER_TEMP) { spin_unlock_irq(&phba->hbalock); return -EACCES; } spin_unlock_irq(&phba->hbalock); /* count may include a LF at end of string */ if (buf[cnt-1] == '\n') cnt--; if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) || ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x')))) return -EINVAL; phba->soft_wwn_enable = 0; memset(wwpn, 0, sizeof(wwpn)); /* Validate and store the new name */ for (i=0, j=0; i < 16; i++) { int value; value = hex_to_bin(*buf++); if (value >= 0) j = (j << 4) | value; else return -EINVAL; if (i % 2) { wwpn[i/2] = j & 0xff; j = 0; } } phba->cfg_soft_wwpn = wwn_to_u64(wwpn); fc_host_port_name(shost) = phba->cfg_soft_wwpn; if (phba->cfg_soft_wwnn) fc_host_node_name(shost) = phba->cfg_soft_wwnn; dev_printk(KERN_NOTICE, &phba->pcidev->dev, "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no); stat1 = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); if (stat1) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0463 lpfc_soft_wwpn attribute set failed to " "reinit adapter - %d\n", stat1); init_completion(&online_compl); rc = lpfc_workq_post_event(phba, &stat2, &online_compl, LPFC_EVT_ONLINE); if (rc == 0) return -ENOMEM; wait_for_completion(&online_compl); if (stat2) 
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0464 lpfc_soft_wwpn attribute set failed to " "reinit adapter - %d\n", stat2); return (stat1 || stat2) ? -EIO : count; } static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\ lpfc_soft_wwpn_show, lpfc_soft_wwpn_store); /** * lpfc_soft_wwnn_show - Return the cfg soft ww node name for the adapter * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the wwnn in hexadecimal. * * Returns: size of formatted string. **/ static ssize_t lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; return snprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)phba->cfg_soft_wwnn); } /** * lpfc_soft_wwnn_store - sets the ww node name of the adapter * @cdev: class device that is converted into a Scsi_host. * @buf: contains the ww node name in hexadecimal. * @count: number of wwnn bytes in buf. * * Returns: * -EINVAL soft wwn not enabled, count is invalid, invalid wwnn byte invalid * value of count on success **/ static ssize_t lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; unsigned int i, j, cnt=count; u8 wwnn[8]; /* count may include a LF at end of string */ if (buf[cnt-1] == '\n') cnt--; if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) || ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x')))) return -EINVAL; /* * Allow wwnn to be set many times, as long as the enable is set. * However, once the wwpn is set, everything locks. 
*/ memset(wwnn, 0, sizeof(wwnn)); /* Validate and store the new name */ for (i=0, j=0; i < 16; i++) { int value; value = hex_to_bin(*buf++); if (value >= 0) j = (j << 4) | value; else return -EINVAL; if (i % 2) { wwnn[i/2] = j & 0xff; j = 0; } } phba->cfg_soft_wwnn = wwn_to_u64(wwnn); dev_printk(KERN_NOTICE, &phba->pcidev->dev, "lpfc%d: soft_wwnn set. Value will take effect upon " "setting of the soft_wwpn\n", phba->brd_no); return count; } static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\ lpfc_soft_wwnn_show, lpfc_soft_wwnn_store); static int lpfc_poll = 0; module_param(lpfc_poll, int, S_IRUGO); MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:" " 0 - none," " 1 - poll with interrupts enabled" " 3 - poll and disable FCP ring interrupts"); static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR, lpfc_poll_show, lpfc_poll_store); int lpfc_sli_mode = 0; module_param(lpfc_sli_mode, int, S_IRUGO); MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:" " 0 - auto (SLI-3 if supported)," " 2 - select SLI-2 even on SLI-3 capable HBAs," " 3 - select SLI-3"); int lpfc_enable_npiv = 1; module_param(lpfc_enable_npiv, int, S_IRUGO); MODULE_PARM_DESC(lpfc_enable_npiv, "Enable NPIV functionality"); lpfc_param_show(enable_npiv); lpfc_param_init(enable_npiv, 1, 0, 1); static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL); LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2, "FCF Fast failover=1 Priority failover=2"); int lpfc_enable_rrq; module_param(lpfc_enable_rrq, int, S_IRUGO); MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality"); lpfc_param_show(enable_rrq); lpfc_param_init(enable_rrq, 0, 0, 1); static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL); /* # lpfc_suppress_link_up: Bring link up at initialization # 0x0 = bring link up (issue MBX_INIT_LINK) # 0x1 = do NOT bring link up at initialization(MBX_INIT_LINK) # 0x2 = never bring up link # Default value is 0. 
*/
LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
	    LPFC_DELAY_INIT_LINK_INDEFINITELY,
	    "Suppress Link Up at initialization");

/*
# lpfc_cnt: Number of IOCBs allocated for ELS, CT, and ABTS
#       1 - (1024)
#       2 - (2048)
#       3 - (3072)
#       4 - (4096)
#       5 - (5120)
*/
/* Report the high-water mark of allocated IOCBs (phba->iocb_max). */
static ssize_t
lpfc_iocb_hw_show(struct device *dev,
		  struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;

	return snprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max);
}

static DEVICE_ATTR(iocb_hw, S_IRUGO,
		   lpfc_iocb_hw_show, NULL);

/* Report the maximum depth reached by the ELS ring transmit queue. */
static ssize_t
lpfc_txq_hw_show(struct device *dev,
		 struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			phba->sli.ring[LPFC_ELS_RING].txq_max);
}

static DEVICE_ATTR(txq_hw, S_IRUGO,
		   lpfc_txq_hw_show, NULL);

/* Report the maximum depth reached by the ELS ring completion queue. */
static ssize_t
lpfc_txcmplq_hw_show(struct device *dev,
		     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			phba->sli.ring[LPFC_ELS_RING].txcmplq_max);
}

static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
		   lpfc_txcmplq_hw_show, NULL);

int lpfc_iocb_cnt = 2;
module_param(lpfc_iocb_cnt, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_iocb_cnt,
	"Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs");
lpfc_param_show(iocb_cnt);
lpfc_param_init(iocb_cnt, 2, 1, 5);
static DEVICE_ATTR(lpfc_iocb_cnt, S_IRUGO,
		   lpfc_iocb_cnt_show, NULL);

/*
# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
# until the timer expires. Value range is [0,255]. Default value is 30.
*/
static int lpfc_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
static int lpfc_devloss_tmo = LPFC_DEF_DEVLOSS_TMO;
module_param(lpfc_nodev_tmo, int, 0);
MODULE_PARM_DESC(lpfc_nodev_tmo,
		 "Seconds driver will hold I/O waiting "
		 "for a device to come back");

/**
 * lpfc_nodev_tmo_show - Return the hba dev loss timeout value
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains the dev loss timeout in decimal.
 *
 * Returns: size of formatted string.
 **/
static ssize_t
lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
		    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;

	/* NOTE(review): this reports cfg_devloss_tmo, not cfg_nodev_tmo;
	 * the init/set handlers keep the two in lockstep, so this looks
	 * intentional — confirm before changing. */
	return snprintf(buf, PAGE_SIZE, "%d\n",	vport->cfg_devloss_tmo);
}

/**
 * lpfc_nodev_tmo_init - Set the hba nodev timeout value
 * @vport: lpfc vport structure pointer.
 * @val: contains the nodev timeout value.
 *
 * Description:
 * If the devloss tmo is already set then nodev tmo is set to devloss tmo,
 * a kernel error message is printed and zero is returned.
 * Else if val is in range then nodev tmo and devloss tmo are set to val.
 * Otherwise nodev tmo is set to the default value.
 *
 * Returns:
 * zero if already set or if val is in range
 * -EINVAL val out of range
 **/
static int
lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
{
	/* devloss_tmo (module param or earlier init) takes precedence. */
	if (vport->cfg_devloss_tmo != LPFC_DEF_DEVLOSS_TMO) {
		vport->cfg_nodev_tmo = vport->cfg_devloss_tmo;
		if (val != LPFC_DEF_DEVLOSS_TMO)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
					 "0407 Ignoring nodev_tmo module "
					 "parameter because devloss_tmo is "
					 "set.\n");
		return 0;
	}

	if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
		/* Keep nodev and devloss timers in lockstep. */
		vport->cfg_nodev_tmo = val;
		vport->cfg_devloss_tmo = val;
		return 0;
	}
	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
			 "0400 lpfc_nodev_tmo attribute cannot be set to"
			 " %d, allowed range is [%d, %d]\n",
			 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
	vport->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
	return -EINVAL;
}

/**
 * lpfc_update_rport_devloss_tmo - Update dev loss tmo value
 * @vport: lpfc vport structure pointer.
 *
 * Description:
 * Update all the ndlp's dev loss tmo with the vport devloss tmo value.
 **/
static void
lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost;
	struct lpfc_nodelist *ndlp;

	shost = lpfc_shost_from_vport(vport);
	spin_lock_irq(shost->host_lock);
	/* Propagate the new timeout to every active rport on this vport. */
	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
		if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport)
			ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
	spin_unlock_irq(shost->host_lock);
}

/**
 * lpfc_nodev_tmo_set - Set the vport nodev tmo and devloss tmo values
 * @vport: lpfc vport structure pointer.
 * @val: contains the tmo value.
 *
 * Description:
 * If the devloss tmo is already set or the vport dev loss tmo has changed
 * then a kernel error message is printed and zero is returned.
 * Else if val is in range then nodev tmo and devloss tmo are set to val.
 * Otherwise nodev tmo is set to the default value.
 *
 * Returns:
 * zero if already set or if val is in range
 * -EINVAL val out of range
 **/
static int
lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
{
	/* A user-set devloss_tmo always wins over nodev_tmo changes. */
	if (vport->dev_loss_tmo_changed ||
	    (lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
				 "0401 Ignoring change to nodev_tmo "
				 "because devloss_tmo is set.\n");
		return 0;
	}
	if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
		vport->cfg_nodev_tmo = val;
		vport->cfg_devloss_tmo = val;
		/*
		 * For compat: set the fc_host dev loss so new rports
		 * will get the value.
		 */
		fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
		lpfc_update_rport_devloss_tmo(vport);
		return 0;
	}
	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
			 "0403 lpfc_nodev_tmo attribute cannot be set to"
			 "%d, allowed range is [%d, %d]\n",
			 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
	return -EINVAL;
}

lpfc_vport_param_store(nodev_tmo)

static DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR,
		   lpfc_nodev_tmo_show, lpfc_nodev_tmo_store);

/*
# lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that
# disappear until the timer expires. Value range is [0,255]. Default
# value is 30.
*/
module_param(lpfc_devloss_tmo, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_devloss_tmo,
		 "Seconds driver will hold I/O waiting "
		 "for a device to come back");
lpfc_vport_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO,
		      LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO)
lpfc_vport_param_show(devloss_tmo)

/**
 * lpfc_devloss_tmo_set - Sets vport nodev tmo, devloss tmo values, changed bit
 * @vport: lpfc vport structure pointer.
 * @val: contains the tmo value.
 *
 * Description:
 * If val is in a valid range then set the vport nodev tmo,
 * devloss tmo, also set the vport dev loss tmo changed flag.
 * Else a kernel error message is printed.
 *
 * Returns:
 * zero if val is in range
 * -EINVAL val out of range
 **/
static int
lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
{
	if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
		vport->cfg_nodev_tmo = val;
		vport->cfg_devloss_tmo = val;
		/* Remember that the user set devloss explicitly, so later
		 * nodev_tmo writes are ignored (see lpfc_nodev_tmo_set). */
		vport->dev_loss_tmo_changed = 1;
		fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
		lpfc_update_rport_devloss_tmo(vport);
		return 0;
	}

	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
			 "0404 lpfc_devloss_tmo attribute cannot be set to"
			 " %d, allowed range is [%d, %d]\n",
			 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
	return -EINVAL;
}

lpfc_vport_param_store(devloss_tmo)
static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
		   lpfc_devloss_tmo_show, lpfc_devloss_tmo_store);

/*
# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
# deluged with LOTS of information.
# You can set a bit mask to record specific types of verbose messages:
# See lpfc_logmsh.h for definitions.
*/
LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
		       "Verbose logging bit-mask");

/*
# lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters
# objects that have been registered with the nameserver after login.
*/
LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
		  "Deregister nameserver objects before LOGO");

/*
# lun_queue_depth:  This parameter is used to limit the number of outstanding
# commands per FCP LUN. Value range is [1,128]. Default value is 30.
*/
LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 128,
		  "Max number of FCP commands we can queue to a specific LUN");

/*
# tgt_queue_depth:  This parameter is used to limit the number of outstanding
# commands per target port. Value range is [10,65535]. Default value is 65535.
*/
LPFC_VPORT_ATTR_R(tgt_queue_depth, 65535, 10, 65535,
	"Max number of FCP commands we can queue to a specific target port");

/*
# hba_queue_depth:  This parameter is used to limit the number of outstanding
# commands per lpfc HBA. Value range is [32,8192].
If this parameter
# value is greater than the maximum number of exchanges supported by the HBA,
# then maximum number of exchanges supported by the HBA is used to determine
# the hba_queue_depth.
*/
LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192,
	    "Max number of FCP commands we can queue to a lpfc HBA");

/*
# peer_port_login:  This parameter allows/prevents logins
# between peer ports hosted on the same physical port.
# When this parameter is set 0 peer ports of same physical port
# are not allowed to login to each other.
# When this parameter is set 1 peer ports of same physical port
# are allowed to login to each other.
# Default value of this parameter is 0.
*/
LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1,
		  "Allow peer ports on the same physical port to login to each "
		  "other.");

/*
# restrict_login:  This parameter allows/prevents logins
# between Virtual Ports and remote initiators.
# When this parameter is not set (0) Virtual Ports will accept PLOGIs from
# other initiators and will attempt to PLOGI all remote ports.
# When this parameter is set (1) Virtual Ports will reject PLOGIs from
# remote ports and will not attempt to PLOGI to other initiators.
# This parameter does not restrict to the physical port.
# This parameter does not restrict logins to Fabric resident remote ports.
# Default value of this parameter is 1.
*/
static int lpfc_restrict_login = 1;
module_param(lpfc_restrict_login, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_restrict_login,
		 "Restrict virtual ports login to remote initiators.");
lpfc_vport_param_show(restrict_login);

/**
 * lpfc_restrict_login_init - Set the vport restrict login flag
 * @vport: lpfc vport structure pointer.
 * @val: contains the restrict login value.
 *
 * Description:
 * If val is not in a valid range then log a kernel error message and set
 * the vport restrict login to one.
 * If the port type is physical clear the restrict login flag and return.
 * Else set the restrict login flag to val.
 *
 * Returns:
 * zero if val is in range
 * -EINVAL val out of range
 **/
static int
lpfc_restrict_login_init(struct lpfc_vport *vport, int val)
{
	if (val < 0 || val > 1) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
				 "0422 lpfc_restrict_login attribute cannot "
				 "be set to %d, allowed range is [0, 1]\n",
				 val);
		vport->cfg_restrict_login = 1;
		return -EINVAL;
	}
	/* restrict_login only applies to NPIV vports, never the physical
	 * port; silently force it off there. */
	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		vport->cfg_restrict_login = 0;
		return 0;
	}
	vport->cfg_restrict_login = val;
	return 0;
}

/**
 * lpfc_restrict_login_set - Set the vport restrict login flag
 * @vport: lpfc vport structure pointer.
 * @val: contains the restrict login value.
 *
 * Description:
 * If val is not in a valid range then log a kernel error message and set
 * the vport restrict login to one.
 * If the port type is physical and the val is not zero log a kernel
 * error message, clear the restrict login flag and return zero.
 * Else set the restrict login flag to val.
 *
 * Returns:
 * zero if val is in range
 * -EINVAL val out of range
 **/
static int
lpfc_restrict_login_set(struct lpfc_vport *vport, int val)
{
	if (val < 0 || val > 1) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
				 "0425 lpfc_restrict_login attribute cannot "
				 "be set to %d, allowed range is [0, 1]\n",
				 val);
		vport->cfg_restrict_login = 1;
		return -EINVAL;
	}
	/* Unlike init, a runtime attempt to enable it on the physical
	 * port is reported before being overridden to 0. */
	if (vport->port_type == LPFC_PHYSICAL_PORT && val != 0) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
				 "0468 lpfc_restrict_login must be 0 for "
				 "Physical ports.\n");
		vport->cfg_restrict_login = 0;
		return 0;
	}
	vport->cfg_restrict_login = val;
	return 0;
}
lpfc_vport_param_store(restrict_login);
static DEVICE_ATTR(lpfc_restrict_login, S_IRUGO | S_IWUSR,
		   lpfc_restrict_login_show, lpfc_restrict_login_store);

/*
# Some disk devices have a "select ID" or "select Target" capability.
# From a protocol standpoint "select ID" usually means select the
# Fibre channel "ALPA".
In the FC-AL Profile there is an "informative
# annex" which contains a table that maps a "select ID" (a number
# between 0 and 7F) to an ALPA.  By default, for compatibility with
# older drivers, the lpfc driver scans this table from low ALPA to high
# ALPA.
#
# Turning on the scan-down variable (on = 1, off = 0) will
# cause the lpfc driver to use an inverted table, effectively
# scanning ALPAs from high to low. Value range is [0,1]. Default value is 1.
#
# (Note: This "select ID" functionality is a LOOP ONLY characteristic
# and will not work across a fabric. Also this parameter will take
# effect only in the case when ALPA map is not available.)
*/
LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1,
		  "Start scanning for devices from highest ALPA to lowest");

/*
# lpfc_topology:  link topology for init link
#            0x0  = attempt loop mode then point-to-point
#            0x01 = internal loopback mode
#            0x02 = attempt point-to-point mode only
#            0x04 = attempt loop mode only
#            0x06 = attempt point-to-point mode then loop
#            Set point-to-point mode if you want to run as an N_Port.
#            Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
#            Default value is 0.
*/
/**
 * lpfc_topology_store - Set the adapters topology field
 * @phba: lpfc_hba pointer.
 * @val: topology value.
 *
 * Description:
 * If val is in a valid range then set the adapter's topology field and
 * issue a lip; if the lip fails reset the topology to the old value.
 *
 * If the value is not in range log a kernel error message and return an error.
 *
 * Returns:
 * zero if val is in range and lip okay
 * non-zero return value from lpfc_issue_lip()
 * -EINVAL val out of range
 **/
static ssize_t
lpfc_topology_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct Scsi_Host  *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int val = 0;
	int nolip = 0;
	const char *val_buf = buf;
	int err;
	uint32_t prev_val;

	/* An optional "nolip " prefix changes the topology without a lip */
	if (!strncmp(buf, "nolip ", strlen("nolip "))) {
		nolip = 1;
		val_buf = &buf[strlen("nolip ")];
	}

	if (!isdigit(val_buf[0]))
		return -EINVAL;
	if (sscanf(val_buf, "%i", &val) != 1)
		return -EINVAL;

	if (val >= 0 && val <= 6) {
		prev_val = phba->cfg_topology;
		phba->cfg_topology = val;
		/* Loop-only topology (4) cannot be combined with 16G speed */
		if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G &&
			val == 4) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
				"3113 Loop mode not supported at speed %d\n",
				phba->cfg_link_speed);
			phba->cfg_topology = prev_val;
			return -EINVAL;
		}
		if (nolip)
			return strlen(buf);

		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
			"3054 lpfc_topology changed from %d to %d\n",
			prev_val, val);
		err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
		if (err) {
			phba->cfg_topology = prev_val;
			return -EINVAL;
		} else
			return strlen(buf);
	}
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"%d:0467 lpfc_topology attribute cannot be set to %d, "
		"allowed range is [0, 6]\n",
		phba->brd_no, val);
	return -EINVAL;
}
static int lpfc_topology = 0;
module_param(lpfc_topology, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_topology, "Select Fibre Channel topology");
lpfc_param_show(topology)
lpfc_param_init(topology, 0, 0, 6)
static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
		lpfc_topology_show, lpfc_topology_store);

/**
 * lpfc_static_vport_show: Read callback function for
 *   lpfc_static_vport sysfs file.
 * @dev: Pointer to class device object.
 * @attr: device attribute structure.
 * @buf: Data buffer.
* * This function is the read call back function for * lpfc_static_vport sysfs file. The lpfc_static_vport * sysfs file report the mageability of the vport. **/ static ssize_t lpfc_static_vport_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; if (vport->vport_flag & STATIC_VPORT) sprintf(buf, "1\n"); else sprintf(buf, "0\n"); return strlen(buf); } /* * Sysfs attribute to control the statistical data collection. */ static DEVICE_ATTR(lpfc_static_vport, S_IRUGO, lpfc_static_vport_show, NULL); /** * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file * @dev: Pointer to class device. * @buf: Data buffer. * @count: Size of the data buffer. * * This function get called when an user write to the lpfc_stat_data_ctrl * sysfs file. This function parse the command written to the sysfs file * and take appropriate action. These commands are used for controlling * driver statistical data collection. * Following are the command this function handles. * * setbucket <bucket_type> <base> <step> * = Set the latency buckets. * destroybucket = destroy all the buckets. 
* start = start data collection * stop = stop data collection * reset = reset the collected data **/ static ssize_t lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; #define LPFC_MAX_DATA_CTRL_LEN 1024 static char bucket_data[LPFC_MAX_DATA_CTRL_LEN]; unsigned long i; char *str_ptr, *token; struct lpfc_vport **vports; struct Scsi_Host *v_shost; char *bucket_type_str, *base_str, *step_str; unsigned long base, step, bucket_type; if (!strncmp(buf, "setbucket", strlen("setbucket"))) { if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1)) return -EINVAL; strcpy(bucket_data, buf); str_ptr = &bucket_data[0]; /* Ignore this token - this is command token */ token = strsep(&str_ptr, "\t "); if (!token) return -EINVAL; bucket_type_str = strsep(&str_ptr, "\t "); if (!bucket_type_str) return -EINVAL; if (!strncmp(bucket_type_str, "linear", strlen("linear"))) bucket_type = LPFC_LINEAR_BUCKET; else if (!strncmp(bucket_type_str, "power2", strlen("power2"))) bucket_type = LPFC_POWER2_BUCKET; else return -EINVAL; base_str = strsep(&str_ptr, "\t "); if (!base_str) return -EINVAL; base = simple_strtoul(base_str, NULL, 0); step_str = strsep(&str_ptr, "\t "); if (!step_str) return -EINVAL; step = simple_strtoul(step_str, NULL, 0); if (!step) return -EINVAL; /* Block the data collection for every vport */ vports = lpfc_create_vport_work_array(phba); if (vports == NULL) return -ENOMEM; for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { v_shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(v_shost->host_lock); /* Block and reset data collection */ vports[i]->stat_data_blocked = 1; if (vports[i]->stat_data_enabled) lpfc_vport_reset_stat_data(vports[i]); spin_unlock_irq(v_shost->host_lock); } /* Set the bucket attributes */ phba->bucket_type = bucket_type; phba->bucket_base = 
base; phba->bucket_step = step; for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { v_shost = lpfc_shost_from_vport(vports[i]); /* Unblock data collection */ spin_lock_irq(v_shost->host_lock); vports[i]->stat_data_blocked = 0; spin_unlock_irq(v_shost->host_lock); } lpfc_destroy_vport_work_array(phba, vports); return strlen(buf); } if (!strncmp(buf, "destroybucket", strlen("destroybucket"))) { vports = lpfc_create_vport_work_array(phba); if (vports == NULL) return -ENOMEM; for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { v_shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); vports[i]->stat_data_blocked = 1; lpfc_free_bucket(vport); vport->stat_data_enabled = 0; vports[i]->stat_data_blocked = 0; spin_unlock_irq(shost->host_lock); } lpfc_destroy_vport_work_array(phba, vports); phba->bucket_type = LPFC_NO_BUCKET; phba->bucket_base = 0; phba->bucket_step = 0; return strlen(buf); } if (!strncmp(buf, "start", strlen("start"))) { /* If no buckets configured return error */ if (phba->bucket_type == LPFC_NO_BUCKET) return -EINVAL; spin_lock_irq(shost->host_lock); if (vport->stat_data_enabled) { spin_unlock_irq(shost->host_lock); return strlen(buf); } lpfc_alloc_bucket(vport); vport->stat_data_enabled = 1; spin_unlock_irq(shost->host_lock); return strlen(buf); } if (!strncmp(buf, "stop", strlen("stop"))) { spin_lock_irq(shost->host_lock); if (vport->stat_data_enabled == 0) { spin_unlock_irq(shost->host_lock); return strlen(buf); } lpfc_free_bucket(vport); vport->stat_data_enabled = 0; spin_unlock_irq(shost->host_lock); return strlen(buf); } if (!strncmp(buf, "reset", strlen("reset"))) { if ((phba->bucket_type == LPFC_NO_BUCKET) || !vport->stat_data_enabled) return strlen(buf); spin_lock_irq(shost->host_lock); vport->stat_data_blocked = 1; lpfc_vport_reset_stat_data(vport); vport->stat_data_blocked = 0; spin_unlock_irq(shost->host_lock); return strlen(buf); } return -EINVAL; } /** * lpfc_stat_data_ctrl_show - Read function for 
lpfc_stat_data_ctrl sysfs file * @dev: Pointer to class device object. * @buf: Data buffer. * * This function is the read call back function for * lpfc_stat_data_ctrl sysfs file. This function report the * current statistical data collection state. **/ static ssize_t lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; int index = 0; int i; char *bucket_type; unsigned long bucket_value; switch (phba->bucket_type) { case LPFC_LINEAR_BUCKET: bucket_type = "linear"; break; case LPFC_POWER2_BUCKET: bucket_type = "power2"; break; default: bucket_type = "No Bucket"; break; } sprintf(&buf[index], "Statistical Data enabled :%d, " "blocked :%d, Bucket type :%s, Bucket base :%d," " Bucket step :%d\nLatency Ranges :", vport->stat_data_enabled, vport->stat_data_blocked, bucket_type, phba->bucket_base, phba->bucket_step); index = strlen(buf); if (phba->bucket_type != LPFC_NO_BUCKET) { for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) { if (phba->bucket_type == LPFC_LINEAR_BUCKET) bucket_value = phba->bucket_base + phba->bucket_step * i; else bucket_value = phba->bucket_base + (1 << i) * phba->bucket_step; if (index + 10 > PAGE_SIZE) break; sprintf(&buf[index], "%08ld ", bucket_value); index = strlen(buf); } } sprintf(&buf[index], "\n"); return strlen(buf); } /* * Sysfs attribute to control the statistical data collection. */ static DEVICE_ATTR(lpfc_stat_data_ctrl, S_IRUGO | S_IWUSR, lpfc_stat_data_ctrl_show, lpfc_stat_data_ctrl_store); /* * lpfc_drvr_stat_data: sysfs attr to get driver statistical data. */ /* * Each Bucket takes 11 characters and 1 new line + 17 bytes WWN * for each target. 
 */
#define STAT_DATA_SIZE_PER_TARGET(NUM_BUCKETS) ((NUM_BUCKETS) * 11 + 18)
#define MAX_STAT_DATA_SIZE_PER_TARGET \
	STAT_DATA_SIZE_PER_TARGET(LPFC_MAX_BUCKET_COUNT)

/**
 * sysfs_drvr_stat_data_read - Read function for lpfc_drvr_stat_data attribute
 * @filp: sysfs file
 * @kobj: Pointer to the kernel object
 * @bin_attr: Attribute object
 * @buff: Buffer pointer
 * @off: File offset
 * @count: Buffer size
 *
 * This function is the read call back function for lpfc_drvr_stat_data
 * sysfs file. This function export the statistical data to user
 * applications.
 **/
static ssize_t
sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj,
		struct bin_attribute *bin_attr,
		char *buf, loff_t off, size_t count)
{
	struct device *dev = container_of(kobj, struct device,
		kobj);
	struct Scsi_Host  *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int i = 0, index = 0;
	unsigned long nport_index;
	struct lpfc_nodelist *ndlp = NULL;
	/* Translate the file offset into a node index to resume from */
	nport_index = (unsigned long)off /
		MAX_STAT_DATA_SIZE_PER_TARGET;

	if (!vport->stat_data_enabled || vport->stat_data_blocked
		|| (phba->bucket_type == LPFC_NO_BUCKET))
		return 0;

	spin_lock_irq(shost->host_lock);
	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp) || !ndlp->lat_data)
			continue;

		/* Skip the nodes already emitted on previous reads */
		if (nport_index > 0) {
			nport_index--;
			continue;
		}

		if ((index + MAX_STAT_DATA_SIZE_PER_TARGET)
			> count)
			break;

		/* Redundant: lat_data was already checked above */
		if (!ndlp->lat_data)
			continue;

		/* Print the WWN */
		sprintf(&buf[index], "%02x%02x%02x%02x%02x%02x%02x%02x:",
			ndlp->nlp_portname.u.wwn[0],
			ndlp->nlp_portname.u.wwn[1],
			ndlp->nlp_portname.u.wwn[2],
			ndlp->nlp_portname.u.wwn[3],
			ndlp->nlp_portname.u.wwn[4],
			ndlp->nlp_portname.u.wwn[5],
			ndlp->nlp_portname.u.wwn[6],
			ndlp->nlp_portname.u.wwn[7]);

		index = strlen(buf);
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
			sprintf(&buf[index], "%010u,",
				ndlp->lat_data[i].cmd_count);
			index = strlen(buf);
		}
		sprintf(&buf[index], "\n");
		index = strlen(buf);
	}
	spin_unlock_irq(shost->host_lock);
	return index;
}

static struct bin_attribute sysfs_drvr_stat_data_attr = {
	.attr = {
		.name = "lpfc_drvr_stat_data",
		.mode = S_IRUSR,
	},
	.size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET,
	.read = sysfs_drvr_stat_data_read,
	.write = NULL,
};

/*
# lpfc_link_speed: Link speed selection for initializing the Fibre Channel
# connection.
#       Value range is [0,16]. Default value is 0.
*/
/**
 * lpfc_link_speed_store - Set the adapters link speed
 * @phba: lpfc_hba pointer.
 * @val: link speed value.
 *
 * Description:
 * If val is in a valid range then set the adapter's link speed field and
 * issue a lip; if the lip fails reset the link speed to the old value.
 *
 * Notes:
 * If the value is not in range log a kernel error message and return an error.
 *
 * Returns:
 * zero if val is in range and lip okay.
 * non-zero return value from lpfc_issue_lip()
 * -EINVAL val out of range
 **/
static ssize_t
lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct Scsi_Host  *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int val = LPFC_USER_LINK_SPEED_AUTO;
	int nolip = 0;
	const char *val_buf = buf;
	int err;
	uint32_t prev_val;

	/* An optional "nolip " prefix changes the speed without a lip */
	if (!strncmp(buf, "nolip ", strlen("nolip "))) {
		nolip = 1;
		val_buf = &buf[strlen("nolip ")];
	}

	if (!isdigit(val_buf[0]))
		return -EINVAL;
	if (sscanf(val_buf, "%i", &val) != 1)
		return -EINVAL;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
		"3055 lpfc_link_speed changed from %d to %d %s\n",
		phba->cfg_link_speed, val, nolip ?
		"(nolip)" : "(lip)");

	/* Reject speeds the port hardware does not advertise in lmt */
	if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
	    ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
	    ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
	    ((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) ||
	    ((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) ||
	    ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2879 lpfc_link_speed attribute cannot be set "
				"to %d. Speed is not supported by this port.\n",
				val);
		return -EINVAL;
	}
	/* 16G is not supported in loop topology */
	if (val == LPFC_USER_LINK_SPEED_16G &&
		 phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3112 lpfc_link_speed attribute cannot be set "
				"to %d. Speed is not supported in loop mode.\n",
				val);
		return -EINVAL;
	}
	if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
	    (LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
		prev_val = phba->cfg_link_speed;
		phba->cfg_link_speed = val;
		if (nolip)
			return strlen(buf);

		err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
		if (err) {
			phba->cfg_link_speed = prev_val;
			return -EINVAL;
		} else
			return strlen(buf);
	}
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0469 lpfc_link_speed attribute cannot be set to %d, "
		"allowed values are ["LPFC_LINK_SPEED_STRING"]\n", val);
	return -EINVAL;
}

static int lpfc_link_speed = 0;
module_param(lpfc_link_speed, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_link_speed, "Select link speed");
lpfc_param_show(link_speed)

/**
 * lpfc_link_speed_init - Set the adapters link speed
 * @phba: lpfc_hba pointer.
 * @val: link speed value.
 *
 * Description:
 * If val is in a valid range then set the adapter's link speed field.
 *
 * Notes:
 * If the value is not in range log a kernel error message, clear the link
 * speed and return an error.
 *
 * Returns:
 * zero if val saved.
 * -EINVAL val out of range
 **/
static int
lpfc_link_speed_init(struct lpfc_hba *phba, int val)
{
	/* cfg_topology 4 is loop-only mode, incompatible with 16G */
	if (val == LPFC_USER_LINK_SPEED_16G && phba->cfg_topology == 4) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"3111 lpfc_link_speed of %d cannot "
			"support loop mode, setting topology to default.\n",
			 val);
		phba->cfg_topology = 0;
	}
	if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
	    (LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
		phba->cfg_link_speed = val;
		return 0;
	}
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0405 lpfc_link_speed attribute cannot "
			"be set to %d, allowed values are "
			"["LPFC_LINK_SPEED_STRING"]\n", val);
	phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	return -EINVAL;
}

static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
		   lpfc_link_speed_show, lpfc_link_speed_store);

/*
# lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER)
#       0  = aer disabled or not supported
#       1  = aer supported and enabled (default)
# Value range is [0,1]. Default value is 1.
*/

/**
 * lpfc_aer_support_store - Set the adapter for aer support
 *
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: containing enable or disable aer flag.
 * @count: unused variable.
 *
 * Description:
 * If the val is 1 and currently the device's AER capability was not
 * enabled, invoke the kernel's enable AER helper routine, trying to
 * enable the device's AER capability. If the helper routine enabling
 * AER returns success, update the device's cfg_aer_support flag to
 * indicate AER is supported by the device; otherwise, if the device
 * AER capability is already enabled to support AER, then do nothing.
 *
 * If the val is 0 and currently the device's AER support was enabled,
 * invoke the kernel's disable AER helper routine. After that, update
 * the device's cfg_aer_support flag to indicate AER is not supported
 * by the device; otherwise, if the device AER capability is already
 * disabled from supporting AER, then do nothing.
 *
 * Returns:
 * length of the buf on success if val is in range the intended mode
 * is supported.
 * -EINVAL if val out of range or intended mode is not supported.
 **/
static ssize_t
lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
		       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int val = 0, rc = -EINVAL;

	if (!isdigit(buf[0]))
		return -EINVAL;
	if (sscanf(buf, "%i", &val) != 1)
		return -EINVAL;

	switch (val) {
	case 0:
		if (phba->hba_flag & HBA_AER_ENABLED) {
			rc = pci_disable_pcie_error_reporting(phba->pcidev);
			if (!rc) {
				/* hba_flag updates are guarded by hbalock */
				spin_lock_irq(&phba->hbalock);
				phba->hba_flag &= ~HBA_AER_ENABLED;
				spin_unlock_irq(&phba->hbalock);
				phba->cfg_aer_support = 0;
				rc = strlen(buf);
			} else
				rc = -EPERM;
		} else {
			phba->cfg_aer_support = 0;
			rc = strlen(buf);
		}
		break;
	case 1:
		if (!(phba->hba_flag & HBA_AER_ENABLED)) {
			rc = pci_enable_pcie_error_reporting(phba->pcidev);
			if (!rc) {
				spin_lock_irq(&phba->hbalock);
				phba->hba_flag |= HBA_AER_ENABLED;
				spin_unlock_irq(&phba->hbalock);
				phba->cfg_aer_support = 1;
				rc = strlen(buf);
			} else
				 rc = -EPERM;
		} else {
			phba->cfg_aer_support = 1;
			rc = strlen(buf);
		}
		break;
	default:
		rc = -EINVAL;
		break;
	}
	return rc;
}

static int lpfc_aer_support = 1;
module_param(lpfc_aer_support, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support");
lpfc_param_show(aer_support)

/**
 * lpfc_aer_support_init - Set the initial adapters aer support flag
 * @phba: lpfc_hba pointer.
 * @val: enable aer or disable aer flag.
 *
 * Description:
 * If val is in a valid range [0,1], then set the adapter's initial
 * cfg_aer_support field. It will be up to the driver's probe_one
 * routine to determine whether the device's AER support can be set
 * or not.
 *
 * Notes:
 * If the value is not in range log a kernel error message, and
 * choose the default value of setting AER support and return.
 *
 * Returns:
 * zero if val saved.
 * -EINVAL val out of range
 **/
static int
lpfc_aer_support_init(struct lpfc_hba *phba, int val)
{
	if (val == 0 || val == 1) {
		phba->cfg_aer_support = val;
		return 0;
	}
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2712 lpfc_aer_support attribute value %d out "
			"of range, allowed values are 0|1, setting it "
			"to default value of 1\n", val);
	/* By default, try to enable AER on a device */
	phba->cfg_aer_support = 1;
	return -EINVAL;
}

static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR,
		   lpfc_aer_support_show, lpfc_aer_support_store);

/**
 * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: containing flag 1 for aer cleanup state.
 * @count: unused variable.
 *
 * Description:
 * If the @buf contains 1 and the device currently has the AER support
 * enabled, then invokes the kernel AER helper routine
 * pci_cleanup_aer_uncorrect_error_status to clean up the uncorrectable
 * error status register.
 *
 * Notes:
 *
 * Returns:
 * -EINVAL if the buf does not contain the 1 or the device is not currently
 * enabled with the AER support.
 **/
static ssize_t
lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
		       const char *buf, size_t count)
{
	struct Scsi_Host  *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int val, rc = -1;

	if (!isdigit(buf[0]))
		return -EINVAL;
	if (sscanf(buf, "%i", &val) != 1)
		return -EINVAL;
	if (val != 1)
		return -EINVAL;

	/* Only meaningful while AER is actually enabled on the device */
	if (phba->hba_flag & HBA_AER_ENABLED)
		rc = pci_cleanup_aer_uncorrect_error_status(phba->pcidev);

	if (rc == 0)
		return strlen(buf);
	else
		return -EPERM;
}

static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
		   lpfc_aer_cleanup_state);

/**
 * lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions
 *
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: containing the string the number of vfs to be enabled.
 * @count: unused variable.
 *
 * Description:
 * When this api is called either through user sysfs, the driver shall
 * try to enable or disable SR-IOV virtual functions according to the
 * following:
 *
 * If zero virtual function has been enabled to the physical function,
 * the driver shall invoke the pci enable virtual function api trying
 * to enable the virtual functions. If the nr_vfn provided is greater
 * than the maximum supported, the maximum virtual function number will
 * be used for invoking the api; otherwise, the nr_vfn provided shall
 * be used for invoking the api. If the api call returned success, the
 * actual number of virtual functions enabled will be set to the driver
 * cfg_sriov_nr_virtfn; otherwise, -EINVAL shall be returned and driver
 * cfg_sriov_nr_virtfn remains zero.
 *
 * If none-zero virtual functions have already been enabled to the
 * physical function, as reflected by the driver's cfg_sriov_nr_virtfn,
 * -EINVAL will be returned and the driver does nothing;
 *
 * If the nr_vfn provided is zero and none-zero virtual functions have
 * been enabled, as indicated by the driver's cfg_sriov_nr_virtfn, the
 * disabling virtual function api shall be invoded to disable all the
 * virtual functions and driver's cfg_sriov_nr_virtfn shall be set to
 * zero. Otherwise, if zero virtual function has been enabled, do
 * nothing.
 *
 * Returns:
 * length of the buf on success if val is in range the intended mode
 * is supported.
 * -EINVAL if val out of range or intended mode is not supported.
 **/
static ssize_t
lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct pci_dev *pdev = phba->pcidev;
	int val = 0, rc = -EINVAL;

	/* Sanity check on user data */
	if (!isdigit(buf[0]))
		return -EINVAL;
	if (sscanf(buf, "%i", &val) != 1)
		return -EINVAL;
	if (val < 0)
		return -EINVAL;

	/* Request disabling virtual functions */
	if (val == 0) {
		if (phba->cfg_sriov_nr_virtfn > 0) {
			pci_disable_sriov(pdev);
			phba->cfg_sriov_nr_virtfn = 0;
		}
		return strlen(buf);
	}

	/* Request enabling virtual functions */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3018 There are %d virtual functions "
				"enabled on physical function.\n",
				phba->cfg_sriov_nr_virtfn);
		return -EEXIST;
	}

	if (val <= LPFC_MAX_VFN_PER_PFN)
		phba->cfg_sriov_nr_virtfn = val;
	else {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3019 Enabling %d virtual functions is not "
				"allowed.\n", val);
		return -EINVAL;
	}

	rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn);
	if (rc) {
		/* Enable failed: roll back the configured count */
		phba->cfg_sriov_nr_virtfn = 0;
		rc = -EPERM;
	} else
		rc = strlen(buf);

	return rc;
}

static int lpfc_sriov_nr_virtfn = LPFC_DEF_VFN_PER_PFN;
module_param(lpfc_sriov_nr_virtfn, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(lpfc_sriov_nr_virtfn, "Enable PCIe device SR-IOV virtual fn");
lpfc_param_show(sriov_nr_virtfn)

/**
 * lpfc_sriov_nr_virtfn_init - Set the initial sr-iov virtual function enable
 * @phba: lpfc_hba pointer.
 * @val: link speed value.
 *
 * Description:
 * If val is in a valid range [0,255], then set the adapter's initial
 * cfg_sriov_nr_virtfn field. If it's greater than the maximum, the maximum
 * number shall be used instead. It will be up to the driver's probe_one
 * routine to determine whether the device's SR-IOV is supported or not.
 *
 * Returns:
 * zero if val saved.
 * -EINVAL val out of range
 **/
static int
lpfc_sriov_nr_virtfn_init(struct lpfc_hba *phba, int val)
{
	if (val >= 0 && val <= LPFC_MAX_VFN_PER_PFN) {
		phba->cfg_sriov_nr_virtfn = val;
		return 0;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"3017 Enabling %d virtual functions is not "
			"allowed.\n", val);
	return -EINVAL;
}
static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
		   lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);

/*
# lpfc_fcp_class:  Determines FC class to use for the FCP protocol.
# Value range is [2,3]. Default value is 3.
*/
LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3,
		  "Select Fibre Channel class of service for FCP sequences");

/*
# lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range
# is [0,1]. Default value is 0.
*/
LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
		   "Use ADISC on rediscovery to authenticate FCP devices");

/*
# lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
# depth. Default value is 0. When the value of this parameter is zero the
# SCSI command completion time is not used for controlling I/O queue depth. When
# the parameter is set to a non-zero value, the I/O queue depth is controlled
# to limit the I/O completion time to the parameter value.
# The value is set in milliseconds.
*/ static int lpfc_max_scsicmpl_time; module_param(lpfc_max_scsicmpl_time, int, S_IRUGO); MODULE_PARM_DESC(lpfc_max_scsicmpl_time, "Use command completion time to control queue depth"); lpfc_vport_param_show(max_scsicmpl_time); lpfc_vport_param_init(max_scsicmpl_time, 0, 0, 60000); static int lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp, *next_ndlp; if (val == vport->cfg_max_scsicmpl_time) return 0; if ((val < 0) || (val > 60000)) return -EINVAL; vport->cfg_max_scsicmpl_time = val; spin_lock_irq(shost->host_lock); list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (!NLP_CHK_NODE_ACT(ndlp)) continue; if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) continue; ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth; } spin_unlock_irq(shost->host_lock); return 0; } lpfc_vport_param_store(max_scsicmpl_time); static DEVICE_ATTR(lpfc_max_scsicmpl_time, S_IRUGO | S_IWUSR, lpfc_max_scsicmpl_time_show, lpfc_max_scsicmpl_time_store); /* # lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value # range is [0,1]. Default value is 0. */ LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support"); /* # lpfc_cr_delay & lpfc_cr_count: Default values for I/O colaesing # cr_delay (msec) or cr_count outstanding commands. cr_delay can take # value [0,63]. cr_count can take value [1,255]. Default value of cr_delay # is 0. Default value of cr_count is 1. The cr_count feature is disabled if # cr_delay is set to 0. */ LPFC_ATTR_RW(cr_delay, 0, 0, 63, "A count of milliseconds after which an " "interrupt response is generated"); LPFC_ATTR_RW(cr_count, 1, 1, 255, "A count of I/O completions after which an " "interrupt response is generated"); /* # lpfc_multi_ring_support: Determines how many rings to spread available # cmd/rsp IOCB entries across. # Value range is [1,2]. Default value is 1. 
*/ LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary " "SLI rings to spread IOCB entries across"); /* # lpfc_multi_ring_rctl: If lpfc_multi_ring_support is enabled, this # identifies what rctl value to configure the additional ring for. # Value range is [1,0xff]. Default value is 4 (Unsolicated Data). */ LPFC_ATTR_R(multi_ring_rctl, FC_RCTL_DD_UNSOL_DATA, 1, 255, "Identifies RCTL for additional ring configuration"); /* # lpfc_multi_ring_type: If lpfc_multi_ring_support is enabled, this # identifies what type value to configure the additional ring for. # Value range is [1,0xff]. Default value is 5 (LLC/SNAP). */ LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1, 255, "Identifies TYPE for additional ring configuration"); /* # lpfc_fdmi_on: controls FDMI support. # 0 = no FDMI support # 1 = support FDMI without attribute of hostname # 2 = support FDMI with attribute of hostname # Value range [0,2]. Default value is 0. */ LPFC_VPORT_ATTR_RW(fdmi_on, 0, 0, 2, "Enable FDMI support"); /* # Specifies the maximum number of ELS cmds we can have outstanding (for # discovery). Value range is [1,64]. Default value = 32. */ LPFC_VPORT_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands " "during discovery"); /* # lpfc_max_luns: maximum allowed LUN. # Value range is [0,65535]. Default value is 255. # NOTE: The SCSI layer might probe all allowed LUN on some old targets. */ LPFC_VPORT_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN"); /* # lpfc_poll_tmo: .Milliseconds driver will wait between polling FCP ring. # Value range is [1,255], default value is 10. */ LPFC_ATTR_RW(poll_tmo, 10, 1, 255, "Milliseconds driver will wait between polling FCP ring"); /* # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that # support this feature # 0 = MSI disabled # 1 = MSI enabled # 2 = MSI-X enabled (default) # Value range is [0,2]. Default value is 2. 
*/ LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or " "MSI-X (2), if possible"); /* # lpfc_fcp_imax: Set the maximum number of fast-path FCP interrupts per second # # Value range is [636,651042]. Default value is 10000. */ LPFC_ATTR_R(fcp_imax, LPFC_FP_DEF_IMAX, LPFC_MIM_IMAX, LPFC_DMULT_CONST, "Set the maximum number of fast-path FCP interrupts per second"); /* # lpfc_fcp_wq_count: Set the number of fast-path FCP work queues # # Value range is [1,31]. Default value is 4. */ LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX, "Set the number of fast-path FCP work queues, if possible"); /* # lpfc_fcp_eq_count: Set the number of fast-path FCP event queues # # Value range is [1,7]. Default value is 1. */ LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX, "Set the number of fast-path FCP event queues, if possible"); /* # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. # 0 = HBA resets disabled # 1 = HBA resets enabled (default) # Value range is [0,1]. Default value is 1. */ LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver."); /* # lpfc_enable_hba_heartbeat: Disable HBA heartbeat timer.. # 0 = HBA Heartbeat disabled # 1 = HBA Heartbeat enabled (default) # Value range is [0,1]. Default value is 1. */ LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat."); /* # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF) # 0 = BlockGuard disabled (default) # 1 = BlockGuard enabled # Value range is [0,1]. Default value is 0. */ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); /* # lpfc_prot_mask: i # - Bit mask of host protection capabilities used to register with the # SCSI mid-layer # - Only meaningful if BG is turned on (lpfc_enable_bg=1). # - Allows you to ultimately specify which profiles to use # - Default will result in registering capabilities for all profiles. 
#
*/
/* Host protection capability mask registered with the SCSI mid-layer. */
unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION |
			      SHOST_DIX_TYPE0_PROTECTION |
			      SHOST_DIX_TYPE1_PROTECTION;

module_param(lpfc_prot_mask, uint, S_IRUGO);
MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");

/*
# lpfc_prot_guard: i
#	- Bit mask of protection guard types to register with the SCSI
#	  mid-layer
#	- Guard types are currently either 1) IP checksum 2) T10-DIF CRC
#	- Allows you to ultimately specify which profiles to use
#	- Default will result in registering capabilities for all guard types
#
*/
unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP;
module_param(lpfc_prot_guard, byte, S_IRUGO);
MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type");

/*
 * Delay initial NPort discovery when Clean Address bit is cleared in
 * FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed.
 * This parameter can have value 0 or 1.
 * When this parameter is set to 0, no delay is added to the initial
 * discovery.
 * When this parameter is set to non-zero value, initial Nport discovery is
 * delayed by ra_tov seconds when Clean Address bit is cleared in FLOGI/FDISC
 * accept and FCID/Fabric name/Fabric portname is changed.
 * Driver always delay Nport discovery for subsequent FLOGI/FDISC completion
 * when Clean Address bit is cleared in FLOGI/FDISC
 * accept and FCID/Fabric name/Fabric portname is changed.
 * Default value is 0.
 */
int lpfc_delay_discovery;
module_param(lpfc_delay_discovery, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_delay_discovery,
	"Delay NPort discovery when Clean Address bit is cleared. "
	"Allowed values: 0,1.");

/*
 * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
 * This value can be set to values between 64 and 256. The default value is
 * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
 * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE).
*/ LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count"); LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_PROT_SG_SEG_CNT, LPFC_DEFAULT_PROT_SG_SEG_CNT, LPFC_MAX_PROT_SG_SEG_CNT, "Max Protection Scatter Gather Segment Count"); struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_bg_info, &dev_attr_bg_guard_err, &dev_attr_bg_apptag_err, &dev_attr_bg_reftag_err, &dev_attr_info, &dev_attr_serialnum, &dev_attr_modeldesc, &dev_attr_modelname, &dev_attr_programtype, &dev_attr_portnum, &dev_attr_fwrev, &dev_attr_hdw, &dev_attr_option_rom_version, &dev_attr_link_state, &dev_attr_num_discovered_ports, &dev_attr_menlo_mgmt_mode, &dev_attr_lpfc_drvr_version, &dev_attr_lpfc_enable_fip, &dev_attr_lpfc_temp_sensor, &dev_attr_lpfc_log_verbose, &dev_attr_lpfc_lun_queue_depth, &dev_attr_lpfc_tgt_queue_depth, &dev_attr_lpfc_hba_queue_depth, &dev_attr_lpfc_peer_port_login, &dev_attr_lpfc_nodev_tmo, &dev_attr_lpfc_devloss_tmo, &dev_attr_lpfc_fcp_class, &dev_attr_lpfc_use_adisc, &dev_attr_lpfc_ack0, &dev_attr_lpfc_topology, &dev_attr_lpfc_scan_down, &dev_attr_lpfc_link_speed, &dev_attr_lpfc_cr_delay, &dev_attr_lpfc_cr_count, &dev_attr_lpfc_multi_ring_support, &dev_attr_lpfc_multi_ring_rctl, &dev_attr_lpfc_multi_ring_type, &dev_attr_lpfc_fdmi_on, &dev_attr_lpfc_max_luns, &dev_attr_lpfc_enable_npiv, &dev_attr_lpfc_fcf_failover_policy, &dev_attr_lpfc_enable_rrq, &dev_attr_nport_evt_cnt, &dev_attr_board_mode, &dev_attr_max_vpi, &dev_attr_used_vpi, &dev_attr_max_rpi, &dev_attr_used_rpi, &dev_attr_max_xri, &dev_attr_used_xri, &dev_attr_npiv_info, &dev_attr_issue_reset, &dev_attr_lpfc_poll, &dev_attr_lpfc_poll_tmo, &dev_attr_lpfc_use_msi, &dev_attr_lpfc_fcp_imax, &dev_attr_lpfc_fcp_wq_count, &dev_attr_lpfc_fcp_eq_count, &dev_attr_lpfc_enable_bg, &dev_attr_lpfc_soft_wwnn, &dev_attr_lpfc_soft_wwpn, &dev_attr_lpfc_soft_wwn_enable, &dev_attr_lpfc_enable_hba_reset, &dev_attr_lpfc_enable_hba_heartbeat, 
&dev_attr_lpfc_sg_seg_cnt, &dev_attr_lpfc_max_scsicmpl_time, &dev_attr_lpfc_stat_data_ctrl, &dev_attr_lpfc_prot_sg_seg_cnt, &dev_attr_lpfc_aer_support, &dev_attr_lpfc_aer_state_cleanup, &dev_attr_lpfc_sriov_nr_virtfn, &dev_attr_lpfc_suppress_link_up, &dev_attr_lpfc_iocb_cnt, &dev_attr_iocb_hw, &dev_attr_txq_hw, &dev_attr_txcmplq_hw, &dev_attr_lpfc_fips_level, &dev_attr_lpfc_fips_rev, &dev_attr_lpfc_dss, &dev_attr_lpfc_sriov_hw_max_virtfn, &dev_attr_protocol, NULL, }; struct device_attribute *lpfc_vport_attrs[] = { &dev_attr_info, &dev_attr_link_state, &dev_attr_num_discovered_ports, &dev_attr_lpfc_drvr_version, &dev_attr_lpfc_log_verbose, &dev_attr_lpfc_lun_queue_depth, &dev_attr_lpfc_tgt_queue_depth, &dev_attr_lpfc_nodev_tmo, &dev_attr_lpfc_devloss_tmo, &dev_attr_lpfc_hba_queue_depth, &dev_attr_lpfc_peer_port_login, &dev_attr_lpfc_restrict_login, &dev_attr_lpfc_fcp_class, &dev_attr_lpfc_use_adisc, &dev_attr_lpfc_fdmi_on, &dev_attr_lpfc_max_luns, &dev_attr_nport_evt_cnt, &dev_attr_npiv_info, &dev_attr_lpfc_enable_da_id, &dev_attr_lpfc_max_scsicmpl_time, &dev_attr_lpfc_stat_data_ctrl, &dev_attr_lpfc_static_vport, &dev_attr_lpfc_fips_level, &dev_attr_lpfc_fips_rev, NULL, }; /** * sysfs_ctlreg_write - Write method for writing to ctlreg * @filp: open sysfs file * @kobj: kernel kobject that contains the kernel class device. * @bin_attr: kernel attributes passed to us. * @buf: contains the data to be written to the adapter IOREG space. * @off: offset into buffer to beginning of data. * @count: bytes to transfer. * * Description: * Accessed via /sys/class/scsi_host/hostxxx/ctlreg. * Uses the adapter io control registers to send buf contents to the adapter. 
* Returns:
 * -ERANGE off and count combo out of range
 * -EINVAL off, count or buff address invalid
 * -EPERM adapter is offline
 * value of count, buf contents written
 **/
static ssize_t
sysfs_ctlreg_write(struct file *filp, struct kobject *kobj,
		   struct bin_attribute *bin_attr,
		   char *buf, loff_t off, size_t count)
{
	size_t offset;
	struct device *dev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;

	/* Direct register writes are not supported on SLI-4 parts. */
	if (phba->sli_rev >= LPFC_SLI_REV4)
		return -EPERM;

	if ((off + count) > FF_REG_AREA_SIZE)
		return -ERANGE;

	/* Nothing to write unless the buffer holds more than the key. */
	if (count <= LPFC_REG_WRITE_KEY_SIZE)
		return 0;

	/* Offset, length and buffer must all be word aligned. */
	if (off % 4 || count % 4 || (unsigned long)buf % 4)
		return -EINVAL;

	/* This is to protect HBA registers from accidental writes. */
	if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE))
		return -EINVAL;

	if (!(vport->fc_flag & FC_OFFLINE_MODE))
		return -EPERM;

	spin_lock_irq(&phba->hbalock);
	for (offset = 0; offset < count - LPFC_REG_WRITE_KEY_SIZE;
	     offset += sizeof(uint32_t))
		writel(*((uint32_t *)(buf + offset + LPFC_REG_WRITE_KEY_SIZE)),
		       phba->ctrl_regs_memmap_p + off + offset);
	spin_unlock_irq(&phba->hbalock);

	return count;
}

/**
 * sysfs_ctlreg_read - Read method for reading from ctlreg
 * @filp: open sysfs file
 * @kobj: kernel kobject that contains the kernel class device.
 * @bin_attr: kernel attributes passed to us.
 * @buf: if successful contains the data from the adapter IOREG space.
 * @off: offset into buffer to beginning of data.
 * @count: bytes to transfer.
 *
 * Description:
 * Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
 * Uses the adapter io control registers to read data into buf.
* * Returns: * -ERANGE off and count combo out of range * -EINVAL off, count or buff address invalid * value of count, buf contents read **/ static ssize_t sysfs_ctlreg_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { size_t buf_off; uint32_t * tmp_ptr; struct device *dev = container_of(kobj, struct device, kobj); struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; if (phba->sli_rev >= LPFC_SLI_REV4) return -EPERM; if (off > FF_REG_AREA_SIZE) return -ERANGE; if ((off + count) > FF_REG_AREA_SIZE) count = FF_REG_AREA_SIZE - off; if (count == 0) return 0; if (off % 4 || count % 4 || (unsigned long)buf % 4) return -EINVAL; spin_lock_irq(&phba->hbalock); for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) { tmp_ptr = (uint32_t *)(buf + buf_off); *tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off); } spin_unlock_irq(&phba->hbalock); return count; } static struct bin_attribute sysfs_ctlreg_attr = { .attr = { .name = "ctlreg", .mode = S_IRUSR | S_IWUSR, }, .size = 256, .read = sysfs_ctlreg_read, .write = sysfs_ctlreg_write, }; /** * sysfs_mbox_write - Write method for writing information via mbox * @filp: open sysfs file * @kobj: kernel kobject that contains the kernel class device. * @bin_attr: kernel attributes passed to us. * @buf: contains the data to be written to sysfs mbox. * @off: offset into buffer to beginning of data. * @count: bytes to transfer. * * Description: * Deprecated function. All mailbox access from user space is performed via the * bsg interface. 
* * Returns: * -EPERM operation not permitted **/ static ssize_t sysfs_mbox_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { return -EPERM; } /** * sysfs_mbox_read - Read method for reading information via mbox * @filp: open sysfs file * @kobj: kernel kobject that contains the kernel class device. * @bin_attr: kernel attributes passed to us. * @buf: contains the data to be read from sysfs mbox. * @off: offset into buffer to beginning of data. * @count: bytes to transfer. * * Description: * Deprecated function. All mailbox access from user space is performed via the * bsg interface. * * Returns: * -EPERM operation not permitted **/ static ssize_t sysfs_mbox_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { return -EPERM; } static struct bin_attribute sysfs_mbox_attr = { .attr = { .name = "mbox", .mode = S_IRUSR | S_IWUSR, }, .size = MAILBOX_SYSFS_MAX, .read = sysfs_mbox_read, .write = sysfs_mbox_write, }; /** * lpfc_alloc_sysfs_attr - Creates the ctlreg and mbox entries * @vport: address of lpfc vport structure. 
* * Return codes: * zero on success * error return code from sysfs_create_bin_file() **/ int lpfc_alloc_sysfs_attr(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); int error; error = sysfs_create_bin_file(&shost->shost_dev.kobj, &sysfs_drvr_stat_data_attr); /* Virtual ports do not need ctrl_reg and mbox */ if (error || vport->port_type == LPFC_NPIV_PORT) goto out; error = sysfs_create_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); if (error) goto out_remove_stat_attr; error = sysfs_create_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr); if (error) goto out_remove_ctlreg_attr; return 0; out_remove_ctlreg_attr: sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); out_remove_stat_attr: sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_drvr_stat_data_attr); out: return error; } /** * lpfc_free_sysfs_attr - Removes the ctlreg and mbox entries * @vport: address of lpfc vport structure. **/ void lpfc_free_sysfs_attr(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_drvr_stat_data_attr); /* Virtual ports do not need ctrl_reg and mbox */ if (vport->port_type == LPFC_NPIV_PORT) return; sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr); sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); } /* * Dynamic FC Host Attributes Support */ /** * lpfc_get_host_port_id - Copy the vport DID into the scsi host port id * @shost: kernel scsi host pointer. **/ static void lpfc_get_host_port_id(struct Scsi_Host *shost) { struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; /* note: fc_myDID already in cpu endianness */ fc_host_port_id(shost) = vport->fc_myDID; } /** * lpfc_get_host_port_type - Set the value of the scsi host port type * @shost: kernel scsi host pointer. 
**/ static void lpfc_get_host_port_type(struct Scsi_Host *shost) { struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; spin_lock_irq(shost->host_lock); if (vport->port_type == LPFC_NPIV_PORT) { fc_host_port_type(shost) = FC_PORTTYPE_NPIV; } else if (lpfc_is_link_up(phba)) { if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { if (vport->fc_flag & FC_PUBLIC_LOOP) fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; else fc_host_port_type(shost) = FC_PORTTYPE_LPORT; } else { if (vport->fc_flag & FC_FABRIC) fc_host_port_type(shost) = FC_PORTTYPE_NPORT; else fc_host_port_type(shost) = FC_PORTTYPE_PTP; } } else fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN; spin_unlock_irq(shost->host_lock); } /** * lpfc_get_host_port_state - Set the value of the scsi host port state * @shost: kernel scsi host pointer. **/ static void lpfc_get_host_port_state(struct Scsi_Host *shost) { struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; spin_lock_irq(shost->host_lock); if (vport->fc_flag & FC_OFFLINE_MODE) fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; else { switch (phba->link_state) { case LPFC_LINK_UNKNOWN: case LPFC_LINK_DOWN: fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; break; case LPFC_LINK_UP: case LPFC_CLEAR_LA: case LPFC_HBA_READY: /* Links up, reports port state accordingly */ if (vport->port_state < LPFC_VPORT_READY) fc_host_port_state(shost) = FC_PORTSTATE_BYPASSED; else fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; break; case LPFC_HBA_ERROR: fc_host_port_state(shost) = FC_PORTSTATE_ERROR; break; default: fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; break; } } spin_unlock_irq(shost->host_lock); } /** * lpfc_get_host_speed - Set the value of the scsi host speed * @shost: kernel scsi host pointer. 
**/ static void lpfc_get_host_speed(struct Scsi_Host *shost) { struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; spin_lock_irq(shost->host_lock); if (lpfc_is_link_up(phba)) { switch(phba->fc_linkspeed) { case LPFC_LINK_SPEED_1GHZ: fc_host_speed(shost) = FC_PORTSPEED_1GBIT; break; case LPFC_LINK_SPEED_2GHZ: fc_host_speed(shost) = FC_PORTSPEED_2GBIT; break; case LPFC_LINK_SPEED_4GHZ: fc_host_speed(shost) = FC_PORTSPEED_4GBIT; break; case LPFC_LINK_SPEED_8GHZ: fc_host_speed(shost) = FC_PORTSPEED_8GBIT; break; case LPFC_LINK_SPEED_10GHZ: fc_host_speed(shost) = FC_PORTSPEED_10GBIT; break; case LPFC_LINK_SPEED_16GHZ: fc_host_speed(shost) = FC_PORTSPEED_16GBIT; break; default: fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; break; } } else fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; spin_unlock_irq(shost->host_lock); } /** * lpfc_get_host_fabric_name - Set the value of the scsi host fabric name * @shost: kernel scsi host pointer. **/ static void lpfc_get_host_fabric_name (struct Scsi_Host *shost) { struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; u64 node_name; spin_lock_irq(shost->host_lock); if ((vport->port_state > LPFC_FLOGI) && ((vport->fc_flag & FC_FABRIC) || ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) && (vport->fc_flag & FC_PUBLIC_LOOP)))) node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn); else /* fabric is local port if there is no F/FL_Port */ node_name = 0; spin_unlock_irq(shost->host_lock); fc_host_fabric_name(shost) = node_name; } /** * lpfc_get_stats - Return statistical information about the adapter * @shost: kernel scsi host pointer. * * Notes: * NULL on error for link down, no mbox pool, sli2 active, * management not allowed, memory allocation error, or mbox error. 
* Returns:
 * NULL for error
 * address of the adapter host statistics
 **/
static struct fc_host_statistics *
lpfc_get_stats(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct fc_host_statistics *hs = &phba->link_stats;
	struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets;
	LPFC_MBOXQ_t *pmboxq;
	MAILBOX_t *pmb;
	unsigned long seconds;
	int rc = 0;

	/*
	 * prevent udev from issuing mailbox commands until the port is
	 * configured.
	 */
	if (phba->link_state < LPFC_LINK_DOWN ||
	    !phba->mbox_mem_pool ||
	    (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
		return NULL;

	/* Management traffic is currently blocked; do not issue mailboxes. */
	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
		return NULL;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return NULL;
	memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));

	/* First mailbox: MBX_READ_STATUS for frame/byte counters. */
	pmb = &pmboxq->u.mb;
	pmb->mbxCommand = MBX_READ_STATUS;
	pmb->mbxOwner = OWN_HOST;
	pmboxq->context1 = NULL;
	pmboxq->vport = vport;

	/* Poll when offline, otherwise wait up to 2 * RATOV. */
	if (vport->fc_flag & FC_OFFLINE_MODE)
		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
	else
		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);

	if (rc != MBX_SUCCESS) {
		/* pmboxq is freed only when the failure is not a timeout. */
		if (rc != MBX_TIMEOUT)
			mempool_free(pmboxq, phba->mbox_mem_pool);
		return NULL;
	}

	memset(hs, 0, sizeof (struct fc_host_statistics));

	hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
	/*
	 * The MBX_READ_STATUS returns tx_k_bytes which has to
	 * converted to words
	 */
	hs->tx_words = (uint64_t)
			((uint64_t)pmb->un.varRdStatus.xmitByteCnt
			 * (uint64_t)256);
	hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
	hs->rx_words = (uint64_t)
			((uint64_t)pmb->un.varRdStatus.rcvByteCnt
			 * (uint64_t)256);

	/* Second mailbox: MBX_READ_LNK_STAT for the link error counters. */
	memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
	pmb->mbxCommand = MBX_READ_LNK_STAT;
	pmb->mbxOwner = OWN_HOST;
	pmboxq->context1 = NULL;
	pmboxq->vport = vport;

	if (vport->fc_flag & FC_OFFLINE_MODE)
		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
	else
		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);

	if (rc != MBX_SUCCESS) {
		/* pmboxq is freed only when the failure is not a timeout. */
		if (rc != MBX_TIMEOUT)
			mempool_free(pmboxq, phba->mbox_mem_pool);
		return NULL;
	}

	hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
	hs->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
	hs->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
	hs->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
	hs->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
	hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
	hs->error_frames = pmb->un.varRdLnk.crcCnt;

	/* Report deltas relative to the last lpfc_reset_stats() snapshot. */
	hs->link_failure_count -= lso->link_failure_count;
	hs->loss_of_sync_count -= lso->loss_of_sync_count;
	hs->loss_of_signal_count -= lso->loss_of_signal_count;
	hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count;
	hs->invalid_tx_word_count -= lso->invalid_tx_word_count;
	hs->invalid_crc_count -= lso->invalid_crc_count;
	hs->error_frames -= lso->error_frames;

	/* LIP counts only apply to loop topology; NOS counts otherwise. */
	if (phba->hba_flag & HBA_FCOE_MODE) {
		hs->lip_count = -1;
		hs->nos_count = (phba->link_events >> 1);
		hs->nos_count -= lso->link_events;
	} else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		hs->lip_count = (phba->fc_eventTag >> 1);
		hs->lip_count -= lso->link_events;
		hs->nos_count = -1;
	} else {
		hs->lip_count = -1;
		hs->nos_count = (phba->fc_eventTag >> 1);
		hs->nos_count -= lso->link_events;
	}

	hs->dumped_frames = -1;

	seconds = get_seconds();
	/* Account for wraparound of the seconds counter. */
	if (seconds < psli->stats_start)
		hs->seconds_since_last_reset = seconds +
				((unsigned long)-1 - psli->stats_start);
	else
		hs->seconds_since_last_reset = seconds - psli->stats_start;

	mempool_free(pmboxq, phba->mbox_mem_pool);

	return hs;
}

/**
 * lpfc_reset_stats - Copy the adapter link stats information
 * @shost: kernel scsi host pointer.
**/
static void
lpfc_reset_stats(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
	LPFC_MBOXQ_t *pmboxq;
	MAILBOX_t *pmb;
	int rc = 0;

	/* Management traffic is currently blocked; do not issue mailboxes. */
	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
		return;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return;
	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));

	/* MBX_READ_STATUS with word0 = 1 asks the firmware to reset stats. */
	pmb = &pmboxq->u.mb;
	pmb->mbxCommand = MBX_READ_STATUS;
	pmb->mbxOwner = OWN_HOST;
	pmb->un.varWords[0] = 0x1; /* reset request */
	pmboxq->context1 = NULL;
	pmboxq->vport = vport;

	/* Poll when offline or SLI inactive, otherwise wait for completion. */
	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
		(!(psli->sli_flag & LPFC_SLI_ACTIVE)))
		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
	else
		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);

	if (rc != MBX_SUCCESS) {
		/* pmboxq is freed only when the failure is not a timeout. */
		if (rc != MBX_TIMEOUT)
			mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	/* Snapshot the link error counters as the new baseline offsets. */
	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
	pmb->mbxCommand = MBX_READ_LNK_STAT;
	pmb->mbxOwner = OWN_HOST;
	pmboxq->context1 = NULL;
	pmboxq->vport = vport;

	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
	else
		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);

	if (rc != MBX_SUCCESS) {
		/* pmboxq is freed only when the failure is not a timeout. */
		if (rc != MBX_TIMEOUT)
			mempool_free( pmboxq, phba->mbox_mem_pool);
		return;
	}

	lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
	lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
	lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
	lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
	lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
	lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
	lso->error_frames = pmb->un.varRdLnk.crcCnt;

	if (phba->hba_flag & HBA_FCOE_MODE)
		lso->link_events = (phba->link_events >> 1);
	else
		lso->link_events = (phba->fc_eventTag >> 1);

	psli->stats_start = get_seconds();

	mempool_free(pmboxq, phba->mbox_mem_pool);

	return;
}

/*
 * The LPFC driver treats linkdown handling as target loss events so there
 * are no sysfs handlers for link_down_tmo.
 */

/**
 * lpfc_get_node_by_target - Return the nodelist for a target
 * @starget: kernel scsi target pointer.
 *
 * Returns:
 * address of the node list if found
 * NULL target not found
 **/
static struct lpfc_nodelist *
lpfc_get_node_by_target(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	/* Search for this, mapped, target ID */
	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (NLP_CHK_NODE_ACT(ndlp) &&
		    ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
		    starget->id == ndlp->nlp_sid) {
			spin_unlock_irq(shost->host_lock);
			return ndlp;
		}
	}
	spin_unlock_irq(shost->host_lock);
	return NULL;
}

/**
 * lpfc_get_starget_port_id - Set the target port id to the ndlp DID or -1
 * @starget: kernel scsi target pointer.
 **/
static void
lpfc_get_starget_port_id(struct scsi_target *starget)
{
	struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);

	fc_starget_port_id(starget) = ndlp ? ndlp->nlp_DID : -1;
}

/**
 * lpfc_get_starget_node_name - Set the target node name
 * @starget: kernel scsi target pointer.
 *
 * Description: Set the target node name to the ndlp node name wwn or zero.
 **/
static void
lpfc_get_starget_node_name(struct scsi_target *starget)
{
	struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);

	fc_starget_node_name(starget) =
		ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0;
}

/**
 * lpfc_get_starget_port_name - Set the target port name
 * @starget: kernel scsi target pointer.
 *
 * Description: set the target port name to the ndlp port name wwn or zero.
 **/
static void
lpfc_get_starget_port_name(struct scsi_target *starget)
{
	struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);

	fc_starget_port_name(starget) = ndlp ?
wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0; } /** * lpfc_set_rport_loss_tmo - Set the rport dev loss tmo * @rport: fc rport address. * @timeout: new value for dev loss tmo. * * Description: * If timeout is non zero set the dev_loss_tmo to timeout, else set * dev_loss_tmo to one. **/ static void lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) { if (timeout) rport->dev_loss_tmo = timeout; else rport->dev_loss_tmo = 1; } /** * lpfc_rport_show_function - Return rport target information * * Description: * Macro that uses field to generate a function with the name lpfc_show_rport_ * * lpfc_show_rport_##field: returns the bytes formatted in buf * @cdev: class converted to an fc_rport. * @buf: on return contains the target_field or zero. * * Returns: size of formatted string. **/ #define lpfc_rport_show_function(field, format_string, sz, cast) \ static ssize_t \ lpfc_show_rport_##field (struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ struct fc_rport *rport = transport_class_to_rport(dev); \ struct lpfc_rport_data *rdata = rport->hostdata; \ return snprintf(buf, sz, format_string, \ (rdata->target) ? cast rdata->target->field : 0); \ } #define lpfc_rport_rd_attr(field, format_string, sz) \ lpfc_rport_show_function(field, format_string, sz, ) \ static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL) /** * lpfc_set_vport_symbolic_name - Set the vport's symbolic name * @fc_vport: The fc_vport who's symbolic name has been changed. * * Description: * This function is called by the transport after the @fc_vport's symbolic name * has been changed. This function re-registers the symbolic name with the * switch to propagate the change into the fabric if the vport is active. 
**/ static void lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport) { struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; if (vport->port_state == LPFC_VPORT_READY) lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); } /** * lpfc_hba_log_verbose_init - Set hba's log verbose level * @phba: Pointer to lpfc_hba struct. * * This function is called by the lpfc_get_cfgparam() routine to set the * module lpfc_log_verbose into the @phba cfg_log_verbose for use with * log message according to the module's lpfc_log_verbose parameter setting * before hba port or vport created. **/ static void lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose) { phba->cfg_log_verbose = verbose; } struct fc_function_template lpfc_transport_functions = { /* fixed attributes the driver supports */ .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, .show_host_supported_fc4s = 1, .show_host_supported_speeds = 1, .show_host_maxframe_size = 1, .show_host_symbolic_name = 1, /* dynamic attributes the driver supports */ .get_host_port_id = lpfc_get_host_port_id, .show_host_port_id = 1, .get_host_port_type = lpfc_get_host_port_type, .show_host_port_type = 1, .get_host_port_state = lpfc_get_host_port_state, .show_host_port_state = 1, /* active_fc4s is shown but doesn't change (thus no get function) */ .show_host_active_fc4s = 1, .get_host_speed = lpfc_get_host_speed, .show_host_speed = 1, .get_host_fabric_name = lpfc_get_host_fabric_name, .show_host_fabric_name = 1, /* * The LPFC driver treats linkdown handling as target loss events * so there are no sysfs handlers for link_down_tmo. 
*/ .get_fc_host_stats = lpfc_get_stats, .reset_fc_host_stats = lpfc_reset_stats, .dd_fcrport_size = sizeof(struct lpfc_rport_data), .show_rport_maxframe_size = 1, .show_rport_supported_classes = 1, .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo, .show_rport_dev_loss_tmo = 1, .get_starget_port_id = lpfc_get_starget_port_id, .show_starget_port_id = 1, .get_starget_node_name = lpfc_get_starget_node_name, .show_starget_node_name = 1, .get_starget_port_name = lpfc_get_starget_port_name, .show_starget_port_name = 1, .issue_fc_host_lip = lpfc_issue_lip, .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk, .terminate_rport_io = lpfc_terminate_rport_io, .dd_fcvport_size = sizeof(struct lpfc_vport *), .vport_disable = lpfc_vport_disable, .set_vport_symbolic_name = lpfc_set_vport_symbolic_name, .bsg_request = lpfc_bsg_request, .bsg_timeout = lpfc_bsg_timeout, }; struct fc_function_template lpfc_vport_transport_functions = { /* fixed attributes the driver supports */ .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, .show_host_supported_fc4s = 1, .show_host_supported_speeds = 1, .show_host_maxframe_size = 1, .show_host_symbolic_name = 1, /* dynamic attributes the driver supports */ .get_host_port_id = lpfc_get_host_port_id, .show_host_port_id = 1, .get_host_port_type = lpfc_get_host_port_type, .show_host_port_type = 1, .get_host_port_state = lpfc_get_host_port_state, .show_host_port_state = 1, /* active_fc4s is shown but doesn't change (thus no get function) */ .show_host_active_fc4s = 1, .get_host_speed = lpfc_get_host_speed, .show_host_speed = 1, .get_host_fabric_name = lpfc_get_host_fabric_name, .show_host_fabric_name = 1, /* * The LPFC driver treats linkdown handling as target loss events * so there are no sysfs handlers for link_down_tmo. 
*/ .get_fc_host_stats = lpfc_get_stats, .reset_fc_host_stats = lpfc_reset_stats, .dd_fcrport_size = sizeof(struct lpfc_rport_data), .show_rport_maxframe_size = 1, .show_rport_supported_classes = 1, .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo, .show_rport_dev_loss_tmo = 1, .get_starget_port_id = lpfc_get_starget_port_id, .show_starget_port_id = 1, .get_starget_node_name = lpfc_get_starget_node_name, .show_starget_node_name = 1, .get_starget_port_name = lpfc_get_starget_port_name, .show_starget_port_name = 1, .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk, .terminate_rport_io = lpfc_terminate_rport_io, .vport_disable = lpfc_vport_disable, .set_vport_symbolic_name = lpfc_set_vport_symbolic_name, }; /** * lpfc_get_cfgparam - Used during probe_one to init the adapter structure * @phba: lpfc_hba pointer. **/ void lpfc_get_cfgparam(struct lpfc_hba *phba) { lpfc_cr_delay_init(phba, lpfc_cr_delay); lpfc_cr_count_init(phba, lpfc_cr_count); lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support); lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl); lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type); lpfc_ack0_init(phba, lpfc_ack0); lpfc_topology_init(phba, lpfc_topology); lpfc_link_speed_init(phba, lpfc_link_speed); lpfc_poll_tmo_init(phba, lpfc_poll_tmo); lpfc_enable_npiv_init(phba, lpfc_enable_npiv); lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy); lpfc_enable_rrq_init(phba, lpfc_enable_rrq); lpfc_use_msi_init(phba, lpfc_use_msi); lpfc_fcp_imax_init(phba, lpfc_fcp_imax); lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count); lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count); lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); lpfc_enable_bg_init(phba, lpfc_enable_bg); if (phba->sli_rev == LPFC_SLI_REV4) phba->cfg_poll = 0; else phba->cfg_poll = lpfc_poll; phba->cfg_soft_wwnn = 0L; phba->cfg_soft_wwpn = 0L; lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); lpfc_prot_sg_seg_cnt_init(phba, 
			       lpfc_prot_sg_seg_cnt);
	lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
	lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
	lpfc_aer_support_init(phba, lpfc_aer_support);
	lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
	lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
	lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
	phba->cfg_enable_dss = 1;
	return;
}

/**
 * lpfc_get_vport_cfgparam - Used during port create, init the vport structure
 * @vport: lpfc_vport pointer.
 *
 * Copies the per-vport module parameters into the @vport configuration
 * fields via the generated *_init helpers.
 **/
void
lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
{
	lpfc_log_verbose_init(vport, lpfc_log_verbose);
	lpfc_lun_queue_depth_init(vport, lpfc_lun_queue_depth);
	lpfc_tgt_queue_depth_init(vport, lpfc_tgt_queue_depth);
	lpfc_devloss_tmo_init(vport, lpfc_devloss_tmo);
	lpfc_nodev_tmo_init(vport, lpfc_nodev_tmo);
	lpfc_peer_port_login_init(vport, lpfc_peer_port_login);
	lpfc_restrict_login_init(vport, lpfc_restrict_login);
	lpfc_fcp_class_init(vport, lpfc_fcp_class);
	lpfc_use_adisc_init(vport, lpfc_use_adisc);
	lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time);
	lpfc_fdmi_on_init(vport, lpfc_fdmi_on);
	lpfc_discovery_threads_init(vport, lpfc_discovery_threads);
	lpfc_max_luns_init(vport, lpfc_max_luns);
	lpfc_scan_down_init(vport, lpfc_scan_down);
	lpfc_enable_da_id_init(vport, lpfc_enable_da_id);
	return;
}
gpl-2.0
embeddedarm/linux-3.4-ts75xx
drivers/scsi/pm8001/pm8001_sas.c
4853
34067
/* * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. 
* */ #include <linux/slab.h> #include "pm8001_sas.h" /** * pm8001_find_tag - from sas task to find out tag that belongs to this task * @task: the task sent to the LLDD * @tag: the found tag associated with the task */ static int pm8001_find_tag(struct sas_task *task, u32 *tag) { if (task->lldd_task) { struct pm8001_ccb_info *ccb; ccb = task->lldd_task; *tag = ccb->ccb_tag; return 1; } return 0; } /** * pm8001_tag_clear - clear the tags bitmap * @pm8001_ha: our hba struct * @tag: the found tag associated with the task */ static void pm8001_tag_clear(struct pm8001_hba_info *pm8001_ha, u32 tag) { void *bitmap = pm8001_ha->tags; clear_bit(tag, bitmap); } static void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag) { pm8001_tag_clear(pm8001_ha, tag); } static void pm8001_tag_set(struct pm8001_hba_info *pm8001_ha, u32 tag) { void *bitmap = pm8001_ha->tags; set_bit(tag, bitmap); } /** * pm8001_tag_alloc - allocate a empty tag for task used. * @pm8001_ha: our hba struct * @tag_out: the found empty tag . */ inline int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out) { unsigned int index, tag; void *bitmap = pm8001_ha->tags; index = find_first_zero_bit(bitmap, pm8001_ha->tags_num); tag = index; if (tag >= pm8001_ha->tags_num) return -SAS_QUEUE_FULL; pm8001_tag_set(pm8001_ha, tag); *tag_out = tag; return 0; } void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha) { int i; for (i = 0; i < pm8001_ha->tags_num; ++i) pm8001_tag_clear(pm8001_ha, i); } /** * pm8001_mem_alloc - allocate memory for pm8001. * @pdev: pci device. * @virt_addr: the allocated virtual address * @pphys_addr_hi: the physical address high byte address. * @pphys_addr_lo: the physical address low byte address. * @mem_size: memory size. 
*/ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr, dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo, u32 mem_size, u32 align) { caddr_t mem_virt_alloc; dma_addr_t mem_dma_handle; u64 phys_align; u64 align_offset = 0; if (align) align_offset = (dma_addr_t)align - 1; mem_virt_alloc = pci_alloc_consistent(pdev, mem_size + align, &mem_dma_handle); if (!mem_virt_alloc) { pm8001_printk("memory allocation error\n"); return -1; } memset((void *)mem_virt_alloc, 0, mem_size+align); *pphys_addr = mem_dma_handle; phys_align = (*pphys_addr + align_offset) & ~align_offset; *virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr; *pphys_addr_hi = upper_32_bits(phys_align); *pphys_addr_lo = lower_32_bits(phys_align); return 0; } /** * pm8001_find_ha_by_dev - from domain device which come from sas layer to * find out our hba struct. * @dev: the domain device which from sas layer. */ static struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev) { struct sas_ha_struct *sha = dev->port->ha; struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; return pm8001_ha; } /** * pm8001_phy_control - this function should be registered to * sas_domain_function_template to provide libsas used, note: this is just * control the HBA phy rather than other expander phy if you want control * other phy, you should use SMP command. * @sas_phy: which phy in HBA phys. * @func: the operation. * @funcdata: always NULL. 
*/ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, void *funcdata) { int rc = 0, phy_id = sas_phy->id; struct pm8001_hba_info *pm8001_ha = NULL; struct sas_phy_linkrates *rates; DECLARE_COMPLETION_ONSTACK(completion); unsigned long flags; pm8001_ha = sas_phy->ha->lldd_ha; pm8001_ha->phy[phy_id].enable_completion = &completion; switch (func) { case PHY_FUNC_SET_LINK_RATE: rates = funcdata; if (rates->minimum_linkrate) { pm8001_ha->phy[phy_id].minimum_linkrate = rates->minimum_linkrate; } if (rates->maximum_linkrate) { pm8001_ha->phy[phy_id].maximum_linkrate = rates->maximum_linkrate; } if (pm8001_ha->phy[phy_id].phy_state == 0) { PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id); wait_for_completion(&completion); } PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, PHY_LINK_RESET); break; case PHY_FUNC_HARD_RESET: if (pm8001_ha->phy[phy_id].phy_state == 0) { PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id); wait_for_completion(&completion); } PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, PHY_HARD_RESET); break; case PHY_FUNC_LINK_RESET: if (pm8001_ha->phy[phy_id].phy_state == 0) { PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id); wait_for_completion(&completion); } PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, PHY_LINK_RESET); break; case PHY_FUNC_RELEASE_SPINUP_HOLD: PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, PHY_LINK_RESET); break; case PHY_FUNC_DISABLE: PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id); break; case PHY_FUNC_GET_EVENTS: spin_lock_irqsave(&pm8001_ha->lock, flags); if (-1 == pm8001_bar4_shift(pm8001_ha, (phy_id < 4) ? 
0x30000 : 0x40000)) { spin_unlock_irqrestore(&pm8001_ha->lock, flags); return -EINVAL; } { struct sas_phy *phy = sas_phy->phy; uint32_t *qp = (uint32_t *)(((char *) pm8001_ha->io_mem[2].memvirtaddr) + 0x1034 + (0x4000 * (phy_id & 3))); phy->invalid_dword_count = qp[0]; phy->running_disparity_error_count = qp[1]; phy->loss_of_dword_sync_count = qp[3]; phy->phy_reset_problem_count = qp[4]; } pm8001_bar4_shift(pm8001_ha, 0); spin_unlock_irqrestore(&pm8001_ha->lock, flags); return 0; default: rc = -EOPNOTSUPP; } msleep(300); return rc; } /** * pm8001_scan_start - we should enable all HBA phys by sending the phy_start * command to HBA. * @shost: the scsi host data. */ void pm8001_scan_start(struct Scsi_Host *shost) { int i; struct pm8001_hba_info *pm8001_ha; struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); pm8001_ha = sha->lldd_ha; PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha); for (i = 0; i < pm8001_ha->chip->n_phy; ++i) PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i); } int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time) { struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); /* give the phy enabling interrupt event time to come in (1s * is empirically about all it takes) */ if (time < HZ) return 0; /* Wait for discovery to finish */ sas_drain_work(ha); return 1; } /** * pm8001_task_prep_smp - the dispatcher function, prepare data for smp task * @pm8001_ha: our hba card information * @ccb: the ccb which attached to smp task */ static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha, struct pm8001_ccb_info *ccb) { return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb); } u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag) { struct ata_queued_cmd *qc = task->uldd_task; if (qc) { if (qc->tf.command == ATA_CMD_FPDMA_WRITE || qc->tf.command == ATA_CMD_FPDMA_READ) { *tag = qc->tag; return 1; } } return 0; } /** * pm8001_task_prep_ata - the dispatcher function, prepare data for sata task * @pm8001_ha: our hba card information * @ccb: the ccb which 
attached to sata task */ static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha, struct pm8001_ccb_info *ccb) { return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb); } /** * pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data * @pm8001_ha: our hba card information * @ccb: the ccb which attached to TM * @tmf: the task management IU */ static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha, struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf) { return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf); } /** * pm8001_task_prep_ssp - the dispatcher function,prepare ssp data for ssp task * @pm8001_ha: our hba card information * @ccb: the ccb which attached to ssp task */ static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha, struct pm8001_ccb_info *ccb) { return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb); } /* Find the local port id that's attached to this device */ static int sas_find_local_port_id(struct domain_device *dev) { struct domain_device *pdev = dev->parent; /* Directly attached device */ if (!pdev) return dev->port->id; while (pdev) { struct domain_device *pdev_p = pdev->parent; if (!pdev_p) return pdev->port->id; pdev = pdev->parent; } return 0; } /** * pm8001_task_exec - queue the task(ssp, smp && ata) to the hardware. * @task: the task to be execute. * @num: if can_queue great than 1, the task can be queued up. for SMP task, * we always execute one one time. * @gfp_flags: gfp_flags. * @is_tmf: if it is task management task. 
* @tmf: the task management IU */ #define DEV_IS_GONE(pm8001_dev) \ ((!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE))) static int pm8001_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf) { struct domain_device *dev = task->dev; struct pm8001_hba_info *pm8001_ha; struct pm8001_device *pm8001_dev; struct pm8001_port *port = NULL; struct sas_task *t = task; struct pm8001_ccb_info *ccb; u32 tag = 0xdeadbeef, rc, n_elem = 0; u32 n = num; unsigned long flags = 0; if (!dev->port) { struct task_status_struct *tsm = &t->task_status; tsm->resp = SAS_TASK_UNDELIVERED; tsm->stat = SAS_PHY_DOWN; if (dev->dev_type != SATA_DEV) t->task_done(t); return 0; } pm8001_ha = pm8001_find_ha_by_dev(task->dev); PM8001_IO_DBG(pm8001_ha, pm8001_printk("pm8001_task_exec device \n ")); spin_lock_irqsave(&pm8001_ha->lock, flags); do { dev = t->dev; pm8001_dev = dev->lldd_dev; port = &pm8001_ha->port[sas_find_local_port_id(dev)]; if (DEV_IS_GONE(pm8001_dev) || !port->port_attached) { if (sas_protocol_ata(t->task_proto)) { struct task_status_struct *ts = &t->task_status; ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_PHY_DOWN; spin_unlock_irqrestore(&pm8001_ha->lock, flags); t->task_done(t); spin_lock_irqsave(&pm8001_ha->lock, flags); if (n > 1) t = list_entry(t->list.next, struct sas_task, list); continue; } else { struct task_status_struct *ts = &t->task_status; ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_PHY_DOWN; t->task_done(t); if (n > 1) t = list_entry(t->list.next, struct sas_task, list); continue; } } rc = pm8001_tag_alloc(pm8001_ha, &tag); if (rc) goto err_out; ccb = &pm8001_ha->ccb_info[tag]; if (!sas_protocol_ata(t->task_proto)) { if (t->num_scatter) { n_elem = dma_map_sg(pm8001_ha->dev, t->scatter, t->num_scatter, t->data_dir); if (!n_elem) { rc = -ENOMEM; goto err_out_tag; } } } else { n_elem = t->num_scatter; } t->lldd_task = ccb; ccb->n_elem = n_elem; ccb->ccb_tag = tag; ccb->task = t; switch (t->task_proto) { case 
SAS_PROTOCOL_SMP: rc = pm8001_task_prep_smp(pm8001_ha, ccb); break; case SAS_PROTOCOL_SSP: if (is_tmf) rc = pm8001_task_prep_ssp_tm(pm8001_ha, ccb, tmf); else rc = pm8001_task_prep_ssp(pm8001_ha, ccb); break; case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: rc = pm8001_task_prep_ata(pm8001_ha, ccb); break; default: dev_printk(KERN_ERR, pm8001_ha->dev, "unknown sas_task proto: 0x%x\n", t->task_proto); rc = -EINVAL; break; } if (rc) { PM8001_IO_DBG(pm8001_ha, pm8001_printk("rc is %x\n", rc)); goto err_out_tag; } /* TODO: select normal or high priority */ spin_lock(&t->task_state_lock); t->task_state_flags |= SAS_TASK_AT_INITIATOR; spin_unlock(&t->task_state_lock); pm8001_dev->running_req++; if (n > 1) t = list_entry(t->list.next, struct sas_task, list); } while (--n); rc = 0; goto out_done; err_out_tag: pm8001_tag_free(pm8001_ha, tag); err_out: dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc); if (!sas_protocol_ata(t->task_proto)) if (n_elem) dma_unmap_sg(pm8001_ha->dev, t->scatter, n_elem, t->data_dir); out_done: spin_unlock_irqrestore(&pm8001_ha->lock, flags); return rc; } /** * pm8001_queue_command - register for upper layer used, all IO commands sent * to HBA are from this interface. * @task: the task to be execute. * @num: if can_queue great than 1, the task can be queued up. for SMP task, * we always execute one one time * @gfp_flags: gfp_flags */ int pm8001_queue_command(struct sas_task *task, const int num, gfp_t gfp_flags) { return pm8001_task_exec(task, num, gfp_flags, 0, NULL); } void pm8001_ccb_free(struct pm8001_hba_info *pm8001_ha, u32 ccb_idx) { pm8001_tag_clear(pm8001_ha, ccb_idx); } /** * pm8001_ccb_task_free - free the sg for ssp and smp command, free the ccb. * @pm8001_ha: our hba card information * @ccb: the ccb which attached to ssp task * @task: the task to be free. * @ccb_idx: ccb index. 
*/ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha, struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx) { if (!ccb->task) return; if (!sas_protocol_ata(task->task_proto)) if (ccb->n_elem) dma_unmap_sg(pm8001_ha->dev, task->scatter, task->num_scatter, task->data_dir); switch (task->task_proto) { case SAS_PROTOCOL_SMP: dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1, PCI_DMA_FROMDEVICE); dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1, PCI_DMA_TODEVICE); break; case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: case SAS_PROTOCOL_SSP: default: /* do nothing */ break; } task->lldd_task = NULL; ccb->task = NULL; ccb->ccb_tag = 0xFFFFFFFF; ccb->open_retry = 0; pm8001_ccb_free(pm8001_ha, ccb_idx); } /** * pm8001_alloc_dev - find a empty pm8001_device * @pm8001_ha: our hba card information */ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha) { u32 dev; for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) { if (pm8001_ha->devices[dev].dev_type == NO_DEVICE) { pm8001_ha->devices[dev].id = dev; return &pm8001_ha->devices[dev]; } } if (dev == PM8001_MAX_DEVICES) { PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("max support %d devices, ignore ..\n", PM8001_MAX_DEVICES)); } return NULL; } static void pm8001_free_dev(struct pm8001_device *pm8001_dev) { u32 id = pm8001_dev->id; memset(pm8001_dev, 0, sizeof(*pm8001_dev)); pm8001_dev->id = id; pm8001_dev->dev_type = NO_DEVICE; pm8001_dev->device_id = PM8001_MAX_DEVICES; pm8001_dev->sas_device = NULL; } /** * pm8001_dev_found_notify - libsas notify a device is found. * @dev: the device structure which sas layer used. * * when libsas find a sas domain device, it should tell the LLDD that * device is found, and then LLDD register this device to HBA firmware * by the command "OPC_INB_REG_DEV", after that the HBA will assign a * device ID(according to device's sas address) and returned it to LLDD. 
From * now on, we communicate with HBA FW with the device ID which HBA assigned * rather than sas address. it is the necessary step for our HBA but it is * the optional for other HBA driver. */ static int pm8001_dev_found_notify(struct domain_device *dev) { unsigned long flags = 0; int res = 0; struct pm8001_hba_info *pm8001_ha = NULL; struct domain_device *parent_dev = dev->parent; struct pm8001_device *pm8001_device; DECLARE_COMPLETION_ONSTACK(completion); u32 flag = 0; pm8001_ha = pm8001_find_ha_by_dev(dev); spin_lock_irqsave(&pm8001_ha->lock, flags); pm8001_device = pm8001_alloc_dev(pm8001_ha); if (!pm8001_device) { res = -1; goto found_out; } pm8001_device->sas_device = dev; dev->lldd_dev = pm8001_device; pm8001_device->dev_type = dev->dev_type; pm8001_device->dcompletion = &completion; if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) { int phy_id; struct ex_phy *phy; for (phy_id = 0; phy_id < parent_dev->ex_dev.num_phys; phy_id++) { phy = &parent_dev->ex_dev.ex_phy[phy_id]; if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(dev->sas_addr)) { pm8001_device->attached_phy = phy_id; break; } } if (phy_id == parent_dev->ex_dev.num_phys) { PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Error: no attached dev:%016llx" " at ex:%016llx.\n", SAS_ADDR(dev->sas_addr), SAS_ADDR(parent_dev->sas_addr))); res = -1; } } else { if (dev->dev_type == SATA_DEV) { pm8001_device->attached_phy = dev->rphy->identify.phy_identifier; flag = 1; /* directly sata*/ } } /*register this device to HBA*/ PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n")); PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag); spin_unlock_irqrestore(&pm8001_ha->lock, flags); wait_for_completion(&completion); if (dev->dev_type == SAS_END_DEV) msleep(50); pm8001_ha->flags = PM8001F_RUN_TIME; return 0; found_out: spin_unlock_irqrestore(&pm8001_ha->lock, flags); return res; } int pm8001_dev_found(struct domain_device *dev) { return pm8001_dev_found_notify(dev); } static void 
pm8001_task_done(struct sas_task *task) { if (!del_timer(&task->timer)) return; complete(&task->completion); } static void pm8001_tmf_timedout(unsigned long data) { struct sas_task *task = (struct sas_task *)data; task->task_state_flags |= SAS_TASK_STATE_ABORTED; complete(&task->completion); } #define PM8001_TASK_TIMEOUT 20 /** * pm8001_exec_internal_tmf_task - execute some task management commands. * @dev: the wanted device. * @tmf: which task management wanted to be take. * @para_len: para_len. * @parameter: ssp task parameter. * * when errors or exception happened, we may want to do something, for example * abort the issued task which result in this execption, it is done by calling * this function, note it is also with the task execute interface. */ static int pm8001_exec_internal_tmf_task(struct domain_device *dev, void *parameter, u32 para_len, struct pm8001_tmf_task *tmf) { int res, retry; struct sas_task *task = NULL; struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); for (retry = 0; retry < 3; retry++) { task = sas_alloc_task(GFP_KERNEL); if (!task) return -ENOMEM; task->dev = dev; task->task_proto = dev->tproto; memcpy(&task->ssp_task, parameter, para_len); task->task_done = pm8001_task_done; task->timer.data = (unsigned long)task; task->timer.function = pm8001_tmf_timedout; task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ; add_timer(&task->timer); res = pm8001_task_exec(task, 1, GFP_KERNEL, 1, tmf); if (res) { del_timer(&task->timer); PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Executing internal task " "failed\n")); goto ex_err; } wait_for_completion(&task->completion); res = -TMF_RESP_FUNC_FAILED; /* Even TMF timed out, return direct. 
*/ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("TMF task[%x]timeout.\n", tmf->tmf)); goto ex_err; } } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAM_STAT_GOOD) { res = TMF_RESP_FUNC_COMPLETE; break; } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAS_DATA_UNDERRUN) { /* no error, but return the number of bytes of * underrun */ res = task->task_status.residual; break; } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAS_DATA_OVERRUN) { PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Blocked task error.\n")); res = -EMSGSIZE; break; } else { PM8001_EH_DBG(pm8001_ha, pm8001_printk(" Task to dev %016llx response:" "0x%x status 0x%x\n", SAS_ADDR(dev->sas_addr), task->task_status.resp, task->task_status.stat)); sas_free_task(task); task = NULL; } } ex_err: BUG_ON(retry == 3 && task != NULL); sas_free_task(task); return res; } static int pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha, struct pm8001_device *pm8001_dev, struct domain_device *dev, u32 flag, u32 task_tag) { int res, retry; u32 ccb_tag; struct pm8001_ccb_info *ccb; struct sas_task *task = NULL; for (retry = 0; retry < 3; retry++) { task = sas_alloc_task(GFP_KERNEL); if (!task) return -ENOMEM; task->dev = dev; task->task_proto = dev->tproto; task->task_done = pm8001_task_done; task->timer.data = (unsigned long)task; task->timer.function = pm8001_tmf_timedout; task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ; add_timer(&task->timer); res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); if (res) return res; ccb = &pm8001_ha->ccb_info[ccb_tag]; ccb->device = pm8001_dev; ccb->ccb_tag = ccb_tag; ccb->task = task; res = PM8001_CHIP_DISP->task_abort(pm8001_ha, pm8001_dev, flag, task_tag, ccb_tag); if (res) { del_timer(&task->timer); PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Executing internal task " 
"failed\n")); goto ex_err; } wait_for_completion(&task->completion); res = TMF_RESP_FUNC_FAILED; /* Even TMF timed out, return direct. */ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("TMF task timeout.\n")); goto ex_err; } } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAM_STAT_GOOD) { res = TMF_RESP_FUNC_COMPLETE; break; } else { PM8001_EH_DBG(pm8001_ha, pm8001_printk(" Task to dev %016llx response: " "0x%x status 0x%x\n", SAS_ADDR(dev->sas_addr), task->task_status.resp, task->task_status.stat)); sas_free_task(task); task = NULL; } } ex_err: BUG_ON(retry == 3 && task != NULL); sas_free_task(task); return res; } /** * pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify" * @dev: the device structure which sas layer used. */ static void pm8001_dev_gone_notify(struct domain_device *dev) { unsigned long flags = 0; u32 tag; struct pm8001_hba_info *pm8001_ha; struct pm8001_device *pm8001_dev = dev->lldd_dev; pm8001_ha = pm8001_find_ha_by_dev(dev); spin_lock_irqsave(&pm8001_ha->lock, flags); pm8001_tag_alloc(pm8001_ha, &tag); if (pm8001_dev) { u32 device_id = pm8001_dev->device_id; PM8001_DISC_DBG(pm8001_ha, pm8001_printk("found dev[%d:%x] is gone.\n", pm8001_dev->device_id, pm8001_dev->dev_type)); if (pm8001_dev->running_req) { spin_unlock_irqrestore(&pm8001_ha->lock, flags); pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , dev, 1, 0); spin_lock_irqsave(&pm8001_ha->lock, flags); } PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id); pm8001_free_dev(pm8001_dev); } else { PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found dev has gone.\n")); } dev->lldd_dev = NULL; spin_unlock_irqrestore(&pm8001_ha->lock, flags); } void pm8001_dev_gone(struct domain_device *dev) { pm8001_dev_gone_notify(dev); } static int pm8001_issue_ssp_tmf(struct domain_device *dev, u8 *lun, struct pm8001_tmf_task *tmf) { struct sas_ssp_task 
ssp_task; if (!(dev->tproto & SAS_PROTOCOL_SSP)) return TMF_RESP_FUNC_ESUPP; strncpy((u8 *)&ssp_task.LUN, lun, 8); return pm8001_exec_internal_tmf_task(dev, &ssp_task, sizeof(ssp_task), tmf); } /* retry commands by ha, by task and/or by device */ void pm8001_open_reject_retry( struct pm8001_hba_info *pm8001_ha, struct sas_task *task_to_close, struct pm8001_device *device_to_close) { int i; unsigned long flags; if (pm8001_ha == NULL) return; spin_lock_irqsave(&pm8001_ha->lock, flags); for (i = 0; i < PM8001_MAX_CCB; i++) { struct sas_task *task; struct task_status_struct *ts; struct pm8001_device *pm8001_dev; unsigned long flags1; u32 tag; struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i]; pm8001_dev = ccb->device; if (!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE)) continue; if (!device_to_close) { uintptr_t d = (uintptr_t)pm8001_dev - (uintptr_t)&pm8001_ha->devices; if (((d % sizeof(*pm8001_dev)) != 0) || ((d / sizeof(*pm8001_dev)) >= PM8001_MAX_DEVICES)) continue; } else if (pm8001_dev != device_to_close) continue; tag = ccb->ccb_tag; if (!tag || (tag == 0xFFFFFFFF)) continue; task = ccb->task; if (!task || !task->task_done) continue; if (task_to_close && (task != task_to_close)) continue; ts = &task->task_status; ts->resp = SAS_TASK_COMPLETE; /* Force the midlayer to retry */ ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; if (pm8001_dev) pm8001_dev->running_req--; spin_lock_irqsave(&task->task_state_lock, flags1); task->task_state_flags &= ~SAS_TASK_STATE_PENDING; task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; task->task_state_flags |= SAS_TASK_STATE_DONE; if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) { spin_unlock_irqrestore(&task->task_state_lock, flags1); pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); } else { spin_unlock_irqrestore(&task->task_state_lock, flags1); pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); mb();/* in order to force CPU ordering */ spin_unlock_irqrestore(&pm8001_ha->lock, flags); 
task->task_done(task); spin_lock_irqsave(&pm8001_ha->lock, flags); } } spin_unlock_irqrestore(&pm8001_ha->lock, flags); } /** * Standard mandates link reset for ATA (type 0) and hard reset for * SSP (type 1) , only for RECOVERY */ int pm8001_I_T_nexus_reset(struct domain_device *dev) { int rc = TMF_RESP_FUNC_FAILED; struct pm8001_device *pm8001_dev; struct pm8001_hba_info *pm8001_ha; struct sas_phy *phy; if (!dev || !dev->lldd_dev) return -1; pm8001_dev = dev->lldd_dev; pm8001_ha = pm8001_find_ha_by_dev(dev); phy = sas_get_local_phy(dev); if (dev_is_sata(dev)) { DECLARE_COMPLETION_ONSTACK(completion_setstate); if (scsi_is_sas_phy_local(phy)) { rc = 0; goto out; } rc = sas_phy_reset(phy, 1); msleep(2000); rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , dev, 1, 0); pm8001_dev->setds_completion = &completion_setstate; rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, pm8001_dev, 0x01); wait_for_completion(&completion_setstate); } else { rc = sas_phy_reset(phy, 1); msleep(2000); } PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n", pm8001_dev->device_id, rc)); out: sas_put_local_phy(phy); return rc; } /* mandatory SAM-3, the task reset the specified LUN*/ int pm8001_lu_reset(struct domain_device *dev, u8 *lun) { int rc = TMF_RESP_FUNC_FAILED; struct pm8001_tmf_task tmf_task; struct pm8001_device *pm8001_dev = dev->lldd_dev; struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); if (dev_is_sata(dev)) { struct sas_phy *phy = sas_get_local_phy(dev); rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , dev, 1, 0); rc = sas_phy_reset(phy, 1); sas_put_local_phy(phy); rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, pm8001_dev, 0x01); msleep(2000); } else { tmf_task.tmf = TMF_LU_RESET; rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task); } /* If failed, fall-through I_T_Nexus reset */ PM8001_EH_DBG(pm8001_ha, pm8001_printk("for device[%x]:rc=%d\n", pm8001_dev->device_id, rc)); return rc; } /* optional SAM-3 */ int 
pm8001_query_task(struct sas_task *task) { u32 tag = 0xdeadbeef; int i = 0; struct scsi_lun lun; struct pm8001_tmf_task tmf_task; int rc = TMF_RESP_FUNC_FAILED; if (unlikely(!task || !task->lldd_task || !task->dev)) return rc; if (task->task_proto & SAS_PROTOCOL_SSP) { struct scsi_cmnd *cmnd = task->uldd_task; struct domain_device *dev = task->dev; struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); int_to_scsilun(cmnd->device->lun, &lun); rc = pm8001_find_tag(task, &tag); if (rc == 0) { rc = TMF_RESP_FUNC_FAILED; return rc; } PM8001_EH_DBG(pm8001_ha, pm8001_printk("Query:[")); for (i = 0; i < 16; i++) printk(KERN_INFO "%02x ", cmnd->cmnd[i]); printk(KERN_INFO "]\n"); tmf_task.tmf = TMF_QUERY_TASK; tmf_task.tag_of_task_to_be_managed = tag; rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task); switch (rc) { /* The task is still in Lun, release it then */ case TMF_RESP_FUNC_SUCC: PM8001_EH_DBG(pm8001_ha, pm8001_printk("The task is still in Lun\n")); break; /* The task is not in Lun or failed, reset the phy */ case TMF_RESP_FUNC_FAILED: case TMF_RESP_FUNC_COMPLETE: PM8001_EH_DBG(pm8001_ha, pm8001_printk("The task is not in Lun or failed," " reset the phy\n")); break; } } pm8001_printk(":rc= %d\n", rc); return rc; } /* mandatory SAM-3, still need free task/ccb info, abord the specified task */ int pm8001_abort_task(struct sas_task *task) { unsigned long flags; u32 tag = 0xdeadbeef; u32 device_id; struct domain_device *dev ; struct pm8001_hba_info *pm8001_ha = NULL; struct pm8001_ccb_info *ccb; struct scsi_lun lun; struct pm8001_device *pm8001_dev; struct pm8001_tmf_task tmf_task; int rc = TMF_RESP_FUNC_FAILED; if (unlikely(!task || !task->lldd_task || !task->dev)) return rc; spin_lock_irqsave(&task->task_state_lock, flags); if (task->task_state_flags & SAS_TASK_STATE_DONE) { spin_unlock_irqrestore(&task->task_state_lock, flags); rc = TMF_RESP_FUNC_COMPLETE; goto out; } spin_unlock_irqrestore(&task->task_state_lock, flags); if (task->task_proto & 
SAS_PROTOCOL_SSP) { struct scsi_cmnd *cmnd = task->uldd_task; dev = task->dev; ccb = task->lldd_task; pm8001_dev = dev->lldd_dev; pm8001_ha = pm8001_find_ha_by_dev(dev); int_to_scsilun(cmnd->device->lun, &lun); rc = pm8001_find_tag(task, &tag); if (rc == 0) { printk(KERN_INFO "No such tag in %s\n", __func__); rc = TMF_RESP_FUNC_FAILED; return rc; } device_id = pm8001_dev->device_id; PM8001_EH_DBG(pm8001_ha, pm8001_printk("abort io to deviceid= %d\n", device_id)); tmf_task.tmf = TMF_ABORT_TASK; tmf_task.tag_of_task_to_be_managed = tag; rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task); pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev, pm8001_dev->sas_device, 0, tag); } else if (task->task_proto & SAS_PROTOCOL_SATA || task->task_proto & SAS_PROTOCOL_STP) { dev = task->dev; pm8001_dev = dev->lldd_dev; pm8001_ha = pm8001_find_ha_by_dev(dev); rc = pm8001_find_tag(task, &tag); if (rc == 0) { printk(KERN_INFO "No such tag in %s\n", __func__); rc = TMF_RESP_FUNC_FAILED; return rc; } rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev, pm8001_dev->sas_device, 0, tag); } else if (task->task_proto & SAS_PROTOCOL_SMP) { /* SMP */ dev = task->dev; pm8001_dev = dev->lldd_dev; pm8001_ha = pm8001_find_ha_by_dev(dev); rc = pm8001_find_tag(task, &tag); if (rc == 0) { printk(KERN_INFO "No such tag in %s\n", __func__); rc = TMF_RESP_FUNC_FAILED; return rc; } rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev, pm8001_dev->sas_device, 0, tag); } out: if (rc != TMF_RESP_FUNC_COMPLETE) pm8001_printk("rc= %d\n", rc); return rc; } int pm8001_abort_task_set(struct domain_device *dev, u8 *lun) { int rc = TMF_RESP_FUNC_FAILED; struct pm8001_tmf_task tmf_task; tmf_task.tmf = TMF_ABORT_TASK_SET; rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task); return rc; } int pm8001_clear_aca(struct domain_device *dev, u8 *lun) { int rc = TMF_RESP_FUNC_FAILED; struct pm8001_tmf_task tmf_task; tmf_task.tmf = TMF_CLEAR_ACA; rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task); return rc; } int 
pm8001_clear_task_set(struct domain_device *dev, u8 *lun) { int rc = TMF_RESP_FUNC_FAILED; struct pm8001_tmf_task tmf_task; struct pm8001_device *pm8001_dev = dev->lldd_dev; struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); PM8001_EH_DBG(pm8001_ha, pm8001_printk("I_T_L_Q clear task set[%x]\n", pm8001_dev->device_id)); tmf_task.tmf = TMF_CLEAR_TASK_SET; rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task); return rc; }
gpl-2.0
SlimRoms/kernel_sony_msm8974
net/ax25/ax25_addr.c
4853
6193
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/sock.h> #include <asm/uaccess.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> /* * The default broadcast address of an interface is QST-0; the default address * is LINUX-1. The null address is defined as a callsign of all spaces with * an SSID of zero. */ const ax25_address ax25_bcast = {{'Q' << 1, 'S' << 1, 'T' << 1, ' ' << 1, ' ' << 1, ' ' << 1, 0 << 1}}; const ax25_address ax25_defaddr = {{'L' << 1, 'I' << 1, 'N' << 1, 'U' << 1, 'X' << 1, ' ' << 1, 1 << 1}}; const ax25_address null_ax25_address = {{' ' << 1, ' ' << 1, ' ' << 1, ' ' << 1, ' ' << 1, ' ' << 1, 0 << 1}}; EXPORT_SYMBOL_GPL(ax25_bcast); EXPORT_SYMBOL_GPL(ax25_defaddr); EXPORT_SYMBOL(null_ax25_address); /* * ax25 -> ascii conversion */ char *ax2asc(char *buf, const ax25_address *a) { char c, *s; int n; for (n = 0, s = buf; n < 6; n++) { c = (a->ax25_call[n] >> 1) & 0x7F; if (c != ' ') *s++ = c; } *s++ = '-'; if ((n = ((a->ax25_call[6] >> 1) & 0x0F)) > 9) { *s++ = '1'; n -= 10; } *s++ = n + '0'; *s++ = '\0'; if (*buf == '\0' || *buf == '-') return "*"; return buf; } EXPORT_SYMBOL(ax2asc); /* * ascii -> ax25 conversion */ void asc2ax(ax25_address *addr, const char *callsign) { const char *s; int n; for (s = callsign, n = 0; n < 6; n++) { if (*s != '\0' && *s != '-') addr->ax25_call[n] = *s++; else 
addr->ax25_call[n] = ' '; addr->ax25_call[n] <<= 1; addr->ax25_call[n] &= 0xFE; } if (*s++ == '\0') { addr->ax25_call[6] = 0x00; return; } addr->ax25_call[6] = *s++ - '0'; if (*s != '\0') { addr->ax25_call[6] *= 10; addr->ax25_call[6] += *s++ - '0'; } addr->ax25_call[6] <<= 1; addr->ax25_call[6] &= 0x1E; } EXPORT_SYMBOL(asc2ax); /* * Compare two ax.25 addresses */ int ax25cmp(const ax25_address *a, const ax25_address *b) { int ct = 0; while (ct < 6) { if ((a->ax25_call[ct] & 0xFE) != (b->ax25_call[ct] & 0xFE)) /* Clean off repeater bits */ return 1; ct++; } if ((a->ax25_call[ct] & 0x1E) == (b->ax25_call[ct] & 0x1E)) /* SSID without control bit */ return 0; return 2; /* Partial match */ } EXPORT_SYMBOL(ax25cmp); /* * Compare two AX.25 digipeater paths. */ int ax25digicmp(const ax25_digi *digi1, const ax25_digi *digi2) { int i; if (digi1->ndigi != digi2->ndigi) return 1; if (digi1->lastrepeat != digi2->lastrepeat) return 1; for (i = 0; i < digi1->ndigi; i++) if (ax25cmp(&digi1->calls[i], &digi2->calls[i]) != 0) return 1; return 0; } /* * Given an AX.25 address pull of to, from, digi list, command/response and the start of data * */ const unsigned char *ax25_addr_parse(const unsigned char *buf, int len, ax25_address *src, ax25_address *dest, ax25_digi *digi, int *flags, int *dama) { int d = 0; if (len < 14) return NULL; if (flags != NULL) { *flags = 0; if (buf[6] & AX25_CBIT) *flags = AX25_COMMAND; if (buf[13] & AX25_CBIT) *flags = AX25_RESPONSE; } if (dama != NULL) *dama = ~buf[13] & AX25_DAMA_FLAG; /* Copy to, from */ if (dest != NULL) memcpy(dest, buf + 0, AX25_ADDR_LEN); if (src != NULL) memcpy(src, buf + 7, AX25_ADDR_LEN); buf += 2 * AX25_ADDR_LEN; len -= 2 * AX25_ADDR_LEN; digi->lastrepeat = -1; digi->ndigi = 0; while (!(buf[-1] & AX25_EBIT)) { if (d >= AX25_MAX_DIGIS) return NULL; /* Max of 6 digis */ if (len < 7) return NULL; /* Short packet */ memcpy(&digi->calls[d], buf, AX25_ADDR_LEN); digi->ndigi = d + 1; if (buf[6] & AX25_HBIT) { digi->repeated[d] = 1; 
digi->lastrepeat = d; } else { digi->repeated[d] = 0; } buf += AX25_ADDR_LEN; len -= AX25_ADDR_LEN; d++; } return buf; } /* * Assemble an AX.25 header from the bits */ int ax25_addr_build(unsigned char *buf, const ax25_address *src, const ax25_address *dest, const ax25_digi *d, int flag, int modulus) { int len = 0; int ct = 0; memcpy(buf, dest, AX25_ADDR_LEN); buf[6] &= ~(AX25_EBIT | AX25_CBIT); buf[6] |= AX25_SSSID_SPARE; if (flag == AX25_COMMAND) buf[6] |= AX25_CBIT; buf += AX25_ADDR_LEN; len += AX25_ADDR_LEN; memcpy(buf, src, AX25_ADDR_LEN); buf[6] &= ~(AX25_EBIT | AX25_CBIT); buf[6] &= ~AX25_SSSID_SPARE; if (modulus == AX25_MODULUS) buf[6] |= AX25_SSSID_SPARE; else buf[6] |= AX25_ESSID_SPARE; if (flag == AX25_RESPONSE) buf[6] |= AX25_CBIT; /* * Fast path the normal digiless path */ if (d == NULL || d->ndigi == 0) { buf[6] |= AX25_EBIT; return 2 * AX25_ADDR_LEN; } buf += AX25_ADDR_LEN; len += AX25_ADDR_LEN; while (ct < d->ndigi) { memcpy(buf, &d->calls[ct], AX25_ADDR_LEN); if (d->repeated[ct]) buf[6] |= AX25_HBIT; else buf[6] &= ~AX25_HBIT; buf[6] &= ~AX25_EBIT; buf[6] |= AX25_SSSID_SPARE; buf += AX25_ADDR_LEN; len += AX25_ADDR_LEN; ct++; } buf[-1] |= AX25_EBIT; return len; } int ax25_addr_size(const ax25_digi *dp) { if (dp == NULL) return 2 * AX25_ADDR_LEN; return AX25_ADDR_LEN * (2 + dp->ndigi); } /* * Reverse Digipeat List. May not pass both parameters as same struct */ void ax25_digi_invert(const ax25_digi *in, ax25_digi *out) { int ct; out->ndigi = in->ndigi; out->lastrepeat = in->ndigi - in->lastrepeat - 2; /* Invert the digipeaters */ for (ct = 0; ct < in->ndigi; ct++) { out->calls[ct] = in->calls[in->ndigi - ct - 1]; if (ct <= out->lastrepeat) { out->calls[ct].ax25_call[6] |= AX25_HBIT; out->repeated[ct] = 1; } else { out->calls[ct].ax25_call[6] &= ~AX25_HBIT; out->repeated[ct] = 0; } } }
gpl-2.0
GustavoVinicius/Source-23.0.1.A.5.77
arch/arm/mach-s3c24xx/bast-irq.c
5109
3624
/* linux/arch/arm/mach-s3c2410/bast-irq.c * * Copyright 2003-2005 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * http://www.simtec.co.uk/products/EB2410ITX/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/module.h> #include <linux/ioport.h> #include <linux/device.h> #include <linux/io.h> #include <asm/mach-types.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/mach/irq.h> #include <mach/regs-irq.h> #include <mach/bast-map.h> #include <mach/bast-irq.h> #include <plat/irq.h> #if 0 #include <asm/debug-ll.h> #endif #define irqdbf(x...) #define irqdbf2(x...) /* handle PC104 ISA interrupts from the system CPLD */ /* table of ISA irq nos to the relevant mask... 
zero means * the irq is not implemented */ static unsigned char bast_pc104_irqmasks[] = { 0, /* 0 */ 0, /* 1 */ 0, /* 2 */ 1, /* 3 */ 0, /* 4 */ 2, /* 5 */ 0, /* 6 */ 4, /* 7 */ 0, /* 8 */ 0, /* 9 */ 8, /* 10 */ 0, /* 11 */ 0, /* 12 */ 0, /* 13 */ 0, /* 14 */ 0, /* 15 */ }; static unsigned char bast_pc104_irqs[] = { 3, 5, 7, 10 }; static void bast_pc104_mask(struct irq_data *data) { unsigned long temp; temp = __raw_readb(BAST_VA_PC104_IRQMASK); temp &= ~bast_pc104_irqmasks[data->irq]; __raw_writeb(temp, BAST_VA_PC104_IRQMASK); } static void bast_pc104_maskack(struct irq_data *data) { struct irq_desc *desc = irq_desc + IRQ_ISA; bast_pc104_mask(data); desc->irq_data.chip->irq_ack(&desc->irq_data); } static void bast_pc104_unmask(struct irq_data *data) { unsigned long temp; temp = __raw_readb(BAST_VA_PC104_IRQMASK); temp |= bast_pc104_irqmasks[data->irq]; __raw_writeb(temp, BAST_VA_PC104_IRQMASK); } static struct irq_chip bast_pc104_chip = { .irq_mask = bast_pc104_mask, .irq_unmask = bast_pc104_unmask, .irq_ack = bast_pc104_maskack }; static void bast_irq_pc104_demux(unsigned int irq, struct irq_desc *desc) { unsigned int stat; unsigned int irqno; int i; stat = __raw_readb(BAST_VA_PC104_IRQREQ) & 0xf; if (unlikely(stat == 0)) { /* ack if we get an irq with nothing (ie, startup) */ desc = irq_desc + IRQ_ISA; desc->irq_data.chip->irq_ack(&desc->irq_data); } else { /* handle the IRQ */ for (i = 0; stat != 0; i++, stat >>= 1) { if (stat & 1) { irqno = bast_pc104_irqs[i]; generic_handle_irq(irqno); } } } } static __init int bast_irq_init(void) { unsigned int i; if (machine_is_bast()) { printk(KERN_INFO "BAST PC104 IRQ routing, Copyright 2005 Simtec Electronics\n"); /* zap all the IRQs */ __raw_writeb(0x0, BAST_VA_PC104_IRQMASK); irq_set_chained_handler(IRQ_ISA, bast_irq_pc104_demux); /* register our IRQs */ for (i = 0; i < 4; i++) { unsigned int irqno = bast_pc104_irqs[i]; irq_set_chip_and_handler(irqno, &bast_pc104_chip, handle_level_irq); set_irq_flags(irqno, 
IRQF_VALID); } } return 0; } arch_initcall(bast_irq_init);
gpl-2.0
xdabbeb/g2-kk-kernel
drivers/net/ethernet/chelsio/cxgb3/sge.c
5109
93465
/* * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/if_vlan.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/prefetch.h> #include <net/arp.h> #include "common.h" #include "regs.h" #include "sge_defs.h" #include "t3_cpl.h" #include "firmware_exports.h" #include "cxgb3_offload.h" #define USE_GTS 0 #define SGE_RX_SM_BUF_SIZE 1536 #define SGE_RX_COPY_THRES 256 #define SGE_RX_PULL_LEN 128 #define SGE_PG_RSVD SMP_CACHE_BYTES /* * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks. 
* It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs * directly. */ #define FL0_PG_CHUNK_SIZE 2048 #define FL0_PG_ORDER 0 #define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER) #define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192) #define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1) #define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER) #define SGE_RX_DROP_THRES 16 #define RX_RECLAIM_PERIOD (HZ/4) /* * Max number of Rx buffers we replenish at a time. */ #define MAX_RX_REFILL 16U /* * Period of the Tx buffer reclaim timer. This timer does not need to run * frequently as Tx buffers are usually reclaimed by new Tx packets. */ #define TX_RECLAIM_PERIOD (HZ / 4) #define TX_RECLAIM_TIMER_CHUNK 64U #define TX_RECLAIM_CHUNK 16U /* WR size in bytes */ #define WR_LEN (WR_FLITS * 8) /* * Types of Tx queues in each queue set. Order here matters, do not change. */ enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL }; /* Values for sge_txq.flags */ enum { TXQ_RUNNING = 1 << 0, /* fetch engine is running */ TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */ }; struct tx_desc { __be64 flit[TX_DESC_FLITS]; }; struct rx_desc { __be32 addr_lo; __be32 len_gen; __be32 gen2; __be32 addr_hi; }; struct tx_sw_desc { /* SW state per Tx descriptor */ struct sk_buff *skb; u8 eop; /* set if last descriptor for packet */ u8 addr_idx; /* buffer index of first SGL entry in descriptor */ u8 fragidx; /* first page fragment associated with descriptor */ s8 sflit; /* start flit of first SGL entry in descriptor */ }; struct rx_sw_desc { /* SW state per Rx descriptor */ union { struct sk_buff *skb; struct fl_pg_chunk pg_chunk; }; DEFINE_DMA_UNMAP_ADDR(dma_addr); }; struct rsp_desc { /* response queue descriptor */ struct rss_header rss_hdr; __be32 flags; __be32 len_cq; u8 imm_data[47]; u8 intr_gen; }; /* * Holds unmapping information for Tx packets that need deferred unmapping. * This structure lives at skb->head and must be allocated by callers. 
*/ struct deferred_unmap_info { struct pci_dev *pdev; dma_addr_t addr[MAX_SKB_FRAGS + 1]; }; /* * Maps a number of flits to the number of Tx descriptors that can hold them. * The formula is * * desc = 1 + (flits - 2) / (WR_FLITS - 1). * * HW allows up to 4 descriptors to be combined into a WR. */ static u8 flit_desc_map[] = { 0, #if SGE_NUM_GENBITS == 1 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 #elif SGE_NUM_GENBITS == 2 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, #else # error "SGE_NUM_GENBITS must be 1 or 2" #endif }; static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx) { return container_of(q, struct sge_qset, fl[qidx]); } static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q) { return container_of(q, struct sge_qset, rspq); } static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx) { return container_of(q, struct sge_qset, txq[qidx]); } /** * refill_rspq - replenish an SGE response queue * @adapter: the adapter * @q: the response queue to replenish * @credits: how many new responses to make available * * Replenishes a response queue by making the supplied number of responses * available to HW. */ static inline void refill_rspq(struct adapter *adapter, const struct sge_rspq *q, unsigned int credits) { rmb(); t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN, V_RSPQ(q->cntxt_id) | V_CREDITS(credits)); } /** * need_skb_unmap - does the platform need unmapping of sk_buffs? * * Returns true if the platform needs sk_buff unmapping. The compiler * optimizes away unnecessary code if this returns true. 
*/ static inline int need_skb_unmap(void) { #ifdef CONFIG_NEED_DMA_MAP_STATE return 1; #else return 0; #endif } /** * unmap_skb - unmap a packet main body and its page fragments * @skb: the packet * @q: the Tx queue containing Tx descriptors for the packet * @cidx: index of Tx descriptor * @pdev: the PCI device * * Unmap the main body of an sk_buff and its page fragments, if any. * Because of the fairly complicated structure of our SGLs and the desire * to conserve space for metadata, the information necessary to unmap an * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx * descriptors (the physical addresses of the various data buffers), and * the SW descriptor state (assorted indices). The send functions * initialize the indices for the first packet descriptor so we can unmap * the buffers held in the first Tx descriptor here, and we have enough * information at this point to set the state for the next Tx descriptor. * * Note that it is possible to clean up the first descriptor of a packet * before the send routines have written the next descriptors, but this * race does not cause any problem. We just end up writing the unmapping * info for the descriptor first. 
*/ static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q, unsigned int cidx, struct pci_dev *pdev) { const struct sg_ent *sgp; struct tx_sw_desc *d = &q->sdesc[cidx]; int nfrags, frag_idx, curflit, j = d->addr_idx; sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit]; frag_idx = d->fragidx; if (frag_idx == 0 && skb_headlen(skb)) { pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), skb_headlen(skb), PCI_DMA_TODEVICE); j = 1; } curflit = d->sflit + 1 + j; nfrags = skb_shinfo(skb)->nr_frags; while (frag_idx < nfrags && curflit < WR_FLITS) { pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]), skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]), PCI_DMA_TODEVICE); j ^= 1; if (j == 0) { sgp++; curflit++; } curflit++; frag_idx++; } if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */ d = cidx + 1 == q->size ? q->sdesc : d + 1; d->fragidx = frag_idx; d->addr_idx = j; d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */ } } /** * free_tx_desc - reclaims Tx descriptors and their buffers * @adapter: the adapter * @q: the Tx queue to reclaim descriptors from * @n: the number of descriptors to reclaim * * Reclaims Tx descriptors from an SGE Tx queue and frees the associated * Tx buffers. Called with the Tx queue lock held. 
*/ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q, unsigned int n) { struct tx_sw_desc *d; struct pci_dev *pdev = adapter->pdev; unsigned int cidx = q->cidx; const int need_unmap = need_skb_unmap() && q->cntxt_id >= FW_TUNNEL_SGEEC_START; d = &q->sdesc[cidx]; while (n--) { if (d->skb) { /* an SGL is present */ if (need_unmap) unmap_skb(d->skb, q, cidx, pdev); if (d->eop) { kfree_skb(d->skb); d->skb = NULL; } } ++d; if (++cidx == q->size) { cidx = 0; d = q->sdesc; } } q->cidx = cidx; } /** * reclaim_completed_tx - reclaims completed Tx descriptors * @adapter: the adapter * @q: the Tx queue to reclaim completed descriptors from * @chunk: maximum number of descriptors to reclaim * * Reclaims Tx descriptors that the SGE has indicated it has processed, * and frees the associated buffers if possible. Called with the Tx * queue's lock held. */ static inline unsigned int reclaim_completed_tx(struct adapter *adapter, struct sge_txq *q, unsigned int chunk) { unsigned int reclaim = q->processed - q->cleaned; reclaim = min(chunk, reclaim); if (reclaim) { free_tx_desc(adapter, q, reclaim); q->cleaned += reclaim; q->in_use -= reclaim; } return q->processed - q->cleaned; } /** * should_restart_tx - are there enough resources to restart a Tx queue? * @q: the Tx queue * * Checks if there are enough descriptors to restart a suspended Tx queue. 
*/ static inline int should_restart_tx(const struct sge_txq *q) { unsigned int r = q->processed - q->cleaned; return q->in_use - r < (q->size >> 1); } static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q, struct rx_sw_desc *d) { if (q->use_pages && d->pg_chunk.page) { (*d->pg_chunk.p_cnt)--; if (!*d->pg_chunk.p_cnt) pci_unmap_page(pdev, d->pg_chunk.mapping, q->alloc_size, PCI_DMA_FROMDEVICE); put_page(d->pg_chunk.page); d->pg_chunk.page = NULL; } else { pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr), q->buf_size, PCI_DMA_FROMDEVICE); kfree_skb(d->skb); d->skb = NULL; } } /** * free_rx_bufs - free the Rx buffers on an SGE free list * @pdev: the PCI device associated with the adapter * @rxq: the SGE free list to clean up * * Release the buffers on an SGE free-buffer Rx queue. HW fetching from * this queue should be stopped before calling this function. */ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q) { unsigned int cidx = q->cidx; while (q->credits--) { struct rx_sw_desc *d = &q->sdesc[cidx]; clear_rx_desc(pdev, q, d); if (++cidx == q->size) cidx = 0; } if (q->pg_chunk.page) { __free_pages(q->pg_chunk.page, q->order); q->pg_chunk.page = NULL; } } /** * add_one_rx_buf - add a packet buffer to a free-buffer list * @va: buffer start VA * @len: the buffer length * @d: the HW Rx descriptor to write * @sd: the SW Rx descriptor to write * @gen: the generation bit value * @pdev: the PCI device associated with the adapter * * Add a buffer of the given length to the supplied HW and SW Rx * descriptors. 
*/ static inline int add_one_rx_buf(void *va, unsigned int len, struct rx_desc *d, struct rx_sw_desc *sd, unsigned int gen, struct pci_dev *pdev) { dma_addr_t mapping; mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE); if (unlikely(pci_dma_mapping_error(pdev, mapping))) return -ENOMEM; dma_unmap_addr_set(sd, dma_addr, mapping); d->addr_lo = cpu_to_be32(mapping); d->addr_hi = cpu_to_be32((u64) mapping >> 32); wmb(); d->len_gen = cpu_to_be32(V_FLD_GEN1(gen)); d->gen2 = cpu_to_be32(V_FLD_GEN2(gen)); return 0; } static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d, unsigned int gen) { d->addr_lo = cpu_to_be32(mapping); d->addr_hi = cpu_to_be32((u64) mapping >> 32); wmb(); d->len_gen = cpu_to_be32(V_FLD_GEN1(gen)); d->gen2 = cpu_to_be32(V_FLD_GEN2(gen)); return 0; } static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp, unsigned int order) { if (!q->pg_chunk.page) { dma_addr_t mapping; q->pg_chunk.page = alloc_pages(gfp, order); if (unlikely(!q->pg_chunk.page)) return -ENOMEM; q->pg_chunk.va = page_address(q->pg_chunk.page); q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) - SGE_PG_RSVD; q->pg_chunk.offset = 0; mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, 0, q->alloc_size, PCI_DMA_FROMDEVICE); q->pg_chunk.mapping = mapping; } sd->pg_chunk = q->pg_chunk; prefetch(sd->pg_chunk.p_cnt); q->pg_chunk.offset += q->buf_size; if (q->pg_chunk.offset == (PAGE_SIZE << order)) q->pg_chunk.page = NULL; else { q->pg_chunk.va += q->buf_size; get_page(q->pg_chunk.page); } if (sd->pg_chunk.offset == 0) *sd->pg_chunk.p_cnt = 1; else *sd->pg_chunk.p_cnt += 1; return 0; } static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) { if (q->pend_cred >= q->credits / 4) { q->pend_cred = 0; wmb(); t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id)); } } /** * refill_fl - refill an SGE free-buffer list * @adapter: the adapter * @q: the free-list to refill * @n: the number of new 
buffers to allocate * @gfp: the gfp flags for allocating new buffers * * (Re)populate an SGE free-buffer list with up to @n new packet buffers, * allocated with the supplied gfp flags. The caller must assure that * @n does not exceed the queue's capacity. */ static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp) { struct rx_sw_desc *sd = &q->sdesc[q->pidx]; struct rx_desc *d = &q->desc[q->pidx]; unsigned int count = 0; while (n--) { dma_addr_t mapping; int err; if (q->use_pages) { if (unlikely(alloc_pg_chunk(adap, q, sd, gfp, q->order))) { nomem: q->alloc_failed++; break; } mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset; dma_unmap_addr_set(sd, dma_addr, mapping); add_one_rx_chunk(mapping, d, q->gen); pci_dma_sync_single_for_device(adap->pdev, mapping, q->buf_size - SGE_PG_RSVD, PCI_DMA_FROMDEVICE); } else { void *buf_start; struct sk_buff *skb = alloc_skb(q->buf_size, gfp); if (!skb) goto nomem; sd->skb = skb; buf_start = skb->data; err = add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen, adap->pdev); if (unlikely(err)) { clear_rx_desc(adap->pdev, q, sd); break; } } d++; sd++; if (++q->pidx == q->size) { q->pidx = 0; q->gen ^= 1; sd = q->sdesc; d = q->desc; } count++; } q->credits += count; q->pend_cred += count; ring_fl_db(adap, q); return count; } static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) { refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits), GFP_ATOMIC | __GFP_COMP); } /** * recycle_rx_buf - recycle a receive buffer * @adapter: the adapter * @q: the SGE free list * @idx: index of buffer to recycle * * Recycles the specified buffer on the given free list by adding it at * the next available slot on the list. 
*/ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q, unsigned int idx) { struct rx_desc *from = &q->desc[idx]; struct rx_desc *to = &q->desc[q->pidx]; q->sdesc[q->pidx] = q->sdesc[idx]; to->addr_lo = from->addr_lo; /* already big endian */ to->addr_hi = from->addr_hi; /* likewise */ wmb(); to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen)); to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen)); if (++q->pidx == q->size) { q->pidx = 0; q->gen ^= 1; } q->credits++; q->pend_cred++; ring_fl_db(adap, q); } /** * alloc_ring - allocate resources for an SGE descriptor ring * @pdev: the PCI device * @nelem: the number of descriptors * @elem_size: the size of each descriptor * @sw_size: the size of the SW state associated with each ring element * @phys: the physical address of the allocated ring * @metadata: address of the array holding the SW state for the ring * * Allocates resources for an SGE descriptor ring, such as Tx queues, * free buffer lists, or response queues. Each SGE ring requires * space for its HW descriptors plus, optionally, space for the SW state * associated with each HW entry (the metadata). The function returns * three values: the virtual address for the HW ring (the return value * of the function), the physical address of the HW ring, and the address * of the SW ring. */ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size, size_t sw_size, dma_addr_t * phys, void *metadata) { size_t len = nelem * elem_size; void *s = NULL; void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL); if (!p) return NULL; if (sw_size && metadata) { s = kcalloc(nelem, sw_size, GFP_KERNEL); if (!s) { dma_free_coherent(&pdev->dev, len, p, *phys); return NULL; } *(void **)metadata = s; } memset(p, 0, len); return p; } /** * t3_reset_qset - reset a sge qset * @q: the queue set * * Reset the qset structure. * the NAPI structure is preserved in the event of * the qset's reincarnation, for example during EEH recovery. 
*/ static void t3_reset_qset(struct sge_qset *q) { if (q->adap && !(q->adap->flags & NAPI_INIT)) { memset(q, 0, sizeof(*q)); return; } q->adap = NULL; memset(&q->rspq, 0, sizeof(q->rspq)); memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET); memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); q->txq_stopped = 0; q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */ q->rx_reclaim_timer.function = NULL; q->nomem = 0; napi_free_frags(&q->napi); } /** * free_qset - free the resources of an SGE queue set * @adapter: the adapter owning the queue set * @q: the queue set * * Release the HW and SW resources associated with an SGE queue set, such * as HW contexts, packet buffers, and descriptor rings. Traffic to the * queue set must be quiesced prior to calling this. */ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q) { int i; struct pci_dev *pdev = adapter->pdev; for (i = 0; i < SGE_RXQ_PER_SET; ++i) if (q->fl[i].desc) { spin_lock_irq(&adapter->sge.reg_lock); t3_sge_disable_fl(adapter, q->fl[i].cntxt_id); spin_unlock_irq(&adapter->sge.reg_lock); free_rx_bufs(pdev, &q->fl[i]); kfree(q->fl[i].sdesc); dma_free_coherent(&pdev->dev, q->fl[i].size * sizeof(struct rx_desc), q->fl[i].desc, q->fl[i].phys_addr); } for (i = 0; i < SGE_TXQ_PER_SET; ++i) if (q->txq[i].desc) { spin_lock_irq(&adapter->sge.reg_lock); t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0); spin_unlock_irq(&adapter->sge.reg_lock); if (q->txq[i].sdesc) { free_tx_desc(adapter, &q->txq[i], q->txq[i].in_use); kfree(q->txq[i].sdesc); } dma_free_coherent(&pdev->dev, q->txq[i].size * sizeof(struct tx_desc), q->txq[i].desc, q->txq[i].phys_addr); __skb_queue_purge(&q->txq[i].sendq); } if (q->rspq.desc) { spin_lock_irq(&adapter->sge.reg_lock); t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id); spin_unlock_irq(&adapter->sge.reg_lock); dma_free_coherent(&pdev->dev, q->rspq.size * sizeof(struct rsp_desc), q->rspq.desc, q->rspq.phys_addr); } t3_reset_qset(q); } /** * 
init_qset_cntxt - initialize an SGE queue set context info * @qs: the queue set * @id: the queue set id * * Initializes the TIDs and context ids for the queues of a queue set. */ static void init_qset_cntxt(struct sge_qset *qs, unsigned int id) { qs->rspq.cntxt_id = id; qs->fl[0].cntxt_id = 2 * id; qs->fl[1].cntxt_id = 2 * id + 1; qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id; qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id; qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id; qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id; qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id; } /** * sgl_len - calculates the size of an SGL of the given capacity * @n: the number of SGL entries * * Calculates the number of flits needed for a scatter/gather list that * can hold the given number of entries. */ static inline unsigned int sgl_len(unsigned int n) { /* alternatively: 3 * (n / 2) + 2 * (n & 1) */ return (3 * n) / 2 + (n & 1); } /** * flits_to_desc - returns the num of Tx descriptors for the given flits * @n: the number of flits * * Calculates the number of Tx descriptors needed for the supplied number * of flits. */ static inline unsigned int flits_to_desc(unsigned int n) { BUG_ON(n >= ARRAY_SIZE(flit_desc_map)); return flit_desc_map[n]; } /** * get_packet - return the next ingress packet buffer from a free list * @adap: the adapter that received the packet * @fl: the SGE free list holding the packet * @len: the packet length including any SGE padding * @drop_thres: # of remaining buffers before we start dropping packets * * Get the next packet from a free list and complete setup of the * sk_buff. If the packet is small we make a copy and recycle the * original buffer, otherwise we use the original buffer itself. 
If a * positive drop threshold is supplied packets are dropped and their * buffers recycled if (a) the number of remaining buffers is under the * threshold and the packet is too big to copy, or (b) the packet should * be copied but there is no memory for the copy. */ static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl, unsigned int len, unsigned int drop_thres) { struct sk_buff *skb = NULL; struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; prefetch(sd->skb->data); fl->credits--; if (len <= SGE_RX_COPY_THRES) { skb = alloc_skb(len, GFP_ATOMIC); if (likely(skb != NULL)) { __skb_put(skb, len); pci_dma_sync_single_for_cpu(adap->pdev, dma_unmap_addr(sd, dma_addr), len, PCI_DMA_FROMDEVICE); memcpy(skb->data, sd->skb->data, len); pci_dma_sync_single_for_device(adap->pdev, dma_unmap_addr(sd, dma_addr), len, PCI_DMA_FROMDEVICE); } else if (!drop_thres) goto use_orig_buf; recycle: recycle_rx_buf(adap, fl, fl->cidx); return skb; } if (unlikely(fl->credits < drop_thres) && refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1), GFP_ATOMIC | __GFP_COMP) == 0) goto recycle; use_orig_buf: pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr), fl->buf_size, PCI_DMA_FROMDEVICE); skb = sd->skb; skb_put(skb, len); __refill_fl(adap, fl); return skb; } /** * get_packet_pg - return the next ingress packet buffer from a free list * @adap: the adapter that received the packet * @fl: the SGE free list holding the packet * @len: the packet length including any SGE padding * @drop_thres: # of remaining buffers before we start dropping packets * * Get the next packet from a free list populated with page chunks. * If the packet is small we make a copy and recycle the original buffer, * otherwise we attach the original buffer as a page fragment to a fresh * sk_buff. 
If a positive drop threshold is supplied packets are dropped * and their buffers recycled if (a) the number of remaining buffers is * under the threshold and the packet is too big to copy, or (b) there's * no system memory. * * Note: this function is similar to @get_packet but deals with Rx buffers * that are page chunks rather than sk_buffs. */ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl, struct sge_rspq *q, unsigned int len, unsigned int drop_thres) { struct sk_buff *newskb, *skb; struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr); newskb = skb = q->pg_skb; if (!skb && (len <= SGE_RX_COPY_THRES)) { newskb = alloc_skb(len, GFP_ATOMIC); if (likely(newskb != NULL)) { __skb_put(newskb, len); pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); memcpy(newskb->data, sd->pg_chunk.va, len); pci_dma_sync_single_for_device(adap->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); } else if (!drop_thres) return NULL; recycle: fl->credits--; recycle_rx_buf(adap, fl, fl->cidx); q->rx_recycle_buf++; return newskb; } if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres))) goto recycle; prefetch(sd->pg_chunk.p_cnt); if (!skb) newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC); if (unlikely(!newskb)) { if (!drop_thres) return NULL; goto recycle; } pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); (*sd->pg_chunk.p_cnt)--; if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page) pci_unmap_page(adap->pdev, sd->pg_chunk.mapping, fl->alloc_size, PCI_DMA_FROMDEVICE); if (!skb) { __skb_put(newskb, SGE_RX_PULL_LEN); memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN); skb_fill_page_desc(newskb, 0, sd->pg_chunk.page, sd->pg_chunk.offset + SGE_RX_PULL_LEN, len - SGE_RX_PULL_LEN); newskb->len = len; newskb->data_len = len - SGE_RX_PULL_LEN; newskb->truesize += newskb->data_len; } else { skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags, 
sd->pg_chunk.page, sd->pg_chunk.offset, len); newskb->len += len; newskb->data_len += len; newskb->truesize += len; } fl->credits--; /* * We do not refill FLs here, we let the caller do it to overlap a * prefetch. */ return newskb; } /** * get_imm_packet - return the next ingress packet buffer from a response * @resp: the response descriptor containing the packet data * * Return a packet containing the immediate data of the given response. */ static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp) { struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC); if (skb) { __skb_put(skb, IMMED_PKT_SIZE); skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE); } return skb; } /** * calc_tx_descs - calculate the number of Tx descriptors for a packet * @skb: the packet * * Returns the number of Tx descriptors needed for the given Ethernet * packet. Ethernet packets require addition of WR and CPL headers. */ static inline unsigned int calc_tx_descs(const struct sk_buff *skb) { unsigned int flits; if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt)) return 1; flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2; if (skb_shinfo(skb)->gso_size) flits++; return flits_to_desc(flits); } /** * make_sgl - populate a scatter/gather list for a packet * @skb: the packet * @sgp: the SGL to populate * @start: start address of skb main body data to include in the SGL * @len: length of skb main body data to include in the SGL * @pdev: the PCI device * * Generates a scatter/gather list for the buffers that make up a packet * and returns the SGL size in 8-byte words. The caller must size the SGL * appropriately. 
 */
static inline unsigned int make_sgl(const struct sk_buff *skb,
				    struct sg_ent *sgp, unsigned char *start,
				    unsigned int len, struct pci_dev *pdev)
{
	dma_addr_t mapping;
	unsigned int i, j = 0, nfrags;

	if (len) {
		/* first entry covers the skb linear data */
		mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
		sgp->len[0] = cpu_to_be32(len);
		sgp->addr[0] = cpu_to_be64(mapping);
		j = 1;
	}

	nfrags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = skb_frag_dma_map(&pdev->dev, frag, 0,
					   skb_frag_size(frag),
					   DMA_TO_DEVICE);
		sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
		sgp->addr[j] = cpu_to_be64(mapping);
		/* each sg_ent holds two addr/len pairs; advance after pair 1 */
		j ^= 1;
		if (j == 0)
			++sgp;
	}
	if (j)
		sgp->len[j] = 0;	/* terminate a half-filled entry */
	/* each addr/len pair takes 1.5 flits (3 half-flits) */
	return ((nfrags + (len != 0)) * 3) / 2 + j;
}

/**
 *	check_ring_tx_db - check and potentially ring a Tx queue's doorbell
 *	@adap: the adapter
 *	@q: the Tx queue
 *
 *	Ring the doorbell if a Tx queue is asleep.  There is a natural race,
 *	where the HW is going to sleep just after we checked, however, then
 *	the interrupt handler will detect the outstanding TX packet and ring
 *	the doorbell for us.
 *
 *	When GTS is disabled we unconditionally ring the doorbell.
 */
static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
{
#if USE_GTS
	clear_bit(TXQ_LAST_PKT_DB, &q->flags);
	if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
		/* queue was asleep: mark it running and kick the HW */
		set_bit(TXQ_LAST_PKT_DB, &q->flags);
		t3_write_reg(adap, A_SG_KDOORBELL,
			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	}
#else
	wmb();			/* write descriptors before telling HW */
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
#endif
}

/*
 * Write the generation bit into the last flit of a Tx descriptor.  Only
 * needed when the HW uses two generation bits (SGE_NUM_GENBITS == 2).
 */
static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
{
#if SGE_NUM_GENBITS == 2
	d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
#endif
}

/**
 *	write_wr_hdr_sgl - write a WR header and, optionally, SGL
 *	@ndesc: number of Tx descriptors spanned by the SGL
 *	@skb: the packet corresponding to the WR
 *	@d: first Tx descriptor to be written
 *	@pidx: index of above descriptors
 *	@q: the SGE Tx queue
 *	@sgl: the SGL
 *	@flits: number of flits to the start of the SGL in the first descriptor
 *	@sgl_flits: the SGL size in flits
 *	@gen: the Tx descriptor generation
 *	@wr_hi: top 32 bits of WR header based on WR type (big endian)
 *	@wr_lo: low 32 bits of WR header based on WR type (big endian)
 *
 *	Write a work request header and an associated SGL.  If the SGL is
 *	small enough to fit into one Tx descriptor it has already been written
 *	and we just need to write the WR header.  Otherwise we distribute the
 *	SGL across the number of descriptors it spans.
 */
static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
			     struct tx_desc *d, unsigned int pidx,
			     const struct sge_txq *q,
			     const struct sg_ent *sgl,
			     unsigned int flits, unsigned int sgl_flits,
			     unsigned int gen, __be32 wr_hi,
			     __be32 wr_lo)
{
	struct work_request_hdr *wrp = (struct work_request_hdr *)d;
	struct tx_sw_desc *sd = &q->sdesc[pidx];

	sd->skb = skb;
	if (need_skb_unmap()) {
		/* record where the SGL starts so the reclaim path can unmap */
		sd->fragidx = 0;
		sd->addr_idx = 0;
		sd->sflit = flits;
	}

	if (likely(ndesc == 1)) {
		sd->eop = 1;
		wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
				   V_WR_SGLSFLT(flits)) | wr_hi;
		wmb();	/* body before the wr_lo write that hands it to HW */
		wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
				   V_WR_GEN(gen)) | wr_lo;
		wr_gen2(d, gen);
	} else {
		unsigned int ogen = gen;
		const u64 *fp = (const u64 *)sgl;
		struct work_request_hdr *wp = wrp;

		/* first descriptor: SOP but not EOP */
		wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
				   V_WR_SGLSFLT(flits)) | wr_hi;

		while (sgl_flits) {
			unsigned int avail = WR_FLITS - flits;

			if (avail > sgl_flits)
				avail = sgl_flits;
			memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
			sgl_flits -= avail;
			ndesc--;
			if (!sgl_flits)
				break;

			fp += avail;
			d++;
			sd->eop = 0;
			sd++;
			if (++pidx == q->size) {
				/* wrap the ring and flip the generation bit */
				pidx = 0;
				gen ^= 1;
				d = q->desc;
				sd = q->sdesc;
			}

			sd->skb = skb;
			wrp = (struct work_request_hdr *)d;
			wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
					   V_WR_SGLSFLT(1)) | wr_hi;
			wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
							sgl_flits + 1)) |
					   V_WR_GEN(gen)) | wr_lo;
			wr_gen2(d, gen);
			flits = 1;
		}
		sd->eop = 1;
		wrp->wr_hi |= htonl(F_WR_EOP);
		wmb();	/* all continuation descriptors before enabling SOP */
		/* writing wr_lo of the first descriptor last hands the whole
		 * chain to the HW atomically
		 */
		wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
		wr_gen2((struct tx_desc *)wp, ogen);
		WARN_ON(ndesc != 0);
	}
}

/**
 *	write_tx_pkt_wr - write a TX_PKT work request
 *	@adap: the adapter
 *	@skb: the packet to send
 *	@pi: the egress interface
 *	@pidx: index of the first Tx descriptor to write
 *	@gen: the generation value to use
 *	@q: the Tx queue
 *	@ndesc: number of descriptors the packet will occupy
 *	@compl: the value of the COMPL bit to use
 *
 *	Generate a TX_PKT work request to send the supplied packet.
*/ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, const struct port_info *pi, unsigned int pidx, unsigned int gen, struct sge_txq *q, unsigned int ndesc, unsigned int compl) { unsigned int flits, sgl_flits, cntrl, tso_info; struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; struct tx_desc *d = &q->desc[pidx]; struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d; cpl->len = htonl(skb->len); cntrl = V_TXPKT_INTF(pi->port_id); if (vlan_tx_tag_present(skb)) cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb)); tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size); if (tso_info) { int eth_type; struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl; d->flit[2] = 0; cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO); hdr->cntrl = htonl(cntrl); eth_type = skb_network_offset(skb) == ETH_HLEN ? CPL_ETH_II : CPL_ETH_II_VLAN; tso_info |= V_LSO_ETH_TYPE(eth_type) | V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) | V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff); hdr->lso_info = htonl(tso_info); flits = 3; } else { cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT); cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */ cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL); cpl->cntrl = htonl(cntrl); if (skb->len <= WR_LEN - sizeof(*cpl)) { q->sdesc[pidx].skb = NULL; if (!skb->data_len) skb_copy_from_linear_data(skb, &d->flit[2], skb->len); else skb_copy_bits(skb, 0, &d->flit[2], skb->len); flits = (skb->len + 7) / 8 + 2; cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) | V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | F_WR_SOP | F_WR_EOP | compl); wmb(); cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) | V_WR_TID(q->token)); wr_gen2(d, gen); kfree_skb(skb); return; } flits = 2; } sgp = ndesc == 1 ? 
(struct sg_ent *)&d->flit[flits] : sgl; sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev); write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl), htonl(V_WR_TID(q->token))); } static inline void t3_stop_tx_queue(struct netdev_queue *txq, struct sge_qset *qs, struct sge_txq *q) { netif_tx_stop_queue(txq); set_bit(TXQ_ETH, &qs->txq_stopped); q->stops++; } /** * eth_xmit - add a packet to the Ethernet Tx queue * @skb: the packet * @dev: the egress net device * * Add a packet to an SGE Tx queue. Runs with softirqs disabled. */ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) { int qidx; unsigned int ndesc, pidx, credits, gen, compl; const struct port_info *pi = netdev_priv(dev); struct adapter *adap = pi->adapter; struct netdev_queue *txq; struct sge_qset *qs; struct sge_txq *q; /* * The chip min packet length is 9 octets but play safe and reject * anything shorter than an Ethernet header. 
*/ if (unlikely(skb->len < ETH_HLEN)) { dev_kfree_skb(skb); return NETDEV_TX_OK; } qidx = skb_get_queue_mapping(skb); qs = &pi->qs[qidx]; q = &qs->txq[TXQ_ETH]; txq = netdev_get_tx_queue(dev, qidx); reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); credits = q->size - q->in_use; ndesc = calc_tx_descs(skb); if (unlikely(credits < ndesc)) { t3_stop_tx_queue(txq, qs, q); dev_err(&adap->pdev->dev, "%s: Tx ring %u full while queue awake!\n", dev->name, q->cntxt_id & 7); return NETDEV_TX_BUSY; } q->in_use += ndesc; if (unlikely(credits - ndesc < q->stop_thres)) { t3_stop_tx_queue(txq, qs, q); if (should_restart_tx(q) && test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) { q->restarts++; netif_tx_start_queue(txq); } } gen = q->gen; q->unacked += ndesc; compl = (q->unacked & 8) << (S_WR_COMPL - 3); q->unacked &= 7; pidx = q->pidx; q->pidx += ndesc; if (q->pidx >= q->size) { q->pidx -= q->size; q->gen ^= 1; } /* update port statistics */ if (skb->ip_summed == CHECKSUM_COMPLETE) qs->port_stats[SGE_PSTAT_TX_CSUM]++; if (skb_shinfo(skb)->gso_size) qs->port_stats[SGE_PSTAT_TSO]++; if (vlan_tx_tag_present(skb)) qs->port_stats[SGE_PSTAT_VLANINS]++; /* * We do not use Tx completion interrupts to free DMAd Tx packets. * This is good for performance but means that we rely on new Tx * packets arriving to run the destructors of completed packets, * which open up space in their sockets' send queues. Sometimes * we do not get such new packets causing Tx to stall. A single * UDP transmitter is a good example of this situation. We have * a clean up timer that periodically reclaims completed packets * but it doesn't run often enough (nor do we want it to) to prevent * lengthy stalls. A solution to this problem is to run the * destructor early, after the packet is queued but before it's DMAd. 
* A cons is that we lie to socket memory accounting, but the amount * of extra memory is reasonable (limited by the number of Tx * descriptors), the packets do actually get freed quickly by new * packets almost always, and for protocols like TCP that wait for * acks to really free up the data the extra memory is even less. * On the positive side we run the destructors on the sending CPU * rather than on a potentially different completing CPU, usually a * good thing. We also run them without holding our Tx queue lock, * unlike what reclaim_completed_tx() would otherwise do. * * Run the destructor before telling the DMA engine about the packet * to make sure it doesn't complete and get freed prematurely. */ if (likely(!skb_shared(skb))) skb_orphan(skb); write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl); check_ring_tx_db(adap, q); return NETDEV_TX_OK; } /** * write_imm - write a packet into a Tx descriptor as immediate data * @d: the Tx descriptor to write * @skb: the packet * @len: the length of packet data to write as immediate data * @gen: the generation bit value to write * * Writes a packet as immediate data into a Tx descriptor. The packet * contains a work request at its beginning. We must write the packet * carefully so the SGE doesn't read it accidentally before it's written * in its entirety. 
 */
static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
			     unsigned int len, unsigned int gen)
{
	struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
	struct work_request_hdr *to = (struct work_request_hdr *)d;

	/* copy the WR body (everything past the header) into the descriptor */
	if (likely(!skb->data_len))
		memcpy(&to[1], &from[1], len - sizeof(*from));
	else
		skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));

	to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
					V_WR_BCNTLFLT(len & 7));
	wmb();	/* body must be visible before wr_lo hands the WR to HW */
	to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
					V_WR_LEN((len + 7) / 8));
	wr_gen2(d, gen);
	/* the data has been copied, the skb is no longer needed */
	kfree_skb(skb);
}

/**
 *	check_desc_avail - check descriptor availability on a send queue
 *	@adap: the adapter
 *	@q: the send queue
 *	@skb: the packet needing the descriptors
 *	@ndesc: the number of Tx descriptors needed
 *	@qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
 *
 *	Checks if the requested number of Tx descriptors is available on an
 *	SGE send queue.  If the queue is already suspended or not enough
 *	descriptors are available the packet is queued for later transmission.
 *	Must be called with the Tx queue locked.
 *
 *	Returns 0 if enough descriptors are available, 1 if there aren't
 *	enough descriptors and the packet has been queued, and 2 if the caller
 *	needs to retry because there weren't enough descriptors at the
 *	beginning of the call but some freed up in the mean time.
 */
static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
				   struct sk_buff *skb, unsigned int ndesc,
				   unsigned int qid)
{
	/* packets already waiting must stay ordered behind them */
	if (unlikely(!skb_queue_empty(&q->sendq))) {
	      addq_exit:__skb_queue_tail(&q->sendq, skb);
		return 1;
	}
	if (unlikely(q->size - q->in_use < ndesc)) {
		struct sge_qset *qs = txq_to_qset(q, qid);

		set_bit(qid, &qs->txq_stopped);
		smp_mb__after_clear_bit();

		/* space may have been freed between the check and the stop */
		if (should_restart_tx(q) &&
		    test_and_clear_bit(qid, &qs->txq_stopped))
			return 2;

		q->stops++;
		goto addq_exit;
	}
	return 0;
}

/**
 *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 *	@q: the SGE control Tx queue
 *
 *	This is a variant of reclaim_completed_tx() that is used for Tx queues
 *	that send only immediate data (presently just the control queues) and
 *	thus do not have any sk_buffs to release.
 */
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	q->in_use -= reclaim;
	q->cleaned += reclaim;
}

/* Can the packet travel entirely as immediate data in one descriptor? */
static inline int immediate(const struct sk_buff *skb)
{
	return skb->len <= WR_LEN;
}

/**
 *	ctrl_xmit - send a packet through an SGE control Tx queue
 *	@adap: the adapter
 *	@q: the control queue
 *	@skb: the packet
 *
 *	Send a packet through an SGE control Tx queue.  Packets sent through
 *	a control queue must fit entirely as immediate data in a single Tx
 *	descriptor and have no page fragments.
 */
static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
		     struct sk_buff *skb)
{
	int ret;
	struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;

	if (unlikely(!immediate(skb))) {
		/* control packets must fit in one descriptor; drop otherwise */
		WARN_ON(1);
		dev_kfree_skb(skb);
		return NET_XMIT_SUCCESS;
	}

	wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
	wrp->wr_lo = htonl(V_WR_TID(q->token));

	spin_lock(&q->lock);
      again:reclaim_completed_tx_imm(q);

	ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
	if (unlikely(ret)) {
		if (ret == 1) {
			/* queued on sendq for later; restart_ctrlq sends it */
			spin_unlock(&q->lock);
			return NET_XMIT_CN;
		}
		goto again;	/* ret == 2: space freed up, retry */
	}

	write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);

	q->in_use++;
	if (++q->pidx >= q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}
	spin_unlock(&q->lock);
	wmb();	/* descriptors visible before the doorbell */
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	return NET_XMIT_SUCCESS;
}

/**
 *	restart_ctrlq - restart a suspended control queue
 *	@data: the queue set containing the control queue (tasklet argument)
 *
 *	Resumes transmission on a suspended Tx control queue.
 */
static void restart_ctrlq(unsigned long data)
{
	struct sk_buff *skb;
	struct sge_qset *qs = (struct sge_qset *)data;
	struct sge_txq *q = &qs->txq[TXQ_CTRL];

	spin_lock(&q->lock);
      again:reclaim_completed_tx_imm(q);

	while (q->in_use < q->size &&
	       (skb = __skb_dequeue(&q->sendq)) != NULL) {

		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);

		if (++q->pidx >= q->size) {
			q->pidx = 0;
			q->gen ^= 1;
		}
		q->in_use++;
	}

	if (!skb_queue_empty(&q->sendq)) {
		/* still backlogged: stop again, but re-check for a race with
		 * newly completed descriptors before giving up
		 */
		set_bit(TXQ_CTRL, &qs->txq_stopped);
		smp_mb__after_clear_bit();

		if (should_restart_tx(q) &&
		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
			goto again;
		q->stops++;
	}

	spin_unlock(&q->lock);
	wmb();	/* descriptors visible before the doorbell */
	t3_write_reg(qs->adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}

/*
 * Send a management message through control queue 0
 */
int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
	local_bh_enable();

	return ret;
}

/**
 *	deferred_unmap_destructor - unmap a packet when it is freed
 *	@skb:
 the packet
 *
 *	This is the packet destructor used for Tx packets that need to remain
 *	mapped until they are freed rather than until their Tx descriptors are
 *	freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	int i;
	const dma_addr_t *p;
	const struct skb_shared_info *si;
	const struct deferred_unmap_info *dui;

	/* the DMA addresses were stashed in skb->head by
	 * setup_deferred_unmapping()
	 */
	dui = (struct deferred_unmap_info *)skb->head;
	p = dui->addr;

	if (skb->tail - skb->transport_header)
		pci_unmap_single(dui->pdev, *p++,
				 skb->tail - skb->transport_header,
				 PCI_DMA_TODEVICE);

	si = skb_shinfo(skb);
	for (i = 0; i < si->nr_frags; i++)
		pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]),
			       PCI_DMA_TODEVICE);
}

/*
 * Record the DMA addresses of an SGL in skb->head so that
 * deferred_unmap_destructor() can unmap them when the skb is freed.
 */
static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
				     const struct sg_ent *sgl, int sgl_flits)
{
	dma_addr_t *p;
	struct deferred_unmap_info *dui;

	dui = (struct deferred_unmap_info *)skb->head;
	dui->pdev = pdev;
	/* each full sg_ent (3 flits) holds two addresses */
	for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
		*p++ = be64_to_cpu(sgl->addr[0]);
		*p++ = be64_to_cpu(sgl->addr[1]);
	}
	if (sgl_flits)
		*p = be64_to_cpu(sgl->addr[0]);
}

/**
 *	write_ofld_wr - write an offload work request
 *	@adap: the adapter
 *	@skb: the packet to send
 *	@q: the Tx queue
 *	@pidx: index of the first Tx descriptor to write
 *	@gen: the generation value to use
 *	@ndesc: number of descriptors the packet will occupy
 *
 *	Write an offload work request to send the supplied packet.  The packet
 *	data already carry the work request with most fields populated.
 */
static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
			  struct sge_txq *q, unsigned int pidx,
			  unsigned int gen, unsigned int ndesc)
{
	unsigned int sgl_flits, flits;
	struct work_request_hdr *from;
	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
	struct tx_desc *d = &q->desc[pidx];

	if (immediate(skb)) {
		q->sdesc[pidx].skb = NULL;
		write_imm(d, skb, skb->len, gen);
		return;
	}

	/* Only TX_DATA builds SGLs */

	from = (struct work_request_hdr *)skb->data;
	/* copy the pre-built WR headers up to the transport header */
	memcpy(&d->flit[1], &from[1],
	       skb_transport_offset(skb) - sizeof(*from));

	flits = skb_transport_offset(skb) / 8;
	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
	sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
			     skb->tail - skb->transport_header,
			     adap->pdev);
	if (need_skb_unmap()) {
		/* unmap at skb free time, not at descriptor reclaim time */
		setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
		skb->destructor = deferred_unmap_destructor;
	}

	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
			 gen, from->wr_hi, from->wr_lo);
}

/**
 *	calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
 *	@skb: the packet
 *
 *	Returns the number of Tx descriptors needed for the given offload
 *	packet.  These packets are already fully constructed.
 */
static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (skb->len <= WR_LEN)
		return 1;	/* packet fits as immediate data */

	flits = skb_transport_offset(skb) / 8;	/* headers */
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb->tail != skb->transport_header)
		cnt++;	/* linear payload beyond the headers needs an entry */
	return flits_to_desc(flits + sgl_len(cnt));
}

/**
 *	ofld_xmit - send a packet through an offload queue
 *	@adap: the adapter
 *	@q: the Tx offload queue
 *	@skb: the packet
 *
 *	Send an offload packet through an SGE offload queue.
 */
static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
		     struct sk_buff *skb)
{
	int ret;
	unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;

	spin_lock(&q->lock);
again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);

	ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
	if (unlikely(ret)) {
		if (ret == 1) {
			skb->priority = ndesc;	/* save for restart */
			spin_unlock(&q->lock);
			return NET_XMIT_CN;
		}
		goto again;	/* ret == 2: space freed up, retry */
	}

	gen = q->gen;
	q->in_use += ndesc;
	pidx = q->pidx;
	q->pidx += ndesc;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->gen ^= 1;
	}
	spin_unlock(&q->lock);

	write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
	check_ring_tx_db(adap, q);
	return NET_XMIT_SUCCESS;
}

/**
 *	restart_offloadq - restart a suspended offload queue
 *	@data: the queue set containing the offload queue (tasklet argument)
 *
 *	Resumes transmission on a suspended Tx offload queue.
 */
static void restart_offloadq(unsigned long data)
{
	struct sk_buff *skb;
	struct sge_qset *qs = (struct sge_qset *)data;
	struct sge_txq *q = &qs->txq[TXQ_OFLD];
	const struct port_info *pi = netdev_priv(qs->netdev);
	struct adapter *adap = pi->adapter;

	spin_lock(&q->lock);
again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);

	while ((skb = skb_peek(&q->sendq)) != NULL) {
		unsigned int gen, pidx;
		/* descriptor count was stashed in skb->priority by ofld_xmit */
		unsigned int ndesc = skb->priority;

		if (unlikely(q->size - q->in_use < ndesc)) {
			set_bit(TXQ_OFLD, &qs->txq_stopped);
			smp_mb__after_clear_bit();

			if (should_restart_tx(q) &&
			    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
				goto again;
			q->stops++;
			break;
		}

		gen = q->gen;
		q->in_use += ndesc;
		pidx = q->pidx;
		q->pidx += ndesc;
		if (q->pidx >= q->size) {
			q->pidx -= q->size;
			q->gen ^= 1;
		}
		__skb_unlink(skb, &q->sendq);
		/* drop the lock while writing the (possibly long) WR */
		spin_unlock(&q->lock);

		write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
		spin_lock(&q->lock);
	}
	spin_unlock(&q->lock);

#if USE_GTS
	set_bit(TXQ_RUNNING, &q->flags);
	set_bit(TXQ_LAST_PKT_DB, &q->flags);
#endif
	wmb();	/* descriptors visible before the doorbell */
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}

/**
 *	queue_set - return the queue set a packet
should use * @skb: the packet * * Maps a packet to the SGE queue set it should use. The desired queue * set is carried in bits 1-3 in the packet's priority. */ static inline int queue_set(const struct sk_buff *skb) { return skb->priority >> 1; } /** * is_ctrl_pkt - return whether an offload packet is a control packet * @skb: the packet * * Determines whether an offload packet should use an OFLD or a CTRL * Tx queue. This is indicated by bit 0 in the packet's priority. */ static inline int is_ctrl_pkt(const struct sk_buff *skb) { return skb->priority & 1; } /** * t3_offload_tx - send an offload packet * @tdev: the offload device to send to * @skb: the packet * * Sends an offload packet. We use the packet priority to select the * appropriate Tx queue as follows: bit 0 indicates whether the packet * should be sent as regular or control, bits 1-3 select the queue set. */ int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb) { struct adapter *adap = tdev2adap(tdev); struct sge_qset *qs = &adap->sge.qs[queue_set(skb)]; if (unlikely(is_ctrl_pkt(skb))) return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb); return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb); } /** * offload_enqueue - add an offload packet to an SGE offload receive queue * @q: the SGE response queue * @skb: the packet * * Add a new offload packet to an SGE response queue's offload packet * queue. If the packet is the first on the queue it schedules the RX * softirq to process the queue. 
*/ static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb) { int was_empty = skb_queue_empty(&q->rx_queue); __skb_queue_tail(&q->rx_queue, skb); if (was_empty) { struct sge_qset *qs = rspq_to_qset(q); napi_schedule(&qs->napi); } } /** * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts * @tdev: the offload device that will be receiving the packets * @q: the SGE response queue that assembled the bundle * @skbs: the partial bundle * @n: the number of packets in the bundle * * Delivers a (partial) bundle of Rx offload packets to an offload device. */ static inline void deliver_partial_bundle(struct t3cdev *tdev, struct sge_rspq *q, struct sk_buff *skbs[], int n) { if (n) { q->offload_bundles++; tdev->recv(tdev, skbs, n); } } /** * ofld_poll - NAPI handler for offload packets in interrupt mode * @dev: the network device doing the polling * @budget: polling budget * * The NAPI handler for offload packets when a response queue is serviced * by the hard interrupt handler, i.e., when it's operating in non-polling * mode. Creates small packet batches and sends them through the offload * receive handler. Batches need to be of modest size as we do prefetches * on the packets in each. 
 */
static int ofld_poll(struct napi_struct *napi, int budget)
{
	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
	struct sge_rspq *q = &qs->rspq;
	struct adapter *adapter = qs->adap;
	int work_done = 0;

	while (work_done < budget) {
		struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
		struct sk_buff_head queue;
		int ngathered;

		/* splice the whole Rx queue to a private list so it can be
		 * walked without holding the lock
		 */
		spin_lock_irq(&q->lock);
		__skb_queue_head_init(&queue);
		skb_queue_splice_init(&q->rx_queue, &queue);
		if (skb_queue_empty(&queue)) {
			napi_complete(napi);
			spin_unlock_irq(&q->lock);
			return work_done;
		}
		spin_unlock_irq(&q->lock);

		ngathered = 0;
		skb_queue_walk_safe(&queue, skb, tmp) {
			if (work_done >= budget)
				break;
			work_done++;

			__skb_unlink(skb, &queue);
			prefetch(skb->data);
			skbs[ngathered] = skb;
			if (++ngathered == RX_BUNDLE_SIZE) {
				q->offload_bundles++;
				adapter->tdev.recv(&adapter->tdev, skbs,
						   ngathered);
				ngathered = 0;
			}
		}
		if (!skb_queue_empty(&queue)) {
			/* splice remaining packets back onto Rx queue */
			spin_lock_irq(&q->lock);
			skb_queue_splice(&queue, &q->rx_queue);
			spin_unlock_irq(&q->lock);
		}
		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
	}

	return work_done;
}

/**
 *	rx_offload - process a received offload packet
 *	@tdev: the offload device receiving the packet
 *	@rq: the response queue that received the packet
 *	@skb: the packet
 *	@rx_gather: a gather list of packets if we are building a bundle
 *	@gather_idx: index of the next available slot in the bundle
 *
 *	Process an ingress offload packet and add it to the offload ingress
 *	queue.  Returns the index of the next available slot in the bundle.
 */
static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
			     struct sk_buff *skb, struct sk_buff *rx_gather[],
			     unsigned int gather_idx)
{
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	if (rq->polling) {
		/* NAPI mode: batch packets and hand off full bundles */
		rx_gather[gather_idx++] = skb;
		if (gather_idx == RX_BUNDLE_SIZE) {
			tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
			gather_idx = 0;
			rq->offload_bundles++;
		}
	} else
		/* interrupt mode: queue for ofld_poll() to pick up */
		offload_enqueue(rq, skb);

	return gather_idx;
}

/**
 *	restart_tx - check whether to restart suspended Tx queues
 *	@qs: the queue set to resume
 *
 *	Restarts suspended Tx queues of an SGE queue set if they have enough
 *	free resources to resume operation.
 */
static void restart_tx(struct sge_qset *qs)
{
	if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
	    should_restart_tx(&qs->txq[TXQ_ETH]) &&
	    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
		qs->txq[TXQ_ETH].restarts++;
		if (netif_running(qs->netdev))
			netif_tx_wake_queue(qs->tx_q);
	}

	if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
	    should_restart_tx(&qs->txq[TXQ_OFLD]) &&
	    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
		qs->txq[TXQ_OFLD].restarts++;
		/* offload/ctrl queues drain from tasklet context */
		tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
	}

	if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
	    should_restart_tx(&qs->txq[TXQ_CTRL]) &&
	    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
		qs->txq[TXQ_CTRL].restarts++;
		tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
	}
}

/**
 *	cxgb3_arp_process - process an ARP request probing a private IP address
 *	@pi: the port the ARP request arrived on
 *	@skb: the skbuff containing the ARP request
 *
 *	Check if the ARP request is probing the private IP address
 *	dedicated to iSCSI, generate an ARP reply if so.
 */
static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	unsigned char *sha;
	__be32 sip, tip;

	if (!dev)
		return;

	skb_reset_network_header(skb);
	arp = arp_hdr(skb);

	if (arp->ar_op != htons(ARPOP_REQUEST))
		return;

	/* walk the ARP payload: sender HW, sender IP, target HW, target IP */
	arp_ptr = (unsigned char *)(arp + 1);
	sha = arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&sip, arp_ptr, sizeof(sip));
	arp_ptr += sizeof(sip);
	arp_ptr += dev->addr_len;
	memcpy(&tip, arp_ptr, sizeof(tip));

	if (tip != pi->iscsi_ipv4addr)
		return;

	/* reply on behalf of the iSCSI interface's private address */
	arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
		 pi->iscsic.mac_addr, sha);
}

/* True if the packet carries the ARP ethertype */
static inline int is_arp(struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_ARP);
}

/*
 * Divert iSCSI provisioning traffic: answer ARP probes for the private
 * iSCSI address locally and pass everything else to the iSCSI receive hook.
 */
static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
					struct sk_buff *skb)
{
	if (is_arp(skb)) {
		cxgb3_arp_process(pi, skb);
		return;
	}

	if (pi->iscsic.recv)
		pi->iscsic.recv(pi, skb);

}

/**
 *	rx_eth - process an ingress ethernet packet
 *	@adap: the adapter
 *	@rq: the response queue that received the packet
 *	@skb: the packet
 *	@pad: amount of padding at the start of the buffer
 *	@lro: whether the packet is a candidate for GRO
 *
 *	Process an ingress ethernet packet and deliver it to the stack.
 *	The padding is 2 if the packet was delivered in an Rx buffer and 0
 *	if it was immediate data in a response.
 */
static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
		   struct sk_buff *skb, int pad, int lro)
{
	struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
	struct sge_qset *qs = rspq_to_qset(rq);
	struct port_info *pi;

	/* strip the CPL header (and padding) before handing to the stack */
	skb_pull(skb, sizeof(*p) + pad);
	skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
	pi = netdev_priv(skb->dev);
	if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid &&
	    p->csum == htons(0xffff) && !p->fragment) {
		qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
		skb_checksum_none_assert(skb);
	skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);

	if (p->vlan_valid) {
		qs->port_stats[SGE_PSTAT_VLANEX]++;
		__vlan_hwaccel_put_tag(skb, ntohs(p->vlan));
	}
	if (rq->polling) {
		if (lro)
			napi_gro_receive(&qs->napi, skb);
		else {
			if (unlikely(pi->iscsic.flags))
				cxgb3_process_iscsi_prov_pack(pi, skb);
			netif_receive_skb(skb);
		}
	} else
		netif_rx(skb);
}

/* True if the RSS hash type indicates a TCP/IPv4 4-tuple (GRO candidate) */
static inline int is_eth_tcp(u32 rss)
{
	return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
}

/**
 *	lro_add_page - add a page chunk to an LRO session
 *	@adap: the adapter
 *	@qs: the associated queue set
 *	@fl: the free list containing the page chunk to add
 *	@len: packet length
 *	@complete: Indicates the last fragment of a frame
 *
 *	Add a received packet contained in a page chunk to an existing LRO
 *	session.
 */
static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
			 struct sge_fl *fl, int len, int complete)
{
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
	struct port_info *pi = netdev_priv(qs->netdev);
	struct sk_buff *skb = NULL;
	struct cpl_rx_pkt *cpl;
	struct skb_frag_struct *rx_frag;
	int nr_frags;
	int offset = 0;

	if (!qs->nomem) {
		skb = napi_get_frags(&qs->napi);
		qs->nomem = !skb;	/* remember allocation failure */
	}

	fl->credits--;

	pci_dma_sync_single_for_cpu(adap->pdev,
				    dma_unmap_addr(sd, dma_addr),
				    fl->buf_size - SGE_PG_RSVD,
				    PCI_DMA_FROMDEVICE);

	/* release the page mapping once its last chunk is consumed */
	(*sd->pg_chunk.p_cnt)--;
	if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
		pci_unmap_page(adap->pdev,
			       sd->pg_chunk.mapping,
			       fl->alloc_size,
			       PCI_DMA_FROMDEVICE);

	if (!skb) {
		/* no skb: drop the chunk; clear nomem at frame end so the
		 * next frame retries allocation
		 */
		put_page(sd->pg_chunk.page);
		if (complete)
			qs->nomem = 0;
		return;
	}

	rx_frag = skb_shinfo(skb)->frags;
	nr_frags = skb_shinfo(skb)->nr_frags;

	if (!nr_frags) {
		/* first chunk of the frame: it starts with the CPL header */
		offset = 2 + sizeof(struct cpl_rx_pkt);
		cpl = qs->lro_va = sd->pg_chunk.va + 2;

		if ((qs->netdev->features & NETIF_F_RXCSUM) &&
		     cpl->csum_valid && cpl->csum == htons(0xffff)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
		} else
			skb->ip_summed = CHECKSUM_NONE;
	} else
		cpl = qs->lro_va;	/* saved from the first chunk */

	len -= offset;

	rx_frag += nr_frags;
	__skb_frag_set_page(rx_frag, sd->pg_chunk.page);
	rx_frag->page_offset = sd->pg_chunk.offset + offset;
	skb_frag_size_set(rx_frag, len);

	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;
	skb_shinfo(skb)->nr_frags++;

	if (!complete)
		return;

	/* last fragment: hand the assembled frame to GRO */
	skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);

	if (cpl->vlan_valid)
		__vlan_hwaccel_put_tag(skb, ntohs(cpl->vlan));
	napi_gro_frags(&qs->napi);
}

/**
 *	handle_rsp_cntrl_info - handles control information in a response
 *	@qs: the queue set corresponding to the response
 *	@flags: the response control flags
 *
 *	Handles the control information of an SGE response, such as GTS
 *	indications and completion credits for the queue set's Tx queues.
 *	HW coalesces credits, we don't do any extra SW coalescing.
 */
static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
{
	unsigned int credits;

#if USE_GTS
	if (flags & F_RSPD_TXQ0_GTS)
		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
#endif

	credits = G_RSPD_TXQ0_CR(flags);
	if (credits)
		qs->txq[TXQ_ETH].processed += credits;

	/* note: TXQ2 credits belong to the control queue */
	credits = G_RSPD_TXQ2_CR(flags);
	if (credits)
		qs->txq[TXQ_CTRL].processed += credits;

# if USE_GTS
	if (flags & F_RSPD_TXQ1_GTS)
		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
# endif
	credits = G_RSPD_TXQ1_CR(flags);
	if (credits)
		qs->txq[TXQ_OFLD].processed += credits;
}

/**
 *	check_ring_db - check if we need to ring any doorbells
 *	@adap: the adapter
 *	@qs: the queue set whose Tx queues are to be examined
 *	@sleeping: indicates which Tx queue sent GTS
 *
 *	Checks if some of a queue set's Tx queues need to ring their doorbells
 *	to resume transmission after idling while they still have unprocessed
 *	descriptors.
 */
static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
			  unsigned int sleeping)
{
	if (sleeping & F_RSPD_TXQ0_GTS) {
		struct sge_txq *txq = &qs->txq[TXQ_ETH];

		/* wake the queue only if work remains outstanding */
		if (txq->cleaned + txq->in_use != txq->processed &&
		    !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
			set_bit(TXQ_RUNNING, &txq->flags);
			t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
				     V_EGRCNTX(txq->cntxt_id));
		}
	}

	if (sleeping & F_RSPD_TXQ1_GTS) {
		struct sge_txq *txq = &qs->txq[TXQ_OFLD];

		if (txq->cleaned + txq->in_use != txq->processed &&
		    !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
			set_bit(TXQ_RUNNING, &txq->flags);
			t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
				     V_EGRCNTX(txq->cntxt_id));
		}
	}
}

/**
 *	is_new_response - check if a response is newly written
 *	@r: the response descriptor
 *	@q: the response queue
 *
 *	Returns true if a response descriptor contains a yet unprocessed
 *	response.
*/
static inline int is_new_response(const struct rsp_desc *r,
				  const struct sge_rspq *q)
{
	/*
	 * HW flips the generation bit each time it wraps the ring; a
	 * descriptor is new only while its gen bit matches the SW gen.
	 */
	return (r->intr_gen & F_RSPD_GEN2) == q->gen;
}

/* Reset per-packet reassembly state kept on the response queue. */
static inline void clear_rspq_bufstate(struct sge_rspq * const q)
{
	q->pg_skb = NULL;
	q->rx_recycle_buf = 0;
}

#define RSPD_GTS_MASK  (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
			V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
			V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
			V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))

/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
#define NOMEM_INTR_DELAY 2500

/**
 * process_responses - process responses from an SGE response queue
 * @adap: the adapter
 * @qs: the queue set to which the response queue belongs
 * @budget: how many responses can be processed in this round
 *
 * Process responses from an SGE response queue up to the supplied budget.
 * Responses include received packets as well as credits and other events
 * for the queues that belong to the response queue's queue set.
 * A negative budget is effectively unlimited.
 *
 * Additionally choose the interrupt holdoff time for the next interrupt
 * on this queue.  If the system is under memory shortage use a fairly
 * long delay to help recovery.
*/
static int process_responses(struct adapter *adap, struct sge_qset *qs,
			     int budget)
{
	struct sge_rspq *q = &qs->rspq;
	struct rsp_desc *r = &q->desc[q->cidx];
	int budget_left = budget;
	unsigned int sleeping = 0;
	struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
	int ngathered = 0;

	q->next_holdoff = q->holdoff_tmr;

	while (likely(budget_left && is_new_response(r, q))) {
		int packet_complete, eth, ethpad = 2;
		int lro = !!(qs->netdev->features & NETIF_F_GRO);
		struct sk_buff *skb = NULL;
		u32 len, flags;
		__be32 rss_hi, rss_lo;

		/* Ensure the descriptor body is read after the gen bit. */
		rmb();
		eth = r->rss_hdr.opcode == CPL_RX_PKT;
		rss_hi = *(const __be32 *)r;
		rss_lo = r->rss_hdr.rss_hash_val;
		flags = ntohl(r->flags);

		if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
			/* Async notification: wrap the raw response in an skb. */
			skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
			if (!skb)
				goto no_mem;

			memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
			skb->data[0] = CPL_ASYNC_NOTIF;
			rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
			q->async_notif++;
		} else if (flags & F_RSPD_IMM_DATA_VALID) {
			/* Small packet carried inline in the response itself. */
			skb = get_imm_packet(r);
			if (unlikely(!skb)) {
no_mem:
				q->next_holdoff = NOMEM_INTR_DELAY;
				q->nomem++;
				/* consume one credit since we tried */
				budget_left--;
				break;
			}
			q->imm_data++;
			ethpad = 0;
		} else if ((len = ntohl(r->len_cq)) != 0) {
			struct sge_fl *fl;

			/* GRO only for Ethernet TCP packets. */
			lro &= eth && is_eth_tcp(rss_hi);

			fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
			if (fl->use_pages) {
				void *addr = fl->sdesc[fl->cidx].pg_chunk.va;

				prefetch(addr);
#if L1_CACHE_BYTES < 128
				prefetch(addr + L1_CACHE_BYTES);
#endif
				__refill_fl(adap, fl);
				if (lro > 0) {
					lro_add_page(adap, qs, fl,
						     G_RSPD_LEN(len),
						     flags & F_RSPD_EOP);
					goto next_fl;
				}

				skb = get_packet_pg(adap, fl, q,
						    G_RSPD_LEN(len),
						    eth ?
						    SGE_RX_DROP_THRES : 0);
				q->pg_skb = skb;
			} else
				skb = get_packet(adap, fl, G_RSPD_LEN(len),
						 eth ? SGE_RX_DROP_THRES : 0);
			if (unlikely(!skb)) {
				/* Only Ethernet packets may be dropped. */
				if (!eth)
					goto no_mem;
				q->rx_drops++;
			} else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
				__skb_pull(skb, 2);
next_fl:
			if (++fl->cidx == fl->size)
				fl->cidx = 0;
		} else
			q->pure_rsps++;

		if (flags & RSPD_CTRL_MASK) {
			sleeping |= flags & RSPD_GTS_MASK;
			handle_rsp_cntrl_info(qs, flags);
		}

		/* Advance the ring; flip the gen bit on wrap. */
		r++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->gen ^= 1;
			r = q->desc;
		}
		prefetch(r);

		/* Return accumulated credits to HW every quarter ring. */
		if (++q->credits >= (q->size / 4)) {
			refill_rspq(adap, q, q->credits);
			q->credits = 0;
		}

		packet_complete = flags &
				  (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
				   F_RSPD_ASYNC_NOTIF);

		if (skb != NULL && packet_complete) {
			if (eth)
				rx_eth(adap, q, skb, ethpad, lro);
			else {
				q->offload_pkts++;
				/* Preserve the RSS info in csum & priority */
				skb->csum = rss_hi;
				skb->priority = rss_lo;
				ngathered = rx_offload(&adap->tdev, q, skb,
						       offload_skbs,
						       ngathered);
			}

			if (flags & F_RSPD_EOP)
				clear_rspq_bufstate(q);
		}
		--budget_left;
	}

	deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
	if (sleeping)
		check_ring_db(adap, qs, sleeping);

	smp_mb();		/* commit Tx queue .processed updates */
	if (unlikely(qs->txq_stopped != 0))
		restart_tx(qs);

	budget -= budget_left;
	return budget;
}

/* True if @r carries neither immediate data nor an async notification. */
static inline int is_pure_response(const struct rsp_desc *r)
{
	__be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);

	return (n | r->len_cq) == 0;
}

/**
 * napi_rx_handler - the NAPI handler for Rx processing
 * @napi: the napi instance
 * @budget: how many packets we can process in this round
 *
 * Handler for new data events when using NAPI.
*/
static int napi_rx_handler(struct napi_struct *napi, int budget)
{
	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
	struct adapter *adap = qs->adap;
	int work_done = process_responses(adap, qs, budget);

	if (likely(work_done < budget)) {
		napi_complete(napi);

		/*
		 * Because we don't atomically flush the following
		 * write it is possible that in very rare cases it can
		 * reach the device in a way that races with a new
		 * response being written plus an error interrupt
		 * causing the NAPI interrupt handler below to return
		 * unhandled status to the OS.  To protect against
		 * this would require flushing the write and doing
		 * both the write and the flush with interrupts off.
		 * Way too expensive and unjustifiable given the
		 * rarity of the race.
		 *
		 * The race cannot happen at all with MSI-X.
		 */
		t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
			     V_NEWTIMER(qs->rspq.next_holdoff) |
			     V_NEWINDEX(qs->rspq.cidx));
	}
	return work_done;
}

/*
 * Returns true if the device is already scheduled for polling.
 */
static inline int napi_is_scheduled(struct napi_struct *napi)
{
	return test_bit(NAPI_STATE_SCHED, &napi->state);
}

/**
 * process_pure_responses - process pure responses from a response queue
 * @adap: the adapter
 * @qs: the queue set owning the response queue
 * @r: the first pure response to process
 *
 * A simpler version of process_responses() that handles only pure (i.e.,
 * non data-carrying) responses.  Such respones are too light-weight to
 * justify calling a softirq under NAPI, so we handle them specially in
 * the interrupt handler.  The function is called with a pointer to a
 * response, which the caller must ensure is a valid pure response.
 *
 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
*/
static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
				  struct rsp_desc *r)
{
	struct sge_rspq *q = &qs->rspq;
	unsigned int sleeping = 0;

	do {
		u32 flags = ntohl(r->flags);

		/* Advance the ring; flip the gen bit on wrap. */
		r++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->gen ^= 1;
			r = q->desc;
		}
		prefetch(r);

		if (flags & RSPD_CTRL_MASK) {
			sleeping |= flags & RSPD_GTS_MASK;
			handle_rsp_cntrl_info(qs, flags);
		}

		q->pure_rsps++;
		if (++q->credits >= (q->size / 4)) {
			refill_rspq(adap, q, q->credits);
			q->credits = 0;
		}
		if (!is_new_response(r, q))
			break;
		/* Read the descriptor body only after seeing the gen bit. */
		rmb();
	} while (is_pure_response(r));

	if (sleeping)
		check_ring_db(adap, qs, sleeping);

	smp_mb();		/* commit Tx queue .processed updates */
	if (unlikely(qs->txq_stopped != 0))
		restart_tx(qs);

	/* Non-zero iff we stopped at a valid data-carrying response. */
	return is_new_response(r, q);
}

/**
 * handle_responses - decide what to do with new responses in NAPI mode
 * @adap: the adapter
 * @q: the response queue
 *
 * This is used by the NAPI interrupt handlers to decide what to do with
 * new SGE responses.  If there are no new responses it returns -1.  If
 * there are new responses and they are pure (i.e., non-data carrying)
 * it handles them straight in hard interrupt context as they are very
 * cheap and don't deliver any packets.  Finally, if there are any data
 * signaling responses it schedules the NAPI handler.  Returns 1 if it
 * schedules NAPI, 0 if all new responses were pure.
 *
 * The caller must ascertain NAPI is not already running.
 */
static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
{
	struct sge_qset *qs = rspq_to_qset(q);
	struct rsp_desc *r = &q->desc[q->cidx];

	if (!is_new_response(r, q))
		return -1;
	rmb();
	if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
		/* All responses were pure: re-arm the interrupt via GTS. */
		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
		return 0;
	}
	napi_schedule(&qs->napi);
	return 1;
}

/*
 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
 * (i.e., response queue serviced in hard interrupt).
*/
static irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
{
	struct sge_qset *qs = cookie;
	struct adapter *adap = qs->adap;
	struct sge_rspq *q = &qs->rspq;

	spin_lock(&q->lock);
	/* Unlimited budget; count spurious interrupts. */
	if (process_responses(adap, qs, -1) == 0)
		q->unhandled_irqs++;
	t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
		     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
	spin_unlock(&q->lock);
	return IRQ_HANDLED;
}

/*
 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
 * (i.e., response queue serviced by NAPI polling).
 */
static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
{
	struct sge_qset *qs = cookie;
	struct sge_rspq *q = &qs->rspq;

	spin_lock(&q->lock);

	if (handle_responses(qs->adap, q) < 0)
		q->unhandled_irqs++;
	spin_unlock(&q->lock);
	return IRQ_HANDLED;
}

/*
 * The non-NAPI MSI interrupt handler.  This needs to handle data events from
 * SGE response queues as well as error and other async events as they all use
 * the same MSI vector.  We use one SGE response queue per port in this mode
 * and protect all response queues with queue 0's lock.
*/
static irqreturn_t t3_intr_msi(int irq, void *cookie)
{
	int new_packets = 0;
	struct adapter *adap = cookie;
	struct sge_rspq *q = &adap->sge.qs[0].rspq;

	/* Queue 0's lock protects all response queues in MSI mode. */
	spin_lock(&q->lock);

	if (process_responses(adap, &adap->sge.qs[0], -1)) {
		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
			     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
		new_packets = 1;
	}

	if (adap->params.nports == 2 &&
	    process_responses(adap, &adap->sge.qs[1], -1)) {
		struct sge_rspq *q1 = &adap->sge.qs[1].rspq;

		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
			     V_NEWTIMER(q1->next_holdoff) |
			     V_NEWINDEX(q1->cidx));
		new_packets = 1;
	}

	/* No data events: it may have been a slow-path (async/error) event. */
	if (!new_packets && t3_slow_intr_handler(adap) == 0)
		q->unhandled_irqs++;

	spin_unlock(&q->lock);
	return IRQ_HANDLED;
}

/* Schedule NAPI for @qs if it has new responses and isn't already polling. */
static int rspq_check_napi(struct sge_qset *qs)
{
	struct sge_rspq *q = &qs->rspq;

	if (!napi_is_scheduled(&qs->napi) &&
	    is_new_response(&q->desc[q->cidx], q)) {
		napi_schedule(&qs->napi);
		return 1;
	}
	return 0;
}

/*
 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
 * by NAPI polling).  Handles data events from SGE response queues as well as
 * error and other async events as they all use the same MSI vector.  We use
 * one SGE response queue per port in this mode and protect all response
 * queues with queue 0's lock.
 */
static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
{
	int new_packets;
	struct adapter *adap = cookie;
	struct sge_rspq *q = &adap->sge.qs[0].rspq;

	spin_lock(&q->lock);

	new_packets = rspq_check_napi(&adap->sge.qs[0]);
	if (adap->params.nports == 2)
		new_packets += rspq_check_napi(&adap->sge.qs[1]);
	if (!new_packets && t3_slow_intr_handler(adap) == 0)
		q->unhandled_irqs++;

	spin_unlock(&q->lock);
	return IRQ_HANDLED;
}

/*
 * A helper function that processes responses and issues GTS.
*/
static inline int process_responses_gts(struct adapter *adap,
					struct sge_rspq *rq)
{
	int work;

	work = process_responses(adap, rspq_to_qset(rq), -1);
	/* Re-arm the queue's interrupt and publish the new consumer index. */
	t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
		     V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
	return work;
}

/*
 * The legacy INTx interrupt handler.  This needs to handle data events from
 * SGE response queues as well as error and other async events as they all use
 * the same interrupt pin.  We use one SGE response queue per port in this mode
 * and protect all response queues with queue 0's lock.
 */
static irqreturn_t t3_intr(int irq, void *cookie)
{
	int work_done, w0, w1;
	struct adapter *adap = cookie;
	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
	struct sge_rspq *q1 = &adap->sge.qs[1].rspq;

	spin_lock(&q0->lock);

	w0 = is_new_response(&q0->desc[q0->cidx], q0);
	w1 = adap->params.nports == 2 &&
	    is_new_response(&q1->desc[q1->cidx], q1);

	if (likely(w0 | w1)) {
		/* Ack the INTx before servicing; read back to flush. */
		t3_write_reg(adap, A_PL_CLI, 0);
		t3_read_reg(adap, A_PL_CLI);	/* flush */

		if (likely(w0))
			process_responses_gts(adap, q0);

		if (w1)
			process_responses_gts(adap, q1);

		work_done = w0 | w1;
	} else
		work_done = t3_slow_intr_handler(adap);

	spin_unlock(&q0->lock);
	return IRQ_RETVAL(work_done != 0);
}

/*
 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
 * Handles data events from SGE response queues as well as error and other
 * async events as they all use the same interrupt pin.  We use one SGE
 * response queue per port in this mode and protect all response queues with
 * queue 0's lock.
*/
static irqreturn_t t3b_intr(int irq, void *cookie)
{
	u32 map;
	struct adapter *adap = cookie;
	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;

	t3_write_reg(adap, A_PL_CLI, 0);
	/* T3B reports which queues (and errors) raised the interrupt. */
	map = t3_read_reg(adap, A_SG_DATA_INTR);

	if (unlikely(!map))	/* shared interrupt, most likely */
		return IRQ_NONE;

	spin_lock(&q0->lock);

	if (unlikely(map & F_ERRINTR))
		t3_slow_intr_handler(adap);

	if (likely(map & 1))
		process_responses_gts(adap, q0);

	if (map & 2)
		process_responses_gts(adap, &adap->sge.qs[1].rspq);

	spin_unlock(&q0->lock);
	return IRQ_HANDLED;
}

/*
 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
 * Handles data events from SGE response queues as well as error and other
 * async events as they all use the same interrupt pin.  We use one SGE
 * response queue per port in this mode and protect all response queues with
 * queue 0's lock.
 */
static irqreturn_t t3b_intr_napi(int irq, void *cookie)
{
	u32 map;
	struct adapter *adap = cookie;
	struct sge_qset *qs0 = &adap->sge.qs[0];
	struct sge_rspq *q0 = &qs0->rspq;

	t3_write_reg(adap, A_PL_CLI, 0);
	map = t3_read_reg(adap, A_SG_DATA_INTR);

	if (unlikely(!map))	/* shared interrupt, most likely */
		return IRQ_NONE;

	spin_lock(&q0->lock);

	if (unlikely(map & F_ERRINTR))
		t3_slow_intr_handler(adap);

	if (likely(map & 1))
		napi_schedule(&qs0->napi);

	if (map & 2)
		napi_schedule(&adap->sge.qs[1].napi);

	spin_unlock(&q0->lock);
	return IRQ_HANDLED;
}

/**
 * t3_intr_handler - select the top-level interrupt handler
 * @adap: the adapter
 * @polling: whether using NAPI to service response queues
 *
 * Selects the top-level interrupt handler based on the type of interrupts
 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
 * response queues.
 */
irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
{
	if (adap->flags & USING_MSIX)
		return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
	if (adap->flags & USING_MSI)
		return polling ? t3_intr_msi_napi : t3_intr_msi;
	if (adap->params.rev > 0)
		return polling ? t3b_intr_napi : t3b_intr;
	return t3_intr;
}

#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		    F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		    V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		    F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		    F_HIRCQPARITYERROR)
#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
		      F_RSPQDISABLED)

/**
 * t3_sge_err_intr_handler - SGE async event interrupt handler
 * @adapter: the adapter
 *
 * Interrupt handler for SGE asynchronous (non-data) events.
 */
void t3_sge_err_intr_handler(struct adapter *adapter)
{
	unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
				 ~F_FLEMPTY;

	if (status & SGE_PARERR)
		CH_ALERT(adapter, "SGE parity error (0x%x)\n",
			 status & SGE_PARERR);
	if (status & SGE_FRAMINGERR)
		CH_ALERT(adapter, "SGE framing error (0x%x)\n",
			 status & SGE_FRAMINGERR);

	if (status & F_RSPQCREDITOVERFOW)
		CH_ALERT(adapter, "SGE response queue credit overflow\n");

	if (status & F_RSPQDISABLED) {
		v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);

		CH_ALERT(adapter,
			 "packet delivered to disabled response queue "
			 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
	}

	/* Doorbell drop/full/empty conditions are recovered in process
	 * context via the adapter's work queue. */
	if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
		queue_work(cxgb3_wq, &adapter->db_drop_task);

	if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
		queue_work(cxgb3_wq, &adapter->db_full_task);

	if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
		queue_work(cxgb3_wq, &adapter->db_empty_task);

	t3_write_reg(adapter, A_SG_INT_CAUSE, status);
	if (status & SGE_FATALERR)
		t3_fatal_err(adapter);
}

/**
 * sge_timer_tx - perform periodic maintenance of an SGE qset
 * @data: the SGE queue set to maintain
 *
 * Runs periodically from a timer to perform maintenance of an SGE queue
 * set.  It performs two tasks:
 *
 * Cleans up any completed Tx descriptors that may still be pending.
* Normal descriptor cleanup happens when new packets are added to a Tx
 * queue so this timer is relatively infrequent and does any cleanup only
 * if the Tx queue has not seen any new packets in a while.  We make a
 * best effort attempt to reclaim descriptors, in that we don't wait
 * around if we cannot get a queue's lock (which most likely is because
 * someone else is queueing new packets and so will also handle the clean
 * up).  Since control queues use immediate data exclusively we don't
 * bother cleaning them up here.
 *
 */
static void sge_timer_tx(unsigned long data)
{
	struct sge_qset *qs = (struct sge_qset *)data;
	struct port_info *pi = netdev_priv(qs->netdev);
	struct adapter *adap = pi->adapter;
	unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
	unsigned long next_period;

	/* Best effort: skip reclaim if the Tx lock is contended. */
	if (__netif_tx_trylock(qs->tx_q)) {
		tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
						    TX_RECLAIM_TIMER_CHUNK);
		__netif_tx_unlock(qs->tx_q);
	}

	if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
		tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
						     TX_RECLAIM_TIMER_CHUNK);
		spin_unlock(&qs->txq[TXQ_OFLD].lock);
	}

	/* Re-run sooner the more descriptors were left unreclaimed. */
	next_period = TX_RECLAIM_PERIOD >>
		      (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
		      TX_RECLAIM_TIMER_CHUNK);
	mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
}

/*
 * sge_timer_rx - perform periodic maintenance of an SGE qset
 * @data: the SGE queue set to maintain
 *
 * a) Replenishes Rx queues that have run out due to memory shortage.
 * Normally new Rx buffers are added when existing ones are consumed but
 * when out of memory a queue can become empty.  We try to add only a few
 * buffers here, the queue will be replenished fully as these new buffers
 * are used up if memory shortage has subsided.
 *
 * b) Return coalesced response queue credits in case a response queue is
 * starved.
*
 */
static void sge_timer_rx(unsigned long data)
{
	spinlock_t *lock;
	struct sge_qset *qs = (struct sge_qset *)data;
	struct port_info *pi = netdev_priv(qs->netdev);
	struct adapter *adap = pi->adapter;
	u32 status;

	/* Pre-rev-1 parts share queue 0's response queue lock. */
	lock = adap->params.rev > 0 ?
		&qs->rspq.lock : &adap->sge.qs[0].rspq.lock;

	if (!spin_trylock_irq(lock))
		goto out;

	/* NAPI owns the queue while scheduled; don't touch it here. */
	if (napi_is_scheduled(&qs->napi))
		goto unlock;

	if (adap->params.rev < 4) {
		status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);

		if (status & (1 << qs->rspq.cntxt_id)) {
			/* Queue starved: return one coalesced credit and
			 * clear the starvation status bit. */
			qs->rspq.starved++;
			if (qs->rspq.credits) {
				qs->rspq.credits--;
				refill_rspq(adap, &qs->rspq, 1);
				qs->rspq.restarted++;
				t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
					     1 << qs->rspq.cntxt_id);
			}
		}
	}

	if (qs->fl[0].credits < qs->fl[0].size)
		__refill_fl(adap, &qs->fl[0]);
	if (qs->fl[1].credits < qs->fl[1].size)
		__refill_fl(adap, &qs->fl[1]);

unlock:
	spin_unlock_irq(lock);
out:
	mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
}

/**
 * t3_update_qset_coalesce - update coalescing settings for a queue set
 * @qs: the SGE queue set
 * @p: new queue set parameters
 *
 * Update the coalescing settings for an SGE queue set.  Nothing is done
 * if the queue set is not initialized yet.
 */
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
{
	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);	/* can't be 0 */
	qs->rspq.polling = p->polling;
	qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
}

/**
 * t3_sge_alloc_qset - initialize an SGE queue set
 * @adapter: the adapter
 * @id: the queue set id
 * @nports: how many Ethernet ports will be using this queue set
 * @irq_vec_idx: the IRQ vector index for response queue interrupts
 * @p: configuration parameters for this queue set
 * @ntxq: number of Tx queues for the queue set
 * @dev: net device associated with this queue set
 * @netdevq: net device TX queue associated with this queue set
 *
 * Allocate resources and initialize an SGE queue set.
A queue set
 * comprises a response queue, two Rx free-buffer queues, and up to 3
 * Tx queues.  The Tx queues are assigned roles in the order Ethernet
 * queue, offload queue, and control queue.
 */
int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
		      int irq_vec_idx, const struct qset_params *p,
		      int ntxq, struct net_device *dev,
		      struct netdev_queue *netdevq)
{
	int i, avail, ret = -ENOMEM;
	struct sge_qset *q = &adapter->sge.qs[id];

	init_qset_cntxt(q, id);
	setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q);
	setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q);

	/* Allocate the descriptor rings: two free lists, one response
	 * queue, and the requested number of Tx queues. */
	q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
				   sizeof(struct rx_desc),
				   sizeof(struct rx_sw_desc),
				   &q->fl[0].phys_addr, &q->fl[0].sdesc);
	if (!q->fl[0].desc)
		goto err;

	q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
				   sizeof(struct rx_desc),
				   sizeof(struct rx_sw_desc),
				   &q->fl[1].phys_addr, &q->fl[1].sdesc);
	if (!q->fl[1].desc)
		goto err;

	q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
				  sizeof(struct rsp_desc), 0,
				  &q->rspq.phys_addr, NULL);
	if (!q->rspq.desc)
		goto err;

	for (i = 0; i < ntxq; ++i) {
		/*
		 * The control queue always uses immediate data so does not
		 * need to keep track of any sk_buffs.
		 */
		size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);

		q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
					    sizeof(struct tx_desc), sz,
					    &q->txq[i].phys_addr,
					    &q->txq[i].sdesc);
		if (!q->txq[i].desc)
			goto err;

		q->txq[i].gen = 1;
		q->txq[i].size = p->txq_size[i];
		spin_lock_init(&q->txq[i].lock);
		skb_queue_head_init(&q->txq[i].sendq);
	}

	tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
		     (unsigned long)q);
	tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
		     (unsigned long)q);

	q->fl[0].gen = q->fl[1].gen = 1;
	q->fl[0].size = p->fl_size;
	q->fl[1].size = p->jumbo_size;

	q->rspq.gen = 1;
	q->rspq.size = p->rspq_size;
	spin_lock_init(&q->rspq.lock);
	skb_queue_head_init(&q->rspq.rx_queue);

	q->txq[TXQ_ETH].stop_thres = nports *
	    flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);

#if FL0_PG_CHUNK_SIZE > 0
	q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
#else
	q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
#endif
#if FL1_PG_CHUNK_SIZE > 0
	q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
#else
	q->fl[1].buf_size = is_offload(adapter) ?
		(16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
#endif

	q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
	q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
	q->fl[0].order = FL0_PG_ORDER;
	q->fl[1].order = FL1_PG_ORDER;
	q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
	q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;

	spin_lock_irq(&adapter->sge.reg_lock);

	/* FL threshold comparison uses < */
	ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
				   q->rspq.phys_addr, q->rspq.size,
				   q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
	if (ret)
		goto err_unlock;

	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
		ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
					  q->fl[i].phys_addr, q->fl[i].size,
					  q->fl[i].buf_size - SGE_PG_RSVD,
					  p->cong_thres, 1, 0);
		if (ret)
			goto err_unlock;
	}

	ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
				 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
				 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
				 1, 0);
	if (ret)
		goto err_unlock;

	if (ntxq > 1) {
		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
					 USE_GTS, SGE_CNTXT_OFLD, id,
					 q->txq[TXQ_OFLD].phys_addr,
					 q->txq[TXQ_OFLD].size, 0, 1, 0);
		if (ret)
			goto err_unlock;
	}

	if (ntxq > 2) {
		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
					 SGE_CNTXT_CTRL, id,
					 q->txq[TXQ_CTRL].phys_addr,
					 q->txq[TXQ_CTRL].size,
					 q->txq[TXQ_CTRL].token, 1, 0);
		if (ret)
			goto err_unlock;
	}

	spin_unlock_irq(&adapter->sge.reg_lock);

	q->adap = adapter;
	q->netdev = dev;
	q->tx_q = netdevq;
	t3_update_qset_coalesce(q, p);

	/* Populate the free lists; queue 0 must get at least one buffer. */
	avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
			  GFP_KERNEL | __GFP_COMP);
	if (!avail) {
		CH_ALERT(adapter, "free list queue 0 initialization failed\n");
		goto err;
	}
	if (avail < q->fl[0].size)
		CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
			avail);

	avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
			  GFP_KERNEL | __GFP_COMP);
	if (avail < q->fl[1].size)
		CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
			avail);
	refill_rspq(adapter, &q->rspq, q->rspq.size - 1);

	t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
		     V_NEWTIMER(q->rspq.holdoff_tmr));

	return 0;

err_unlock:
	spin_unlock_irq(&adapter->sge.reg_lock);
err:
	t3_free_qset(adapter, q);
	return ret;
}

/**
 * t3_start_sge_timers - start SGE timer call backs
 * @adap: the adapter
 *
 * Starts each SGE queue set's timer call back
 */
void t3_start_sge_timers(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; ++i) {
		struct sge_qset *q = &adap->sge.qs[i];

		/* A set .function indicates the qset was initialized. */
		if (q->tx_reclaim_timer.function)
			mod_timer(&q->tx_reclaim_timer,
				  jiffies + TX_RECLAIM_PERIOD);
		if (q->rx_reclaim_timer.function)
			mod_timer(&q->rx_reclaim_timer,
				  jiffies + RX_RECLAIM_PERIOD);
	}
}

/**
 * t3_stop_sge_timers - stop SGE timer call backs
 * @adap: the adapter
 *
 * Stops each SGE queue set's timer call back
 */
void t3_stop_sge_timers(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; ++i) {
		struct sge_qset *q = &adap->sge.qs[i];

		if (q->tx_reclaim_timer.function)
			del_timer_sync(&q->tx_reclaim_timer);
		if (q->rx_reclaim_timer.function)
			del_timer_sync(&q->rx_reclaim_timer);
	}
}

/**
 * t3_free_sge_resources - free SGE resources
 * @adap: the adapter
 *
 * Frees resources used by the SGE queue sets.
 */
void t3_free_sge_resources(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; ++i)
		t3_free_qset(adap, &adap->sge.qs[i]);
}

/**
 * t3_sge_start - enable SGE
 * @adap: the adapter
 *
 * Enables the SGE for DMAs.  This is the last step in starting packet
 * transfers.
 */
void t3_sge_start(struct adapter *adap)
{
	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
}

/**
 * t3_sge_stop - disable SGE operation
 * @adap: the adapter
 *
 * Disables the DMA engine.  This can be called in emergencies (e.g.,
 * from error interrupts) or from normal process context.  In the latter
 * case it also disables any pending queue restart tasklets.
Note that
 * if it is called in interrupt context it cannot disable the restart
 * tasklets as it cannot wait, however the tasklets will have no effect
 * since the doorbells are disabled and the driver will call this again
 * later from process context, at which time the tasklets will be stopped
 * if they are still running.
 */
void t3_sge_stop(struct adapter *adap)
{
	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
	if (!in_interrupt()) {
		int i;

		for (i = 0; i < SGE_QSETS; ++i) {
			struct sge_qset *qs = &adap->sge.qs[i];

			tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
			tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
		}
	}
}

/**
 * t3_sge_init - initialize SGE
 * @adap: the adapter
 * @p: the SGE parameters
 *
 * Performs SGE initialization needed every time after a chip reset.
 * We do not initialize any of the queue sets here, instead the driver
 * top-level must request those individually.  We also do not enable DMA
 * here, that should be done after the queues have been set up.
 */
void t3_sge_init(struct adapter *adap, struct sge_params *p)
{
	unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);

	/* V_PKTSHIFT(2): insert a 2-byte pad before each Rx packet so the
	 * IP header lands word-aligned after the 14-byte Ethernet header. */
	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
	    F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
	    V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
	    V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
#if SGE_NUM_GENBITS == 1
	ctrl |= F_EGRGENCTRL;
#endif
	if (adap->params.rev > 0) {
		if (!(adap->flags & (USING_MSIX | USING_MSI)))
			ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
	}
	t3_write_reg(adap, A_SG_CONTROL, ctrl);
	t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
		     V_LORCQDRBTHRSH(512));
	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
		     adap->params.rev < T3_REV_C ? 1000 : 500);
	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
	t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
	t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
}

/**
 * t3_sge_prep - one-time SGE initialization
 * @adap: the associated adapter
 * @p: SGE parameters
 *
 * Performs one-time initialization of SGE SW state.  Includes determining
 * defaults for the assorted SGE parameters, which admins can change until
 * they are used to initialize the SGE.
 */
void t3_sge_prep(struct adapter *adap, struct sge_params *p)
{
	int i;

	p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	for (i = 0; i < SGE_QSETS; ++i) {
		struct qset_params *q = p->qset + i;

		/* Default per-qset sizes; polling (NAPI) on rev > 0 parts. */
		q->polling = adap->params.rev > 0;
		q->coalesce_usecs = 5;
		q->rspq_size = 1024;
		q->fl_size = 1024;
		q->jumbo_size = 512;
		q->txq_size[TXQ_ETH] = 1024;
		q->txq_size[TXQ_OFLD] = 1024;
		q->txq_size[TXQ_CTRL] = 256;
		q->cong_thres = 0;
	}

	spin_lock_init(&adap->sge.reg_lock);
}
gpl-2.0