repo_name
string
path
string
copies
string
size
string
content
string
license
string
vwmofo/vigor_mofokernel
arch/x86/kernel/devicetree.c
2218
9159
/* * Architecture specific OF callbacks. */ #include <linux/bootmem.h> #include <linux/io.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/of.h> #include <linux/of_fdt.h> #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/of_irq.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/of_pci.h> #include <linux/initrd.h> #include <asm/hpet.h> #include <asm/irq_controller.h> #include <asm/apic.h> #include <asm/pci_x86.h> __initdata u64 initial_dtb; char __initdata cmd_line[COMMAND_LINE_SIZE]; static LIST_HEAD(irq_domains); static DEFINE_RAW_SPINLOCK(big_irq_lock); int __initdata of_ioapic; #ifdef CONFIG_X86_IO_APIC static void add_interrupt_host(struct irq_domain *ih) { unsigned long flags; raw_spin_lock_irqsave(&big_irq_lock, flags); list_add(&ih->l, &irq_domains); raw_spin_unlock_irqrestore(&big_irq_lock, flags); } #endif static struct irq_domain *get_ih_from_node(struct device_node *controller) { struct irq_domain *ih, *found = NULL; unsigned long flags; raw_spin_lock_irqsave(&big_irq_lock, flags); list_for_each_entry(ih, &irq_domains, l) { if (ih->controller == controller) { found = ih; break; } } raw_spin_unlock_irqrestore(&big_irq_lock, flags); return found; } unsigned int irq_create_of_mapping(struct device_node *controller, const u32 *intspec, unsigned int intsize) { struct irq_domain *ih; u32 virq, type; int ret; ih = get_ih_from_node(controller); if (!ih) return 0; ret = ih->xlate(ih, intspec, intsize, &virq, &type); if (ret) return 0; if (type == IRQ_TYPE_NONE) return virq; irq_set_irq_type(virq, type); return virq; } EXPORT_SYMBOL_GPL(irq_create_of_mapping); unsigned long pci_address_to_pio(phys_addr_t address) { /* * The ioport address can be directly used by inX / outX */ BUG_ON(address >= (1 << 16)); return (unsigned long)address; } EXPORT_SYMBOL_GPL(pci_address_to_pio); void __init early_init_dt_scan_chosen_arch(unsigned long node) { BUG(); } void __init early_init_dt_add_memory_arch(u64 base, 
u64 size) { BUG(); } void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) { return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS)); } #ifdef CONFIG_BLK_DEV_INITRD void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end) { initrd_start = (unsigned long)__va(start); initrd_end = (unsigned long)__va(end); initrd_below_start_ok = 1; } #endif void __init add_dtb(u64 data) { initial_dtb = data + offsetof(struct setup_data, data); } /* * CE4100 ids. Will be moved to machine_device_initcall() once we have it. */ static struct of_device_id __initdata ce4100_ids[] = { { .compatible = "intel,ce4100-cp", }, { .compatible = "isa", }, { .compatible = "pci", }, {}, }; static int __init add_bus_probe(void) { if (!of_have_populated_dt()) return 0; return of_platform_bus_probe(NULL, ce4100_ids, NULL); } module_init(add_bus_probe); #ifdef CONFIG_PCI static int x86_of_pci_irq_enable(struct pci_dev *dev) { struct of_irq oirq; u32 virq; int ret; u8 pin; ret = pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); if (ret) return ret; if (!pin) return 0; ret = of_irq_map_pci(dev, &oirq); if (ret) return ret; virq = irq_create_of_mapping(oirq.controller, oirq.specifier, oirq.size); if (virq == 0) return -EINVAL; dev->irq = virq; return 0; } static void x86_of_pci_irq_disable(struct pci_dev *dev) { } void __cpuinit x86_of_pci_init(void) { struct device_node *np; pcibios_enable_irq = x86_of_pci_irq_enable; pcibios_disable_irq = x86_of_pci_irq_disable; for_each_node_by_type(np, "pci") { const void *prop; struct pci_bus *bus; unsigned int bus_min; struct device_node *child; prop = of_get_property(np, "bus-range", NULL); if (!prop) continue; bus_min = be32_to_cpup(prop); bus = pci_find_bus(0, bus_min); if (!bus) { printk(KERN_ERR "Can't find a node for bus %s.\n", np->full_name); continue; } if (bus->self) bus->self->dev.of_node = np; else bus->dev.of_node = np; for_each_child_of_node(np, child) { struct pci_dev *dev; u32 devfn; prop = 
of_get_property(child, "reg", NULL); if (!prop) continue; devfn = (be32_to_cpup(prop) >> 8) & 0xff; dev = pci_get_slot(bus, devfn); if (!dev) continue; dev->dev.of_node = child; pci_dev_put(dev); } } } #endif static void __init dtb_setup_hpet(void) { #ifdef CONFIG_HPET_TIMER struct device_node *dn; struct resource r; int ret; dn = of_find_compatible_node(NULL, NULL, "intel,ce4100-hpet"); if (!dn) return; ret = of_address_to_resource(dn, 0, &r); if (ret) { WARN_ON(1); return; } hpet_address = r.start; #endif } static void __init dtb_lapic_setup(void) { #ifdef CONFIG_X86_LOCAL_APIC struct device_node *dn; struct resource r; int ret; dn = of_find_compatible_node(NULL, NULL, "intel,ce4100-lapic"); if (!dn) return; ret = of_address_to_resource(dn, 0, &r); if (WARN_ON(ret)) return; /* Did the boot loader setup the local APIC ? */ if (!cpu_has_apic) { if (apic_force_enable(r.start)) return; } smp_found_config = 1; pic_mode = 1; register_lapic_address(r.start); generic_processor_info(boot_cpu_physical_apicid, GET_APIC_VERSION(apic_read(APIC_LVR))); #endif } #ifdef CONFIG_X86_IO_APIC static unsigned int ioapic_id; static void __init dtb_add_ioapic(struct device_node *dn) { struct resource r; int ret; ret = of_address_to_resource(dn, 0, &r); if (ret) { printk(KERN_ERR "Can't obtain address from node %s.\n", dn->full_name); return; } mp_register_ioapic(++ioapic_id, r.start, gsi_top); } static void __init dtb_ioapic_setup(void) { struct device_node *dn; for_each_compatible_node(dn, NULL, "intel,ce4100-ioapic") dtb_add_ioapic(dn); if (nr_ioapics) { of_ioapic = 1; return; } printk(KERN_ERR "Error: No information about IO-APIC in OF.\n"); } #else static void __init dtb_ioapic_setup(void) {} #endif static void __init dtb_apic_setup(void) { dtb_lapic_setup(); dtb_ioapic_setup(); } #ifdef CONFIG_OF_FLATTREE static void __init x86_flattree_get_config(void) { u32 size, map_len; void *new_dtb; if (!initial_dtb) return; map_len = max(PAGE_SIZE - (initial_dtb & ~PAGE_MASK), 
(u64)sizeof(struct boot_param_header)); initial_boot_params = early_memremap(initial_dtb, map_len); size = be32_to_cpu(initial_boot_params->totalsize); if (map_len < size) { early_iounmap(initial_boot_params, map_len); initial_boot_params = early_memremap(initial_dtb, size); map_len = size; } new_dtb = alloc_bootmem(size); memcpy(new_dtb, initial_boot_params, size); early_iounmap(initial_boot_params, map_len); initial_boot_params = new_dtb; /* root level address cells */ of_scan_flat_dt(early_init_dt_scan_root, NULL); unflatten_device_tree(); } #else static inline void x86_flattree_get_config(void) { } #endif void __init x86_dtb_init(void) { x86_flattree_get_config(); if (!of_have_populated_dt()) return; dtb_setup_hpet(); dtb_apic_setup(); } #ifdef CONFIG_X86_IO_APIC struct of_ioapic_type { u32 out_type; u32 trigger; u32 polarity; }; static struct of_ioapic_type of_ioapic_type[] = { { .out_type = IRQ_TYPE_EDGE_RISING, .trigger = IOAPIC_EDGE, .polarity = 1, }, { .out_type = IRQ_TYPE_LEVEL_LOW, .trigger = IOAPIC_LEVEL, .polarity = 0, }, { .out_type = IRQ_TYPE_LEVEL_HIGH, .trigger = IOAPIC_LEVEL, .polarity = 1, }, { .out_type = IRQ_TYPE_EDGE_FALLING, .trigger = IOAPIC_EDGE, .polarity = 0, }, }; static int ioapic_xlate(struct irq_domain *id, const u32 *intspec, u32 intsize, u32 *out_hwirq, u32 *out_type) { struct mp_ioapic_gsi *gsi_cfg; struct io_apic_irq_attr attr; struct of_ioapic_type *it; u32 line, idx, type; if (intsize < 2) return -EINVAL; line = *intspec; idx = (u32) id->priv; gsi_cfg = mp_ioapic_gsi_routing(idx); *out_hwirq = line + gsi_cfg->gsi_base; intspec++; type = *intspec; if (type >= ARRAY_SIZE(of_ioapic_type)) return -EINVAL; it = of_ioapic_type + type; *out_type = it->out_type; set_io_apic_irq_attr(&attr, idx, line, it->trigger, it->polarity); return io_apic_setup_irq_pin_once(*out_hwirq, cpu_to_node(0), &attr); } static void __init ioapic_add_ofnode(struct device_node *np) { struct resource r; int i, ret; ret = of_address_to_resource(np, 0, &r); if 
(ret) { printk(KERN_ERR "Failed to obtain address for %s\n", np->full_name); return; } for (i = 0; i < nr_ioapics; i++) { if (r.start == mpc_ioapic_addr(i)) { struct irq_domain *id; id = kzalloc(sizeof(*id), GFP_KERNEL); BUG_ON(!id); id->controller = np; id->xlate = ioapic_xlate; id->priv = (void *)i; add_interrupt_host(id); return; } } printk(KERN_ERR "IOxAPIC at %s is not registered.\n", np->full_name); } void __init x86_add_irq_domains(void) { struct device_node *dp; if (!of_have_populated_dt()) return; for_each_node_with_property(dp, "interrupt-controller") { if (of_device_is_compatible(dp, "intel,ce4100-ioapic")) ioapic_add_ofnode(dp); } } #else void __init x86_add_irq_domains(void) { } #endif
gpl-2.0
glewarne/testing
drivers/i2c/busses/i2c-ali15x3.c
2474
15762
/* Copyright (c) 1999 Frodo Looijaard <frodol@dds.nl> and Philip Edelbrock <phil@netroedge.com> and Mark D. Studebaker <mdsxyz123@yahoo.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* This is the driver for the SMB Host controller on Acer Labs Inc. (ALI) M1541 and M1543C South Bridges. The M1543C is a South bridge for desktop systems. The M1533 is a South bridge for portable systems. They are part of the following ALI chipsets: "Aladdin Pro 2": Includes the M1621 Slot 1 North bridge with AGP and 100MHz CPU Front Side bus "Aladdin V": Includes the M1541 Socket 7 North bridge with AGP and 100MHz CPU Front Side bus "Aladdin IV": Includes the M1541 Socket 7 North bridge with host bus up to 83.3 MHz. For an overview of these chips see http://www.acerlabs.com The M1533/M1543C devices appear as FOUR separate devices on the PCI bus. An output of lspci will show something similar to the following: 00:02.0 USB Controller: Acer Laboratories Inc. M5237 00:03.0 Bridge: Acer Laboratories Inc. M7101 00:07.0 ISA bridge: Acer Laboratories Inc. M1533 00:0f.0 IDE interface: Acer Laboratories Inc. M5229 The SMB controller is part of the 7101 device, which is an ACPI-compliant Power Management Unit (PMU). The whole 7101 device has to be enabled for the SMB to work. You can't just enable the SMB alone. The SMB and the ACPI have separate I/O spaces. 
We make sure that the SMB is enabled. We leave the ACPI alone. This driver controls the SMB Host only. The SMB Slave controller on the M15X3 is not enabled. This driver does not use interrupts. */ /* Note: we assume there can only be one ALI15X3, with one SMBus interface */ #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/acpi.h> #include <linux/io.h> /* ALI15X3 SMBus address offsets */ #define SMBHSTSTS (0 + ali15x3_smba) #define SMBHSTCNT (1 + ali15x3_smba) #define SMBHSTSTART (2 + ali15x3_smba) #define SMBHSTCMD (7 + ali15x3_smba) #define SMBHSTADD (3 + ali15x3_smba) #define SMBHSTDAT0 (4 + ali15x3_smba) #define SMBHSTDAT1 (5 + ali15x3_smba) #define SMBBLKDAT (6 + ali15x3_smba) /* PCI Address Constants */ #define SMBCOM 0x004 #define SMBBA 0x014 #define SMBATPC 0x05B /* used to unlock xxxBA registers */ #define SMBHSTCFG 0x0E0 #define SMBSLVC 0x0E1 #define SMBCLK 0x0E2 #define SMBREV 0x008 /* Other settings */ #define MAX_TIMEOUT 200 /* times 1/100 sec */ #define ALI15X3_SMB_IOSIZE 32 /* this is what the Award 1004 BIOS sets them to on a ASUS P5A MB. We don't use these here. If the bases aren't set to some value we tell user to upgrade BIOS and we fail. 
*/ #define ALI15X3_SMB_DEFAULTBASE 0xE800 /* ALI15X3 address lock bits */ #define ALI15X3_LOCK 0x06 /* ALI15X3 command constants */ #define ALI15X3_ABORT 0x02 #define ALI15X3_T_OUT 0x04 #define ALI15X3_QUICK 0x00 #define ALI15X3_BYTE 0x10 #define ALI15X3_BYTE_DATA 0x20 #define ALI15X3_WORD_DATA 0x30 #define ALI15X3_BLOCK_DATA 0x40 #define ALI15X3_BLOCK_CLR 0x80 /* ALI15X3 status register bits */ #define ALI15X3_STS_IDLE 0x04 #define ALI15X3_STS_BUSY 0x08 #define ALI15X3_STS_DONE 0x10 #define ALI15X3_STS_DEV 0x20 /* device error */ #define ALI15X3_STS_COLL 0x40 /* collision or no response */ #define ALI15X3_STS_TERM 0x80 /* terminated by abort */ #define ALI15X3_STS_ERR 0xE0 /* all the bad error bits */ /* If force_addr is set to anything different from 0, we forcibly enable the device at the given address. */ static u16 force_addr; module_param(force_addr, ushort, 0); MODULE_PARM_DESC(force_addr, "Initialize the base address of the i2c controller"); static struct pci_driver ali15x3_driver; static unsigned short ali15x3_smba; static int ali15x3_setup(struct pci_dev *ALI15X3_dev) { u16 a; unsigned char temp; /* Check the following things: - SMB I/O address is initialized - Device is enabled - We can use the addresses */ /* Unlock the register. The data sheet says that the address registers are read-only if the lock bits are 1, but in fact the address registers are zero unless you clear the lock bits. 
*/ pci_read_config_byte(ALI15X3_dev, SMBATPC, &temp); if (temp & ALI15X3_LOCK) { temp &= ~ALI15X3_LOCK; pci_write_config_byte(ALI15X3_dev, SMBATPC, temp); } /* Determine the address of the SMBus area */ pci_read_config_word(ALI15X3_dev, SMBBA, &ali15x3_smba); ali15x3_smba &= (0xffff & ~(ALI15X3_SMB_IOSIZE - 1)); if (ali15x3_smba == 0 && force_addr == 0) { dev_err(&ALI15X3_dev->dev, "ALI15X3_smb region uninitialized " "- upgrade BIOS or use force_addr=0xaddr\n"); return -ENODEV; } if(force_addr) ali15x3_smba = force_addr & ~(ALI15X3_SMB_IOSIZE - 1); if (acpi_check_region(ali15x3_smba, ALI15X3_SMB_IOSIZE, ali15x3_driver.name)) return -EBUSY; if (!request_region(ali15x3_smba, ALI15X3_SMB_IOSIZE, ali15x3_driver.name)) { dev_err(&ALI15X3_dev->dev, "ALI15X3_smb region 0x%x already in use!\n", ali15x3_smba); return -ENODEV; } if(force_addr) { dev_info(&ALI15X3_dev->dev, "forcing ISA address 0x%04X\n", ali15x3_smba); if (PCIBIOS_SUCCESSFUL != pci_write_config_word(ALI15X3_dev, SMBBA, ali15x3_smba)) goto error; if (PCIBIOS_SUCCESSFUL != pci_read_config_word(ALI15X3_dev, SMBBA, &a)) goto error; if ((a & ~(ALI15X3_SMB_IOSIZE - 1)) != ali15x3_smba) { /* make sure it works */ dev_err(&ALI15X3_dev->dev, "force address failed - not supported?\n"); goto error; } } /* check if whole device is enabled */ pci_read_config_byte(ALI15X3_dev, SMBCOM, &temp); if ((temp & 1) == 0) { dev_info(&ALI15X3_dev->dev, "enabling SMBus device\n"); pci_write_config_byte(ALI15X3_dev, SMBCOM, temp | 0x01); } /* Is SMB Host controller enabled? */ pci_read_config_byte(ALI15X3_dev, SMBHSTCFG, &temp); if ((temp & 1) == 0) { dev_info(&ALI15X3_dev->dev, "enabling SMBus controller\n"); pci_write_config_byte(ALI15X3_dev, SMBHSTCFG, temp | 0x01); } /* set SMB clock to 74KHz as recommended in data sheet */ pci_write_config_byte(ALI15X3_dev, SMBCLK, 0x20); /* The interrupt routing for SMB is set up in register 0x77 in the 1533 ISA Bridge device, NOT in the 7101 device. 
Don't bother with finding the 1533 device and reading the register. if ((....... & 0x0F) == 1) dev_dbg(&ALI15X3_dev->dev, "ALI15X3 using Interrupt 9 for SMBus.\n"); */ pci_read_config_byte(ALI15X3_dev, SMBREV, &temp); dev_dbg(&ALI15X3_dev->dev, "SMBREV = 0x%X\n", temp); dev_dbg(&ALI15X3_dev->dev, "iALI15X3_smba = 0x%X\n", ali15x3_smba); return 0; error: release_region(ali15x3_smba, ALI15X3_SMB_IOSIZE); return -ENODEV; } /* Another internally used function */ static int ali15x3_transaction(struct i2c_adapter *adap) { int temp; int result = 0; int timeout = 0; dev_dbg(&adap->dev, "Transaction (pre): STS=%02x, CNT=%02x, CMD=%02x, " "ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb_p(SMBHSTSTS), inb_p(SMBHSTCNT), inb_p(SMBHSTCMD), inb_p(SMBHSTADD), inb_p(SMBHSTDAT0), inb_p(SMBHSTDAT1)); /* get status */ temp = inb_p(SMBHSTSTS); /* Make sure the SMBus host is ready to start transmitting */ /* Check the busy bit first */ if (temp & ALI15X3_STS_BUSY) { /* If the host controller is still busy, it may have timed out in the previous transaction, resulting in a "SMBus Timeout" Dev. I've tried the following to reset a stuck busy bit. 1. Reset the controller with an ABORT command. (this doesn't seem to clear the controller if an external device is hung) 2. Reset the controller and the other SMBus devices with a T_OUT command. (this clears the host busy bit if an external device is hung, but it comes back upon a new access to a device) 3. Disable and reenable the controller in SMBHSTCFG Worst case, nothing seems to work except power reset. */ /* Abort - reset the host controller */ /* Try resetting entire SMB bus, including other devices - This may not work either - it clears the BUSY bit but then the BUSY bit may come back on when you try and use the chip again. If that's the case you are stuck. 
*/ dev_info(&adap->dev, "Resetting entire SMB Bus to " "clear busy condition (%02x)\n", temp); outb_p(ALI15X3_T_OUT, SMBHSTCNT); temp = inb_p(SMBHSTSTS); } /* now check the error bits and the busy bit */ if (temp & (ALI15X3_STS_ERR | ALI15X3_STS_BUSY)) { /* do a clear-on-write */ outb_p(0xFF, SMBHSTSTS); if ((temp = inb_p(SMBHSTSTS)) & (ALI15X3_STS_ERR | ALI15X3_STS_BUSY)) { /* this is probably going to be correctable only by a power reset as one of the bits now appears to be stuck */ /* This may be a bus or device with electrical problems. */ dev_err(&adap->dev, "SMBus reset failed! (0x%02x) - " "controller or device on bus is probably hung\n", temp); return -EBUSY; } } else { /* check and clear done bit */ if (temp & ALI15X3_STS_DONE) { outb_p(temp, SMBHSTSTS); } } /* start the transaction by writing anything to the start register */ outb_p(0xFF, SMBHSTSTART); /* We will always wait for a fraction of a second! */ timeout = 0; do { msleep(1); temp = inb_p(SMBHSTSTS); } while ((!(temp & (ALI15X3_STS_ERR | ALI15X3_STS_DONE))) && (timeout++ < MAX_TIMEOUT)); /* If the SMBus is still busy, we give up */ if (timeout > MAX_TIMEOUT) { result = -ETIMEDOUT; dev_err(&adap->dev, "SMBus Timeout!\n"); } if (temp & ALI15X3_STS_TERM) { result = -EIO; dev_dbg(&adap->dev, "Error: Failed bus transaction\n"); } /* Unfortunately the ALI SMB controller maps "no response" and "bus collision" into a single bit. No response is the usual case so don't do a printk. This means that bus collisions go unreported. 
*/ if (temp & ALI15X3_STS_COLL) { result = -ENXIO; dev_dbg(&adap->dev, "Error: no response or bus collision ADD=%02x\n", inb_p(SMBHSTADD)); } /* haven't ever seen this */ if (temp & ALI15X3_STS_DEV) { result = -EIO; dev_err(&adap->dev, "Error: device error\n"); } dev_dbg(&adap->dev, "Transaction (post): STS=%02x, CNT=%02x, CMD=%02x, " "ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb_p(SMBHSTSTS), inb_p(SMBHSTCNT), inb_p(SMBHSTCMD), inb_p(SMBHSTADD), inb_p(SMBHSTDAT0), inb_p(SMBHSTDAT1)); return result; } /* Return negative errno on error. */ static s32 ali15x3_access(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data) { int i, len; int temp; int timeout; /* clear all the bits (clear-on-write) */ outb_p(0xFF, SMBHSTSTS); /* make sure SMBus is idle */ temp = inb_p(SMBHSTSTS); for (timeout = 0; (timeout < MAX_TIMEOUT) && !(temp & ALI15X3_STS_IDLE); timeout++) { msleep(1); temp = inb_p(SMBHSTSTS); } if (timeout >= MAX_TIMEOUT) { dev_err(&adap->dev, "Idle wait Timeout! 
STS=0x%02x\n", temp); } switch (size) { case I2C_SMBUS_QUICK: outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD); size = ALI15X3_QUICK; break; case I2C_SMBUS_BYTE: outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD); if (read_write == I2C_SMBUS_WRITE) outb_p(command, SMBHSTCMD); size = ALI15X3_BYTE; break; case I2C_SMBUS_BYTE_DATA: outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD); outb_p(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) outb_p(data->byte, SMBHSTDAT0); size = ALI15X3_BYTE_DATA; break; case I2C_SMBUS_WORD_DATA: outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD); outb_p(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) { outb_p(data->word & 0xff, SMBHSTDAT0); outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1); } size = ALI15X3_WORD_DATA; break; case I2C_SMBUS_BLOCK_DATA: outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD); outb_p(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) { len = data->block[0]; if (len < 0) { len = 0; data->block[0] = len; } if (len > 32) { len = 32; data->block[0] = len; } outb_p(len, SMBHSTDAT0); /* Reset SMBBLKDAT */ outb_p(inb_p(SMBHSTCNT) | ALI15X3_BLOCK_CLR, SMBHSTCNT); for (i = 1; i <= len; i++) outb_p(data->block[i], SMBBLKDAT); } size = ALI15X3_BLOCK_DATA; break; default: dev_warn(&adap->dev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } outb_p(size, SMBHSTCNT); /* output command */ temp = ali15x3_transaction(adap); if (temp) return temp; if ((read_write == I2C_SMBUS_WRITE) || (size == ALI15X3_QUICK)) return 0; switch (size) { case ALI15X3_BYTE: /* Result put in SMBHSTDAT0 */ data->byte = inb_p(SMBHSTDAT0); break; case ALI15X3_BYTE_DATA: data->byte = inb_p(SMBHSTDAT0); break; case ALI15X3_WORD_DATA: data->word = inb_p(SMBHSTDAT0) + (inb_p(SMBHSTDAT1) << 8); break; case ALI15X3_BLOCK_DATA: len = inb_p(SMBHSTDAT0); if (len > 32) len = 32; data->block[0] = len; /* Reset SMBBLKDAT */ outb_p(inb_p(SMBHSTCNT) | ALI15X3_BLOCK_CLR, SMBHSTCNT); 
for (i = 1; i <= data->block[0]; i++) { data->block[i] = inb_p(SMBBLKDAT); dev_dbg(&adap->dev, "Blk: len=%d, i=%d, data=%02x\n", len, i, data->block[i]); } break; } return 0; } static u32 ali15x3_func(struct i2c_adapter *adapter) { return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA; } static const struct i2c_algorithm smbus_algorithm = { .smbus_xfer = ali15x3_access, .functionality = ali15x3_func, }; static struct i2c_adapter ali15x3_adapter = { .owner = THIS_MODULE, .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, .algo = &smbus_algorithm, }; static DEFINE_PCI_DEVICE_TABLE(ali15x3_ids) = { { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) }, { 0, } }; MODULE_DEVICE_TABLE (pci, ali15x3_ids); static int ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id) { if (ali15x3_setup(dev)) { dev_err(&dev->dev, "ALI15X3 not detected, module not inserted.\n"); return -ENODEV; } /* set up the sysfs linkage to our parent device */ ali15x3_adapter.dev.parent = &dev->dev; snprintf(ali15x3_adapter.name, sizeof(ali15x3_adapter.name), "SMBus ALI15X3 adapter at %04x", ali15x3_smba); return i2c_add_adapter(&ali15x3_adapter); } static void ali15x3_remove(struct pci_dev *dev) { i2c_del_adapter(&ali15x3_adapter); release_region(ali15x3_smba, ALI15X3_SMB_IOSIZE); } static struct pci_driver ali15x3_driver = { .name = "ali15x3_smbus", .id_table = ali15x3_ids, .probe = ali15x3_probe, .remove = ali15x3_remove, }; module_pci_driver(ali15x3_driver); MODULE_AUTHOR ("Frodo Looijaard <frodol@dds.nl>, " "Philip Edelbrock <phil@netroedge.com>, " "and Mark D. Studebaker <mdsxyz123@yahoo.com>"); MODULE_DESCRIPTION("ALI15X3 SMBus driver"); MODULE_LICENSE("GPL");
gpl-2.0
one-2-z/a830s_kernel
arch/arm/mach-pxa/am300epd.c
2986
6572
/* * am300epd.c -- Platform device for AM300 EPD kit * * Copyright (C) 2008, Jaya Kumar * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * * This work was made possible by help and equipment support from E-Ink * Corporation. http://support.eink.com/community * * This driver is written to be used with the Broadsheet display controller. * on the AM300 EPD prototype kit/development kit with an E-Ink 800x600 * Vizplex EPD on a Gumstix board using the Broadsheet interface board. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/irq.h> #include <linux/gpio.h> #include <mach/gumstix.h> #include <mach/mfp-pxa25x.h> #include <mach/pxafb.h> #include "generic.h" #include <video/broadsheetfb.h> static unsigned int panel_type = 6; static struct platform_device *am300_device; static struct broadsheet_board am300_board; static unsigned long am300_pin_config[] __initdata = { GPIO16_GPIO, GPIO17_GPIO, GPIO32_GPIO, GPIO48_GPIO, GPIO49_GPIO, GPIO51_GPIO, GPIO74_GPIO, GPIO75_GPIO, GPIO76_GPIO, GPIO77_GPIO, /* this is the 16-bit hdb bus 58-73 */ GPIO58_GPIO, GPIO59_GPIO, GPIO60_GPIO, GPIO61_GPIO, GPIO62_GPIO, GPIO63_GPIO, GPIO64_GPIO, GPIO65_GPIO, GPIO66_GPIO, GPIO67_GPIO, GPIO68_GPIO, GPIO69_GPIO, GPIO70_GPIO, GPIO71_GPIO, GPIO72_GPIO, GPIO73_GPIO, }; /* register offsets for gpio control */ #define PWR_GPIO_PIN 16 #define CFG_GPIO_PIN 17 #define RDY_GPIO_PIN 32 #define DC_GPIO_PIN 48 #define RST_GPIO_PIN 49 #define LED_GPIO_PIN 51 #define RD_GPIO_PIN 74 #define WR_GPIO_PIN 75 #define CS_GPIO_PIN 76 #define IRQ_GPIO_PIN 77 /* hdb bus */ #define DB0_GPIO_PIN 58 #define DB15_GPIO_PIN 73 static int gpios[] = { PWR_GPIO_PIN, CFG_GPIO_PIN, RDY_GPIO_PIN, DC_GPIO_PIN, 
RST_GPIO_PIN, RD_GPIO_PIN, WR_GPIO_PIN, CS_GPIO_PIN, IRQ_GPIO_PIN, LED_GPIO_PIN }; static char *gpio_names[] = { "PWR", "CFG", "RDY", "DC", "RST", "RD", "WR", "CS", "IRQ", "LED" }; static int am300_wait_event(struct broadsheetfb_par *par) { /* todo: improve err recovery */ wait_event(par->waitq, gpio_get_value(RDY_GPIO_PIN)); return 0; } static int am300_init_gpio_regs(struct broadsheetfb_par *par) { int i; int err; char dbname[8]; for (i = 0; i < ARRAY_SIZE(gpios); i++) { err = gpio_request(gpios[i], gpio_names[i]); if (err) { dev_err(&am300_device->dev, "failed requesting " "gpio %s, err=%d\n", gpio_names[i], err); goto err_req_gpio; } } /* we also need to take care of the hdb bus */ for (i = DB0_GPIO_PIN; i <= DB15_GPIO_PIN; i++) { sprintf(dbname, "DB%d", i); err = gpio_request(i, dbname); if (err) { dev_err(&am300_device->dev, "failed requesting " "gpio %d, err=%d\n", i, err); goto err_req_gpio2; } } /* setup the outputs and init values */ gpio_direction_output(PWR_GPIO_PIN, 0); gpio_direction_output(CFG_GPIO_PIN, 1); gpio_direction_output(DC_GPIO_PIN, 0); gpio_direction_output(RD_GPIO_PIN, 1); gpio_direction_output(WR_GPIO_PIN, 1); gpio_direction_output(CS_GPIO_PIN, 1); gpio_direction_output(RST_GPIO_PIN, 0); /* setup the inputs */ gpio_direction_input(RDY_GPIO_PIN); gpio_direction_input(IRQ_GPIO_PIN); /* start the hdb bus as an input */ for (i = DB0_GPIO_PIN; i <= DB15_GPIO_PIN; i++) gpio_direction_output(i, 0); /* go into command mode */ gpio_set_value(CFG_GPIO_PIN, 1); gpio_set_value(RST_GPIO_PIN, 0); msleep(10); gpio_set_value(RST_GPIO_PIN, 1); msleep(10); am300_wait_event(par); return 0; err_req_gpio2: while (--i >= DB0_GPIO_PIN) gpio_free(i); i = ARRAY_SIZE(gpios); err_req_gpio: while (--i >= 0) gpio_free(gpios[i]); return err; } static int am300_init_board(struct broadsheetfb_par *par) { return am300_init_gpio_regs(par); } static void am300_cleanup(struct broadsheetfb_par *par) { int i; free_irq(IRQ_GPIO(RDY_GPIO_PIN), par); for (i = 0; i < 
ARRAY_SIZE(gpios); i++) gpio_free(gpios[i]); for (i = DB0_GPIO_PIN; i <= DB15_GPIO_PIN; i++) gpio_free(i); } static u16 am300_get_hdb(struct broadsheetfb_par *par) { u16 res = 0; int i; for (i = 0; i <= (DB15_GPIO_PIN - DB0_GPIO_PIN) ; i++) res |= (gpio_get_value(DB0_GPIO_PIN + i)) ? (1 << i) : 0; return res; } static void am300_set_hdb(struct broadsheetfb_par *par, u16 data) { int i; for (i = 0; i <= (DB15_GPIO_PIN - DB0_GPIO_PIN) ; i++) gpio_set_value(DB0_GPIO_PIN + i, (data >> i) & 0x01); } static void am300_set_ctl(struct broadsheetfb_par *par, unsigned char bit, u8 state) { switch (bit) { case BS_CS: gpio_set_value(CS_GPIO_PIN, state); break; case BS_DC: gpio_set_value(DC_GPIO_PIN, state); break; case BS_WR: gpio_set_value(WR_GPIO_PIN, state); break; } } static int am300_get_panel_type(void) { return panel_type; } static irqreturn_t am300_handle_irq(int irq, void *dev_id) { struct broadsheetfb_par *par = dev_id; wake_up(&par->waitq); return IRQ_HANDLED; } static int am300_setup_irq(struct fb_info *info) { int ret; struct broadsheetfb_par *par = info->par; ret = request_irq(IRQ_GPIO(RDY_GPIO_PIN), am300_handle_irq, IRQF_DISABLED|IRQF_TRIGGER_RISING, "AM300", par); if (ret) dev_err(&am300_device->dev, "request_irq failed: %d\n", ret); return ret; } static struct broadsheet_board am300_board = { .owner = THIS_MODULE, .init = am300_init_board, .cleanup = am300_cleanup, .set_hdb = am300_set_hdb, .get_hdb = am300_get_hdb, .set_ctl = am300_set_ctl, .wait_for_rdy = am300_wait_event, .get_panel_type = am300_get_panel_type, .setup_irq = am300_setup_irq, }; int __init am300_init(void) { int ret; pxa2xx_mfp_config(ARRAY_AND_SIZE(am300_pin_config)); /* request our platform independent driver */ request_module("broadsheetfb"); am300_device = platform_device_alloc("broadsheetfb", -1); if (!am300_device) return -ENOMEM; /* the am300_board that will be seen by broadsheetfb is a copy */ platform_device_add_data(am300_device, &am300_board, sizeof(am300_board)); ret = 
platform_device_add(am300_device); if (ret) { platform_device_put(am300_device); return ret; } return 0; } module_param(panel_type, uint, 0); MODULE_PARM_DESC(panel_type, "Select the panel type: 37, 6, 97"); MODULE_DESCRIPTION("board driver for am300 epd kit"); MODULE_AUTHOR("Jaya Kumar"); MODULE_LICENSE("GPL");
gpl-2.0
Negamann303/kernel-ng2-negalite
drivers/gpu/drm/radeon/atombios_i2c.c
4266
3959
/* * Copyright 2011 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: Alex Deucher
 *
 */
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "atom.h"

/* Requested clock for the HW-assisted i2c engine, in units used by the
 * ATOM table (presumably 10 kHz steps, i.e. 500 kHz -- TODO confirm
 * against the ATOM BIOS spec). */
#define TARGET_HW_I2C_CLOCK 50

/* these are a limitation of ProcessI2cChannelTransaction not the hw */
#define ATOM_MAX_HW_I2C_WRITE 2
#define ATOM_MAX_HW_I2C_READ  255

/*
 * radeon_process_i2c_ch - run one HW-assisted i2c transaction via ATOM
 * @chan:       i2c channel (carries the hw line id in chan->rec.i2c_id)
 * @slave_addr: 7-bit slave address (shifted left once for the table)
 * @flags:      HW_I2C_WRITE or HW_I2C_READ
 * @buf:        data to send (write) or buffer to fill (read)
 * @num:        byte count; capped by the ATOM table limits above
 *
 * Executes the ProcessI2cChannelTransaction command table.  On a read,
 * the firmware deposits the result in the ATOM scratch area, which is
 * copied back into @buf.  Returns 0, -EINVAL on oversize request, or
 * -EIO if the firmware reports failure.
 */
static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
				 u8 slave_addr, u8 flags,
				 u8 *buf, u8 num)
{
	struct drm_device *dev = chan->dev;
	struct radeon_device *rdev = dev->dev_private;
	PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
	unsigned char *base;
	u16 out;

	memset(&args, 0, sizeof(args));

	/* scratch area is where the firmware leaves read data */
	base = (unsigned char *)rdev->mode_info.atom_context->scratch;

	if (flags & HW_I2C_WRITE) {
		if (num > ATOM_MAX_HW_I2C_WRITE) {
			DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 2)\n", num);
			return -EINVAL;
		}
		/* up to 2 bytes are passed inline in the args block,
		 * little-endian, not via the scratch area */
		memcpy(&out, buf, num);
		args.lpI2CDataOut = cpu_to_le16(out);
	} else {
		if (num > ATOM_MAX_HW_I2C_READ) {
			DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
			return -EINVAL;
		}
	}
	args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
	args.ucRegIndex = 0;
	args.ucTransBytes = num;
	args.ucSlaveAddr = slave_addr << 1;	/* table wants 8-bit address form */
	args.ucLineNumber = chan->rec.i2c_id;

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);

	/* error */
	if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
		DRM_DEBUG_KMS("hw_i2c error\n");
		return -EIO;
	}

	/* reads come back through the scratch area */
	if (!(flags & HW_I2C_WRITE))
		memcpy(buf, base, num);

	return 0;
}

/*
 * radeon_atom_hw_i2c_xfer - i2c_algorithm master_xfer hook
 * @i2c_adap: adapter registered for this channel
 * @msgs:     array of i2c messages
 * @num:      number of messages
 *
 * A single zero-length message is treated as a bus probe (1-byte dummy
 * write).  Otherwise each message is chunked to the ATOM table limits
 * (2 bytes per write, 255 per read) and fed through
 * radeon_process_i2c_ch().  Returns @num on success or a negative errno.
 *
 * NOTE(review): chunked writes restart the transaction per chunk with
 * ucRegIndex fixed at 0 -- multi-byte register writes may not behave as
 * a plain i2c transfer would; limitation of the ATOM interface.
 */
int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
			    struct i2c_msg *msgs, int num)
{
	struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
	struct i2c_msg *p;
	int i, remaining, current_count, buffer_offset, max_bytes, ret;
	u8 buf = 0, flags;

	/* check for bus probe */
	p = &msgs[0];
	if ((num == 1) && (p->len == 0)) {
		ret = radeon_process_i2c_ch(i2c,
					    p->addr, HW_I2C_WRITE,
					    &buf, 1);
		if (ret)
			return ret;
		else
			return num;
	}

	for (i = 0; i < num; i++) {
		p = &msgs[i];
		remaining = p->len;
		buffer_offset = 0;

		/* max_bytes are a limitation of ProcessI2cChannelTransaction not the hw */
		if (p->flags & I2C_M_RD) {
			max_bytes = ATOM_MAX_HW_I2C_READ;
			flags = HW_I2C_READ;
		} else {
			max_bytes = ATOM_MAX_HW_I2C_WRITE;
			flags = HW_I2C_WRITE;
		}
		while (remaining) {
			if (remaining > max_bytes)
				current_count = max_bytes;
			else
				current_count = remaining;
			ret = radeon_process_i2c_ch(i2c,
						    p->addr, flags,
						    &p->buf[buffer_offset],
						    current_count);
			if (ret)
				return ret;
			remaining -= current_count;
			buffer_offset += current_count;
		}
	}

	return num;
}

/* i2c_algorithm functionality hook: plain i2c plus emulated SMBus */
u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
gpl-2.0
blkredstarV/kernel_asus_moorefield
drivers/pinctrl/pinctrl-ab9540.c
4522
22546
/*
 * Copyright (C) ST-Ericsson SA 2012
 *
 * Author: Patrice Chotard <patrice.chotard@stericsson.com> for ST-Ericsson.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/gpio.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/mfd/abx500/ab8500.h>
#include "pinctrl-abx500.h"

/* All the pins that can be used for GPIO and some other functions */
#define ABX500_GPIO(offset)		(offset)

/* Pin aliases: GPIO number mapped to the package ball name (per the
 * AB9540 datasheet); gaps ("holes") are GPIO numbers that do not exist
 * on this part. */
#define AB9540_PIN_R4		ABX500_GPIO(1)
#define AB9540_PIN_V3		ABX500_GPIO(2)
#define AB9540_PIN_T4		ABX500_GPIO(3)
#define AB9540_PIN_T5		ABX500_GPIO(4)
/* hole */
#define AB9540_PIN_B18		ABX500_GPIO(10)
#define AB9540_PIN_C18		ABX500_GPIO(11)
/* hole */
#define AB9540_PIN_D18		ABX500_GPIO(13)
#define AB9540_PIN_B19		ABX500_GPIO(14)
#define AB9540_PIN_C19		ABX500_GPIO(15)
#define AB9540_PIN_D19		ABX500_GPIO(16)
#define AB9540_PIN_R3		ABX500_GPIO(17)
#define AB9540_PIN_T2		ABX500_GPIO(18)
#define AB9540_PIN_U2		ABX500_GPIO(19)
#define AB9540_PIN_V2		ABX500_GPIO(20)
#define AB9540_PIN_N17		ABX500_GPIO(21)
#define AB9540_PIN_N16		ABX500_GPIO(22)
#define AB9540_PIN_M19		ABX500_GPIO(23)
#define AB9540_PIN_T3		ABX500_GPIO(24)
#define AB9540_PIN_W2		ABX500_GPIO(25)
/* hole */
#define AB9540_PIN_H4		ABX500_GPIO(27)
#define AB9540_PIN_F1		ABX500_GPIO(28)
#define AB9540_PIN_F4		ABX500_GPIO(29)
#define AB9540_PIN_F2		ABX500_GPIO(30)
#define AB9540_PIN_E4		ABX500_GPIO(31)
#define AB9540_PIN_F3		ABX500_GPIO(32)
/* hole */
#define AB9540_PIN_J13		ABX500_GPIO(34)
/* hole */
#define AB9540_PIN_L17		ABX500_GPIO(40)
#define AB9540_PIN_L16		ABX500_GPIO(41)
#define AB9540_PIN_W3		ABX500_GPIO(42)
#define AB9540_PIN_N4		ABX500_GPIO(50)
#define AB9540_PIN_G12		ABX500_GPIO(51)
#define AB9540_PIN_E17		ABX500_GPIO(52)
#define AB9540_PIN_D11		ABX500_GPIO(53)
#define AB9540_PIN_M18		ABX500_GPIO(54)

/* indicates the highest GPIO number */
#define AB9540_GPIO_MAX_NUMBER	54

/*
 * The names of the pins are denoted by GPIO number and ball name, even
 * though they can be used for other things than GPIO, this is the first
 * column in the table of the data sheet and often used on schematics and
 * such.
 *
 * NOTE: pin 54 is named "GPIO60_M18" on purpose -- the datasheet calls
 * it GPIO60 even though it sits at offset 54 (see the GPIOSEL7 comment
 * further down).
 */
static const struct pinctrl_pin_desc ab9540_pins[] = {
	PINCTRL_PIN(AB9540_PIN_R4, "GPIO1_R4"),
	PINCTRL_PIN(AB9540_PIN_V3, "GPIO2_V3"),
	PINCTRL_PIN(AB9540_PIN_T4, "GPIO3_T4"),
	PINCTRL_PIN(AB9540_PIN_T5, "GPIO4_T5"),
	/* hole */
	PINCTRL_PIN(AB9540_PIN_B18, "GPIO10_B18"),
	PINCTRL_PIN(AB9540_PIN_C18, "GPIO11_C18"),
	/* hole */
	PINCTRL_PIN(AB9540_PIN_D18, "GPIO13_D18"),
	PINCTRL_PIN(AB9540_PIN_B19, "GPIO14_B19"),
	PINCTRL_PIN(AB9540_PIN_C19, "GPIO15_C19"),
	PINCTRL_PIN(AB9540_PIN_D19, "GPIO16_D19"),
	PINCTRL_PIN(AB9540_PIN_R3, "GPIO17_R3"),
	PINCTRL_PIN(AB9540_PIN_T2, "GPIO18_T2"),
	PINCTRL_PIN(AB9540_PIN_U2, "GPIO19_U2"),
	PINCTRL_PIN(AB9540_PIN_V2, "GPIO20_V2"),
	PINCTRL_PIN(AB9540_PIN_N17, "GPIO21_N17"),
	PINCTRL_PIN(AB9540_PIN_N16, "GPIO22_N16"),
	PINCTRL_PIN(AB9540_PIN_M19, "GPIO23_M19"),
	PINCTRL_PIN(AB9540_PIN_T3, "GPIO24_T3"),
	PINCTRL_PIN(AB9540_PIN_W2, "GPIO25_W2"),
	/* hole */
	PINCTRL_PIN(AB9540_PIN_H4, "GPIO27_H4"),
	PINCTRL_PIN(AB9540_PIN_F1, "GPIO28_F1"),
	PINCTRL_PIN(AB9540_PIN_F4, "GPIO29_F4"),
	PINCTRL_PIN(AB9540_PIN_F2, "GPIO30_F2"),
	PINCTRL_PIN(AB9540_PIN_E4, "GPIO31_E4"),
	PINCTRL_PIN(AB9540_PIN_F3, "GPIO32_F3"),
	/* hole */
	PINCTRL_PIN(AB9540_PIN_J13, "GPIO34_J13"),
	/* hole */
	PINCTRL_PIN(AB9540_PIN_L17, "GPIO40_L17"),
	PINCTRL_PIN(AB9540_PIN_L16, "GPIO41_L16"),
	PINCTRL_PIN(AB9540_PIN_W3, "GPIO42_W3"),
	PINCTRL_PIN(AB9540_PIN_N4, "GPIO50_N4"),
	PINCTRL_PIN(AB9540_PIN_G12, "GPIO51_G12"),
	PINCTRL_PIN(AB9540_PIN_E17, "GPIO52_E17"),
	PINCTRL_PIN(AB9540_PIN_D11, "GPIO53_D11"),
	PINCTRL_PIN(AB9540_PIN_M18, "GPIO60_M18"),
};

/*
 * Maps local GPIO offsets to local pin numbers
 */
static const struct abx500_pinrange ab9540_pinranges[] = {
	ABX500_PINRANGE(1, 4, ABX500_ALT_A),
	ABX500_PINRANGE(10, 2, ABX500_DEFAULT),
	ABX500_PINRANGE(13, 1, ABX500_DEFAULT),
	ABX500_PINRANGE(14, 12, ABX500_ALT_A),
	ABX500_PINRANGE(27, 6, ABX500_ALT_A),
	ABX500_PINRANGE(34, 1, ABX500_ALT_A),
	ABX500_PINRANGE(40, 3, ABX500_ALT_A),
	ABX500_PINRANGE(50, 1, ABX500_DEFAULT),
	ABX500_PINRANGE(51, 3, ABX500_ALT_A),
	ABX500_PINRANGE(54, 1, ABX500_DEFAULT),
};

/*
 * Read the pin group names like this:
 * sysclkreq2_d_1 = first groups of pins for sysclkreq2 on default function
 *
 * The groups are arranged as sets per altfunction column, so we can
 * mux in one group at a time by selecting the same altfunction for them
 * all. When functions require pins on different altfunctions, you need
 * to combine several groups.
 */

/* default column */
static const unsigned sysclkreq2_d_1_pins[] = { AB9540_PIN_R4 };
static const unsigned sysclkreq3_d_1_pins[] = { AB9540_PIN_V3 };
static const unsigned sysclkreq4_d_1_pins[] = { AB9540_PIN_T4 };
static const unsigned sysclkreq6_d_1_pins[] = { AB9540_PIN_T5 };
static const unsigned gpio10_d_1_pins[] = { AB9540_PIN_B18 };
static const unsigned gpio11_d_1_pins[] = { AB9540_PIN_C18 };
static const unsigned gpio13_d_1_pins[] = { AB9540_PIN_D18 };
static const unsigned pwmout1_d_1_pins[] = { AB9540_PIN_B19 };
static const unsigned pwmout2_d_1_pins[] = { AB9540_PIN_C19 };
static const unsigned pwmout3_d_1_pins[] = { AB9540_PIN_D19 };
/* audio data interface 1*/
static const unsigned adi1_d_1_pins[] = { AB9540_PIN_R3, AB9540_PIN_T2,
					AB9540_PIN_U2, AB9540_PIN_V2 };
/* USBUICC */
static const unsigned usbuicc_d_1_pins[] = { AB9540_PIN_N17, AB9540_PIN_N16,
					AB9540_PIN_M19 };
static const unsigned sysclkreq7_d_1_pins[] = { AB9540_PIN_T3 };
static const unsigned sysclkreq8_d_1_pins[] = { AB9540_PIN_W2 };
/* Digital microphone 1 and 2 */
static const unsigned dmic12_d_1_pins[] = { AB9540_PIN_H4, AB9540_PIN_F1 };
/* Digital microphone 3 and 4 */
static const unsigned dmic34_d_1_pins[] = { AB9540_PIN_F4, AB9540_PIN_F2 };
/* Digital microphone 5 and 6 */
static const unsigned dmic56_d_1_pins[] = { AB9540_PIN_E4, AB9540_PIN_F3 };
static const unsigned extcpena_d_1_pins[] = { AB9540_PIN_J13 };
/* modem SDA/SCL */
static const unsigned modsclsda_d_1_pins[] = { AB9540_PIN_L17, AB9540_PIN_L16 };
static const unsigned sysclkreq5_d_1_pins[] = { AB9540_PIN_W3 };
static const unsigned gpio50_d_1_pins[] = { AB9540_PIN_N4 };
static const unsigned batremn_d_1_pins[] = { AB9540_PIN_G12 };
static const unsigned resethw_d_1_pins[] = { AB9540_PIN_E17 };
static const unsigned service_d_1_pins[] = { AB9540_PIN_D11 };
static const unsigned gpio60_d_1_pins[] = { AB9540_PIN_M18 };

/* Altfunction A column */
static const unsigned gpio1_a_1_pins[] = { AB9540_PIN_R4 };
static const unsigned gpio2_a_1_pins[] = { AB9540_PIN_V3 };
static const unsigned gpio3_a_1_pins[] = { AB9540_PIN_T4 };
static const unsigned gpio4_a_1_pins[] = { AB9540_PIN_T5 };
static const unsigned hiqclkena_a_1_pins[] = { AB9540_PIN_B18 };
static const unsigned pdmclk_a_1_pins[] = { AB9540_PIN_C18 };
static const unsigned uartdata_a_1_pins[] = { AB9540_PIN_D18, AB9540_PIN_N4 };
static const unsigned gpio14_a_1_pins[] = { AB9540_PIN_B19 };
static const unsigned gpio15_a_1_pins[] = { AB9540_PIN_C19 };
static const unsigned gpio16_a_1_pins[] = { AB9540_PIN_D19 };
static const unsigned gpio17_a_1_pins[] = { AB9540_PIN_R3 };
static const unsigned gpio18_a_1_pins[] = { AB9540_PIN_T2 };
static const unsigned gpio19_a_1_pins[] = { AB9540_PIN_U2 };
static const unsigned gpio20_a_1_pins[] = { AB9540_PIN_V2 };
static const unsigned gpio21_a_1_pins[] = { AB9540_PIN_N17 };
static const unsigned gpio22_a_1_pins[] = { AB9540_PIN_N16 };
static const unsigned gpio23_a_1_pins[] = { AB9540_PIN_M19 };
static const unsigned gpio24_a_1_pins[] = { AB9540_PIN_T3 };
static const unsigned gpio25_a_1_pins[] = { AB9540_PIN_W2 };
static const unsigned gpio27_a_1_pins[] = { AB9540_PIN_H4 };
static const unsigned gpio28_a_1_pins[] = { AB9540_PIN_F1 };
static const unsigned gpio29_a_1_pins[] = { AB9540_PIN_F4 };
static const unsigned gpio30_a_1_pins[] = { AB9540_PIN_F2 };
static const unsigned gpio31_a_1_pins[] = { AB9540_PIN_E4 };
static const unsigned gpio32_a_1_pins[] = { AB9540_PIN_F3 };
static const unsigned gpio34_a_1_pins[] = { AB9540_PIN_J13 };
static const unsigned gpio40_a_1_pins[] = { AB9540_PIN_L17 };
static const unsigned gpio41_a_1_pins[] = { AB9540_PIN_L16 };
static const unsigned gpio42_a_1_pins[] = { AB9540_PIN_W3 };
static const unsigned gpio51_a_1_pins[] = { AB9540_PIN_G12 };
static const unsigned gpio52_a_1_pins[] = { AB9540_PIN_E17 };
static const unsigned gpio53_a_1_pins[] = { AB9540_PIN_D11 };
static const unsigned usbuiccpd_a_1_pins[] = { AB9540_PIN_M18 };

/* Altfunction B colum */
static const unsigned pdmdata_b_1_pins[] = { AB9540_PIN_B18 };
static const unsigned pwmextvibra1_b_1_pins[] = { AB9540_PIN_D18 };
static const unsigned pwmextvibra2_b_1_pins[] = { AB9540_PIN_N4 };

/* Altfunction C column */
static const unsigned usbvdat_c_1_pins[] = { AB9540_PIN_D18 };

#define AB9540_PIN_GROUP(a, b) { .name = #a, .pins = a##_pins,		\
			.npins = ARRAY_SIZE(a##_pins), .altsetting = b }

static const struct abx500_pingroup ab9540_groups[] = {
	/* default column */
	AB9540_PIN_GROUP(sysclkreq2_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(sysclkreq3_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(sysclkreq4_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(sysclkreq6_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(gpio10_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(gpio11_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(gpio13_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(pwmout1_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(pwmout2_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(pwmout3_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(adi1_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(usbuicc_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(sysclkreq7_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(sysclkreq8_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(dmic12_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(dmic34_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(dmic56_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(extcpena_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(modsclsda_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(sysclkreq5_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(gpio50_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(batremn_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(resethw_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(service_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(gpio60_d_1, ABX500_DEFAULT),
	/* Altfunction A column */
	AB9540_PIN_GROUP(gpio1_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio2_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio3_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio4_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(hiqclkena_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(pdmclk_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(uartdata_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio14_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio15_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio16_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio17_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio18_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio19_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio20_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio21_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio22_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio23_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio24_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio25_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio27_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio28_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio29_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio30_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio31_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio32_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio34_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio40_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio41_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio42_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio51_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio52_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio53_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(usbuiccpd_a_1, ABX500_ALT_A),
	/* Altfunction B column */
	AB9540_PIN_GROUP(pdmdata_b_1, ABX500_ALT_B),
	AB9540_PIN_GROUP(pwmextvibra1_b_1, ABX500_ALT_B),
	AB9540_PIN_GROUP(pwmextvibra2_b_1, ABX500_ALT_B),
	/* Altfunction C column */
	AB9540_PIN_GROUP(usbvdat_c_1, ABX500_ALT_C),
};

/* We use this macro to define the groups applicable to a function */
#define AB9540_FUNC_GROUPS(a, b...)	   \
static const char * const a##_groups[] = { b };

AB9540_FUNC_GROUPS(sysclkreq, "sysclkreq2_d_1", "sysclkreq3_d_1",
		"sysclkreq4_d_1", "sysclkreq5_d_1", "sysclkreq6_d_1",
		"sysclkreq7_d_1", "sysclkreq8_d_1");
AB9540_FUNC_GROUPS(gpio, "gpio1_a_1", "gpio2_a_1", "gpio3_a_1", "gpio4_a_1",
		"gpio10_d_1", "gpio11_d_1", "gpio13_d_1", "gpio14_a_1",
		"gpio15_a_1", "gpio16_a_1", "gpio17_a_1", "gpio18_a_1",
		"gpio19_a_1", "gpio20_a_1", "gpio21_a_1", "gpio22_a_1",
		"gpio23_a_1", "gpio24_a_1", "gpio25_a_1", "gpio27_a_1",
		"gpio28_a_1", "gpio29_a_1", "gpio30_a_1", "gpio31_a_1",
		"gpio32_a_1", "gpio34_a_1", "gpio40_a_1", "gpio41_a_1",
		"gpio42_a_1", "gpio50_d_1", "gpio51_a_1", "gpio52_a_1",
		"gpio53_a_1", "gpio60_d_1");
AB9540_FUNC_GROUPS(pwmout, "pwmout1_d_1", "pwmout2_d_1", "pwmout3_d_1");
AB9540_FUNC_GROUPS(adi1, "adi1_d_1");
AB9540_FUNC_GROUPS(usbuicc, "usbuicc_d_1", "usbuiccpd_a_1");
AB9540_FUNC_GROUPS(dmic, "dmic12_d_1", "dmic34_d_1", "dmic56_d_1");
AB9540_FUNC_GROUPS(extcpena, "extcpena_d_1");
AB9540_FUNC_GROUPS(modsclsda, "modsclsda_d_1");
AB9540_FUNC_GROUPS(batremn, "batremn_d_1");
AB9540_FUNC_GROUPS(resethw, "resethw_d_1");
AB9540_FUNC_GROUPS(service, "service_d_1");
AB9540_FUNC_GROUPS(hiqclkena, "hiqclkena_a_1");
AB9540_FUNC_GROUPS(pdm, "pdmdata_b_1", "pdmclk_a_1");
AB9540_FUNC_GROUPS(uartdata, "uartdata_a_1");
AB9540_FUNC_GROUPS(pwmextvibra, "pwmextvibra1_b_1", "pwmextvibra2_b_1");
AB9540_FUNC_GROUPS(usbvdat, "usbvdat_c_1");

#define FUNCTION(fname)					\
	{						\
		.name = #fname,				\
		.groups = fname##_groups,		\
		.ngroups = ARRAY_SIZE(fname##_groups),	\
	}

static const struct abx500_function ab9540_functions[] = {
	FUNCTION(sysclkreq),
	FUNCTION(gpio),
	FUNCTION(pwmout),
	FUNCTION(adi1),
	FUNCTION(usbuicc),
	FUNCTION(dmic),
	FUNCTION(extcpena),
	FUNCTION(modsclsda),
	FUNCTION(batremn),
	FUNCTION(resethw),
	FUNCTION(service),
	FUNCTION(hiqclkena),
	FUNCTION(pdm),
	FUNCTION(uartdata),
	FUNCTION(pwmextvibra),
	FUNCTION(usbvdat),
};

/*
 * This table translates what is in the AB9540 specification regarding the
 * balls' alternate functions (as for DB, default, ALT_A, ALT_B and ALT_C).
 * ALTERNATE_FUNCTIONS(GPIO_NUMBER, GPIOSEL bit, ALTERNATFUNC bit1,
 * ALTERNATEFUNC bit2, ALTA val, ALTB val, ALTC val),
 *
 * example :
 *
 *	ALTERNATE_FUNCTIONS(13,	    4,	  3,	  4, 1, 0, 2),
 *	means that pin AB9540_PIN_D18 (pin 13) supports 4 mux (default/ALT_A,
 *	ALT_B and ALT_C), so GPIOSEL and ALTERNATFUNC registers are used to
 *	select the mux. ALTA, ALTB and ALTC val indicates values to write in
 *	ALTERNATFUNC register. We need to specify these values as SOC
 *	designers didn't apply the same logic on how to select mux in the
 *	ABx500 family.
 *
 *	As this pin supports at least ALT_B mux, default mux is
 *	selected by writing 1 in GPIOSEL bit :
 *
 *		| GPIOSEL bit=4 | alternatfunc bit2=4 | alternatfunc bit1=3
 *	default	|       1       |          0          |          0
 *	alt_A	|       0       |          0          |          1
 *	alt_B	|       0       |          0          |          0
 *	alt_C	|       0       |          1          |          0
 *
 *	ALTERNATE_FUNCTIONS(1,	    0, UNUSED, UNUSED),
 *	means that pin AB9540_PIN_R4 (pin 1) supports 2 mux, so only GPIOSEL
 *	register is used to select the mux. As this pin doesn't support at
 *	least ALT_B mux, default mux is selected by writing 0 in GPIOSEL bit :
 *
 *		| GPIOSEL bit=0 | alternatfunc bit2= | alternatfunc bit1=
 *	default	|       0       |         0          |         0
 *	alt_A	|       1       |         0          |         0
 */
static struct alternate_functions ab9540alternate_functions[AB9540_GPIO_MAX_NUMBER + 1] = {
	/* GPIOSEL1 - bits 4-7 are reserved */
	ALTERNATE_FUNCTIONS(0, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO0 */
	ALTERNATE_FUNCTIONS(1,	0, UNUSED, UNUSED, 0, 0, 0), /* GPIO1, altA controlled by bit 0 */
	ALTERNATE_FUNCTIONS(2,	1, UNUSED, UNUSED, 0, 0, 0), /* GPIO2, altA controlled by bit 1 */
	ALTERNATE_FUNCTIONS(3,	2, UNUSED, UNUSED, 0, 0, 0), /* GPIO3, altA controlled by bit 2*/
	ALTERNATE_FUNCTIONS(4,	3, UNUSED, UNUSED, 0, 0, 0), /* GPIO4, altA controlled by bit 3*/
	ALTERNATE_FUNCTIONS(5, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO5 */
	ALTERNATE_FUNCTIONS(6, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO6 */
	ALTERNATE_FUNCTIONS(7, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO7 */
	ALTERNATE_FUNCTIONS(8, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO8 */
	/* GPIOSEL2 - bits 0 and 3 are reserved */
	ALTERNATE_FUNCTIONS(9, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO9 */
	ALTERNATE_FUNCTIONS(10,	1, 0, UNUSED, 1, 0, 0), /* GPIO10, altA and altB controlled by bit 0 */
	ALTERNATE_FUNCTIONS(11,	2, 1, UNUSED, 0, 0, 0), /* GPIO11, altA controlled by bit 1 */
	ALTERNATE_FUNCTIONS(12, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO12 */
	ALTERNATE_FUNCTIONS(13,	4, 3, 4, 1, 0, 2), /* GPIO13, altA altB and altC controlled by bit 3 and 4 */
	ALTERNATE_FUNCTIONS(14,	5, UNUSED, UNUSED, 0, 0, 0), /* GPIO14, altA controlled by bit 5 */
	ALTERNATE_FUNCTIONS(15,	6, UNUSED, UNUSED, 0, 0, 0), /* GPIO15, altA controlled by bit 6 */
	ALTERNATE_FUNCTIONS(16,	7, UNUSED, UNUSED, 0, 0, 0), /* GPIO16, altA controlled by bit 7 */
	/*
	 * GPIOSEL3 - bits 1-3 reserved
	 * pins 17 to 20 are special case, only bit 0 is used to select
	 * alternate function for these 4 pins.
	 * bits 1 to 3 are reserved
	 */
	ALTERNATE_FUNCTIONS(17,	0, UNUSED, UNUSED, 0, 0, 0), /* GPIO17, altA controlled by bit 0 */
	ALTERNATE_FUNCTIONS(18,	0, UNUSED, UNUSED, 0, 0, 0), /* GPIO18, altA controlled by bit 0 */
	ALTERNATE_FUNCTIONS(19,	0, UNUSED, UNUSED, 0, 0, 0), /* GPIO19, altA controlled by bit 0 */
	ALTERNATE_FUNCTIONS(20,	0, UNUSED, UNUSED, 0, 0, 0), /* GPIO20, altA controlled by bit 0 */
	ALTERNATE_FUNCTIONS(21,	4, UNUSED, UNUSED, 0, 0, 0), /* GPIO21, altA controlled by bit 4 */
	ALTERNATE_FUNCTIONS(22,	5, UNUSED, UNUSED, 0, 0, 0), /* GPIO22, altA controlled by bit 5 */
	ALTERNATE_FUNCTIONS(23,	6, UNUSED, UNUSED, 0, 0, 0), /* GPIO23, altA controlled by bit 6 */
	ALTERNATE_FUNCTIONS(24,	7, UNUSED, UNUSED, 0, 0, 0), /* GPIO24, altA controlled by bit 7 */
	/* GPIOSEL4 - bit 1 reserved */
	ALTERNATE_FUNCTIONS(25,	0, UNUSED, UNUSED, 0, 0, 0), /* GPIO25, altA controlled by bit 0 */
	ALTERNATE_FUNCTIONS(26, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO26 */
	ALTERNATE_FUNCTIONS(27,	2, UNUSED, UNUSED, 0, 0, 0), /* GPIO27, altA controlled by bit 2 */
	ALTERNATE_FUNCTIONS(28,	3, UNUSED, UNUSED, 0, 0, 0), /* GPIO28, altA controlled by bit 3 */
	ALTERNATE_FUNCTIONS(29,	4, UNUSED, UNUSED, 0, 0, 0), /* GPIO29, altA controlled by bit 4 */
	ALTERNATE_FUNCTIONS(30,	5, UNUSED, UNUSED, 0, 0, 0), /* GPIO30, altA controlled by bit 5 */
	ALTERNATE_FUNCTIONS(31,	6, UNUSED, UNUSED, 0, 0, 0), /* GPIO31, altA controlled by bit 6 */
	ALTERNATE_FUNCTIONS(32,	7, UNUSED, UNUSED, 0, 0, 0), /* GPIO32, altA controlled by bit 7 */
	/* GPIOSEL5 - bit 0, 2-6 are reserved */
	ALTERNATE_FUNCTIONS(33, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO33 */
	ALTERNATE_FUNCTIONS(34,	1, UNUSED, UNUSED, 0, 0, 0), /* GPIO34, altA controlled by bit 1 */
	ALTERNATE_FUNCTIONS(35, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO35 */
	ALTERNATE_FUNCTIONS(36, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO36 */
	ALTERNATE_FUNCTIONS(37, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO37 */
	ALTERNATE_FUNCTIONS(38, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO38 */
	ALTERNATE_FUNCTIONS(39, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO39 */
	ALTERNATE_FUNCTIONS(40,	7, UNUSED, UNUSED, 0, 0, 0), /* GPIO40, altA controlled by bit 7 */
	/* GPIOSEL6 - bit 2-7 are reserved */
	ALTERNATE_FUNCTIONS(41,	0, UNUSED, UNUSED, 0, 0, 0), /* GPIO41, altA controlled by bit 0 */
	ALTERNATE_FUNCTIONS(42,	1, UNUSED, UNUSED, 0, 0, 0), /* GPIO42, altA controlled by bit 1 */
	ALTERNATE_FUNCTIONS(43, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO43 */
	ALTERNATE_FUNCTIONS(44, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO44 */
	ALTERNATE_FUNCTIONS(45, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO45 */
	ALTERNATE_FUNCTIONS(46, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO46 */
	ALTERNATE_FUNCTIONS(47, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO47 */
	ALTERNATE_FUNCTIONS(48, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO48 */
	/*
	 * GPIOSEL7 - bit 0 and 6-7 are reserved
	 * special case with GPIO60, which is located at offset 5 of gpiosel7
	 * don't know why it has been called GPIO60 in AB9540 datasheet,
	 * GPIO54 would be logical..., so at SOC point of view we consider
	 * GPIO60 = GPIO54
	 */
	ALTERNATE_FUNCTIONS(49,	0, UNUSED, UNUSED, 0, 0, 0), /* no GPIO49 */
	ALTERNATE_FUNCTIONS(50,	1, 2, UNUSED, 1, 0, 0), /* GPIO50, altA and altB controlled by bit 1 */
	ALTERNATE_FUNCTIONS(51,	2, UNUSED, UNUSED, 0, 0, 0), /* GPIO51, altA controlled by bit 2 */
	ALTERNATE_FUNCTIONS(52,	3, UNUSED, UNUSED, 0, 0, 0), /* GPIO52, altA controlled by bit 3 */
	ALTERNATE_FUNCTIONS(53,	4, UNUSED, UNUSED, 0, 0, 0), /* GPIO53, altA controlled by bit 4 */
	ALTERNATE_FUNCTIONS(54,	5, UNUSED, UNUSED, 0, 0, 0), /* GPIO54 = GPIO60, altA controlled by bit 5 */
};

/* Ranges of consecutive GPIOs sharing one rising/falling interrupt base */
static struct abx500_gpio_irq_cluster ab9540_gpio_irq_cluster[] = {
	GPIO_IRQ_CLUSTER(10, 13, AB8500_INT_GPIO10R),
	GPIO_IRQ_CLUSTER(24, 25, AB8500_INT_GPIO24R),
	GPIO_IRQ_CLUSTER(40, 41, AB8500_INT_GPIO40R),
	GPIO_IRQ_CLUSTER(50, 54, AB9540_INT_GPIO50R),
};

/* Aggregate SoC description consumed by the generic abx500 pinctrl core */
static struct abx500_pinctrl_soc_data ab9540_soc = {
	.gpio_ranges = ab9540_pinranges,
	.gpio_num_ranges = ARRAY_SIZE(ab9540_pinranges),
	.pins = ab9540_pins,
	.npins = ARRAY_SIZE(ab9540_pins),
	.functions = ab9540_functions,
	.nfunctions = ARRAY_SIZE(ab9540_functions),
	.groups = ab9540_groups,
	.ngroups = ARRAY_SIZE(ab9540_groups),
	.alternate_functions = ab9540alternate_functions,
	.gpio_irq_cluster = ab9540_gpio_irq_cluster,
	.ngpio_irq_cluster = ARRAY_SIZE(ab9540_gpio_irq_cluster),
	.irq_gpio_rising_offset = AB8500_INT_GPIO6R,
	.irq_gpio_falling_offset = AB8500_INT_GPIO6F,
	.irq_gpio_factor = 1,
};

/* Hand the static AB9540 SoC description to the abx500 pinctrl driver */
void abx500_pinctrl_ab9540_init(struct abx500_pinctrl_soc_data **soc)
{
	*soc = &ab9540_soc;
}
gpl-2.0
friedrich420/Note-3-AEL-Kernel-SM-N9005_EUR_LL_Opensource_Update2.
drivers/staging/sep/sep_main.c
4778
126224
/* * * sep_main.c - Security Processor Driver main group of functions * * Copyright(c) 2009-2011 Intel Corporation. All rights reserved. * Contributions(c) 2009-2011 Discretix. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * CONTACTS: * * Mark Allyn mark.a.allyn@intel.com * Jayant Mangalampalli jayant.mangalampalli@intel.com * * CHANGES: * * 2009.06.26 Initial publish * 2010.09.14 Upgrade to Medfield * 2011.01.21 Move to sep_main.c to allow for sep_crypto.c * 2011.02.22 Enable kernel crypto operation * * Please note that this driver is based on information in the Discretix * CryptoCell 5.2 Driver Implementation Guide; the Discretix CryptoCell 5.2 * Integration Intel Medfield appendix; the Discretix CryptoCell 5.2 * Linux Driver Integration Guide; and the Discretix CryptoCell 5.2 System * Overview and Integration Guide. 
*/ /* #define DEBUG */ /* #define SEP_PERF_DEBUG */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/miscdevice.h> #include <linux/fs.h> #include <linux/cdev.h> #include <linux/kdev_t.h> #include <linux/mutex.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/poll.h> #include <linux/wait.h> #include <linux/pci.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/ioctl.h> #include <asm/current.h> #include <linux/ioport.h> #include <linux/io.h> #include <linux/interrupt.h> #include <linux/pagemap.h> #include <asm/cacheflush.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/jiffies.h> #include <linux/async.h> #include <linux/crypto.h> #include <crypto/internal/hash.h> #include <crypto/scatterwalk.h> #include <crypto/sha.h> #include <crypto/md5.h> #include <crypto/aes.h> #include <crypto/des.h> #include <crypto/hash.h> #include "sep_driver_hw_defs.h" #include "sep_driver_config.h" #include "sep_driver_api.h" #include "sep_dev.h" #include "sep_crypto.h" #define CREATE_TRACE_POINTS #include "sep_trace_events.h" /* * Let's not spend cycles iterating over message * area contents if debugging not enabled */ #ifdef DEBUG #define sep_dump_message(sep) _sep_dump_message(sep) #else #define sep_dump_message(sep) #endif /** * Currenlty, there is only one SEP device per platform; * In event platforms in the future have more than one SEP * device, this will be a linked list */ struct sep_device *sep_dev; /** * sep_queue_status_remove - Removes transaction from status queue * @sep: SEP device * @sep_queue_info: pointer to status queue * * This function will removes information about transaction from the queue. 
 */
void sep_queue_status_remove(struct sep_device *sep,
				      struct sep_queue_info **queue_elem)
{
	unsigned long lck_flags;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove\n",
		current->pid);

	/* Tolerate a NULL pointer or an already-removed element */
	if (!queue_elem || !(*queue_elem)) {
		dev_dbg(&sep->pdev->dev, "PID%d %s null\n",
					current->pid, __func__);
		return;
	}

	spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
	list_del(&(*queue_elem)->list);
	sep->sep_queue_num--;
	spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);

	kfree(*queue_elem);
	/* Clear the caller's pointer so a double remove is harmless */
	*queue_elem = NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove return\n",
		current->pid);
	return;
}

/**
 * sep_queue_status_add - Adds transaction to status queue
 * @sep: SEP device
 * @opcode: transaction opcode
 * @size: input data size
 * @pid: pid of current process
 * @name: current process name
 * @name_len: length of name (current process)
 *
 * This function adds information about a started transaction to the
 * status queue.  Returns the new element (caller later releases it via
 * sep_queue_status_remove()) or NULL on allocation failure.
 */
struct sep_queue_info *sep_queue_status_add(
						struct sep_device *sep,
						u32 opcode,
						u32 size,
						u32 pid,
						u8 *name, size_t name_len)
{
	unsigned long lck_flags;
	struct sep_queue_info *my_elem = NULL;

	my_elem = kzalloc(sizeof(struct sep_queue_info), GFP_KERNEL);

	if (!my_elem)
		return NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] kzalloc ok\n", current->pid);

	my_elem->data.opcode = opcode;
	my_elem->data.size = size;
	my_elem->data.pid = pid;

	/* Clamp name to the comm-size field; kzalloc above guarantees the
	 * destination is zero-filled, so truncation leaves it terminated */
	if (name_len > TASK_COMM_LEN)
		name_len = TASK_COMM_LEN;

	memcpy(&my_elem->data.name, name, name_len);

	spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
	list_add_tail(&my_elem->list, &sep->sep_queue_status);
	sep->sep_queue_num++;
	spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);

	return my_elem;
}

/**
 * sep_allocate_dmatables_region - Allocates buf for the MLLI/DMA tables
 * @sep: SEP device
 * @dmatables_region: Destination pointer for the buffer
 * @dma_ctx: DMA context for the transaction
 * @table_count: Number of MLLI/DMA tables to create
 *
 * The buffer created will not work as-is for DMA operations,
 * it needs to be copied over to the appropriate place in the
 * shared area.  Grows the region, preserving any previously built
 * tables.  Returns 0 or a negative errno.
 */
static int sep_allocate_dmatables_region(struct sep_device *sep,
					 void **dmatables_region,
					 struct sep_dma_context *dma_ctx,
					 const u32 table_count)
{
	const size_t new_len =
		SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;

	void *tmp_region = NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] dma_ctx = 0x%p\n",
				current->pid, dma_ctx);
	dev_dbg(&sep->pdev->dev, "[PID%d] dmatables_region = 0x%p\n",
				current->pid, dmatables_region);

	if (!dma_ctx || !dmatables_region) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] dma context/region uninitialized\n",
			current->pid);
		return -EINVAL;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] newlen = 0x%08zX\n",
				current->pid, new_len);
	dev_dbg(&sep->pdev->dev, "[PID%d] oldlen = 0x%08X\n", current->pid,
				dma_ctx->dmatables_len);
	tmp_region = kzalloc(new_len + dma_ctx->dmatables_len, GFP_KERNEL);
	if (!tmp_region) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] no mem for dma tables region\n",
				current->pid);
		return -ENOMEM;
	}

	/* Were there any previous tables that need to be preserved ? */
	if (*dmatables_region) {
		memcpy(tmp_region, *dmatables_region, dma_ctx->dmatables_len);
		kfree(*dmatables_region);
		*dmatables_region = NULL;
	}

	*dmatables_region = tmp_region;

	dma_ctx->dmatables_len += new_len;

	return 0;
}

/**
 * sep_wait_transaction - Used for synchronizing transactions
 * @sep: SEP device
 *
 * Blocks until this process can claim the single transaction slot.
 * Returns 0 once ownership is taken, or -EINTR if interrupted by a
 * signal before ownership was acquired.
 */
int sep_wait_transaction(struct sep_device *sep)
{
	int error = 0;
	DEFINE_WAIT(wait);

	/* Fast path: slot free, claim it without ever queueing */
	if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
				&sep->in_use_flags)) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] no transactions, returning\n",
				current->pid);
		goto end_function_setpid;
	}

	/*
	 * Looping needed even for exclusive waitq entries
	 * due to process wakeup latencies, previous process
	 * might have already created another transaction.
	 */
	for (;;) {
		/*
		 * Exclusive waitq entry, so that only one process is
		 * woken up from the queue at a time.
		 */
		prepare_to_wait_exclusive(&sep->event_transactions,
					  &wait,
					  TASK_INTERRUPTIBLE);
		if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
					  &sep->in_use_flags)) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] no transactions, breaking\n",
					current->pid);
			break;
		}
		dev_dbg(&sep->pdev->dev,
			"[PID%d] transactions ongoing, sleeping\n",
				current->pid);
		schedule();
		dev_dbg(&sep->pdev->dev, "[PID%d] woken up\n", current->pid);

		if (signal_pending(current)) {
			dev_dbg(&sep->pdev->dev, "[PID%d] received signal\n",
							current->pid);
			error = -EINTR;
			goto end_function;
		}
	}
end_function_setpid:
	/*
	 * The pid_doing_transaction indicates that this process
	 * now owns the facilities to perform a transaction with
	 * the SEP. While this process is performing a transaction,
	 * no other process who has the SEP device open can perform
	 * any transactions. This method allows more than one process
	 * to have the device open at any given time, which provides
	 * finer granularity for device utilization by multiple
	 * processes.
	 */
	/* Only one process is able to progress here at a time */
	sep->pid_doing_transaction = current->pid;

end_function:
	finish_wait(&sep->event_transactions, &wait);

	return error;
}

/**
 * sep_check_transaction_owner - Checks if current process owns transaction
 * @sep: SEP device
 *
 * Returns 0 when the caller holds the transaction, -EACCES otherwise.
 */
static inline int sep_check_transaction_owner(struct sep_device *sep)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] transaction pid = %d\n",
		current->pid,
		sep->pid_doing_transaction);

	if ((sep->pid_doing_transaction == 0) ||
		(current->pid != sep->pid_doing_transaction)) {
		return -EACCES;
	}

	/* We own the transaction */
	return 0;
}

#ifdef DEBUG

/**
 * sep_dump_message - dump the message that is pending
 * @sep: SEP device
 *
 * This will only print dump if DEBUG is set; it does
 * follow kernel debug print enabling.  Dumps the first 10 32-bit
 * words of the shared message area.
 */
static void _sep_dump_message(struct sep_device *sep)
{
	int count;

	u32 *p = sep->shared_addr;

	for (count = 0; count < 10 * 4; count += 4)
		dev_dbg(&sep->pdev->dev,
			"[PID%d] Word %d of the message is %x\n",
				current->pid, count/4, *p++);
}

#endif

/**
 * sep_map_and_alloc_shared_area -allocate shared block
 * @sep: security processor
 * @size: size of shared area
 *
 * Allocates the coherent DMA buffer shared with the SEP; stores both
 * the CPU address (shared_addr) and bus address (shared_bus).
 */
static int sep_map_and_alloc_shared_area(struct sep_device *sep)
{
	sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
		sep->shared_size,
		&sep->shared_bus, GFP_KERNEL);

	if (!sep->shared_addr) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] shared memory dma_alloc_coherent failed\n",
				current->pid);
		return -ENOMEM;
	}
	dev_dbg(&sep->pdev->dev,
		"[PID%d] shared_addr %zx bytes @%p (bus %llx)\n",
				current->pid,
				sep->shared_size, sep->shared_addr,
				(unsigned long long)sep->shared_bus);
	return 0;
}

/**
 * sep_unmap_and_free_shared_area - free shared block
 * @sep: security processor
 */
static void sep_unmap_and_free_shared_area(struct sep_device *sep)
{
	dma_free_coherent(&sep->pdev->dev, sep->shared_size,
				sep->shared_addr, sep->shared_bus);
}

#ifdef DEBUG

/**
 * sep_shared_bus_to_virt - convert bus/virt addresses
 * @sep: pointer to struct sep_device
 * @bus_address: address to convert
 *
 * Returns virtual address inside the shared area according
 * to the bus address.
 */
static void *sep_shared_bus_to_virt(struct sep_device *sep,
						dma_addr_t bus_address)
{
	return sep->shared_addr + (bus_address - sep->shared_bus);
}

#endif

/**
 * sep_open - device open method
 * @inode: inode of SEP device
 * @filp: file handle to SEP device
 *
 * Open method for the SEP device. Called when userspace opens
 * the SEP device node.
 *
 * Returns zero on success otherwise an error code.
 */
static int sep_open(struct inode *inode, struct file *filp)
{
	struct sep_device *sep;
	struct sep_private_data *priv;

	dev_dbg(&sep_dev->pdev->dev, "[PID%d] open\n", current->pid);

	/* Device only supports blocking file handles */
	if (filp->f_flags & O_NONBLOCK)
		return -ENOTSUPP;

	/*
	 * Get the SEP device structure and use it for the
	 * private_data field in filp for other methods
	 */
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	sep = sep_dev;
	priv->device = sep;
	filp->private_data = priv;

	dev_dbg(&sep_dev->pdev->dev, "[PID%d] priv is 0x%p\n",
					current->pid, priv);

	/* Anyone can open; locking takes place at transaction level */
	return 0;
}

/**
 * sep_free_dma_table_data_handler - free DMA table
 * @sep: pointere to struct sep_device
 * @dma_ctx: dma context
 *
 * Handles the request to free DMA table for synchronic actions.
 * Unmaps and releases every per-DCB resource (map arrays, pinned
 * pages, scatterlists), then frees and NULLs the context itself.
 * Always returns 0; a missing/already-freed context is a no-op.
 */
int sep_free_dma_table_data_handler(struct sep_device *sep,
					   struct sep_dma_context **dma_ctx)
{
	int count;
	int dcb_counter;
	/* Pointer to the current dma_resource struct */
	struct sep_dma_resource *dma;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_free_dma_table_data_handler\n",
			current->pid);

	if (!dma_ctx || !(*dma_ctx)) {
		/* No context or context already freed */
		dev_dbg(&sep->pdev->dev,
			"[PID%d] no DMA context or context already freed\n",
				current->pid);

		return 0;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] (*dma_ctx)->nr_dcb_creat 0x%x\n",
					current->pid,
					(*dma_ctx)->nr_dcb_creat);

	for (dcb_counter = 0;
	     dcb_counter < (*dma_ctx)->nr_dcb_creat; dcb_counter++) {
		dma = &(*dma_ctx)->dma_res_arr[dcb_counter];

		/* Unmap and free input map array */
		if (dma->in_map_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->in_map_array[count].dma_addr,
					dma->in_map_array[count].size,
					DMA_TO_DEVICE);
			}
			kfree(dma->in_map_array);
		}

		/**
		 * Output is handled different. If
		 * this was a secure dma into restricted memory,
		 * then we skip this step altogether as restricted
		 * memory is not available to the o/s at all.
		 */
		if (((*dma_ctx)->secure_dma == false) &&
			(dma->out_map_array)) {

			for (count = 0; count < dma->out_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->out_map_array[count].dma_addr,
					dma->out_map_array[count].size,
					DMA_FROM_DEVICE);
			}
			kfree(dma->out_map_array);
		}

		/* Free page cache for output */
		if (dma->in_page_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				flush_dcache_page(dma->in_page_array[count]);
				page_cache_release(dma->in_page_array[count]);
			}
			kfree(dma->in_page_array);
		}

		/* Again, we do this only for non secure dma */
		if (((*dma_ctx)->secure_dma == false) &&
			(dma->out_page_array)) {

			for (count = 0; count < dma->out_num_pages; count++) {
				if (!PageReserved(dma->out_page_array[count]))

					SetPageDirty(dma->
						out_page_array[count]);

				flush_dcache_page(dma->out_page_array[count]);
				page_cache_release(dma->out_page_array[count]);
			}
			kfree(dma->out_page_array);
		}

		/**
		 * Note that here we use in_map_num_entries because we
		 * don't have a page array; the page array is generated
		 * only in the lock_user_pages, which is not called
		 * for kernel crypto, which is what the sg (scatter gather
		 * is used for exclusively
		 */
		if (dma->src_sg) {
			dma_unmap_sg(&sep->pdev->dev, dma->src_sg,
				dma->in_map_num_entries, DMA_TO_DEVICE);
			dma->src_sg = NULL;
		}

		if (dma->dst_sg) {
			dma_unmap_sg(&sep->pdev->dev, dma->dst_sg,
				dma->in_map_num_entries, DMA_FROM_DEVICE);
			dma->dst_sg = NULL;
		}

		/* Reset all the values */
		dma->in_page_array = NULL;
		dma->out_page_array = NULL;
		dma->in_num_pages = 0;
		dma->out_num_pages = 0;
		dma->in_map_array = NULL;
		dma->out_map_array = NULL;
		dma->in_map_num_entries = 0;
		dma->out_map_num_entries = 0;
	}

	(*dma_ctx)->nr_dcb_creat = 0;
	(*dma_ctx)->num_lli_tables_created = 0;

	kfree(*dma_ctx);
	*dma_ctx = NULL;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_free_dma_table_data_handler end\n",
			current->pid);

	return 0;
}

/**
 * sep_end_transaction_handler - end transaction
 * @sep: pointer to struct sep_device
 * @dma_ctx: DMA context
 * @call_status: Call status
 *
 * This API
handles the end transaction request. */ static int sep_end_transaction_handler(struct sep_device *sep, struct sep_dma_context **dma_ctx, struct sep_call_status *call_status, struct sep_queue_info **my_queue_elem) { dev_dbg(&sep->pdev->dev, "[PID%d] ending transaction\n", current->pid); /* * Extraneous transaction clearing would mess up PM * device usage counters and SEP would get suspended * just before we send a command to SEP in the next * transaction * */ if (sep_check_transaction_owner(sep)) { dev_dbg(&sep->pdev->dev, "[PID%d] not transaction owner\n", current->pid); return 0; } /* Update queue status */ sep_queue_status_remove(sep, my_queue_elem); /* Check that all the DMA resources were freed */ if (dma_ctx) sep_free_dma_table_data_handler(sep, dma_ctx); /* Reset call status for next transaction */ if (call_status) call_status->status = 0; /* Clear the message area to avoid next transaction reading * sensitive results from previous transaction */ memset(sep->shared_addr, 0, SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES); /* start suspend delay */ #ifdef SEP_ENABLE_RUNTIME_PM if (sep->in_use) { sep->in_use = 0; pm_runtime_mark_last_busy(&sep->pdev->dev); pm_runtime_put_autosuspend(&sep->pdev->dev); } #endif clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags); sep->pid_doing_transaction = 0; /* Now it's safe for next process to proceed */ dev_dbg(&sep->pdev->dev, "[PID%d] waking up next transaction\n", current->pid); clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, &sep->in_use_flags); wake_up(&sep->event_transactions); return 0; } /** * sep_release - close a SEP device * @inode: inode of SEP device * @filp: file handle being closed * * Called on the final close of a SEP device. 
*/
static int sep_release(struct inode *inode, struct file *filp)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;

	dev_dbg(&sep->pdev->dev, "[PID%d] release\n", current->pid);

	/* End any transaction this opener still owns (no-op otherwise) */
	sep_end_transaction_handler(sep, dma_ctx, call_status,
		my_queue_elem);

	kfree(filp->private_data);

	return 0;
}

/**
 * sep_mmap -  maps the shared area to user space
 * @filp: pointer to struct file
 * @vma: pointer to vm_area_struct
 *
 * Called on an mmap of our space via the normal SEP device.
 * Acquires the transaction (so mmap also starts the transaction),
 * clears the message area and maps it into the caller's address
 * space. On mapping failure the transaction is ended again.
 */
static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
	dma_addr_t bus_addr;
	/* NOTE(review): error is unsigned long but holds -EINVAL/-EAGAIN
	 * and is returned from an int-returning method; works via
	 * implicit conversion but an int would be cleaner — confirm. */
	unsigned long error = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_mmap\n", current->pid);

	/* Set the transaction busy (own the device) */
	/*
	 * Problem for multithreaded applications is that here we're
	 * possibly going to sleep while holding a write lock on
	 * current->mm->mmap_sem, which will cause deadlock for ongoing
	 * transaction trying to create DMA tables
	 */
	error = sep_wait_transaction(sep);
	if (error)
		/* Interrupted by signal, don't clear transaction */
		goto end_function;

	/* Clear the message area to avoid next transaction reading
	 * sensitive results from previous transaction */
	memset(sep->shared_addr, 0,
	       SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	/*
	 * Check that the size of the mapped range is as the size of the message
	 * shared area
	 */
	if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
		error = -EINVAL;
		goto end_function_with_error;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] shared_addr is %p\n",
					current->pid, sep->shared_addr);

	/* Get bus address */
	bus_addr = sep->shared_bus;

	if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
		vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] remap_page_range failed\n",
						current->pid);
		error = -EAGAIN;
		goto end_function_with_error;
	}

	/* Update call status */
	set_bit(SEP_LEGACY_MMAP_DONE_OFFSET, &call_status->status);

	goto end_function;

end_function_with_error:
	/* Clear our transaction */
	sep_end_transaction_handler(sep, NULL, call_status,
		my_queue_elem);

end_function:
	return error;
}

/**
 * sep_poll - poll handler
 * @filp: pointer to struct file
 * @wait: pointer to poll_table
 *
 * Called by the OS when the kernel is asked to do a poll on
 * a SEP file handle. Reports POLLIN|POLLRDNORM once the device
 * has replied to a previously-sent message; POLLERR when the
 * caller is not the transaction owner, sendmsg was never issued,
 * or the device reported an error through GPR3.
 */
static unsigned int sep_poll(struct file *filp, poll_table *wait)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	u32 mask = 0;
	u32 retval = 0;
	u32 retval2 = 0;
	unsigned long lock_irq_flag;

	/* Am I the process that owns the transaction? */
	if (sep_check_transaction_owner(sep)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] poll pid not owner\n",
						current->pid);
		mask = POLLERR;
		goto end_function;
	}

	/* Check if send command or send_reply were activated previously */
	if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
			  &call_status->status)) {
		dev_warn(&sep->pdev->dev, "[PID%d] sendmsg not called\n",
						current->pid);
		mask = POLLERR;
		goto end_function;
	}

	/* Add the event to the polling wait table */
	dev_dbg(&sep->pdev->dev, "[PID%d] poll: calling wait sep_event\n",
					current->pid);

	poll_wait(filp, &sep->event_interrupt, wait);

	dev_dbg(&sep->pdev->dev,
		"[PID%d] poll: send_ct is %lx reply ct is %lx\n",
			current->pid, sep->send_ct, sep->reply_ct);

	/* Check if error occurred during poll */
	retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
	if ((retval2 != 0x0) && (retval2 != 0x8)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] poll; poll error %x\n",
						current->pid, retval2);
		mask |= POLLERR;
		goto end_function;
	}

	spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);

	/* send_ct == reply_ct means the device has answered */
	if (sep->send_ct == sep->reply_ct) {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
		retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] poll: data ready check (GPR2)  %x\n",
				current->pid, retval);

		/* Check if printf request */
		if ((retval >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: SEP printf request\n",
					current->pid);
			goto end_function;
		}

		/* Check if the this is SEP reply or request */
		if (retval >> 31) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: SEP request\n",
					current->pid);
		} else {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: normal return\n",
					current->pid);
			sep_dump_message(sep);
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll; SEP reply POLLIN|POLLRDNORM\n",
					current->pid);
			mask |= POLLIN | POLLRDNORM;
		}
		set_bit(SEP_LEGACY_POLL_DONE_OFFSET, &call_status->status);
	} else {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] poll; no reply; returning mask of 0\n",
				current->pid);
		mask = 0;
	}

end_function:
	return mask;
}

/**
 * sep_time_address - address in SEP memory of time
 * @sep: SEP device we want the address from
 *
 * Return the address of the two dwords in memory used for time
 * setting.
 */
static u32 *sep_time_address(struct sep_device *sep)
{
	return sep->shared_addr +
		SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
}

/**
 * sep_set_time - set the SEP time
 * @sep: the SEP we are setting the time for
 *
 * Calculates time and sets it at the predefined address.
 * Called with the SEP mutex held. Returns the seconds value
 * written.
 */
static unsigned long sep_set_time(struct sep_device *sep)
{
	struct timeval time;
	u32 *time_addr;	/* Address of time as seen by the kernel */

	do_gettimeofday(&time);

	/* Set value in the SYSTEM MEMORY offset */
	time_addr = sep_time_address(sep);

	time_addr[0] = SEP_TIME_VAL_TOKEN;
	time_addr[1] = time.tv_sec;

	dev_dbg(&sep->pdev->dev, "[PID%d] time.tv_sec is %lu\n",
					current->pid, time.tv_sec);
	dev_dbg(&sep->pdev->dev, "[PID%d] time_addr is %p\n",
					current->pid, time_addr);
	dev_dbg(&sep->pdev->dev, "[PID%d] sep->shared_addr is %p\n",
					current->pid, sep->shared_addr);

	return time.tv_sec;
}

/**
 * sep_send_command_handler - kick off a command
 * @sep: SEP being signalled
 *
 * This function raises interrupt to SEP that signals that is has a new
 * command from the host.
 *
 * Note that this function does fall under the ioctl lock.
 * Validates the message header (start token, size, opcode) in the
 * shared area before ringing GPR0.
 */
int sep_send_command_handler(struct sep_device *sep)
{
	unsigned long lock_irq_flag;
	u32 *msg_pool;
	int error = 0;

	/* Basic sanity check; set msg pool to start of shared area */
	msg_pool = (u32 *)sep->shared_addr;
	msg_pool += 2;

	/* Look for start msg token */
	if (*msg_pool != SEP_START_MSG_TOKEN) {
		dev_warn(&sep->pdev->dev, "start message token not present\n");
		error = -EPROTO;
		goto end_function;
	}

	/* Do we have a reasonable size? */
	msg_pool += 1;
	if ((*msg_pool < 2) ||
		(*msg_pool > SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES)) {

		dev_warn(&sep->pdev->dev, "invalid message size\n");
		error = -EPROTO;
		goto end_function;
	}

	/* Does the command look reasonable? */
	msg_pool += 1;
	if (*msg_pool < 2) {
		dev_warn(&sep->pdev->dev, "invalid message opcode\n");
		error = -EPROTO;
		goto end_function;
	}

#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
	dev_dbg(&sep->pdev->dev, "[PID%d] before pm sync status 0x%X\n",
					current->pid,
					sep->pdev->dev.power.runtime_status);
	sep->in_use = 1; /* device is about to be used */
	pm_runtime_get_sync(&sep->pdev->dev);
#endif

	/* NOTE(review): on this failure path the pm_runtime_get_sync()
	 * above is not balanced with a put — verify whether the PM
	 * usage counter can leak here. */
	if (test_and_set_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags)) {
		error = -EPROTO;
		goto end_function;
	}
	sep->in_use = 1; /* device is about to be used */
	sep_set_time(sep);

	sep_dump_message(sep);

	/* Update counter */
	spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
	sep->send_ct++;
	spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_send_command_handler send_ct %lx reply_ct %lx\n",
			current->pid, sep->send_ct, sep->reply_ct);

	/* Send interrupt to SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);

end_function:
	return error;
}

/**
 * sep_crypto_dma -
 * @sep: pointer to struct sep_device
 * @sg: pointer to struct scatterlist
 * @direction:
 * @dma_maps: pointer to place a pointer to array of dma maps
 *	This is filled in; anything previous there will be lost
 *	The structure for dma maps is sep_dma_map
 * @returns number of dma maps on success; negative on error
 *
 * This creates the dma table from the scatterlist
 * It is used only for kernel crypto as it works with scatterlists
 * representation of data buffers
 *
 */
static int sep_crypto_dma(
	struct sep_device *sep,
	struct scatterlist *sg,
	struct sep_dma_map **dma_maps,
	enum dma_data_direction direction)
{
	struct scatterlist *temp_sg;

	u32 count_segment;
	u32 count_mapped;
	struct sep_dma_map *sep_dma;
	int ct1;

	if (sg->length == 0)
		return 0;

	/*
Count the segments */ temp_sg = sg; count_segment = 0; while (temp_sg) { count_segment += 1; temp_sg = scatterwalk_sg_next(temp_sg); } dev_dbg(&sep->pdev->dev, "There are (hex) %x segments in sg\n", count_segment); /* DMA map segments */ count_mapped = dma_map_sg(&sep->pdev->dev, sg, count_segment, direction); dev_dbg(&sep->pdev->dev, "There are (hex) %x maps in sg\n", count_mapped); if (count_mapped == 0) { dev_dbg(&sep->pdev->dev, "Cannot dma_map_sg\n"); return -ENOMEM; } sep_dma = kmalloc(sizeof(struct sep_dma_map) * count_mapped, GFP_ATOMIC); if (sep_dma == NULL) { dev_dbg(&sep->pdev->dev, "Cannot allocate dma_maps\n"); return -ENOMEM; } for_each_sg(sg, temp_sg, count_mapped, ct1) { sep_dma[ct1].dma_addr = sg_dma_address(temp_sg); sep_dma[ct1].size = sg_dma_len(temp_sg); dev_dbg(&sep->pdev->dev, "(all hex) map %x dma %lx len %lx\n", ct1, (unsigned long)sep_dma[ct1].dma_addr, (unsigned long)sep_dma[ct1].size); } *dma_maps = sep_dma; return count_mapped; } /** * sep_crypto_lli - * @sep: pointer to struct sep_device * @sg: pointer to struct scatterlist * @data_size: total data size * @direction: * @dma_maps: pointer to place a pointer to array of dma maps * This is filled in; anything previous there will be lost * The structure for dma maps is sep_dma_map * @lli_maps: pointer to place a pointer to array of lli maps * This is filled in; anything previous there will be lost * The structure for dma maps is sep_dma_map * @returns number of dma maps on success; negative on error * * This creates the LLI table from the scatterlist * It is only used for kernel crypto as it works exclusively * with scatterlists (struct scatterlist) representation of * data buffers */ static int sep_crypto_lli( struct sep_device *sep, struct scatterlist *sg, struct sep_dma_map **maps, struct sep_lli_entry **llis, u32 data_size, enum dma_data_direction direction) { int ct1; struct sep_lli_entry *sep_lli; struct sep_dma_map *sep_map; int nbr_ents; nbr_ents = sep_crypto_dma(sep, sg, maps, 
direction); if (nbr_ents <= 0) { dev_dbg(&sep->pdev->dev, "crypto_dma failed %x\n", nbr_ents); return nbr_ents; } sep_map = *maps; sep_lli = kmalloc(sizeof(struct sep_lli_entry) * nbr_ents, GFP_ATOMIC); if (sep_lli == NULL) { dev_dbg(&sep->pdev->dev, "Cannot allocate lli_maps\n"); kfree(*maps); *maps = NULL; return -ENOMEM; } for (ct1 = 0; ct1 < nbr_ents; ct1 += 1) { sep_lli[ct1].bus_address = (u32)sep_map[ct1].dma_addr; /* Maximum for page is total data size */ if (sep_map[ct1].size > data_size) sep_map[ct1].size = data_size; sep_lli[ct1].block_size = (u32)sep_map[ct1].size; } *llis = sep_lli; return nbr_ents; } /** * sep_lock_kernel_pages - map kernel pages for DMA * @sep: pointer to struct sep_device * @kernel_virt_addr: address of data buffer in kernel * @data_size: size of data * @lli_array_ptr: lli array * @in_out_flag: input into device or output from device * * This function locks all the physical pages of the kernel virtual buffer * and construct a basic lli array, where each entry holds the physical * page address and the size that application data holds in this page * This function is used only during kernel crypto mod calls from within * the kernel (when ioctl is not used) * * This is used only for kernel crypto. 
Kernel pages * are handled differently as they are done via * scatter gather lists (struct scatterlist) */ static int sep_lock_kernel_pages(struct sep_device *sep, unsigned long kernel_virt_addr, u32 data_size, struct sep_lli_entry **lli_array_ptr, int in_out_flag, struct sep_dma_context *dma_ctx) { u32 num_pages; struct scatterlist *sg; /* Array of lli */ struct sep_lli_entry *lli_array; /* Map array */ struct sep_dma_map *map_array; enum dma_data_direction direction; lli_array = NULL; map_array = NULL; if (in_out_flag == SEP_DRIVER_IN_FLAG) { direction = DMA_TO_DEVICE; sg = dma_ctx->src_sg; } else { direction = DMA_FROM_DEVICE; sg = dma_ctx->dst_sg; } num_pages = sep_crypto_lli(sep, sg, &map_array, &lli_array, data_size, direction); if (num_pages <= 0) { dev_dbg(&sep->pdev->dev, "sep_crypto_lli returned error %x\n", num_pages); return -ENOMEM; } /* Put mapped kernel sg into kernel resource array */ /* Set output params acording to the in_out flag */ if (in_out_flag == SEP_DRIVER_IN_FLAG) { *lli_array_ptr = lli_array; dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages = num_pages; dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL; dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = map_array; dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries = num_pages; dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg = dma_ctx->src_sg; } else { *lli_array_ptr = lli_array; dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages = num_pages; dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL; dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = map_array; dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat]. 
out_map_num_entries = num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg =
			dma_ctx->dst_sg;
	}

	return 0;
}

/**
 * sep_lock_user_pages - lock and map user pages for DMA
 * @sep: pointer to struct sep_device
 * @app_virt_addr: user memory data buffer
 * @data_size: size of data buffer
 * @lli_array_ptr: lli array
 * @in_out_flag: input or output to device
 *
 * This function locks all the physical pages of the application
 * virtual buffer and construct a basic lli array, where each entry
 * holds the physical page address and the size that application
 * data holds in this physical pages
 */
static int sep_lock_user_pages(struct sep_device *sep,
	u32 app_virt_addr,
	u32 data_size,
	struct sep_lli_entry **lli_array_ptr,
	int in_out_flag,
	struct sep_dma_context *dma_ctx)

{
	int error = 0;
	u32 count;
	int result;
	/* The page of the end address of the user space buffer */
	u32 end_page;
	/* The page of the start address of the user space buffer */
	u32 start_page;
	/* The range in pages */
	u32 num_pages;
	/* Array of pointers to page */
	struct page **page_array;
	/* Array of lli */
	struct sep_lli_entry *lli_array;
	/* Map array */
	struct sep_dma_map *map_array;

	/* Set start and end pages and num pages */
	end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = app_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] lock user pages app_virt_addr is %x\n",
			current->pid, app_virt_addr);

	dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
					current->pid, data_size);
	dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
					current->pid, start_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
					current->pid, end_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
					current->pid, num_pages);

	/* Allocate array of pages structure pointers */
	page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
	if (!page_array) {
		error = -ENOMEM;
		goto end_function;
	}
	map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages,
								GFP_ATOMIC);
	if (!map_array) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] kmalloc for map_array failed\n",
				current->pid);
		error = -ENOMEM;
		goto end_function_with_error1;
	}

	lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
		GFP_ATOMIC);

	if (!lli_array) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] kmalloc for lli_array failed\n",
				current->pid);
		error = -ENOMEM;
		goto end_function_with_error2;
	}

	/* Convert the application virtual address into a set of physical */
	down_read(&current->mm->mmap_sem);
	/* write access is requested for output buffers (IN flag -> read) */
	result = get_user_pages(current, current->mm, app_virt_addr,
		num_pages,
		((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1),
		0, page_array, NULL);

	up_read(&current->mm->mmap_sem);

	/* Check the number of pages locked - if not all then exit with error */
	if (result != num_pages) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] not all pages locked by get_user_pages, "
			"result 0x%X, num_pages 0x%X\n",
				current->pid, result, num_pages);
		error = -ENOMEM;
		goto end_function_with_error3;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] get_user_pages succeeded\n",
					current->pid);

	/*
	 * Fill the array using page array data and
	 * map the pages - this action will also flush the cache as needed
	 */
	for (count = 0; count < num_pages; count++) {
		/* Fill the map array */
		map_array[count].dma_addr =
			dma_map_page(&sep->pdev->dev, page_array[count],
			0, PAGE_SIZE, DMA_BIDIRECTIONAL);

		map_array[count].size = PAGE_SIZE;

		/* Fill the lli array entry */
		lli_array[count].bus_address = (u32)map_array[count].dma_addr;
		lli_array[count].block_size = PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_array[%x].bus_address is %08lx, "
			"lli_array[%x].block_size is (hex) %x\n", current->pid,
			count, (unsigned long)lli_array[count].bus_address,
			count, lli_array[count].block_size);
	}

	/* Check the offset for the first page */
	lli_array[0].bus_address =
		lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));

	/* Check that not all the data is in the first page only */
	if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
	else
		lli_array[0].block_size =
			PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

	dev_dbg(&sep->pdev->dev,
		"[PID%d] After check if page 0 has all data\n",
		current->pid);
	dev_dbg(&sep->pdev->dev,
		"[PID%d] lli_array[0].bus_address is (hex) %08lx, "
		"lli_array[0].block_size is (hex) %x\n",
		current->pid,
		(unsigned long)lli_array[0].bus_address,
		lli_array[0].block_size);

	/* Check the size of the last page */
	if (num_pages > 1) {
		lli_array[num_pages - 1].block_size =
			(app_virt_addr + data_size) & (~PAGE_MASK);
		if (lli_array[num_pages - 1].block_size == 0)
			lli_array[num_pages - 1].block_size = PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] After last page size adjustment\n",
			current->pid);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_array[%x].bus_address is (hex) %08lx, "
			"lli_array[%x].block_size is (hex) %x\n",
			current->pid,
			num_pages - 1,
			(unsigned long)lli_array[num_pages - 1].bus_address,
			num_pages - 1,
			lli_array[num_pages - 1].block_size);
	}

	/* Set output params acording to the in_out flag */
	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
								num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
								page_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
								map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
								num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg = NULL;
	} else {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
								num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
								page_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
								map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
					out_map_num_entries = num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg = NULL;
	}
	goto end_function;

end_function_with_error3:
	/* Free lli array */
	kfree(lli_array);

end_function_with_error2:
	kfree(map_array);

end_function_with_error1:
	/* Free page array */
	kfree(page_array);

end_function:
	return error;
}

/**
 * sep_lli_table_secure_dma - get lli array for IMR addresses
 * @sep: pointer to struct sep_device
 * @app_virt_addr: user memory data buffer
 * @data_size: size of data buffer
 * @lli_array_ptr: lli array
 * @in_out_flag: not used
 * @dma_ctx: pointer to struct sep_dma_context
 *
 * This function creates lli tables for outputting data to
 * IMR memory, which is memory that cannot be accessed by the
 * the x86 processor.
 */
static int sep_lli_table_secure_dma(struct sep_device *sep,
	u32 app_virt_addr,
	u32 data_size,
	struct sep_lli_entry **lli_array_ptr,
	int in_out_flag,
	struct sep_dma_context *dma_ctx)

{
	int error = 0;
	u32 count;
	/* The page of the end address of the user space buffer */
	u32 end_page;
	/* The page of the start address of the user space buffer */
	u32 start_page;
	/* The range in pages */
	u32 num_pages;
	/* Array of lli */
	struct sep_lli_entry *lli_array;

	/* Set start and end pages and num pages */
	end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = app_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] lock user pages"
		" app_virt_addr is %x\n", current->pid, app_virt_addr);

	dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
		current->pid, data_size);
	dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
		current->pid, start_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
		current->pid, end_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
		current->pid, num_pages);

	lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
		GFP_ATOMIC);

	if (!lli_array) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] kmalloc for lli_array failed\n",
current->pid);
		return -ENOMEM;
	}

	/*
	 * Fill the lli_array: each entry covers one whole page of the
	 * restricted (IMR) region, addressed physically — the pages are
	 * never mapped for the CPU.
	 */
	start_page = start_page << PAGE_SHIFT;
	for (count = 0; count < num_pages; count++) {
		/* Fill the lli array entry */
		lli_array[count].bus_address = start_page;
		lli_array[count].block_size = PAGE_SIZE;

		start_page += PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_array[%x].bus_address is %08lx, "
			"lli_array[%x].block_size is (hex) %x\n",
			current->pid,
			count, (unsigned long)lli_array[count].bus_address,
			count, lli_array[count].block_size);
	}

	/* Check the offset for the first page */
	lli_array[0].bus_address =
		lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));

	/* Check that not all the data is in the first page only */
	if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
	else
		lli_array[0].block_size =
			PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

	dev_dbg(&sep->pdev->dev,
		"[PID%d] After check if page 0 has all data\n"
		"lli_array[0].bus_address is (hex) %08lx, "
		"lli_array[0].block_size is (hex) %x\n",
		current->pid,
		(unsigned long)lli_array[0].bus_address,
		lli_array[0].block_size);

	/* Check the size of the last page */
	if (num_pages > 1) {
		lli_array[num_pages - 1].block_size =
			(app_virt_addr + data_size) & (~PAGE_MASK);
		if (lli_array[num_pages - 1].block_size == 0)
			lli_array[num_pages - 1].block_size = PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] After last page size adjustment\n"
			"lli_array[%x].bus_address is (hex) %08lx, "
			"lli_array[%x].block_size is (hex) %x\n",
			current->pid, num_pages - 1,
			(unsigned long)lli_array[num_pages - 1].bus_address,
			num_pages - 1,
			lli_array[num_pages - 1].block_size);
	}
	*lli_array_ptr = lli_array;
	/* Secure DMA is output-only; no page/map arrays are kept */
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages = num_pages;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_num_entries = 0;

	return error;
}

/**
 * sep_calculate_lli_table_max_size - size the LLI table
 * @sep: pointer to struct sep_device
 * @lli_in_array_ptr
 * @num_array_entries
 * @last_table_flag
 *
 * This function calculates the size of data that can be inserted into
 * the lli table from this array, such that either the table is full
 * (all entries are entered), or there are no more entries in the
 * lli array
 */
static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
	struct sep_lli_entry *lli_in_array_ptr,
	u32 num_array_entries,
	u32 *last_table_flag)
{
	u32 counter;
	/* Table data size */
	u32 table_data_size = 0;
	/* Data size for the next table */
	u32 next_table_data_size;

	*last_table_flag = 0;

	/*
	 * Calculate the data in the out lli table till we fill the whole
	 * table or till the data has ended
	 */
	for (counter = 0;
		(counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
			(counter < num_array_entries); counter++)
		table_data_size += lli_in_array_ptr[counter].block_size;

	/*
	 * Check if we reached the last entry,
	 * meaning this ia the last table to build,
	 * and no need to check the block alignment
	 */
	if (counter == num_array_entries) {
		/* Set the last table flag */
		*last_table_flag = 1;
		goto end_function;
	}

	/*
	 * Calculate the data size of the next table.
	 * Stop if no entries left or if data size is more the DMA restriction
	 */
	next_table_data_size = 0;
	for (; counter < num_array_entries; counter++) {
		next_table_data_size += lli_in_array_ptr[counter].block_size;
		if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
			break;
	}

	/*
	 * Check if the next table data size is less than DMA restriction.
	 * if it is - recalculate the current table size, so that the next
	 * table data size will be adequate for DMA
	 */
	if (next_table_data_size &&
		next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)

		table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
			next_table_data_size);

end_function:
	return table_data_size;
}

/**
 * sep_build_lli_table - build an lli array for the given table
 * @sep: pointer to struct sep_device
 * @lli_array_ptr: pointer to lli array
 * @lli_table_ptr: pointer to lli table
 * @num_processed_entries_ptr: pointer to number of entries
 * @num_table_entries_ptr: pointer to number of tables
 * @table_data_size: total data size
 *
 * Builds an lli table from the lli_array according to
 * the given size of data. A terminating "info" entry
 * (bus_address 0xffffffff, size 0) is appended.
 */
static void sep_build_lli_table(struct sep_device *sep,
	struct sep_lli_entry	*lli_array_ptr,
	struct sep_lli_entry	*lli_table_ptr,
	u32 *num_processed_entries_ptr,
	u32 *num_table_entries_ptr,
	u32 table_data_size)
{
	/* Current table data size */
	u32 curr_table_data_size;
	/* Counter of lli array entry */
	u32 array_counter;

	/* Init current table data size and lli array entry counter */
	curr_table_data_size = 0;
	array_counter = 0;
	*num_table_entries_ptr = 1;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] build lli table table_data_size: (hex) %x\n",
			current->pid, table_data_size);

	/* Fill the table till table size reaches the needed amount */
	while (curr_table_data_size < table_data_size) {
		/* Update the number of entries in table */
		(*num_table_entries_ptr)++;

		lli_table_ptr->bus_address =
			cpu_to_le32(lli_array_ptr[array_counter].bus_address);

		lli_table_ptr->block_size =
			cpu_to_le32(lli_array_ptr[array_counter].block_size);

		curr_table_data_size += lli_array_ptr[array_counter].block_size;

		dev_dbg(&sep->pdev->dev, "[PID%d] lli_table_ptr is %p\n",
						current->pid, lli_table_ptr);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->bus_address: %08lx\n",
				current->pid,
				(unsigned long)lli_table_ptr->bus_address);

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->block_size is (hex) %x\n",
				current->pid, lli_table_ptr->block_size);

		/* Check for overflow of the table data */
		if (curr_table_data_size > table_data_size) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] curr_table_data_size too large\n",
					current->pid);

			/* Update the size of block in the table */
			lli_table_ptr->block_size =
				cpu_to_le32(lli_table_ptr->block_size) -
				(curr_table_data_size - table_data_size);

			/* Update the physical address in the lli array */
			lli_array_ptr[array_counter].bus_address +=
					cpu_to_le32(lli_table_ptr->block_size);

			/* Update the block size left in the lli array */
			lli_array_ptr[array_counter].block_size =
				(curr_table_data_size - table_data_size);
		} else
			/* Advance to the next entry in the lli_array */
			array_counter++;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->bus_address is %08lx\n",
				current->pid,
				(unsigned long)lli_table_ptr->bus_address);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->block_size is (hex) %x\n",
				current->pid,
				lli_table_ptr->block_size);

		/* Move to the next entry in table */
		lli_table_ptr++;
	}

	/* Set the info entry to default */
	lli_table_ptr->bus_address = 0xffffffff;
	lli_table_ptr->block_size = 0;

	/* Set the output parameter */
	*num_processed_entries_ptr += array_counter;
}

/**
 * sep_shared_area_virt_to_bus - map shared area to bus address
 * @sep: pointer to struct sep_device
 * @virt_address: virtual address to convert
 *
 * This functions returns the physical address inside shared area according
 * to the virtual address.
It can be either on the external RAM device
 * (ioremapped), or on the system RAM
 * This implementation is for the external RAM
 */
static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
	void *virt_address)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys v %p\n",
		current->pid, virt_address);
	dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys p %08lx\n",
		current->pid,
		(unsigned long)
		sep->shared_bus + (virt_address - sep->shared_addr));

	/* Offset within the shared area, rebased onto the bus address */
	return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
}

/**
 * sep_shared_area_bus_to_virt - map shared area bus address to kernel
 * @sep: pointer to struct sep_device
 * @bus_address: bus address to convert
 *
 * This function returns the virtual address inside shared area
 * according to the physical address. It can be either on the
 * external RAM device (ioremapped), or on the system RAM
 * This implementation is for the external RAM
 */
static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
	dma_addr_t bus_address)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] shared bus to virt b=%lx v=%lx\n",
		current->pid,
		(unsigned long)bus_address,
		(unsigned long)(sep->shared_addr +
			(size_t)(bus_address - sep->shared_bus)));

	/* Inverse of sep_shared_area_virt_to_bus() */
	return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
}

/**
 * sep_debug_print_lli_tables - dump LLI table
 * @sep: pointer to struct sep_device
 * @lli_table_ptr: pointer to sep_lli_entry
 * @num_table_entries: number of entries
 * @table_data_size: total data size
 *
 * Walk the list of the created tables and print all the data.
 * Compiled out entirely unless DEBUG is defined.  The walk follows each
 * table's info entry (bus_address/block_size of the last slot) to the
 * next table until the 0xffffffff terminator is reached.
 */
static void sep_debug_print_lli_tables(struct sep_device *sep,
	struct sep_lli_entry *lli_table_ptr,
	unsigned long num_table_entries,
	unsigned long table_data_size)
{
#ifdef DEBUG
	unsigned long table_count = 1;
	unsigned long entries_count = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables start\n",
		current->pid);

	if (num_table_entries == 0) {
		dev_dbg(&sep->pdev->dev, "[PID%d] no table to print\n",
			current->pid);
		return;
	}

	while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli table %08lx, "
			"table_data_size is (hex) %lx\n",
			current->pid, table_count, table_data_size);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] num_table_entries is (hex) %lx\n",
			current->pid, num_table_entries);

		/* Print entries of the table (without info entry) */
		for (entries_count = 0; entries_count < num_table_entries;
			entries_count++, lli_table_ptr++) {

			dev_dbg(&sep->pdev->dev,
				"[PID%d] lli_table_ptr address is %08lx\n",
				current->pid,
				(unsigned long) lli_table_ptr);

			dev_dbg(&sep->pdev->dev,
				"[PID%d] phys address is %08lx "
				"block size is (hex) %x\n", current->pid,
				(unsigned long)lli_table_ptr->bus_address,
				lli_table_ptr->block_size);
		}

		/* Point to the info entry (last slot of this table) */
		lli_table_ptr--;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys lli_table_ptr->block_size "
			"is (hex) %x\n",
			current->pid,
			lli_table_ptr->block_size);

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys lli_table_ptr->physical_address "
			"is %08lx\n",
			current->pid,
			(unsigned long)lli_table_ptr->bus_address);

		/* Info entry packs: bits 0-23 data size, bits 24-31 count */
		table_data_size = lli_table_ptr->block_size & 0xffffff;
		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys table_data_size is "
			"(hex) %lx num_table_entries is"
			" %lx bus_address is%lx\n",
			current->pid,
			table_data_size,
			num_table_entries,
			(unsigned long)lli_table_ptr->bus_address);

		/*
		 * NOTE(review): sep_shared_bus_to_virt (no "_area_") is
		 * presumably a helper defined earlier in this file for the
		 * system-RAM variant — verify it exists; only the "_area_"
		 * versions are visible in this chunk.
		 */
		if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
			lli_table_ptr = (struct sep_lli_entry *)
				sep_shared_bus_to_virt(sep,
				(unsigned long)lli_table_ptr->bus_address);

		table_count++;
	}
	dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables end\n",
		current->pid);
#endif
}

/**
 * sep_prepare_empty_lli_table - create a blank LLI table
 * @sep: pointer to struct sep_device
 * @lli_table_addr_ptr: pointer to lli table
 * @num_entries_ptr: pointer to number of entries
 * @table_data_size_ptr: point to table data size
 * @dmatables_region: Optional buffer for DMA tables
 * @dma_ctx: DMA context
 *
 * This function creates
empty lli tables when there is no data */ static void sep_prepare_empty_lli_table(struct sep_device *sep, dma_addr_t *lli_table_addr_ptr, u32 *num_entries_ptr, u32 *table_data_size_ptr, void **dmatables_region, struct sep_dma_context *dma_ctx) { struct sep_lli_entry *lli_table_ptr; /* Find the area for new table */ lli_table_ptr = (struct sep_lli_entry *)(sep->shared_addr + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES + dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP); if (dmatables_region && *dmatables_region) lli_table_ptr = *dmatables_region; lli_table_ptr->bus_address = 0; lli_table_ptr->block_size = 0; lli_table_ptr++; lli_table_ptr->bus_address = 0xFFFFFFFF; lli_table_ptr->block_size = 0; /* Set the output parameter value */ *lli_table_addr_ptr = sep->shared_bus + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES + dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP; /* Set the num of entries and table data size for empty table */ *num_entries_ptr = 2; *table_data_size_ptr = 0; /* Update the number of created tables */ dma_ctx->num_lli_tables_created++; } /** * sep_prepare_input_dma_table - prepare input DMA mappings * @sep: pointer to struct sep_device * @data_size: * @block_size: * @lli_table_ptr: * @num_entries_ptr: * @table_data_size_ptr: * @is_kva: set for kernel data (kernel cryptio call) * * This function prepares only input DMA table for synhronic symmetric * operations (HASH) * Note that all bus addresses that are passed to the SEP * are in 32 bit format; the SEP is a 32 bit device */ static int sep_prepare_input_dma_table(struct sep_device *sep, unsigned long app_virt_addr, u32 data_size, u32 block_size, dma_addr_t *lli_table_ptr, u32 *num_entries_ptr, u32 *table_data_size_ptr, bool is_kva, void **dmatables_region, struct sep_dma_context *dma_ctx ) { int error = 0; /* Pointer to the info entry of the table - the last entry */ struct sep_lli_entry *info_entry_ptr; 
	/* Array of pointers to page */
	struct sep_lli_entry *lli_array_ptr;
	/* Points to the first entry to be processed in the lli_in_array */
	u32 current_entry = 0;
	/* Num entries in the virtual buffer */
	u32 sep_lli_entries = 0;
	/* Lli table pointer */
	struct sep_lli_entry *in_lli_table_ptr;
	/* The total data in one table */
	u32 table_data_size = 0;
	/* Flag for last table */
	u32 last_table_flag = 0;
	/* Number of entries in lli table */
	u32 num_entries_in_table = 0;
	/* Next table address */
	void *lli_table_alloc_addr = NULL;
	void *dma_lli_table_alloc_addr = NULL;
	void *dma_in_lli_table_ptr = NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] prepare intput dma "
		"tbl data size: (hex) %x\n",
		current->pid, data_size);

	dev_dbg(&sep->pdev->dev, "[PID%d] block_size is (hex) %x\n",
		current->pid, block_size);

	/* Initialize the pages pointers */
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages = 0;

	/* Set the kernel address for first table to be allocated */
	lli_table_alloc_addr = (void *)(sep->shared_addr +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

	if (data_size == 0) {
		if (dmatables_region) {
			error = sep_allocate_dmatables_region(sep,
				dmatables_region,
				dma_ctx,
				1);
			if (error)
				return error;
		}
		/* Special case - create empty table - 2 entries, zero data */
		sep_prepare_empty_lli_table(sep, lli_table_ptr,
				num_entries_ptr, table_data_size_ptr,
				dmatables_region, dma_ctx);
		goto update_dcb_counter;
	}

	/* Check if the pages are in Kernel Virtual Address layout */
	if (is_kva == true)
		error = sep_lock_kernel_pages(sep, app_virt_addr,
			data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
			dma_ctx);
	else
		/*
		 * Lock the pages of the user buffer
		 * and translate them to pages
		 */
		error = sep_lock_user_pages(sep, app_virt_addr,
			data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
			dma_ctx);

	if (error)
		goto end_function;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] output sep_in_num_pages is (hex) %x\n",
		current->pid,
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);

	current_entry = 0;
	info_entry_ptr = NULL;

	sep_lli_entries =
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages;

	dma_lli_table_alloc_addr = lli_table_alloc_addr;
	if (dmatables_region) {
		error = sep_allocate_dmatables_region(sep,
					dmatables_region,
					dma_ctx,
					sep_lli_entries);
		if (error)
			/*
			 * NOTE(review): direct return here skips the
			 * end_function_error cleanup, leaving the pages just
			 * locked above mapped — confirm whether the caller's
			 * error path releases them.
			 */
			return error;
		lli_table_alloc_addr = *dmatables_region;
	}

	/* Loop till all the entries in in array are processed */
	while (current_entry < sep_lli_entries) {

		/* Set the new input and output tables */
		in_lli_table_ptr =
			(struct sep_lli_entry *)lli_table_alloc_addr;
		dma_in_lli_table_ptr =
			(struct sep_lli_entry *)dma_lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
		dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* Bail out if the next table would overrun the shared area */
		if (dma_lli_table_alloc_addr >
			((void *)sep->shared_addr +
			SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
			SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {

			error = -ENOMEM;
			goto end_function_error;

		}

		/* Update the number of created tables */
		dma_ctx->num_lli_tables_created++;

		/* Calculate the maximum size of data for input table */
		table_data_size = sep_calculate_lli_table_max_size(sep,
			&lli_array_ptr[current_entry],
			(sep_lli_entries - current_entry),
			&last_table_flag);

		/*
		 * If this is not the last table -
		 * then align it to the block size
		 */
		if (!last_table_flag)
			table_data_size =
				(table_data_size / block_size) * block_size;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] output table_data_size is (hex) %x\n",
			current->pid,
			table_data_size);

		/* Construct input lli table */
		sep_build_lli_table(sep, &lli_array_ptr[current_entry],
			in_lli_table_ptr,
			&current_entry, &num_entries_in_table,
			table_data_size);

		if (info_entry_ptr == NULL) {

			/* Set the output parameters to physical addresses */
			*lli_table_ptr = sep_shared_area_virt_to_bus(sep,
				dma_in_lli_table_ptr);
			*num_entries_ptr = num_entries_in_table;
			*table_data_size_ptr = table_data_size;

			dev_dbg(&sep->pdev->dev,
				"[PID%d] output lli_table_in_ptr is %08lx\n",
				current->pid,
				(unsigned long)*lli_table_ptr);

		} else {
			/* Update the info entry of the previous in table */
			info_entry_ptr->bus_address =
				sep_shared_area_virt_to_bus(sep,
					dma_in_lli_table_ptr);
			/* bits 24-31: entry count, bits 0-23: data size */
			info_entry_ptr->block_size =
				((num_entries_in_table) << 24) |
				(table_data_size);
		}
		/* Save the pointer to the info entry of the current tables */
		info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
	}
	/* Print input tables */
	if (!dmatables_region) {
		sep_debug_print_lli_tables(sep,
			(struct sep_lli_entry *)
			sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
			*num_entries_ptr, *table_data_size_ptr);
	}

	/* The array of the pages */
	kfree(lli_array_ptr);

update_dcb_counter:
	/* Update DCB counter */
	dma_ctx->nr_dcb_creat++;
	goto end_function;

end_function_error:
	/* Free all the allocated resources */
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
	kfree(lli_array_ptr);
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;

end_function:
	return error;

}

/**
 * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
 * @sep: pointer to struct sep_device
 * @lli_in_array:
 * @sep_in_lli_entries:
 * @lli_out_array:
 * @sep_out_lli_entries
 * @block_size
 * @lli_table_in_ptr
 * @lli_table_out_ptr
 * @in_num_entries_ptr
 * @out_num_entries_ptr
 * @table_data_size_ptr
 *
 * This function creates the input and output DMA tables for
 * symmetric operations (AES/DES) according to the block
 * size from LLI arrays
 * Note that all bus addresses that are passed to the SEP
 * are in 32 bit format; the SEP is a 32 bit device
 */
static int sep_construct_dma_tables_from_lli(
	struct sep_device *sep,
	struct sep_lli_entry *lli_in_array,
	u32 sep_in_lli_entries,
	struct sep_lli_entry *lli_out_array,
	u32
	sep_out_lli_entries,
	u32 block_size,
	dma_addr_t *lli_table_in_ptr,
	dma_addr_t *lli_table_out_ptr,
	u32 *in_num_entries_ptr,
	u32 *out_num_entries_ptr,
	u32 *table_data_size_ptr,
	void **dmatables_region,
	struct sep_dma_context *dma_ctx)
{
	/* Points to the area where next lli table can be allocated */
	void *lli_table_alloc_addr = NULL;
	/*
	 * Points to the area in shared region where next lli table
	 * can be allocated
	 */
	void *dma_lli_table_alloc_addr = NULL;
	/* Input lli table in dmatables_region or shared region */
	struct sep_lli_entry *in_lli_table_ptr = NULL;
	/* Input lli table location in the shared region */
	struct sep_lli_entry *dma_in_lli_table_ptr = NULL;
	/* Output lli table in dmatables_region or shared region */
	struct sep_lli_entry *out_lli_table_ptr = NULL;
	/* Output lli table location in the shared region */
	struct sep_lli_entry *dma_out_lli_table_ptr = NULL;
	/* Pointer to the info entry of the table - the last entry */
	struct sep_lli_entry *info_in_entry_ptr = NULL;
	/* Pointer to the info entry of the table - the last entry */
	struct sep_lli_entry *info_out_entry_ptr = NULL;
	/* Points to the first entry to be processed in the lli_in_array */
	u32 current_in_entry = 0;
	/* Points to the first entry to be processed in the lli_out_array */
	u32 current_out_entry = 0;
	/* Max size of the input table */
	u32 in_table_data_size = 0;
	/* Max size of the output table */
	u32 out_table_data_size = 0;
	/* Flag that signifies if this is the last tables build */
	u32 last_table_flag = 0;
	/* The data size that should be in table */
	u32 table_data_size = 0;
	/* Number of entries in the input table */
	u32 num_entries_in_table = 0;
	/* Number of entries in the output table */
	u32 num_entries_out_table = 0;

	if (!dma_ctx) {
		dev_warn(&sep->pdev->dev, "DMA context uninitialized\n");
		return -EINVAL;
	}

	/* Initiate to point after the message area */
	lli_table_alloc_addr = (void *)(sep->shared_addr +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		(dma_ctx->num_lli_tables_created *
		(sizeof(struct sep_lli_entry) *
		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
	dma_lli_table_alloc_addr = lli_table_alloc_addr;

	if (dmatables_region) {
		/* 2 for both in+out table */
		if (sep_allocate_dmatables_region(sep,
					dmatables_region,
					dma_ctx,
					2*sep_in_lli_entries))
			return -ENOMEM;
		lli_table_alloc_addr = *dmatables_region;
	}

	/* Loop till all the entries in in array are not processed */
	while (current_in_entry < sep_in_lli_entries) {
		/* Set the new input and output tables */
		in_lli_table_ptr =
			(struct sep_lli_entry *)lli_table_alloc_addr;
		dma_in_lli_table_ptr =
			(struct sep_lli_entry *)dma_lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
		dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* Set the first output tables */
		out_lli_table_ptr =
			(struct sep_lli_entry *)lli_table_alloc_addr;
		dma_out_lli_table_ptr =
			(struct sep_lli_entry *)dma_lli_table_alloc_addr;

		/* Check if the DMA table area limit was overrun */
		if ((dma_lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
			((void *)sep->shared_addr +
			SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
			SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {

			dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
			/*
			 * NOTE(review): direct return — lli_in_array /
			 * lli_out_array are freed by the caller's error path,
			 * but confirm the pages locked earlier are released.
			 */
			return -ENOMEM;
		}

		/* Update the number of the lli tables created */
		dma_ctx->num_lli_tables_created += 2;

		/* Advance past the output table just claimed */
		lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
		dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* Calculate the maximum size of data for input table */
		in_table_data_size =
			sep_calculate_lli_table_max_size(sep,
			&lli_in_array[current_in_entry],
			(sep_in_lli_entries - current_in_entry),
			&last_table_flag);

		/* Calculate the maximum size of data for output table */
		out_table_data_size =
			sep_calculate_lli_table_max_size(sep,
			&lli_out_array[current_out_entry],
			(sep_out_lli_entries - current_out_entry),
			&last_table_flag);

		/* Align both to the cipher block size unless final table */
		if (!last_table_flag) {
			in_table_data_size = (in_table_data_size /
				block_size) * block_size;
			out_table_data_size = (out_table_data_size /
				block_size) * block_size;
		}

		/* Both tables must carry the same amount: take the minimum */
		table_data_size = in_table_data_size;
		if (table_data_size > out_table_data_size)
			table_data_size = out_table_data_size;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] construct tables from lli"
			" in_table_data_size is (hex) %x\n", current->pid,
			in_table_data_size);

		dev_dbg(&sep->pdev->dev,
			"[PID%d] construct tables from lli"
			"out_table_data_size is (hex) %x\n", current->pid,
			out_table_data_size);

		/* Construct input lli table */
		sep_build_lli_table(sep, &lli_in_array[current_in_entry],
			in_lli_table_ptr,
			&current_in_entry,
			&num_entries_in_table,
			table_data_size);

		/* Construct output lli table */
		sep_build_lli_table(sep, &lli_out_array[current_out_entry],
			out_lli_table_ptr,
			&current_out_entry,
			&num_entries_out_table,
			table_data_size);

		/* If info entry is null - this is the first table built */
		if (info_in_entry_ptr == NULL) {
			/* Set the output parameters to physical addresses */
			*lli_table_in_ptr =
			sep_shared_area_virt_to_bus(sep, dma_in_lli_table_ptr);

			*in_num_entries_ptr = num_entries_in_table;

			*lli_table_out_ptr =
				sep_shared_area_virt_to_bus(sep,
				dma_out_lli_table_ptr);

			*out_num_entries_ptr = num_entries_out_table;

			*table_data_size_ptr = table_data_size;

			dev_dbg(&sep->pdev->dev,
				"[PID%d] output lli_table_in_ptr is %08lx\n",
				current->pid,
				(unsigned long)*lli_table_in_ptr);
			dev_dbg(&sep->pdev->dev,
				"[PID%d] output lli_table_out_ptr is %08lx\n",
				current->pid,
				(unsigned long)*lli_table_out_ptr);
		} else {
			/* Update the info entry of the previous in table */
			info_in_entry_ptr->bus_address =
				sep_shared_area_virt_to_bus(sep,
				dma_in_lli_table_ptr);

			info_in_entry_ptr->block_size =
				((num_entries_in_table) << 24) |
				(table_data_size);

			/* Update the info entry of the previous out table */
			info_out_entry_ptr->bus_address =
				sep_shared_area_virt_to_bus(sep,
				dma_out_lli_table_ptr);

			info_out_entry_ptr->block_size =
				((num_entries_out_table) << 24) |
				(table_data_size);

			dev_dbg(&sep->pdev->dev,
				"[PID%d] output lli_table_in_ptr:%08lx %08x\n",
				current->pid,
				(unsigned long)info_in_entry_ptr->bus_address,
				info_in_entry_ptr->block_size);

			dev_dbg(&sep->pdev->dev,
				"[PID%d] output lli_table_out_ptr:"
				"%08lx %08x\n",
				current->pid,
				(unsigned long)info_out_entry_ptr->bus_address,
				info_out_entry_ptr->block_size);
		}

		/* Save the pointer to the info entry of the current tables */
		info_in_entry_ptr = in_lli_table_ptr +
			num_entries_in_table - 1;
		info_out_entry_ptr = out_lli_table_ptr +
			num_entries_out_table - 1;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] output num_entries_out_table is %x\n",
			current->pid,
			(u32)num_entries_out_table);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] output info_in_entry_ptr is %lx\n",
			current->pid,
			(unsigned long)info_in_entry_ptr);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] output info_out_entry_ptr is %lx\n",
			current->pid,
			(unsigned long)info_out_entry_ptr);
	}

	/* Print input tables */
	if (!dmatables_region) {
		sep_debug_print_lli_tables(
			sep,
			(struct sep_lli_entry *)
			sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
			*in_num_entries_ptr,
			*table_data_size_ptr);
	}

	/* Print output tables */
	if (!dmatables_region) {
		sep_debug_print_lli_tables(
			sep,
			(struct sep_lli_entry *)
			sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
			*out_num_entries_ptr,
			*table_data_size_ptr);
	}

	return 0;
}

/**
 * sep_prepare_input_output_dma_table - prepare DMA I/O table
 * @app_virt_in_addr:
 * @app_virt_out_addr:
 * @data_size:
 * @block_size:
 * @lli_table_in_ptr:
 * @lli_table_out_ptr:
 * @in_num_entries_ptr:
 * @out_num_entries_ptr:
 * @table_data_size_ptr:
 * @is_kva: set for kernel data; used only for kernel crypto module
 *
 * This function builds input and output DMA tables for synchronous
 * symmetric operations (AES, DES, HASH).
It also checks that each table
 * is of the modular block size
 * Note that all bus addresses that are passed to the SEP
 * are in 32 bit format; the SEP is a 32 bit device
 */
static int sep_prepare_input_output_dma_table(struct sep_device *sep,
	unsigned long app_virt_in_addr,
	unsigned long app_virt_out_addr,
	u32 data_size,
	u32 block_size,
	dma_addr_t *lli_table_in_ptr,
	dma_addr_t *lli_table_out_ptr,
	u32 *in_num_entries_ptr,
	u32 *out_num_entries_ptr,
	u32 *table_data_size_ptr,
	bool is_kva,
	void **dmatables_region,
	struct sep_dma_context *dma_ctx)
{
	int error = 0;
	/* Array of pointers of page */
	struct sep_lli_entry *lli_in_array;
	/* Array of pointers of page */
	struct sep_lli_entry *lli_out_array;

	if (!dma_ctx) {
		error = -EINVAL;
		goto end_function;
	}

	if (data_size == 0) {
		/* Prepare empty table for input and output */
		if (dmatables_region) {
			error = sep_allocate_dmatables_region(
					sep, dmatables_region, dma_ctx, 2);
			if (error)
				goto end_function;
		}
		sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
			in_num_entries_ptr, table_data_size_ptr,
			dmatables_region, dma_ctx);

		sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
			out_num_entries_ptr, table_data_size_ptr,
			dmatables_region, dma_ctx);

		goto update_dcb_counter;
	}

	/* Initialize the pages pointers */
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;

	/* Lock the pages of the buffer and translate them to pages */
	if (is_kva == true) {
		dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel input pages\n",
			current->pid);
		error = sep_lock_kernel_pages(sep, app_virt_in_addr,
				data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
				dma_ctx);
		if (error) {
			dev_warn(&sep->pdev->dev,
				"[PID%d] sep_lock_kernel_pages for input "
				"virtual buffer failed\n", current->pid);

			goto end_function;
		}

		dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel output pages\n",
			current->pid);
		error = sep_lock_kernel_pages(sep, app_virt_out_addr,
				data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
				dma_ctx);

		if (error) {
			dev_warn(&sep->pdev->dev,
				"[PID%d] sep_lock_kernel_pages for output "
				"virtual buffer failed\n", current->pid);

			goto end_function_free_lli_in;
		}

	} else {
		dev_dbg(&sep->pdev->dev, "[PID%d] Locking user input pages\n",
			current->pid);
		error = sep_lock_user_pages(sep, app_virt_in_addr,
				data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
				dma_ctx);
		if (error) {
			dev_warn(&sep->pdev->dev,
				"[PID%d] sep_lock_user_pages for input "
				"virtual buffer failed\n", current->pid);

			goto end_function;
		}

		if (dma_ctx->secure_dma == true) {
			/* secure_dma requires use of non accessible memory */
			dev_dbg(&sep->pdev->dev, "[PID%d] in secure_dma\n",
				current->pid);
			error = sep_lli_table_secure_dma(sep,
				app_virt_out_addr, data_size, &lli_out_array,
				SEP_DRIVER_OUT_FLAG, dma_ctx);
			if (error) {
				dev_warn(&sep->pdev->dev,
					"[PID%d] secure dma table setup "
					" for output virtual buffer failed\n",
					current->pid);

				goto end_function_free_lli_in;
			}
		} else {
			/* For normal, non-secure dma */
			dev_dbg(&sep->pdev->dev, "[PID%d] not in secure_dma\n",
				current->pid);

			dev_dbg(&sep->pdev->dev,
				"[PID%d] Locking user output pages\n",
				current->pid);

			error = sep_lock_user_pages(sep, app_virt_out_addr,
				data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
				dma_ctx);

			if (error) {
				dev_warn(&sep->pdev->dev,
					"[PID%d] sep_lock_user_pages"
					" for output virtual buffer failed\n",
					current->pid);

				goto end_function_free_lli_in;
			}
		}
	}

	dev_dbg(&sep->pdev->dev,
		"[PID%d] After lock; prep input output dma "
		"table sep_in_num_pages is (hex) %x\n",
		current->pid,
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_out_num_pages is (hex) %x\n",
		current->pid,
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages);

	dev_dbg(&sep->pdev->dev,
		"[PID%d] SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP"
		" is (hex) %x\n", current->pid,
		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

	/* Call the function that creates table from the lli arrays */
	dev_dbg(&sep->pdev->dev, "[PID%d] calling create table from lli\n",
		current->pid);
	error = sep_construct_dma_tables_from_lli(
			sep, lli_in_array,
			dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
								in_num_pages,
			lli_out_array,
			dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
								out_num_pages,
			block_size, lli_table_in_ptr, lli_table_out_ptr,
			in_num_entries_ptr, out_num_entries_ptr,
			table_data_size_ptr, dmatables_region, dma_ctx);

	if (error) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] sep_construct_dma_tables_from_lli failed\n",
			current->pid);
		goto end_function_with_error;
	}

	kfree(lli_out_array);
	kfree(lli_in_array);

update_dcb_counter:
	/* Update DCB counter */
	dma_ctx->nr_dcb_creat++;

	goto end_function;

end_function_with_error:
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
	kfree(lli_out_array);

end_function_free_lli_in:
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
	kfree(lli_in_array);

end_function:
	return error;

}

/**
 * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
 * @app_in_address: unsigned long; for data buffer in (user space)
 * @app_out_address: unsigned long; for data buffer out (user space)
 * @data_in_size: u32; for size of data
 * @block_size: u32; for block size
 * @tail_block_size: u32; for size of tail block
 * @isapplet: bool; to indicate external app
 * @is_kva: bool; kernel buffer; only used for kernel crypto module
 * @secure_dma; indicates whether this is secure_dma using IMR
 *
 * This function prepares the linked DMA tables and puts the
 * address for the linked list of tables into a DCB (data control
 * block) the address of which is known by the SEP hardware
 * Note that all bus addresses that
are passed to the SEP * are in 32 bit format; the SEP is a 32 bit device */ int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep, unsigned long app_in_address, unsigned long app_out_address, u32 data_in_size, u32 block_size, u32 tail_block_size, bool isapplet, bool is_kva, bool secure_dma, struct sep_dcblock *dcb_region, void **dmatables_region, struct sep_dma_context **dma_ctx, struct scatterlist *src_sg, struct scatterlist *dst_sg) { int error = 0; /* Size of tail */ u32 tail_size = 0; /* Address of the created DCB table */ struct sep_dcblock *dcb_table_ptr = NULL; /* The physical address of the first input DMA table */ dma_addr_t in_first_mlli_address = 0; /* Number of entries in the first input DMA table */ u32 in_first_num_entries = 0; /* The physical address of the first output DMA table */ dma_addr_t out_first_mlli_address = 0; /* Number of entries in the first output DMA table */ u32 out_first_num_entries = 0; /* Data in the first input/output table */ u32 first_data_size = 0; dev_dbg(&sep->pdev->dev, "[PID%d] app_in_address %lx\n", current->pid, app_in_address); dev_dbg(&sep->pdev->dev, "[PID%d] app_out_address %lx\n", current->pid, app_out_address); dev_dbg(&sep->pdev->dev, "[PID%d] data_in_size %x\n", current->pid, data_in_size); dev_dbg(&sep->pdev->dev, "[PID%d] block_size %x\n", current->pid, block_size); dev_dbg(&sep->pdev->dev, "[PID%d] tail_block_size %x\n", current->pid, tail_block_size); dev_dbg(&sep->pdev->dev, "[PID%d] isapplet %x\n", current->pid, isapplet); dev_dbg(&sep->pdev->dev, "[PID%d] is_kva %x\n", current->pid, is_kva); dev_dbg(&sep->pdev->dev, "[PID%d] src_sg %p\n", current->pid, src_sg); dev_dbg(&sep->pdev->dev, "[PID%d] dst_sg %p\n", current->pid, dst_sg); if (!dma_ctx) { dev_warn(&sep->pdev->dev, "[PID%d] no DMA context pointer\n", current->pid); error = -EINVAL; goto end_function; } if (*dma_ctx) { /* In case there are multiple DCBs for this transaction */ dev_dbg(&sep->pdev->dev, "[PID%d] DMA context already 
set\n", current->pid); } else { *dma_ctx = kzalloc(sizeof(**dma_ctx), GFP_KERNEL); if (!(*dma_ctx)) { dev_dbg(&sep->pdev->dev, "[PID%d] Not enough memory for DMA context\n", current->pid); error = -ENOMEM; goto end_function; } dev_dbg(&sep->pdev->dev, "[PID%d] Created DMA context addr at 0x%p\n", current->pid, *dma_ctx); } (*dma_ctx)->secure_dma = secure_dma; /* these are for kernel crypto only */ (*dma_ctx)->src_sg = src_sg; (*dma_ctx)->dst_sg = dst_sg; if ((*dma_ctx)->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) { /* No more DCBs to allocate */ dev_dbg(&sep->pdev->dev, "[PID%d] no more DCBs available\n", current->pid); error = -ENOSPC; goto end_function_error; } /* Allocate new DCB */ if (dcb_region) { dcb_table_ptr = dcb_region; } else { dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr + SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES + ((*dma_ctx)->nr_dcb_creat * sizeof(struct sep_dcblock))); } /* Set the default values in the DCB */ dcb_table_ptr->input_mlli_address = 0; dcb_table_ptr->input_mlli_num_entries = 0; dcb_table_ptr->input_mlli_data_size = 0; dcb_table_ptr->output_mlli_address = 0; dcb_table_ptr->output_mlli_num_entries = 0; dcb_table_ptr->output_mlli_data_size = 0; dcb_table_ptr->tail_data_size = 0; dcb_table_ptr->out_vr_tail_pt = 0; if (isapplet == true) { /* Check if there is enough data for DMA operation */ if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) { if (is_kva == true) { error = -ENODEV; goto end_function_error; } else { if (copy_from_user(dcb_table_ptr->tail_data, (void __user *)app_in_address, data_in_size)) { error = -EFAULT; goto end_function_error; } } dcb_table_ptr->tail_data_size = data_in_size; /* Set the output user-space address for mem2mem op */ if (app_out_address) dcb_table_ptr->out_vr_tail_pt = (aligned_u64)app_out_address; /* * Update both data length parameters in order to avoid * second data copy and allow building of empty mlli * tables */ tail_size = 0x0; data_in_size = 0x0; } else { if (!app_out_address) { 
tail_size = data_in_size % block_size; if (!tail_size) { if (tail_block_size == block_size) tail_size = block_size; } } else { tail_size = 0; } } if (tail_size) { if (tail_size > sizeof(dcb_table_ptr->tail_data)) return -EINVAL; if (is_kva == true) { error = -ENODEV; goto end_function_error; } else { /* We have tail data - copy it to DCB */ if (copy_from_user(dcb_table_ptr->tail_data, (void __user *)(app_in_address + data_in_size - tail_size), tail_size)) { error = -EFAULT; goto end_function_error; } } if (app_out_address) /* * Calculate the output address * according to tail data size */ dcb_table_ptr->out_vr_tail_pt = (aligned_u64)app_out_address + data_in_size - tail_size; /* Save the real tail data size */ dcb_table_ptr->tail_data_size = tail_size; /* * Update the data size without the tail * data size AKA data for the dma */ data_in_size = (data_in_size - tail_size); } } /* Check if we need to build only input table or input/output */ if (app_out_address) { /* Prepare input/output tables */ error = sep_prepare_input_output_dma_table(sep, app_in_address, app_out_address, data_in_size, block_size, &in_first_mlli_address, &out_first_mlli_address, &in_first_num_entries, &out_first_num_entries, &first_data_size, is_kva, dmatables_region, *dma_ctx); } else { /* Prepare input tables */ error = sep_prepare_input_dma_table(sep, app_in_address, data_in_size, block_size, &in_first_mlli_address, &in_first_num_entries, &first_data_size, is_kva, dmatables_region, *dma_ctx); } if (error) { dev_warn(&sep->pdev->dev, "prepare DMA table call failed " "from prepare DCB call\n"); goto end_function_error; } /* Set the DCB values */ dcb_table_ptr->input_mlli_address = in_first_mlli_address; dcb_table_ptr->input_mlli_num_entries = in_first_num_entries; dcb_table_ptr->input_mlli_data_size = first_data_size; dcb_table_ptr->output_mlli_address = out_first_mlli_address; dcb_table_ptr->output_mlli_num_entries = out_first_num_entries; dcb_table_ptr->output_mlli_data_size = 
first_data_size; goto end_function; end_function_error: kfree(*dma_ctx); *dma_ctx = NULL; end_function: return error; } /** * sep_free_dma_tables_and_dcb - free DMA tables and DCBs * @sep: pointer to struct sep_device * @isapplet: indicates external application (used for kernel access) * @is_kva: indicates kernel addresses (only used for kernel crypto) * * This function frees the DMA tables and DCB */ static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet, bool is_kva, struct sep_dma_context **dma_ctx) { struct sep_dcblock *dcb_table_ptr; unsigned long pt_hold; void *tail_pt; int i = 0; int error = 0; int error_temp = 0; dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb\n", current->pid); if (((*dma_ctx)->secure_dma == false) && (isapplet == true)) { dev_dbg(&sep->pdev->dev, "[PID%d] handling applet\n", current->pid); /* Tail stuff is only for non secure_dma */ /* Set pointer to first DCB table */ dcb_table_ptr = (struct sep_dcblock *) (sep->shared_addr + SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES); /** * Go over each DCB and see if * tail pointer must be updated */ for (i = 0; dma_ctx && *dma_ctx && i < (*dma_ctx)->nr_dcb_creat; i++, dcb_table_ptr++) { if (dcb_table_ptr->out_vr_tail_pt) { pt_hold = (unsigned long)dcb_table_ptr-> out_vr_tail_pt; tail_pt = (void *)pt_hold; if (is_kva == true) { error = -ENODEV; break; } else { error_temp = copy_to_user( (void __user *)tail_pt, dcb_table_ptr->tail_data, dcb_table_ptr->tail_data_size); } if (error_temp) { /* Release the DMA resource */ error = -EFAULT; break; } } } } /* Free the output pages, if any */ sep_free_dma_table_data_handler(sep, dma_ctx); dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb end\n", current->pid); return error; } /** * sep_prepare_dcb_handler - prepare a control block * @sep: pointer to struct sep_device * @arg: pointer to user parameters * @secure_dma: indicate whether we are using secure_dma on IMR * * This function will retrieve the RAR buffer 
physical addresses, type * & size corresponding to the RAR handles provided in the buffers vector. */ static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg, bool secure_dma, struct sep_dma_context **dma_ctx) { int error; /* Command arguments */ static struct build_dcb_struct command_args; /* Get the command arguments */ if (copy_from_user(&command_args, (void __user *)arg, sizeof(struct build_dcb_struct))) { error = -EFAULT; goto end_function; } dev_dbg(&sep->pdev->dev, "[PID%d] prep dcb handler app_in_address is %08llx\n", current->pid, command_args.app_in_address); dev_dbg(&sep->pdev->dev, "[PID%d] app_out_address is %08llx\n", current->pid, command_args.app_out_address); dev_dbg(&sep->pdev->dev, "[PID%d] data_size is %x\n", current->pid, command_args.data_in_size); dev_dbg(&sep->pdev->dev, "[PID%d] block_size is %x\n", current->pid, command_args.block_size); dev_dbg(&sep->pdev->dev, "[PID%d] tail block_size is %x\n", current->pid, command_args.tail_block_size); dev_dbg(&sep->pdev->dev, "[PID%d] is_applet is %x\n", current->pid, command_args.is_applet); if (!command_args.app_in_address) { dev_warn(&sep->pdev->dev, "[PID%d] null app_in_address\n", current->pid); error = -EINVAL; goto end_function; } error = sep_prepare_input_output_dma_table_in_dcb(sep, (unsigned long)command_args.app_in_address, (unsigned long)command_args.app_out_address, command_args.data_in_size, command_args.block_size, command_args.tail_block_size, command_args.is_applet, false, secure_dma, NULL, NULL, dma_ctx, NULL, NULL); end_function: return error; } /** * sep_free_dcb_handler - free control block resources * @sep: pointer to struct sep_device * * This function frees the DCB resources and updates the needed * user-space buffers. 
 */
static int sep_free_dcb_handler(struct sep_device *sep,
				struct sep_dma_context **dma_ctx)
{
	/* Nothing allocated yet for this file handle: not an error worth
	 * logging loudly, but callers expect a status code */
	if (!dma_ctx || !(*dma_ctx)) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] no dma context defined, nothing to free\n",
			current->pid);
		return -EINVAL;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] free dcbs num of DCBs %x\n",
		current->pid,
		(*dma_ctx)->nr_dcb_creat);

	/* isapplet=false, is_kva=false: no tail-data copy-back to user is
	 * performed on this path (see sep_free_dma_tables_and_dcb) */
	return sep_free_dma_tables_and_dcb(sep, false, false, dma_ctx);
}

/**
 * sep_ioctl - ioctl handler for sep device
 * @filp: pointer to struct file
 * @cmd: command
 * @arg: pointer to argument structure
 *
 * Implement the ioctl methods available on the SEP device.
 */
static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
	int error = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] ioctl cmd 0x%x\n",
		current->pid, cmd);
	dev_dbg(&sep->pdev->dev, "[PID%d] dma context addr 0x%p\n",
		current->pid, *dma_ctx);

	/* Make sure we own this device (transaction ownership is per-PID) */
	error = sep_check_transaction_owner(sep);
	if (error) {
		dev_dbg(&sep->pdev->dev, "[PID%d] ioctl pid is not owner\n",
			current->pid);
		goto end_function;
	}

	/* Check that sep_mmap has been called before any ioctl is allowed */
	if (0 == test_bit(SEP_LEGACY_MMAP_DONE_OFFSET,
				&call_status->status)) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] mmap not called\n", current->pid);
		error = -EPROTO;
		goto end_function;
	}

	/* Check that the command is for SEP device */
	if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
		error = -ENOTTY;
		goto end_function;
	}

	switch (cmd) {
	case SEP_IOCSENDSEPCOMMAND:
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCSENDSEPCOMMAND start\n",
			current->pid);
		/* Only one send-message per transaction is permitted */
		if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
				  &call_status->status)) {
			dev_warn(&sep->pdev->dev,
				"[PID%d] send msg already done\n",
				current->pid);
			error =
-EPROTO; goto end_function; } /* Send command to SEP */ error = sep_send_command_handler(sep); if (!error) set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET, &call_status->status); dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCSENDSEPCOMMAND end\n", current->pid); break; case SEP_IOCENDTRANSACTION: dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCENDTRANSACTION start\n", current->pid); error = sep_end_transaction_handler(sep, dma_ctx, call_status, my_queue_elem); dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCENDTRANSACTION end\n", current->pid); break; case SEP_IOCPREPAREDCB: dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCPREPAREDCB start\n", current->pid); case SEP_IOCPREPAREDCB_SECURE_DMA: dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCPREPAREDCB_SECURE_DMA start\n", current->pid); if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET, &call_status->status)) { dev_dbg(&sep->pdev->dev, "[PID%d] dcb prep needed before send msg\n", current->pid); error = -EPROTO; goto end_function; } if (!arg) { dev_dbg(&sep->pdev->dev, "[PID%d] dcb null arg\n", current->pid); error = -EINVAL; goto end_function; } if (cmd == SEP_IOCPREPAREDCB) { /* No secure dma */ dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCPREPAREDCB (no secure_dma)\n", current->pid); error = sep_prepare_dcb_handler(sep, arg, false, dma_ctx); } else { /* Secure dma */ dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOC_POC (with secure_dma)\n", current->pid); error = sep_prepare_dcb_handler(sep, arg, true, dma_ctx); } dev_dbg(&sep->pdev->dev, "[PID%d] dcb's end\n", current->pid); break; case SEP_IOCFREEDCB: dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB start\n", current->pid); case SEP_IOCFREEDCB_SECURE_DMA: dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB_SECURE_DMA start\n", current->pid); error = sep_free_dcb_handler(sep, dma_ctx); dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB end\n", current->pid); break; default: error = -ENOTTY; dev_dbg(&sep->pdev->dev, "[PID%d] default end\n", current->pid); break; } end_function: dev_dbg(&sep->pdev->dev, "[PID%d] ioctl 
end\n", current->pid); return error; } /** * sep_inthandler - interrupt handler for sep device * @irq: interrupt * @dev_id: device id */ static irqreturn_t sep_inthandler(int irq, void *dev_id) { unsigned long lock_irq_flag; u32 reg_val, reg_val2 = 0; struct sep_device *sep = dev_id; irqreturn_t int_error = IRQ_HANDLED; /* Are we in power save? */ #if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM) if (sep->pdev->dev.power.runtime_status != RPM_ACTIVE) { dev_dbg(&sep->pdev->dev, "interrupt during pwr save\n"); return IRQ_NONE; } #endif if (test_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags) == 0) { dev_dbg(&sep->pdev->dev, "interrupt while nobody using sep\n"); return IRQ_NONE; } /* Read the IRR register to check if this is SEP interrupt */ reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR); dev_dbg(&sep->pdev->dev, "sep int: IRR REG val: %x\n", reg_val); if (reg_val & (0x1 << 13)) { /* Lock and update the counter of reply messages */ spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag); sep->reply_ct++; spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag); dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n", sep->send_ct, sep->reply_ct); /* Is this a kernel client request */ if (sep->in_kernel) { tasklet_schedule(&sep->finish_tasklet); goto finished_interrupt; } /* Is this printf or daemon request? 
 */
		reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev,
			"SEP Interrupt - GPR2 is %08x\n", reg_val2);

		/* Device no longer busy from the driver's point of view */
		clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);

		/* GPR2 top bits select the request type:
		 * bit 30 -> printf request, bit 31 -> daemon request,
		 * otherwise a normal SEP reply that wakes the reader */
		if ((reg_val2 >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev, "int: printf request\n");
		} else if (reg_val2 >> 31) {
			dev_dbg(&sep->pdev->dev, "int: daemon request\n");
		} else {
			dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
			wake_up(&sep->event_interrupt);
		}
	} else {
		dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
		int_error = IRQ_NONE;
	}

finished_interrupt:

	/* Ack only when the interrupt was actually ours */
	if (int_error == IRQ_HANDLED)
		sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);

	return int_error;
}

/**
 * sep_reconfig_shared_area - reconfigure shared area
 * @sep: pointer to struct sep_device
 *
 * Reconfig the shared area between HOST and SEP - needed in case
 * the DX_CC_Init function was called before OS loading.
 */
static int sep_reconfig_shared_area(struct sep_device *sep)
{
	int ret_val;

	/* use to limit waiting for SEP */
	unsigned long end_time;

	/* Send the new SHARED MESSAGE AREA to the SEP */
	dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
				(unsigned long long)sep->shared_bus);

	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);

	/* Poll for SEP response: SEP echoes the bus address back through
	 * GPR1 when it has accepted the new shared area */
	ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);

	end_time = jiffies + (WAIT_TIME * HZ);

	/* 0xffffffff is treated as a dead/unreadable register and aborts
	 * the busy-wait early */
	while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
		(ret_val != sep->shared_bus))
		ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);

	/* Check the return value (register) */
	if (ret_val != sep->shared_bus) {
		dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
		dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
		/* NOTE(review): -ENOMEM is an odd errno for a handshake
		 * timeout; -EIO or -ETIMEDOUT would describe it better, but
		 * callers may test for this value - confirm before changing */
		ret_val = -ENOMEM;
	} else
		ret_val = 0;

	dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");

	return ret_val;
}

/**
 * sep_activate_dcb_dmatables_context - Takes DCB & DMA tables
 *	contexts into use
 * @sep: SEP device
 * @dcb_region: DCB region copy
 * @dmatables_region: MLLI/DMA tables copy
 *
@dma_ctx: DMA context for current transaction */ ssize_t sep_activate_dcb_dmatables_context(struct sep_device *sep, struct sep_dcblock **dcb_region, void **dmatables_region, struct sep_dma_context *dma_ctx) { void *dmaregion_free_start = NULL; void *dmaregion_free_end = NULL; void *dcbregion_free_start = NULL; void *dcbregion_free_end = NULL; ssize_t error = 0; dev_dbg(&sep->pdev->dev, "[PID%d] activating dcb/dma region\n", current->pid); if (1 > dma_ctx->nr_dcb_creat) { dev_warn(&sep->pdev->dev, "[PID%d] invalid number of dcbs to activate 0x%08X\n", current->pid, dma_ctx->nr_dcb_creat); error = -EINVAL; goto end_function; } dmaregion_free_start = sep->shared_addr + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES; dmaregion_free_end = dmaregion_free_start + SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1; if (dmaregion_free_start + dma_ctx->dmatables_len > dmaregion_free_end) { error = -ENOMEM; goto end_function; } memcpy(dmaregion_free_start, *dmatables_region, dma_ctx->dmatables_len); /* Free MLLI table copy */ kfree(*dmatables_region); *dmatables_region = NULL; /* Copy thread's DCB table copy to DCB table region */ dcbregion_free_start = sep->shared_addr + SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES; dcbregion_free_end = dcbregion_free_start + (SEP_MAX_NUM_SYNC_DMA_OPS * sizeof(struct sep_dcblock)) - 1; if (dcbregion_free_start + (dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock)) > dcbregion_free_end) { error = -ENOMEM; goto end_function; } memcpy(dcbregion_free_start, *dcb_region, dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock)); /* Print the tables */ dev_dbg(&sep->pdev->dev, "activate: input table\n"); sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep, (*dcb_region)->input_mlli_address), (*dcb_region)->input_mlli_num_entries, (*dcb_region)->input_mlli_data_size); dev_dbg(&sep->pdev->dev, "activate: output table\n"); sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep, 
(*dcb_region)->output_mlli_address), (*dcb_region)->output_mlli_num_entries, (*dcb_region)->output_mlli_data_size); dev_dbg(&sep->pdev->dev, "[PID%d] printing activated tables\n", current->pid); end_function: kfree(*dmatables_region); *dmatables_region = NULL; kfree(*dcb_region); *dcb_region = NULL; return error; } /** * sep_create_dcb_dmatables_context - Creates DCB & MLLI/DMA table context * @sep: SEP device * @dcb_region: DCB region buf to create for current transaction * @dmatables_region: MLLI/DMA tables buf to create for current transaction * @dma_ctx: DMA context buf to create for current transaction * @user_dcb_args: User arguments for DCB/MLLI creation * @num_dcbs: Number of DCBs to create * @secure_dma: Indicate use of IMR restricted memory secure dma */ static ssize_t sep_create_dcb_dmatables_context(struct sep_device *sep, struct sep_dcblock **dcb_region, void **dmatables_region, struct sep_dma_context **dma_ctx, const struct build_dcb_struct __user *user_dcb_args, const u32 num_dcbs, bool secure_dma) { int error = 0; int i = 0; struct build_dcb_struct *dcb_args = NULL; dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n", current->pid); if (!dcb_region || !dma_ctx || !dmatables_region || !user_dcb_args) { error = -EINVAL; goto end_function; } if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) { dev_warn(&sep->pdev->dev, "[PID%d] invalid number of dcbs 0x%08X\n", current->pid, num_dcbs); error = -EINVAL; goto end_function; } dcb_args = kzalloc(num_dcbs * sizeof(struct build_dcb_struct), GFP_KERNEL); if (!dcb_args) { dev_warn(&sep->pdev->dev, "[PID%d] no memory for dcb args\n", current->pid); error = -ENOMEM; goto end_function; } if (copy_from_user(dcb_args, user_dcb_args, num_dcbs * sizeof(struct build_dcb_struct))) { error = -EINVAL; goto end_function; } /* Allocate thread-specific memory for DCB */ *dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock), GFP_KERNEL); if (!(*dcb_region)) { error = -ENOMEM; goto end_function; } /* Prepare DCB and MLLI 
table into the allocated regions */ for (i = 0; i < num_dcbs; i++) { error = sep_prepare_input_output_dma_table_in_dcb(sep, (unsigned long)dcb_args[i].app_in_address, (unsigned long)dcb_args[i].app_out_address, dcb_args[i].data_in_size, dcb_args[i].block_size, dcb_args[i].tail_block_size, dcb_args[i].is_applet, false, secure_dma, *dcb_region, dmatables_region, dma_ctx, NULL, NULL); if (error) { dev_warn(&sep->pdev->dev, "[PID%d] dma table creation failed\n", current->pid); goto end_function; } if (dcb_args[i].app_in_address != 0) (*dma_ctx)->input_data_len += dcb_args[i].data_in_size; } end_function: kfree(dcb_args); return error; } /** * sep_create_dcb_dmatables_context_kernel - Creates DCB & MLLI/DMA table context * for kernel crypto * @sep: SEP device * @dcb_region: DCB region buf to create for current transaction * @dmatables_region: MLLI/DMA tables buf to create for current transaction * @dma_ctx: DMA context buf to create for current transaction * @user_dcb_args: User arguments for DCB/MLLI creation * @num_dcbs: Number of DCBs to create * This does that same thing as sep_create_dcb_dmatables_context * except that it is used only for the kernel crypto operation. 
It is * separate because there is no user data involved; the dcb data structure * is specific for kernel crypto (build_dcb_struct_kernel) */ int sep_create_dcb_dmatables_context_kernel(struct sep_device *sep, struct sep_dcblock **dcb_region, void **dmatables_region, struct sep_dma_context **dma_ctx, const struct build_dcb_struct_kernel *dcb_data, const u32 num_dcbs) { int error = 0; int i = 0; dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n", current->pid); if (!dcb_region || !dma_ctx || !dmatables_region || !dcb_data) { error = -EINVAL; goto end_function; } if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) { dev_warn(&sep->pdev->dev, "[PID%d] invalid number of dcbs 0x%08X\n", current->pid, num_dcbs); error = -EINVAL; goto end_function; } dev_dbg(&sep->pdev->dev, "[PID%d] num_dcbs is %d\n", current->pid, num_dcbs); /* Allocate thread-specific memory for DCB */ *dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock), GFP_KERNEL); if (!(*dcb_region)) { error = -ENOMEM; goto end_function; } /* Prepare DCB and MLLI table into the allocated regions */ for (i = 0; i < num_dcbs; i++) { error = sep_prepare_input_output_dma_table_in_dcb(sep, (unsigned long)dcb_data->app_in_address, (unsigned long)dcb_data->app_out_address, dcb_data->data_in_size, dcb_data->block_size, dcb_data->tail_block_size, dcb_data->is_applet, true, false, *dcb_region, dmatables_region, dma_ctx, dcb_data->src_sg, dcb_data->dst_sg); if (error) { dev_warn(&sep->pdev->dev, "[PID%d] dma table creation failed\n", current->pid); goto end_function; } } end_function: return error; } /** * sep_activate_msgarea_context - Takes the message area context into use * @sep: SEP device * @msg_region: Message area context buf * @msg_len: Message area context buffer size */ static ssize_t sep_activate_msgarea_context(struct sep_device *sep, void **msg_region, const size_t msg_len) { dev_dbg(&sep->pdev->dev, "[PID%d] activating msg region\n", current->pid); if (!msg_region || !(*msg_region) || 
SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES < msg_len) { dev_warn(&sep->pdev->dev, "[PID%d] invalid act msgarea len 0x%08zX\n", current->pid, msg_len); return -EINVAL; } memcpy(sep->shared_addr, *msg_region, msg_len); return 0; } /** * sep_create_msgarea_context - Creates message area context * @sep: SEP device * @msg_region: Msg area region buf to create for current transaction * @msg_user: Content for msg area region from user * @msg_len: Message area size */ static ssize_t sep_create_msgarea_context(struct sep_device *sep, void **msg_region, const void __user *msg_user, const size_t msg_len) { int error = 0; dev_dbg(&sep->pdev->dev, "[PID%d] creating msg region\n", current->pid); if (!msg_region || !msg_user || SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < msg_len || SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > msg_len) { dev_warn(&sep->pdev->dev, "[PID%d] invalid creat msgarea len 0x%08zX\n", current->pid, msg_len); error = -EINVAL; goto end_function; } /* Allocate thread-specific memory for message buffer */ *msg_region = kzalloc(msg_len, GFP_KERNEL); if (!(*msg_region)) { dev_warn(&sep->pdev->dev, "[PID%d] no mem for msgarea context\n", current->pid); error = -ENOMEM; goto end_function; } /* Copy input data to write() to allocated message buffer */ if (copy_from_user(*msg_region, msg_user, msg_len)) { error = -EINVAL; goto end_function; } end_function: if (error && msg_region) { kfree(*msg_region); *msg_region = NULL; } return error; } /** * sep_read - Returns results of an operation for fastcall interface * @filp: File pointer * @buf_user: User buffer for storing results * @count_user: User buffer size * @offset: File offset, not supported * * The implementation does not support reading in chunks, all data must be * consumed during a single read system call. 
*/ static ssize_t sep_read(struct file *filp, char __user *buf_user, size_t count_user, loff_t *offset) { struct sep_private_data * const private_data = filp->private_data; struct sep_call_status *call_status = &private_data->call_status; struct sep_device *sep = private_data->device; struct sep_dma_context **dma_ctx = &private_data->dma_ctx; struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem; ssize_t error = 0, error_tmp = 0; /* Am I the process that owns the transaction? */ error = sep_check_transaction_owner(sep); if (error) { dev_dbg(&sep->pdev->dev, "[PID%d] read pid is not owner\n", current->pid); goto end_function; } /* Checks that user has called necessarry apis */ if (0 == test_bit(SEP_FASTCALL_WRITE_DONE_OFFSET, &call_status->status)) { dev_warn(&sep->pdev->dev, "[PID%d] fastcall write not called\n", current->pid); error = -EPROTO; goto end_function_error; } if (!buf_user) { dev_warn(&sep->pdev->dev, "[PID%d] null user buffer\n", current->pid); error = -EINVAL; goto end_function_error; } /* Wait for SEP to finish */ wait_event(sep->event_interrupt, test_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags) == 0); sep_dump_message(sep); dev_dbg(&sep->pdev->dev, "[PID%d] count_user = 0x%08zX\n", current->pid, count_user); /* In case user has allocated bigger buffer */ if (count_user > SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES) count_user = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES; if (copy_to_user(buf_user, sep->shared_addr, count_user)) { error = -EFAULT; goto end_function_error; } dev_dbg(&sep->pdev->dev, "[PID%d] read succeeded\n", current->pid); error = count_user; end_function_error: /* Copy possible tail data to user and free DCB and MLLIs */ error_tmp = sep_free_dcb_handler(sep, dma_ctx); if (error_tmp) dev_warn(&sep->pdev->dev, "[PID%d] dcb free failed\n", current->pid); /* End the transaction, wakeup pending ones */ error_tmp = sep_end_transaction_handler(sep, dma_ctx, call_status, my_queue_elem); if (error_tmp) 
		dev_warn(&sep->pdev->dev,
			"[PID%d] ending transaction failed\n",
			current->pid);

end_function:
	return error;
}

/**
 * sep_fastcall_args_get - Gets fastcall params from user
 * @sep: SEP device
 * @args: Parameters buffer
 * @buf_user: User buffer for operation parameters
 * @count_user: User buffer size
 *
 * Copies and validates the fastcall header from userspace. Returns 0 on
 * success, -EINVAL / -EFAULT / -EMSGSIZE on validation failure.
 */
static inline ssize_t sep_fastcall_args_get(struct sep_device *sep,
					    struct sep_fastcall_hdr *args,
					    const char __user *buf_user,
					    const size_t count_user)
{
	ssize_t error = 0;
	size_t actual_count = 0;

	if (!buf_user) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] null user buffer\n",
			 current->pid);
		error = -EINVAL;
		goto end_function;
	}

	/* Must at least contain a complete header */
	if (count_user < sizeof(struct sep_fastcall_hdr)) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] too small message size 0x%08zX\n",
			 current->pid, count_user);
		error = -EINVAL;
		goto end_function;
	}

	if (copy_from_user(args, buf_user,
			   sizeof(struct sep_fastcall_hdr))) {
		error = -EFAULT;
		goto end_function;
	}

	/* Magic guards against garbage writes on the fastcall interface */
	if (SEP_FC_MAGIC != args->magic) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] invalid fastcall magic 0x%08X\n",
			 current->pid, args->magic);
		error = -EINVAL;
		goto end_function;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr num of DCBs 0x%08X\n",
		current->pid, args->num_dcbs);
	dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr msg len 0x%08X\n",
		current->pid, args->msg_len);

	if (SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < args->msg_len ||
	    SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > args->msg_len) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] invalid message length\n",
			 current->pid);
		error = -EINVAL;
		goto end_function;
	}

	/* Total write() size must be exactly header + message + DCB array.
	 * NOTE(review): num_dcbs is user-controlled and only range-checked
	 * later (in sep_create_dcb_dmatables_context); the multiplication
	 * here is done in size_t, so on 64-bit it cannot overflow, but
	 * confirm for 32-bit builds. */
	actual_count = sizeof(struct sep_fastcall_hdr)
			+ args->msg_len
			+ (args->num_dcbs * sizeof(struct build_dcb_struct));

	if (actual_count != count_user) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] inconsistent message "
			 "sizes 0x%08zX vs 0x%08zX\n",
			 current->pid, actual_count, count_user);
		error = -EMSGSIZE;
		goto end_function;
	}

end_function:
	return error;
}

/**
 * sep_write - Starts an operation for fastcall interface
 * @filp: File pointer
 * @buf_user: User buffer for operation
parameters * @count_user: User buffer size * @offset: File offset, not supported * * The implementation does not support writing in chunks, * all data must be given during a single write system call. */ static ssize_t sep_write(struct file *filp, const char __user *buf_user, size_t count_user, loff_t *offset) { struct sep_private_data * const private_data = filp->private_data; struct sep_call_status *call_status = &private_data->call_status; struct sep_device *sep = private_data->device; struct sep_dma_context *dma_ctx = NULL; struct sep_fastcall_hdr call_hdr = {0}; void *msg_region = NULL; void *dmatables_region = NULL; struct sep_dcblock *dcb_region = NULL; ssize_t error = 0; struct sep_queue_info *my_queue_elem = NULL; bool my_secure_dma; /* are we using secure_dma (IMR)? */ dev_dbg(&sep->pdev->dev, "[PID%d] sep dev is 0x%p\n", current->pid, sep); dev_dbg(&sep->pdev->dev, "[PID%d] private_data is 0x%p\n", current->pid, private_data); error = sep_fastcall_args_get(sep, &call_hdr, buf_user, count_user); if (error) goto end_function; buf_user += sizeof(struct sep_fastcall_hdr); if (call_hdr.secure_dma == 0) my_secure_dma = false; else my_secure_dma = true; /* * Controlling driver memory usage by limiting amount of * buffers created. 
Only SEP_DOUBLEBUF_USERS_LIMIT number * of threads can progress further at a time */ dev_dbg(&sep->pdev->dev, "[PID%d] waiting for double buffering " "region access\n", current->pid); error = down_interruptible(&sep->sep_doublebuf); dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region start\n", current->pid); if (error) { /* Signal received */ goto end_function_error; } /* * Prepare contents of the shared area regions for * the operation into temporary buffers */ if (0 < call_hdr.num_dcbs) { error = sep_create_dcb_dmatables_context(sep, &dcb_region, &dmatables_region, &dma_ctx, (const struct build_dcb_struct __user *) buf_user, call_hdr.num_dcbs, my_secure_dma); if (error) goto end_function_error_doublebuf; buf_user += call_hdr.num_dcbs * sizeof(struct build_dcb_struct); } error = sep_create_msgarea_context(sep, &msg_region, buf_user, call_hdr.msg_len); if (error) goto end_function_error_doublebuf; dev_dbg(&sep->pdev->dev, "[PID%d] updating queue status\n", current->pid); my_queue_elem = sep_queue_status_add(sep, ((struct sep_msgarea_hdr *)msg_region)->opcode, (dma_ctx) ? 
dma_ctx->input_data_len : 0, current->pid, current->comm, sizeof(current->comm)); if (!my_queue_elem) { dev_dbg(&sep->pdev->dev, "[PID%d] updating queue" "status error\n", current->pid); error = -ENOMEM; goto end_function_error_doublebuf; } /* Wait until current process gets the transaction */ error = sep_wait_transaction(sep); if (error) { /* Interrupted by signal, don't clear transaction */ dev_dbg(&sep->pdev->dev, "[PID%d] interrupted by signal\n", current->pid); sep_queue_status_remove(sep, &my_queue_elem); goto end_function_error_doublebuf; } dev_dbg(&sep->pdev->dev, "[PID%d] saving queue element\n", current->pid); private_data->my_queue_elem = my_queue_elem; /* Activate shared area regions for the transaction */ error = sep_activate_msgarea_context(sep, &msg_region, call_hdr.msg_len); if (error) goto end_function_error_clear_transact; sep_dump_message(sep); if (0 < call_hdr.num_dcbs) { error = sep_activate_dcb_dmatables_context(sep, &dcb_region, &dmatables_region, dma_ctx); if (error) goto end_function_error_clear_transact; } /* Send command to SEP */ error = sep_send_command_handler(sep); if (error) goto end_function_error_clear_transact; /* Store DMA context for the transaction */ private_data->dma_ctx = dma_ctx; /* Update call status */ set_bit(SEP_FASTCALL_WRITE_DONE_OFFSET, &call_status->status); error = count_user; up(&sep->sep_doublebuf); dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n", current->pid); goto end_function; end_function_error_clear_transact: sep_end_transaction_handler(sep, &dma_ctx, call_status, &private_data->my_queue_elem); end_function_error_doublebuf: up(&sep->sep_doublebuf); dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n", current->pid); end_function_error: if (dma_ctx) sep_free_dma_table_data_handler(sep, &dma_ctx); end_function: kfree(dcb_region); kfree(dmatables_region); kfree(msg_region); return error; } /** * sep_seek - Handler for seek system call * @filp: File pointer * @offset: File offset * 
 * @origin: Options for offset
 *
 * Fastcall interface does not support seeking, all reads
 * and writes are from/to offset zero
 */
static loff_t sep_seek(struct file *filp, loff_t offset, int origin)
{
	/* NOTE(review): -ENOSYS from llseek is unconventional; the usual
	 * return for a non-seekable device is -ESPIPE (or wiring .llseek
	 * to no_llseek). Changing it alters the userspace-visible errno,
	 * so confirm with consumers before switching. */
	return -ENOSYS;
}

/**
 * sep_file_operations - file operation on sep device
 * @sep_ioctl: ioctl handler from user space call
 * @sep_poll: poll handler
 * @sep_open: handles sep device open request
 * @sep_release:handles sep device release request
 * @sep_mmap: handles memory mapping requests
 * @sep_read: handles read request on sep device
 * @sep_write: handles write request on sep device
 * @sep_seek: handles seek request on sep device
 */
static const struct file_operations sep_file_operations = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = sep_ioctl,
	.poll = sep_poll,
	.open = sep_open,
	.release = sep_release,
	.mmap = sep_mmap,
	.read = sep_read,
	.write = sep_write,
	.llseek = sep_seek,
};

/**
 * sep_sysfs_read - read sysfs entry per gives arguments
 * @filp: file pointer
 * @kobj: kobject pointer
 * @attr: binary file attributes
 * @buf: read to this buffer
 * @pos: offset to read
 * @count: amount of data to read
 *
 * This function is to read sysfs entries for sep driver per given arguments.
*/ static ssize_t sep_sysfs_read(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t pos, size_t count) { unsigned long lck_flags; size_t nleft = count; struct sep_device *sep = sep_dev; struct sep_queue_info *queue_elem = NULL; u32 queue_num = 0; u32 i = 1; spin_lock_irqsave(&sep->sep_queue_lock, lck_flags); queue_num = sep->sep_queue_num; if (queue_num > SEP_DOUBLEBUF_USERS_LIMIT) queue_num = SEP_DOUBLEBUF_USERS_LIMIT; if (count < sizeof(queue_num) + (queue_num * sizeof(struct sep_queue_data))) { spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags); return -EINVAL; } memcpy(buf, &queue_num, sizeof(queue_num)); buf += sizeof(queue_num); nleft -= sizeof(queue_num); list_for_each_entry(queue_elem, &sep->sep_queue_status, list) { if (i++ > queue_num) break; memcpy(buf, &queue_elem->data, sizeof(queue_elem->data)); nleft -= sizeof(queue_elem->data); buf += sizeof(queue_elem->data); } spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags); return count - nleft; } /** * bin_attributes - defines attributes for queue_status * @attr: attributes (name & permissions) * @read: function pointer to read this file * @size: maxinum size of binary attribute */ static const struct bin_attribute queue_status = { .attr = {.name = "queue_status", .mode = 0444}, .read = sep_sysfs_read, .size = sizeof(u32) + (SEP_DOUBLEBUF_USERS_LIMIT * sizeof(struct sep_queue_data)), }; /** * sep_register_driver_with_fs - register misc devices * @sep: pointer to struct sep_device * * This function registers the driver with the file system */ static int sep_register_driver_with_fs(struct sep_device *sep) { int ret_val; sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR; sep->miscdev_sep.name = SEP_DEV_NAME; sep->miscdev_sep.fops = &sep_file_operations; ret_val = misc_register(&sep->miscdev_sep); if (ret_val) { dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n", ret_val); return ret_val; } ret_val = device_create_bin_file(sep->miscdev_sep.this_device, &queue_status); 
if (ret_val) { dev_warn(&sep->pdev->dev, "sysfs attribute1 fails for SEP %x\n", ret_val); return ret_val; } return ret_val; } /** *sep_probe - probe a matching PCI device *@pdev: pci_device *@ent: pci_device_id * *Attempt to set up and configure a SEP device that has been *discovered by the PCI layer. Allocates all required resources. */ static int __devinit sep_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { int error = 0; struct sep_device *sep = NULL; if (sep_dev != NULL) { dev_dbg(&pdev->dev, "only one SEP supported.\n"); return -EBUSY; } /* Enable the device */ error = pci_enable_device(pdev); if (error) { dev_warn(&pdev->dev, "error enabling pci device\n"); goto end_function; } /* Allocate the sep_device structure for this device */ sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC); if (sep_dev == NULL) { dev_warn(&pdev->dev, "can't kmalloc the sep_device structure\n"); error = -ENOMEM; goto end_function_disable_device; } /* * We're going to use another variable for actually * working with the device; this way, if we have * multiple devices in the future, it would be easier * to make appropriate changes */ sep = sep_dev; sep->pdev = pci_dev_get(pdev); init_waitqueue_head(&sep->event_transactions); init_waitqueue_head(&sep->event_interrupt); spin_lock_init(&sep->snd_rply_lck); spin_lock_init(&sep->sep_queue_lock); sema_init(&sep->sep_doublebuf, SEP_DOUBLEBUF_USERS_LIMIT); INIT_LIST_HEAD(&sep->sep_queue_status); dev_dbg(&sep->pdev->dev, "sep probe: PCI obtained, " "device being prepared\n"); /* Set up our register area */ sep->reg_physical_addr = pci_resource_start(sep->pdev, 0); if (!sep->reg_physical_addr) { dev_warn(&sep->pdev->dev, "Error getting register start\n"); error = -ENODEV; goto end_function_free_sep_dev; } sep->reg_physical_end = pci_resource_end(sep->pdev, 0); if (!sep->reg_physical_end) { dev_warn(&sep->pdev->dev, "Error getting register end\n"); error = -ENODEV; goto end_function_free_sep_dev; } sep->reg_addr = 
ioremap_nocache(sep->reg_physical_addr, (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1)); if (!sep->reg_addr) { dev_warn(&sep->pdev->dev, "Error getting register virtual\n"); error = -ENODEV; goto end_function_free_sep_dev; } dev_dbg(&sep->pdev->dev, "Register area start %llx end %llx virtual %p\n", (unsigned long long)sep->reg_physical_addr, (unsigned long long)sep->reg_physical_end, sep->reg_addr); /* Allocate the shared area */ sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES; if (sep_map_and_alloc_shared_area(sep)) { error = -ENOMEM; /* Allocation failed */ goto end_function_error; } /* Clear ICR register */ sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF); /* Set the IMR register - open only GPR 2 */ sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13))); /* Read send/receive counters from SEP */ sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR); sep->reply_ct &= 0x3FFFFFFF; sep->send_ct = sep->reply_ct; /* Get the interrupt line */ error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, "sep_driver", sep); if (error) goto end_function_deallocate_sep_shared_area; /* The new chip requires a shared area reconfigure */ error = sep_reconfig_shared_area(sep); if (error) goto end_function_free_irq; sep->in_use = 1; /* Finally magic up the device nodes */ /* Register driver with the fs */ error = sep_register_driver_with_fs(sep); if (error) { dev_err(&sep->pdev->dev, "error registering dev file\n"); goto end_function_free_irq; } sep->in_use = 0; /* through touching the device */ #ifdef SEP_ENABLE_RUNTIME_PM pm_runtime_put_noidle(&sep->pdev->dev); pm_runtime_allow(&sep->pdev->dev); pm_runtime_set_autosuspend_delay(&sep->pdev->dev, SUSPEND_DELAY); pm_runtime_use_autosuspend(&sep->pdev->dev); pm_runtime_mark_last_busy(&sep->pdev->dev); 
sep->power_save_setup = 1; #endif /* register kernel crypto driver */ #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) error = sep_crypto_setup(); if (error) { dev_err(&sep->pdev->dev, "crypto setup failed\n"); goto end_function_free_irq; } #endif goto end_function; end_function_free_irq: free_irq(pdev->irq, sep); end_function_deallocate_sep_shared_area: /* De-allocate shared area */ sep_unmap_and_free_shared_area(sep); end_function_error: iounmap(sep->reg_addr); end_function_free_sep_dev: pci_dev_put(sep_dev->pdev); kfree(sep_dev); sep_dev = NULL; end_function_disable_device: pci_disable_device(pdev); end_function: return error; } /** * sep_remove - handles removing device from pci subsystem * @pdev: pointer to pci device * * This function will handle removing our sep device from pci subsystem on exit * or unloading this module. It should free up all used resources, and unmap if * any memory regions mapped. */ static void sep_remove(struct pci_dev *pdev) { struct sep_device *sep = sep_dev; /* Unregister from fs */ misc_deregister(&sep->miscdev_sep); /* Unregister from kernel crypto */ #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) sep_crypto_takedown(); #endif /* Free the irq */ free_irq(sep->pdev->irq, sep); /* Free the shared area */ sep_unmap_and_free_shared_area(sep_dev); iounmap(sep_dev->reg_addr); #ifdef SEP_ENABLE_RUNTIME_PM if (sep->in_use) { sep->in_use = 0; pm_runtime_forbid(&sep->pdev->dev); pm_runtime_get_noresume(&sep->pdev->dev); } #endif pci_dev_put(sep_dev->pdev); kfree(sep_dev); sep_dev = NULL; } /* Initialize struct pci_device_id for our driver */ static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0826)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08e9)}, {0} }; /* Export our pci_device_id structure to user space */ MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl); #ifdef SEP_ENABLE_RUNTIME_PM /** * sep_pm_resume - rsume routine while waking up from S3 state * @dev: pointer to sep device * * This 
function is to be used to wake up sep driver while system awakes from S3 * state i.e. suspend to ram. The RAM in intact. * Notes - revisit with more understanding of pm, ICR/IMR & counters. */ static int sep_pci_resume(struct device *dev) { struct sep_device *sep = sep_dev; dev_dbg(&sep->pdev->dev, "pci resume called\n"); if (sep->power_state == SEP_DRIVER_POWERON) return 0; /* Clear ICR register */ sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF); /* Set the IMR register - open only GPR 2 */ sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13))); /* Read send/receive counters from SEP */ sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR); sep->reply_ct &= 0x3FFFFFFF; sep->send_ct = sep->reply_ct; sep->power_state = SEP_DRIVER_POWERON; return 0; } /** * sep_pm_suspend - suspend routine while going to S3 state * @dev: pointer to sep device * * This function is to be used to suspend sep driver while system goes to S3 * state i.e. suspend to ram. The RAM in intact and ON during this suspend. * Notes - revisit with more understanding of pm, ICR/IMR */ static int sep_pci_suspend(struct device *dev) { struct sep_device *sep = sep_dev; dev_dbg(&sep->pdev->dev, "pci suspend called\n"); if (sep->in_use == 1) return -EAGAIN; sep->power_state = SEP_DRIVER_POWEROFF; /* Clear ICR register */ sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF); /* Set the IMR to block all */ sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0xFFFFFFFF); return 0; } /** * sep_pm_runtime_resume - runtime resume routine * @dev: pointer to sep device * * Notes - revisit with more understanding of pm, ICR/IMR & counters */ static int sep_pm_runtime_resume(struct device *dev) { u32 retval2; u32 delay_count; struct sep_device *sep = sep_dev; dev_dbg(&sep->pdev->dev, "pm runtime resume called\n"); /** * Wait until the SCU boot is ready * This is done by iterating SCU_DELAY_ITERATION (10 * microseconds each) up to SCU_DELAY_MAX (50) times. 
* This bit can be set in a random time that is less * than 500 microseconds after each power resume */ retval2 = 0; delay_count = 0; while ((!retval2) && (delay_count < SCU_DELAY_MAX)) { retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR); retval2 &= 0x00000008; if (!retval2) { udelay(SCU_DELAY_ITERATION); delay_count += 1; } } if (!retval2) { dev_warn(&sep->pdev->dev, "scu boot bit not set at resume\n"); return -EINVAL; } /* Clear ICR register */ sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF); /* Set the IMR register - open only GPR 2 */ sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13))); /* Read send/receive counters from SEP */ sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR); sep->reply_ct &= 0x3FFFFFFF; sep->send_ct = sep->reply_ct; return 0; } /** * sep_pm_runtime_suspend - runtime suspend routine * @dev: pointer to sep device * * Notes - revisit with more understanding of pm */ static int sep_pm_runtime_suspend(struct device *dev) { struct sep_device *sep = sep_dev; dev_dbg(&sep->pdev->dev, "pm runtime suspend called\n"); /* Clear ICR register */ sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF); return 0; } /** * sep_pm - power management for sep driver * @sep_pm_runtime_resume: resume- no communication with cpu & main memory * @sep_pm_runtime_suspend: suspend- no communication with cpu & main memory * @sep_pci_suspend: suspend - main memory is still ON * @sep_pci_resume: resume - main meory is still ON */ static const struct dev_pm_ops sep_pm = { .runtime_resume = sep_pm_runtime_resume, .runtime_suspend = sep_pm_runtime_suspend, .resume = sep_pci_resume, .suspend = sep_pci_suspend, }; #endif /* SEP_ENABLE_RUNTIME_PM */ /** * sep_pci_driver - registers this device with pci subsystem * @name: name identifier for this driver * @sep_pci_id_tbl: pointer to struct pci_device_id table * @sep_probe: pointer to probe function in PCI driver * @sep_remove: pointer to remove function in PCI driver */ static struct pci_driver 
sep_pci_driver = { #ifdef SEP_ENABLE_RUNTIME_PM .driver = { .pm = &sep_pm, }, #endif .name = "sep_sec_driver", .id_table = sep_pci_id_tbl, .probe = sep_probe, .remove = sep_remove }; /** * sep_init - init function * * Module load time. Register the PCI device driver. */ static int __init sep_init(void) { return pci_register_driver(&sep_pci_driver); } /** * sep_exit - called to unload driver * * Unregister the driver The device will perform all the cleanup required. */ static void __exit sep_exit(void) { pci_unregister_driver(&sep_pci_driver); } module_init(sep_init); module_exit(sep_exit); MODULE_LICENSE("GPL");
gpl-2.0
Team-Blackout/Blackout_Jewel_plus
sound/sparc/dbri.c
5034
80241
/* * Driver for DBRI sound chip found on Sparcs. * Copyright (C) 2004, 2005 Martin Habets (mhabets@users.sourceforge.net) * * Converted to ring buffered version by Krzysztof Helt (krzysztof.h1@wp.pl) * * Based entirely upon drivers/sbus/audio/dbri.c which is: * Copyright (C) 1997 Rudolf Koenig (rfkoenig@immd4.informatik.uni-erlangen.de) * Copyright (C) 1998, 1999 Brent Baccala (baccala@freesoft.org) * * This is the low level driver for the DBRI & MMCODEC duo used for ISDN & AUDIO * on Sun SPARCStation 10, 20, LX and Voyager models. * * - DBRI: AT&T T5900FX Dual Basic Rates ISDN Interface. It is a 32 channel * data time multiplexer with ISDN support (aka T7259) * Interfaces: SBus,ISDN NT & TE, CHI, 4 bits parallel. * CHI: (spelled ki) Concentration Highway Interface (AT&T or Intel bus ?). * Documentation: * - "STP 4000SBus Dual Basic Rate ISDN (DBRI) Transceiver" from * Sparc Technology Business (courtesy of Sun Support) * - Data sheet of the T7903, a newer but very similar ISA bus equivalent * available from the Lucent (formerly AT&T microelectronics) home * page. * - http://www.freesoft.org/Linux/DBRI/ * - MMCODEC: Crystal Semiconductor CS4215 16 bit Multimedia Audio Codec * Interfaces: CHI, Audio In & Out, 2 bits parallel * Documentation: from the Crystal Semiconductor home page. * * The DBRI is a 32 pipe machine, each pipe can transfer some bits between * memory and a serial device (long pipes, no. 0-15) or between two serial * devices (short pipes, no. 16-31), or simply send a fixed data to a serial * device (short pipes). * A timeslot defines the bit-offset and no. of bits read from a serial device. * The timeslots are linked to 6 circular lists, one for each direction for * each serial device (NT,TE,CHI). A timeslot is associated to 1 or 2 pipes * (the second one is a monitor/tee pipe, valid only for serial input). 
* * The mmcodec is connected via the CHI bus and needs the data & some * parameters (volume, output selection) time multiplexed in 8 byte * chunks. It also has a control mode, which serves for audio format setting. * * Looking at the CS4215 data sheet it is easy to set up 2 or 4 codecs on * the same CHI bus, so I thought perhaps it is possible to use the on-board * & the speakerbox codec simultaneously, giving 2 (not very independent :-) * audio devices. But the SUN HW group decided against it, at least on my * LX the speakerbox connector has at least 1 pin missing and 1 wrongly * connected. * * I've tried to stick to the following function naming conventions: * snd_* ALSA stuff * cs4215_* CS4215 codec specific stuff * dbri_* DBRI high-level stuff * other DBRI low-level stuff */ #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/dma-mapping.h> #include <linux/gfp.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/info.h> #include <sound/control.h> #include <sound/initval.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/atomic.h> #include <linux/module.h> MODULE_AUTHOR("Rudolf Koenig, Brent Baccala and Martin Habets"); MODULE_DESCRIPTION("Sun DBRI"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Sun,DBRI}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ /* Enable this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for Sun DBRI soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for Sun DBRI soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable Sun DBRI soundcard."); #undef DBRI_DEBUG #define D_INT (1<<0) #define D_GEN (1<<1) #define D_CMD (1<<2) #define D_MM (1<<3) 
#define D_USR (1<<4) #define D_DESC (1<<5) static int dbri_debug; module_param(dbri_debug, int, 0644); MODULE_PARM_DESC(dbri_debug, "Debug value for Sun DBRI soundcard."); #ifdef DBRI_DEBUG static char *cmds[] = { "WAIT", "PAUSE", "JUMP", "IIQ", "REX", "SDP", "CDP", "DTS", "SSP", "CHI", "NT", "TE", "CDEC", "TEST", "CDM", "RESRV" }; #define dprintk(a, x...) if (dbri_debug & a) printk(KERN_DEBUG x) #else #define dprintk(a, x...) do { } while (0) #endif /* DBRI_DEBUG */ #define DBRI_CMD(cmd, intr, value) ((cmd << 28) | \ (intr << 27) | \ value) /*************************************************************************** CS4215 specific definitions and structures ****************************************************************************/ struct cs4215 { __u8 data[4]; /* Data mode: Time slots 5-8 */ __u8 ctrl[4]; /* Ctrl mode: Time slots 1-4 */ __u8 onboard; __u8 offset; /* Bit offset from frame sync to time slot 1 */ volatile __u32 status; volatile __u32 version; __u8 precision; /* In bits, either 8 or 16 */ __u8 channels; /* 1 or 2 */ }; /* * Control mode first */ /* Time Slot 1, Status register */ #define CS4215_CLB (1<<2) /* Control Latch Bit */ #define CS4215_OLB (1<<3) /* 1: line: 2.0V, speaker 4V */ /* 0: line: 2.8V, speaker 8V */ #define CS4215_MLB (1<<4) /* 1: Microphone: 20dB gain disabled */ #define CS4215_RSRVD_1 (1<<5) /* Time Slot 2, Data Format Register */ #define CS4215_DFR_LINEAR16 0 #define CS4215_DFR_ULAW 1 #define CS4215_DFR_ALAW 2 #define CS4215_DFR_LINEAR8 3 #define CS4215_DFR_STEREO (1<<2) static struct { unsigned short freq; unsigned char xtal; unsigned char csval; } CS4215_FREQ[] = { { 8000, (1 << 4), (0 << 3) }, { 16000, (1 << 4), (1 << 3) }, { 27429, (1 << 4), (2 << 3) }, /* Actually 24428.57 */ { 32000, (1 << 4), (3 << 3) }, /* { NA, (1 << 4), (4 << 3) }, */ /* { NA, (1 << 4), (5 << 3) }, */ { 48000, (1 << 4), (6 << 3) }, { 9600, (1 << 4), (7 << 3) }, { 5512, (2 << 4), (0 << 3) }, /* Actually 5512.5 */ { 11025, (2 << 4), (1 << 3) }, { 
18900, (2 << 4), (2 << 3) }, { 22050, (2 << 4), (3 << 3) }, { 37800, (2 << 4), (4 << 3) }, { 44100, (2 << 4), (5 << 3) }, { 33075, (2 << 4), (6 << 3) }, { 6615, (2 << 4), (7 << 3) }, { 0, 0, 0} }; #define CS4215_HPF (1<<7) /* High Pass Filter, 1: Enabled */ #define CS4215_12_MASK 0xfcbf /* Mask off reserved bits in slot 1 & 2 */ /* Time Slot 3, Serial Port Control register */ #define CS4215_XEN (1<<0) /* 0: Enable serial output */ #define CS4215_XCLK (1<<1) /* 1: Master mode: Generate SCLK */ #define CS4215_BSEL_64 (0<<2) /* Bitrate: 64 bits per frame */ #define CS4215_BSEL_128 (1<<2) #define CS4215_BSEL_256 (2<<2) #define CS4215_MCK_MAST (0<<4) /* Master clock */ #define CS4215_MCK_XTL1 (1<<4) /* 24.576 MHz clock source */ #define CS4215_MCK_XTL2 (2<<4) /* 16.9344 MHz clock source */ #define CS4215_MCK_CLK1 (3<<4) /* Clockin, 256 x Fs */ #define CS4215_MCK_CLK2 (4<<4) /* Clockin, see DFR */ /* Time Slot 4, Test Register */ #define CS4215_DAD (1<<0) /* 0:Digital-Dig loop, 1:Dig-Analog-Dig loop */ #define CS4215_ENL (1<<1) /* Enable Loopback Testing */ /* Time Slot 5, Parallel Port Register */ /* Read only here and the same as the in data mode */ /* Time Slot 6, Reserved */ /* Time Slot 7, Version Register */ #define CS4215_VERSION_MASK 0xf /* Known versions 0/C, 1/D, 2/E */ /* Time Slot 8, Reserved */ /* * Data mode */ /* Time Slot 1-2: Left Channel Data, 2-3: Right Channel Data */ /* Time Slot 5, Output Setting */ #define CS4215_LO(v) v /* Left Output Attenuation 0x3f: -94.5 dB */ #define CS4215_LE (1<<6) /* Line Out Enable */ #define CS4215_HE (1<<7) /* Headphone Enable */ /* Time Slot 6, Output Setting */ #define CS4215_RO(v) v /* Right Output Attenuation 0x3f: -94.5 dB */ #define CS4215_SE (1<<6) /* Speaker Enable */ #define CS4215_ADI (1<<7) /* A/D Data Invalid: Busy in calibration */ /* Time Slot 7, Input Setting */ #define CS4215_LG(v) v /* Left Gain Setting 0xf: 22.5 dB */ #define CS4215_IS (1<<4) /* Input Select: 1=Microphone, 0=Line */ #define CS4215_OVR 
(1<<5) /* 1: Over range condition occurred */ #define CS4215_PIO0 (1<<6) /* Parallel I/O 0 */ #define CS4215_PIO1 (1<<7) /* Time Slot 8, Input Setting */ #define CS4215_RG(v) v /* Right Gain Setting 0xf: 22.5 dB */ #define CS4215_MA(v) (v<<4) /* Monitor Path Attenuation 0xf: mute */ /*************************************************************************** DBRI specific definitions and structures ****************************************************************************/ /* DBRI main registers */ #define REG0 0x00 /* Status and Control */ #define REG1 0x04 /* Mode and Interrupt */ #define REG2 0x08 /* Parallel IO */ #define REG3 0x0c /* Test */ #define REG8 0x20 /* Command Queue Pointer */ #define REG9 0x24 /* Interrupt Queue Pointer */ #define DBRI_NO_CMDS 64 #define DBRI_INT_BLK 64 #define DBRI_NO_DESCS 64 #define DBRI_NO_PIPES 32 #define DBRI_MAX_PIPE (DBRI_NO_PIPES - 1) #define DBRI_REC 0 #define DBRI_PLAY 1 #define DBRI_NO_STREAMS 2 /* One transmit/receive descriptor */ /* When ba != 0 descriptor is used */ struct dbri_mem { volatile __u32 word1; __u32 ba; /* Transmit/Receive Buffer Address */ __u32 nda; /* Next Descriptor Address */ volatile __u32 word4; }; /* This structure is in a DMA region where it can accessed by both * the CPU and the DBRI */ struct dbri_dma { s32 cmd[DBRI_NO_CMDS]; /* Place for commands */ volatile s32 intr[DBRI_INT_BLK]; /* Interrupt field */ struct dbri_mem desc[DBRI_NO_DESCS]; /* Xmit/receive descriptors */ }; #define dbri_dma_off(member, elem) \ ((u32)(unsigned long) \ (&(((struct dbri_dma *)0)->member[elem]))) enum in_or_out { PIPEinput, PIPEoutput }; struct dbri_pipe { u32 sdp; /* SDP command word */ int nextpipe; /* Next pipe in linked list */ int length; /* Length of timeslot (bits) */ int first_desc; /* Index of first descriptor */ int desc; /* Index of active descriptor */ volatile __u32 *recv_fixed_ptr; /* Ptr to receive fixed data */ }; /* Per stream (playback or record) information */ struct dbri_streaminfo { struct 
snd_pcm_substream *substream; u32 dvma_buffer; /* Device view of ALSA DMA buffer */ int size; /* Size of DMA buffer */ size_t offset; /* offset in user buffer */ int pipe; /* Data pipe used */ int left_gain; /* mixer elements */ int right_gain; }; /* This structure holds the information for both chips (DBRI & CS4215) */ struct snd_dbri { int regs_size, irq; /* Needed for unload */ struct platform_device *op; /* OF device info */ spinlock_t lock; struct dbri_dma *dma; /* Pointer to our DMA block */ u32 dma_dvma; /* DBRI visible DMA address */ void __iomem *regs; /* dbri HW regs */ int dbri_irqp; /* intr queue pointer */ struct dbri_pipe pipes[DBRI_NO_PIPES]; /* DBRI's 32 data pipes */ int next_desc[DBRI_NO_DESCS]; /* Index of next desc, or -1 */ spinlock_t cmdlock; /* Protects cmd queue accesses */ s32 *cmdptr; /* Pointer to the last queued cmd */ int chi_bpf; struct cs4215 mm; /* mmcodec special info */ /* per stream (playback/record) info */ struct dbri_streaminfo stream_info[DBRI_NO_STREAMS]; }; #define DBRI_MAX_VOLUME 63 /* Output volume */ #define DBRI_MAX_GAIN 15 /* Input gain */ /* DBRI Reg0 - Status Control Register - defines. (Page 17) */ #define D_P (1<<15) /* Program command & queue pointer valid */ #define D_G (1<<14) /* Allow 4-Word SBus Burst */ #define D_S (1<<13) /* Allow 16-Word SBus Burst */ #define D_E (1<<12) /* Allow 8-Word SBus Burst */ #define D_X (1<<7) /* Sanity Timer Disable */ #define D_T (1<<6) /* Permit activation of the TE interface */ #define D_N (1<<5) /* Permit activation of the NT interface */ #define D_C (1<<4) /* Permit activation of the CHI interface */ #define D_F (1<<3) /* Force Sanity Timer Time-Out */ #define D_D (1<<2) /* Disable Master Mode */ #define D_H (1<<1) /* Halt for Analysis */ #define D_R (1<<0) /* Soft Reset */ /* DBRI Reg1 - Mode and Interrupt Register - defines. 
(Page 18) */ #define D_LITTLE_END (1<<8) /* Byte Order */ #define D_BIG_END (0<<8) /* Byte Order */ #define D_MRR (1<<4) /* Multiple Error Ack on SBus (read only) */ #define D_MLE (1<<3) /* Multiple Late Error on SBus (read only) */ #define D_LBG (1<<2) /* Lost Bus Grant on SBus (read only) */ #define D_MBE (1<<1) /* Burst Error on SBus (read only) */ #define D_IR (1<<0) /* Interrupt Indicator (read only) */ /* DBRI Reg2 - Parallel IO Register - defines. (Page 18) */ #define D_ENPIO3 (1<<7) /* Enable Pin 3 */ #define D_ENPIO2 (1<<6) /* Enable Pin 2 */ #define D_ENPIO1 (1<<5) /* Enable Pin 1 */ #define D_ENPIO0 (1<<4) /* Enable Pin 0 */ #define D_ENPIO (0xf0) /* Enable all the pins */ #define D_PIO3 (1<<3) /* Pin 3: 1: Data mode, 0: Ctrl mode */ #define D_PIO2 (1<<2) /* Pin 2: 1: Onboard PDN */ #define D_PIO1 (1<<1) /* Pin 1: 0: Reset */ #define D_PIO0 (1<<0) /* Pin 0: 1: Speakerbox PDN */ /* DBRI Commands (Page 20) */ #define D_WAIT 0x0 /* Stop execution */ #define D_PAUSE 0x1 /* Flush long pipes */ #define D_JUMP 0x2 /* New command queue */ #define D_IIQ 0x3 /* Initialize Interrupt Queue */ #define D_REX 0x4 /* Report command execution via interrupt */ #define D_SDP 0x5 /* Setup Data Pipe */ #define D_CDP 0x6 /* Continue Data Pipe (reread NULL Pointer) */ #define D_DTS 0x7 /* Define Time Slot */ #define D_SSP 0x8 /* Set short Data Pipe */ #define D_CHI 0x9 /* Set CHI Global Mode */ #define D_NT 0xa /* NT Command */ #define D_TE 0xb /* TE Command */ #define D_CDEC 0xc /* Codec setup */ #define D_TEST 0xd /* No comment */ #define D_CDM 0xe /* CHI Data mode command */ /* Special bits for some commands */ #define D_PIPE(v) ((v)<<0) /* Pipe No.: 0-15 long, 16-21 short */ /* Setup Data Pipe */ /* IRM */ #define D_SDP_2SAME (1<<18) /* Report 2nd time in a row value received */ #define D_SDP_CHANGE (2<<18) /* Report any changes */ #define D_SDP_EVERY (3<<18) /* Report any changes */ #define D_SDP_EOL (1<<17) /* EOL interrupt enable */ #define D_SDP_IDLE (1<<16) /* HDLC 
idle interrupt enable */ /* Pipe data MODE */ #define D_SDP_MEM (0<<13) /* To/from memory */ #define D_SDP_HDLC (2<<13) #define D_SDP_HDLC_D (3<<13) /* D Channel (prio control) */ #define D_SDP_SER (4<<13) /* Serial to serial */ #define D_SDP_FIXED (6<<13) /* Short only */ #define D_SDP_MODE(v) ((v)&(7<<13)) #define D_SDP_TO_SER (1<<12) /* Direction */ #define D_SDP_FROM_SER (0<<12) /* Direction */ #define D_SDP_MSB (1<<11) /* Bit order within Byte */ #define D_SDP_LSB (0<<11) /* Bit order within Byte */ #define D_SDP_P (1<<10) /* Pointer Valid */ #define D_SDP_A (1<<8) /* Abort */ #define D_SDP_C (1<<7) /* Clear */ /* Define Time Slot */ #define D_DTS_VI (1<<17) /* Valid Input Time-Slot Descriptor */ #define D_DTS_VO (1<<16) /* Valid Output Time-Slot Descriptor */ #define D_DTS_INS (1<<15) /* Insert Time Slot */ #define D_DTS_DEL (0<<15) /* Delete Time Slot */ #define D_DTS_PRVIN(v) ((v)<<10) /* Previous In Pipe */ #define D_DTS_PRVOUT(v) ((v)<<5) /* Previous Out Pipe */ /* Time Slot defines */ #define D_TS_LEN(v) ((v)<<24) /* Number of bits in this time slot */ #define D_TS_CYCLE(v) ((v)<<14) /* Bit Count at start of TS */ #define D_TS_DI (1<<13) /* Data Invert */ #define D_TS_1CHANNEL (0<<10) /* Single Channel / Normal mode */ #define D_TS_MONITOR (2<<10) /* Monitor pipe */ #define D_TS_NONCONTIG (3<<10) /* Non contiguous mode */ #define D_TS_ANCHOR (7<<10) /* Starting short pipes */ #define D_TS_MON(v) ((v)<<5) /* Monitor Pipe */ #define D_TS_NEXT(v) ((v)<<0) /* Pipe no.: 0-15 long, 16-21 short */ /* Concentration Highway Interface Modes */ #define D_CHI_CHICM(v) ((v)<<16) /* Clock mode */ #define D_CHI_IR (1<<15) /* Immediate Interrupt Report */ #define D_CHI_EN (1<<14) /* CHIL Interrupt enabled */ #define D_CHI_OD (1<<13) /* Open Drain Enable */ #define D_CHI_FE (1<<12) /* Sample CHIFS on Rising Frame Edge */ #define D_CHI_FD (1<<11) /* Frame Drive */ #define D_CHI_BPF(v) ((v)<<0) /* Bits per Frame */ /* NT: These are here for completeness */ #define 
D_NT_FBIT (1<<17) /* Frame Bit */ #define D_NT_NBF (1<<16) /* Number of bad frames to loose framing */ #define D_NT_IRM_IMM (1<<15) /* Interrupt Report & Mask: Immediate */ #define D_NT_IRM_EN (1<<14) /* Interrupt Report & Mask: Enable */ #define D_NT_ISNT (1<<13) /* Configure interface as NT */ #define D_NT_FT (1<<12) /* Fixed Timing */ #define D_NT_EZ (1<<11) /* Echo Channel is Zeros */ #define D_NT_IFA (1<<10) /* Inhibit Final Activation */ #define D_NT_ACT (1<<9) /* Activate Interface */ #define D_NT_MFE (1<<8) /* Multiframe Enable */ #define D_NT_RLB(v) ((v)<<5) /* Remote Loopback */ #define D_NT_LLB(v) ((v)<<2) /* Local Loopback */ #define D_NT_FACT (1<<1) /* Force Activation */ #define D_NT_ABV (1<<0) /* Activate Bipolar Violation */ /* Codec Setup */ #define D_CDEC_CK(v) ((v)<<24) /* Clock Select */ #define D_CDEC_FED(v) ((v)<<12) /* FSCOD Falling Edge Delay */ #define D_CDEC_RED(v) ((v)<<0) /* FSCOD Rising Edge Delay */ /* Test */ #define D_TEST_RAM(v) ((v)<<16) /* RAM Pointer */ #define D_TEST_SIZE(v) ((v)<<11) /* */ #define D_TEST_ROMONOFF 0x5 /* Toggle ROM opcode monitor on/off */ #define D_TEST_PROC 0x6 /* Microprocessor test */ #define D_TEST_SER 0x7 /* Serial-Controller test */ #define D_TEST_RAMREAD 0x8 /* Copy from Ram to system memory */ #define D_TEST_RAMWRITE 0x9 /* Copy into Ram from system memory */ #define D_TEST_RAMBIST 0xa /* RAM Built-In Self Test */ #define D_TEST_MCBIST 0xb /* Microcontroller Built-In Self Test */ #define D_TEST_DUMP 0xe /* ROM Dump */ /* CHI Data Mode */ #define D_CDM_THI (1 << 8) /* Transmit Data on CHIDR Pin */ #define D_CDM_RHI (1 << 7) /* Receive Data on CHIDX Pin */ #define D_CDM_RCE (1 << 6) /* Receive on Rising Edge of CHICK */ #define D_CDM_XCE (1 << 2) /* Transmit Data on Rising Edge of CHICK */ #define D_CDM_XEN (1 << 1) /* Transmit Highway Enable */ #define D_CDM_REN (1 << 0) /* Receive Highway Enable */ /* The Interrupts */ #define D_INTR_BRDY 1 /* Buffer Ready for processing */ #define D_INTR_MINT 2 /* 
Marked Interrupt in RD/TD */ #define D_INTR_IBEG 3 /* Flag to idle transition detected (HDLC) */ #define D_INTR_IEND 4 /* Idle to flag transition detected (HDLC) */ #define D_INTR_EOL 5 /* End of List */ #define D_INTR_CMDI 6 /* Command has bean read */ #define D_INTR_XCMP 8 /* Transmission of frame complete */ #define D_INTR_SBRI 9 /* BRI status change info */ #define D_INTR_FXDT 10 /* Fixed data change */ #define D_INTR_CHIL 11 /* CHI lost frame sync (channel 36 only) */ #define D_INTR_COLL 11 /* Unrecoverable D-Channel collision */ #define D_INTR_DBYT 12 /* Dropped by frame slip */ #define D_INTR_RBYT 13 /* Repeated by frame slip */ #define D_INTR_LINT 14 /* Lost Interrupt */ #define D_INTR_UNDR 15 /* DMA underrun */ #define D_INTR_TE 32 #define D_INTR_NT 34 #define D_INTR_CHI 36 #define D_INTR_CMD 38 #define D_INTR_GETCHAN(v) (((v) >> 24) & 0x3f) #define D_INTR_GETCODE(v) (((v) >> 20) & 0xf) #define D_INTR_GETCMD(v) (((v) >> 16) & 0xf) #define D_INTR_GETVAL(v) ((v) & 0xffff) #define D_INTR_GETRVAL(v) ((v) & 0xfffff) #define D_P_0 0 /* TE receive anchor */ #define D_P_1 1 /* TE transmit anchor */ #define D_P_2 2 /* NT transmit anchor */ #define D_P_3 3 /* NT receive anchor */ #define D_P_4 4 /* CHI send data */ #define D_P_5 5 /* CHI receive data */ #define D_P_6 6 /* */ #define D_P_7 7 /* */ #define D_P_8 8 /* */ #define D_P_9 9 /* */ #define D_P_10 10 /* */ #define D_P_11 11 /* */ #define D_P_12 12 /* */ #define D_P_13 13 /* */ #define D_P_14 14 /* */ #define D_P_15 15 /* */ #define D_P_16 16 /* CHI anchor pipe */ #define D_P_17 17 /* CHI send */ #define D_P_18 18 /* CHI receive */ #define D_P_19 19 /* CHI receive */ #define D_P_20 20 /* CHI receive */ #define D_P_21 21 /* */ #define D_P_22 22 /* */ #define D_P_23 23 /* */ #define D_P_24 24 /* */ #define D_P_25 25 /* */ #define D_P_26 26 /* */ #define D_P_27 27 /* */ #define D_P_28 28 /* */ #define D_P_29 29 /* */ #define D_P_30 30 /* */ #define D_P_31 31 /* */ /* Transmit descriptor defines */ #define 
DBRI_TD_F (1 << 31) /* End of Frame */ #define DBRI_TD_D (1 << 30) /* Do not append CRC */ #define DBRI_TD_CNT(v) ((v) << 16) /* Number of valid bytes in the buffer */ #define DBRI_TD_B (1 << 15) /* Final interrupt */ #define DBRI_TD_M (1 << 14) /* Marker interrupt */ #define DBRI_TD_I (1 << 13) /* Transmit Idle Characters */ #define DBRI_TD_FCNT(v) (v) /* Flag Count */ #define DBRI_TD_UNR (1 << 3) /* Underrun: transmitter is out of data */ #define DBRI_TD_ABT (1 << 2) /* Abort: frame aborted */ #define DBRI_TD_TBC (1 << 0) /* Transmit buffer Complete */ #define DBRI_TD_STATUS(v) ((v) & 0xff) /* Transmit status */ /* Maximum buffer size per TD: almost 8KB */ #define DBRI_TD_MAXCNT ((1 << 13) - 4) /* Receive descriptor defines */ #define DBRI_RD_F (1 << 31) /* End of Frame */ #define DBRI_RD_C (1 << 30) /* Completed buffer */ #define DBRI_RD_B (1 << 15) /* Final interrupt */ #define DBRI_RD_M (1 << 14) /* Marker interrupt */ #define DBRI_RD_BCNT(v) (v) /* Buffer size */ #define DBRI_RD_CRC (1 << 7) /* 0: CRC is correct */ #define DBRI_RD_BBC (1 << 6) /* 1: Bad Byte received */ #define DBRI_RD_ABT (1 << 5) /* Abort: frame aborted */ #define DBRI_RD_OVRN (1 << 3) /* Overrun: data lost */ #define DBRI_RD_STATUS(v) ((v) & 0xff) /* Receive status */ #define DBRI_RD_CNT(v) (((v) >> 16) & 0x1fff) /* Valid bytes in the buffer */ /* stream_info[] access */ /* Translate the ALSA direction into the array index */ #define DBRI_STREAMNO(substream) \ (substream->stream == \ SNDRV_PCM_STREAM_PLAYBACK ? DBRI_PLAY: DBRI_REC) /* Return a pointer to dbri_streaminfo */ #define DBRI_STREAM(dbri, substream) \ &dbri->stream_info[DBRI_STREAMNO(substream)] /* * Short data pipes transmit LSB first. The CS4215 receives MSB first. Grrr. * So we have to reverse the bits. 
Note: not all bit lengths are supported */ static __u32 reverse_bytes(__u32 b, int len) { switch (len) { case 32: b = ((b & 0xffff0000) >> 16) | ((b & 0x0000ffff) << 16); case 16: b = ((b & 0xff00ff00) >> 8) | ((b & 0x00ff00ff) << 8); case 8: b = ((b & 0xf0f0f0f0) >> 4) | ((b & 0x0f0f0f0f) << 4); case 4: b = ((b & 0xcccccccc) >> 2) | ((b & 0x33333333) << 2); case 2: b = ((b & 0xaaaaaaaa) >> 1) | ((b & 0x55555555) << 1); case 1: case 0: break; default: printk(KERN_ERR "DBRI reverse_bytes: unsupported length\n"); }; return b; } /* **************************************************************************** ************** DBRI initialization and command synchronization ************* **************************************************************************** Commands are sent to the DBRI by building a list of them in memory, then writing the address of the first list item to DBRI register 8. The list is terminated with a WAIT command, which generates a CPU interrupt to signal completion. Since the DBRI can run in parallel with the CPU, several means of synchronization present themselves. The method implemented here uses the dbri_cmdwait() to wait for execution of batch of sent commands. A circular command buffer is used here. A new command is being added while another can be executed. The scheme works by adding two WAIT commands after each sent batch of commands. When the next batch is prepared it is added after the WAIT commands then the WAITs are replaced with single JUMP command to the new batch. The the DBRI is forced to reread the last WAIT command (replaced by the JUMP by then). If the DBRI is still executing previous commands the request to reread the WAIT command is ignored. Every time a routine wants to write commands to the DBRI, it must first call dbri_cmdlock() and get pointer to a free space in dbri->dma->cmd buffer. After this, the commands can be written to the buffer, and dbri_cmdsend() is called with the final pointer value to send them to the DBRI. 
*/ #define MAXLOOPS 20 /* * Wait for the current command string to execute */ static void dbri_cmdwait(struct snd_dbri *dbri) { int maxloops = MAXLOOPS; unsigned long flags; /* Delay if previous commands are still being processed */ spin_lock_irqsave(&dbri->lock, flags); while ((--maxloops) > 0 && (sbus_readl(dbri->regs + REG0) & D_P)) { spin_unlock_irqrestore(&dbri->lock, flags); msleep_interruptible(1); spin_lock_irqsave(&dbri->lock, flags); } spin_unlock_irqrestore(&dbri->lock, flags); if (maxloops == 0) printk(KERN_ERR "DBRI: Chip never completed command buffer\n"); else dprintk(D_CMD, "Chip completed command buffer (%d)\n", MAXLOOPS - maxloops - 1); } /* * Lock the command queue and return pointer to space for len cmd words * It locks the cmdlock spinlock. */ static s32 *dbri_cmdlock(struct snd_dbri *dbri, int len) { /* Space for 2 WAIT cmds (replaced later by 1 JUMP cmd) */ len += 2; spin_lock(&dbri->cmdlock); if (dbri->cmdptr - dbri->dma->cmd + len < DBRI_NO_CMDS - 2) return dbri->cmdptr + 2; else if (len < sbus_readl(dbri->regs + REG8) - dbri->dma_dvma) return dbri->dma->cmd; else printk(KERN_ERR "DBRI: no space for commands."); return NULL; } /* * Send prepared cmd string. It works by writing a JUMP cmd into * the last WAIT cmd and force DBRI to reread the cmd. * The JUMP cmd points to the new cmd string. * It also releases the cmdlock spinlock. * * Lock must be held before calling this. */ static void dbri_cmdsend(struct snd_dbri *dbri, s32 *cmd, int len) { s32 tmp, addr; static int wait_id = 0; wait_id++; wait_id &= 0xffff; /* restrict it to a 16 bit counter. 
				 */

	/* Terminate the new batch with two identifiable WAITs ... */
	*(cmd) = DBRI_CMD(D_WAIT, 1, wait_id);
	*(cmd+1) = DBRI_CMD(D_WAIT, 1, wait_id);

	/* Replace the last command with JUMP */
	addr = dbri->dma_dvma + (cmd - len - dbri->dma->cmd) * sizeof(s32);
	*(dbri->cmdptr+1) = addr;
	*(dbri->cmdptr) = DBRI_CMD(D_JUMP, 0, 0);

#ifdef DBRI_DEBUG
	if (cmd > dbri->cmdptr) {
		s32 *ptr;

		for (ptr = dbri->cmdptr; ptr < cmd+2; ptr++)
			dprintk(D_CMD, "cmd: %lx:%08x\n",
				(unsigned long)ptr, *ptr);
	} else {
		/* Batch wrapped to the start of the buffer: dump the old
		 * JUMP pair, then everything from the buffer start. */
		s32 *ptr = dbri->cmdptr;

		dprintk(D_CMD, "cmd: %lx:%08x\n", (unsigned long)ptr, *ptr);
		ptr++;
		dprintk(D_CMD, "cmd: %lx:%08x\n", (unsigned long)ptr, *ptr);
		for (ptr = dbri->dma->cmd; ptr < cmd+2; ptr++)
			dprintk(D_CMD, "cmd: %lx:%08x\n",
				(unsigned long)ptr, *ptr);
	}
#endif

	/* Reread the last command */
	tmp = sbus_readl(dbri->regs + REG0);
	tmp |= D_P;
	sbus_writel(tmp, dbri->regs + REG0);

	dbri->cmdptr = cmd;
	spin_unlock(&dbri->cmdlock);
}

/* Lock must be held when calling this */
static void dbri_reset(struct snd_dbri *dbri)
{
	int i;
	u32 tmp;

	dprintk(D_GEN, "reset 0:%x 2:%x 8:%x 9:%x\n",
		sbus_readl(dbri->regs + REG0),
		sbus_readl(dbri->regs + REG2),
		sbus_readl(dbri->regs + REG8),
		sbus_readl(dbri->regs + REG9));

	sbus_writel(D_R, dbri->regs + REG0);	/* Soft Reset */
	/* Wait (bounded) for the chip to clear the R bit itself. */
	for (i = 0; (sbus_readl(dbri->regs + REG0) & D_R) && i < 64; i++)
		udelay(10);

	/* A brute approach - DBRI falls back to working burst size by itself
	 * On SS20 D_S does not work, so do not try so high. */
	tmp = sbus_readl(dbri->regs + REG0);
	tmp |= D_G | D_E;
	tmp &= ~D_S;
	sbus_writel(tmp, dbri->regs + REG0);
}

/* Lock must not be held before calling this */
static void __devinit dbri_initialize(struct snd_dbri *dbri)
{
	s32 *cmd;
	u32 dma_addr;
	unsigned long flags;
	int n;

	spin_lock_irqsave(&dbri->lock, flags);

	dbri_reset(dbri);

	/* Initialize pipes */
	for (n = 0; n < DBRI_NO_PIPES; n++)
		dbri->pipes[n].desc = dbri->pipes[n].first_desc = -1;

	spin_lock_init(&dbri->cmdlock);

	/*
	 * Initialize the interrupt ring buffer.
	 */
	/* Slot 0 of the interrupt ring points back at the ring itself;
	 * the handler starts consuming entries at index 1 (dbri_irqp). */
	dma_addr = dbri->dma_dvma + dbri_dma_off(intr, 0);
	dbri->dma->intr[0] = dma_addr;
	dbri->dbri_irqp = 1;

	/*
	 * Set up the interrupt queue
	 */
	spin_lock(&dbri->cmdlock);
	cmd = dbri->cmdptr = dbri->dma->cmd;
	*(cmd++) = DBRI_CMD(D_IIQ, 0, 0);
	*(cmd++) = dma_addr;
	*(cmd++) = DBRI_CMD(D_PAUSE, 0, 0);
	dbri->cmdptr = cmd;
	/* Prime the WAIT pair that dbri_cmdsend() will later turn
	 * into a JUMP to the next batch. */
	*(cmd++) = DBRI_CMD(D_WAIT, 1, 0);
	*(cmd++) = DBRI_CMD(D_WAIT, 1, 0);
	dma_addr = dbri->dma_dvma + dbri_dma_off(cmd, 0);
	sbus_writel(dma_addr, dbri->regs + REG8);
	spin_unlock(&dbri->cmdlock);

	spin_unlock_irqrestore(&dbri->lock, flags);
	dbri_cmdwait(dbri);
}

/*
****************************************************************************
************************** DBRI data pipe management ***********************
****************************************************************************

While DBRI control functions use the command and interrupt buffers, the
main data path takes the form of data pipes, which can be short (command
and interrupt driven), or long (attached to DMA buffers).  These functions
provide a rudimentary means of setting up and managing the DBRI's pipes,
but the calling functions have to make sure they respect the pipes' linked
list ordering, among other things.  The transmit and receive functions
here interface closely with the transmit and receive interrupt code.

*/

/* Returns non-zero iff 'pipe' is valid and has descriptors attached. */
static inline int pipe_active(struct snd_dbri *dbri, int pipe)
{
	return ((pipe >= 0) && (dbri->pipes[pipe].desc != -1));
}

/* reset_pipe(dbri, pipe)
 *
 * Called on an in-use pipe to clear anything being transmitted or received
 * Lock must be held before calling this.
 */
static void reset_pipe(struct snd_dbri *dbri, int pipe)
{
	int sdp;
	int desc;
	s32 *cmd;

	if (pipe < 0 || pipe > DBRI_MAX_PIPE) {
		printk(KERN_ERR "DBRI: reset_pipe called with "
			"illegal pipe number\n");
		return;
	}

	sdp = dbri->pipes[pipe].sdp;
	if (sdp == 0) {
		printk(KERN_ERR "DBRI: reset_pipe called "
			"on uninitialized pipe\n");
		return;
	}

	/* Tell the chip to clear (C) and pause (P) the pipe ... */
	cmd = dbri_cmdlock(dbri, 3);
	*(cmd++) = DBRI_CMD(D_SDP, 0, sdp | D_SDP_C | D_SDP_P);
	*(cmd++) = 0;
	*(cmd++) = DBRI_CMD(D_PAUSE, 0, 0);
	dbri_cmdsend(dbri, cmd, 3);

	/* ... then walk the descriptor ring (if any) and detach it. */
	desc = dbri->pipes[pipe].first_desc;
	if (desc >= 0)
		do {
			dbri->dma->desc[desc].ba = 0;
			dbri->dma->desc[desc].nda = 0;
			desc = dbri->next_desc[desc];
		} while (desc != -1 &&
			 desc != dbri->pipes[pipe].first_desc);

	dbri->pipes[pipe].desc = -1;
	dbri->pipes[pipe].first_desc = -1;
}

/*
 * Lock must be held before calling this.
 */
static void setup_pipe(struct snd_dbri *dbri, int pipe, int sdp)
{
	if (pipe < 0 || pipe > DBRI_MAX_PIPE) {
		printk(KERN_ERR "DBRI: setup_pipe called "
			"with illegal pipe number\n");
		return;
	}

	if ((sdp & 0xf800) != sdp) {
		printk(KERN_ERR "DBRI: setup_pipe called "
			"with strange SDP value\n");
		/* sdp &= 0xf800; */
	}

	/* If this is a fixed receive pipe, arrange for an interrupt
	 * every time its data changes */
	if (D_SDP_MODE(sdp) == D_SDP_FIXED && !(sdp & D_SDP_TO_SER))
		sdp |= D_SDP_CHANGE;

	sdp |= D_PIPE(pipe);
	dbri->pipes[pipe].sdp = sdp;
	dbri->pipes[pipe].desc = -1;
	dbri->pipes[pipe].first_desc = -1;

	reset_pipe(dbri, pipe);
}

/*
 * Lock must be held before calling this.
 */
/*
 * Insert 'pipe' into the time-slot linked list between 'prevpipe' and
 * 'nextpipe', occupying 'length' bits starting at bit offset 'cycle'
 * of the CHI frame, and issue the corresponding DTS command.
 */
static void link_time_slot(struct snd_dbri *dbri, int pipe,
			   int prevpipe, int nextpipe,
			   int length, int cycle)
{
	s32 *cmd;
	int val;

	if (pipe < 0 || pipe > DBRI_MAX_PIPE ||
			prevpipe < 0 || prevpipe > DBRI_MAX_PIPE ||
			nextpipe < 0 || nextpipe > DBRI_MAX_PIPE) {
		printk(KERN_ERR
		    "DBRI: link_time_slot called with illegal pipe number\n");
		return;
	}

	if (dbri->pipes[pipe].sdp == 0
			|| dbri->pipes[prevpipe].sdp == 0
			|| dbri->pipes[nextpipe].sdp == 0) {
		printk(KERN_ERR "DBRI: link_time_slot called "
			"on uninitialized pipe\n");
		return;
	}

	dbri->pipes[prevpipe].nextpipe = pipe;
	dbri->pipes[pipe].nextpipe = nextpipe;
	dbri->pipes[pipe].length = length;

	cmd = dbri_cmdlock(dbri, 4);

	if (dbri->pipes[pipe].sdp & D_SDP_TO_SER) {
		/* Deal with CHI special case:
		 * "If transmission on edges 0 or 1 is desired, then cycle n
		 *  (where n = # of bit times per frame...) must be used."
		 *	- DBRI data sheet, page 11
		 */
		if (prevpipe == 16 && cycle == 0)
			cycle = dbri->chi_bpf;

		val = D_DTS_VO | D_DTS_INS | D_DTS_PRVOUT(prevpipe) | pipe;
		*(cmd++) = DBRI_CMD(D_DTS, 0, val);
		*(cmd++) = 0;
		*(cmd++) =
		    D_TS_LEN(length) | D_TS_CYCLE(cycle) | D_TS_NEXT(nextpipe);
	} else {
		val = D_DTS_VI | D_DTS_INS | D_DTS_PRVIN(prevpipe) | pipe;
		*(cmd++) = DBRI_CMD(D_DTS, 0, val);
		/* For input pipes the time-slot word goes in the first
		 * argument slot, for output pipes in the second. */
		*(cmd++) =
		    D_TS_LEN(length) | D_TS_CYCLE(cycle) | D_TS_NEXT(nextpipe);
		*(cmd++) = 0;
	}
	*(cmd++) = DBRI_CMD(D_PAUSE, 0, 0);

	dbri_cmdsend(dbri, cmd, 4);
}

#if 0
/*
 * Lock must be held before calling this.
 */
/* Remove 'pipe' from the time-slot list via a DTS delete command.
 * Currently compiled out (#if 0); kept for reference. */
static void unlink_time_slot(struct snd_dbri *dbri, int pipe,
			     enum in_or_out direction, int prevpipe,
			     int nextpipe)
{
	s32 *cmd;
	int val;

	if (pipe < 0 || pipe > DBRI_MAX_PIPE ||
			prevpipe < 0 || prevpipe > DBRI_MAX_PIPE ||
			nextpipe < 0 || nextpipe > DBRI_MAX_PIPE) {
		printk(KERN_ERR
		    "DBRI: unlink_time_slot called with illegal pipe number\n");
		return;
	}

	cmd = dbri_cmdlock(dbri, 4);

	if (direction == PIPEinput) {
		val = D_DTS_VI | D_DTS_DEL | D_DTS_PRVIN(prevpipe) | pipe;
		*(cmd++) = DBRI_CMD(D_DTS, 0, val);
		*(cmd++) = D_TS_NEXT(nextpipe);
		*(cmd++) = 0;
	} else {
		val = D_DTS_VO | D_DTS_DEL | D_DTS_PRVOUT(prevpipe) | pipe;
		*(cmd++) = DBRI_CMD(D_DTS, 0, val);
		*(cmd++) = 0;
		*(cmd++) = D_TS_NEXT(nextpipe);
	}
	*(cmd++) = DBRI_CMD(D_PAUSE, 0, 0);

	dbri_cmdsend(dbri, cmd, 4);
}
#endif

/* xmit_fixed() / recv_fixed()
 *
 * Transmit/receive data on a "fixed" pipe - i.e, one whose contents are not
 * expected to change much, and which we don't need to buffer.
 * The DBRI only interrupts us when the data changes (receive pipes),
 * or only changes the data when this function is called (transmit pipes).
 * Only short pipes (numbers 16-31) can be used in fixed data mode.
 *
 * These function operate on a 32-bit field, no matter how large
 * the actual time slot is.  The interrupt handler takes care of bit
 * ordering and alignment.  An 8-bit time slot will always end up
 * in the low-order 8 bits, filled either MSB-first or LSB-first,
 * depending on the settings passed to setup_pipe().
 *
 * Lock must not be held before calling it.
 */
static void xmit_fixed(struct snd_dbri *dbri, int pipe, unsigned int data)
{
	s32 *cmd;
	unsigned long flags;

	if (pipe < 16 || pipe > DBRI_MAX_PIPE) {
		printk(KERN_ERR "DBRI: xmit_fixed: Illegal pipe number\n");
		return;
	}

	if (D_SDP_MODE(dbri->pipes[pipe].sdp) == 0) {
		printk(KERN_ERR "DBRI: xmit_fixed: "
			"Uninitialized pipe %d\n", pipe);
		return;
	}

	if (D_SDP_MODE(dbri->pipes[pipe].sdp) != D_SDP_FIXED) {
		printk(KERN_ERR "DBRI: xmit_fixed: Non-fixed pipe %d\n", pipe);
		return;
	}

	if (!(dbri->pipes[pipe].sdp & D_SDP_TO_SER)) {
		printk(KERN_ERR "DBRI: xmit_fixed: Called on receive pipe %d\n",
			pipe);
		return;
	}

	/* DBRI short pipes always transmit LSB first */

	if (dbri->pipes[pipe].sdp & D_SDP_MSB)
		data = reverse_bytes(data, dbri->pipes[pipe].length);

	cmd = dbri_cmdlock(dbri, 3);

	*(cmd++) = DBRI_CMD(D_SSP, 0, pipe);
	*(cmd++) = data;
	*(cmd++) = DBRI_CMD(D_PAUSE, 0, 0);

	spin_lock_irqsave(&dbri->lock, flags);
	dbri_cmdsend(dbri, cmd, 3);
	spin_unlock_irqrestore(&dbri->lock, flags);
	dbri_cmdwait(dbri);

}

/* Register 'ptr' as the destination that the FXDT interrupt handler
 * writes into when the fixed pipe's data changes (NULL to disable). */
static void recv_fixed(struct snd_dbri *dbri, int pipe, volatile __u32 *ptr)
{
	if (pipe < 16 || pipe > DBRI_MAX_PIPE) {
		printk(KERN_ERR "DBRI: recv_fixed called with "
			"illegal pipe number\n");
		return;
	}

	if (D_SDP_MODE(dbri->pipes[pipe].sdp) != D_SDP_FIXED) {
		printk(KERN_ERR "DBRI: recv_fixed called on "
			"non-fixed pipe %d\n", pipe);
		return;
	}

	if (dbri->pipes[pipe].sdp & D_SDP_TO_SER) {
		printk(KERN_ERR "DBRI: recv_fixed called on "
			"transmit pipe %d\n", pipe);
		return;
	}

	dbri->pipes[pipe].recv_fixed_ptr = ptr;
}

/* setup_descs()
 *
 * Setup transmit/receive data on a "long" pipe - i.e, one associated
 * with a DMA buffer.
 *
 * Only pipe numbers 0-15 can be used in this mode.
 *
 * This function takes a stream number pointing to a data buffer,
 * and work by building chains of descriptors which identify the
 * data buffers.  Buffers too large for a single descriptor will
 * be spread across multiple descriptors.
 *
 * All descriptors create a ring buffer.
 *
 * Lock must be held before calling this.
 */
static int setup_descs(struct snd_dbri *dbri, int streamno, unsigned int period)
{
	struct dbri_streaminfo *info = &dbri->stream_info[streamno];
	__u32 dvma_buffer;
	int desc;
	int len;
	int first_desc = -1;
	int last_desc = -1;

	if (info->pipe < 0 || info->pipe > 15) {
		printk(KERN_ERR "DBRI: setup_descs: Illegal pipe number\n");
		return -2;
	}

	if (dbri->pipes[info->pipe].sdp == 0) {
		printk(KERN_ERR "DBRI: setup_descs: Uninitialized pipe %d\n",
			info->pipe);
		return -2;
	}

	dvma_buffer = info->dvma_buffer;
	len = info->size;

	if (streamno == DBRI_PLAY) {
		if (!(dbri->pipes[info->pipe].sdp & D_SDP_TO_SER)) {
			printk(KERN_ERR "DBRI: setup_descs: "
				"Called on receive pipe %d\n", info->pipe);
			return -2;
		}
	} else {
		if (dbri->pipes[info->pipe].sdp & D_SDP_TO_SER) {
			printk(KERN_ERR
			    "DBRI: setup_descs: Called on transmit pipe %d\n",
			     info->pipe);
			return -2;
		}
		/* Should be able to queue multiple buffers
		 * to receive on a pipe */
		if (pipe_active(dbri, info->pipe)) {
			printk(KERN_ERR "DBRI: recv_on_pipe: "
				"Called on active pipe %d\n", info->pipe);
			return -2;
		}

		/* Make sure buffer size is multiple of four */
		len &= ~3;
	}

	/* Free descriptors if pipe has any */
	desc = dbri->pipes[info->pipe].first_desc;
	if (desc >= 0)
		do {
			dbri->dma->desc[desc].ba = 0;
			dbri->dma->desc[desc].nda = 0;
			desc = dbri->next_desc[desc];
		} while (desc != -1 &&
			 desc != dbri->pipes[info->pipe].first_desc);

	dbri->pipes[info->pipe].desc = -1;
	dbri->pipes[info->pipe].first_desc = -1;

	/* Chop the buffer into chunks of at most min(DBRI_TD_MAXCNT, period)
	 * bytes, grabbing the first free descriptor for each chunk. */
	desc = 0;
	while (len > 0) {
		int mylen;

		for (; desc < DBRI_NO_DESCS; desc++) {
			if (!dbri->dma->desc[desc].ba)
				break;
		}

		if (desc == DBRI_NO_DESCS) {
			printk(KERN_ERR "DBRI: setup_descs: No descriptors\n");
			return -1;
		}

		if (len > DBRI_TD_MAXCNT)
			mylen = DBRI_TD_MAXCNT;	/* 8KB - 4 */
		else
			mylen = len;

		if (mylen > period)
			mylen = period;

		dbri->next_desc[desc] = -1;
		dbri->dma->desc[desc].ba = dvma_buffer;
		dbri->dma->desc[desc].nda = 0;

		if (streamno == DBRI_PLAY) {
			dbri->dma->desc[desc].word1 = DBRI_TD_CNT(mylen);
			dbri->dma->desc[desc].word4 = 0;
			dbri->dma->desc[desc].word1 |= DBRI_TD_F | DBRI_TD_B;
		} else {
			dbri->dma->desc[desc].word1 = 0;
			dbri->dma->desc[desc].word4 =
			    DBRI_RD_B | DBRI_RD_BCNT(mylen);
		}

		if (first_desc == -1)
			first_desc = desc;
		else {
			dbri->next_desc[last_desc] = desc;
			dbri->dma->desc[last_desc].nda =
			    dbri->dma_dvma + dbri_dma_off(desc, desc);
		}

		last_desc = desc;
		dvma_buffer += mylen;
		len -= mylen;
	}

	if (first_desc == -1 || last_desc == -1) {
		printk(KERN_ERR "DBRI: setup_descs: "
			" Not enough descriptors available\n");
		return -1;
	}

	/* Close the ring: last descriptor points back at the first. */
	dbri->dma->desc[last_desc].nda =
	    dbri->dma_dvma + dbri_dma_off(desc, first_desc);
	dbri->next_desc[last_desc] = first_desc;
	dbri->pipes[info->pipe].first_desc = first_desc;
	dbri->pipes[info->pipe].desc = first_desc;

#ifdef DBRI_DEBUG
	for (desc = first_desc; desc != -1;) {
		dprintk(D_DESC, "DESC %d: %08x %08x %08x %08x\n",
			desc,
			dbri->dma->desc[desc].word1,
			dbri->dma->desc[desc].ba,
			dbri->dma->desc[desc].nda, dbri->dma->desc[desc].word4);
		desc = dbri->next_desc[desc];
		if (desc == first_desc)
			break;
	}
#endif
	return 0;
}

/*
****************************************************************************
************************** DBRI - CHI interface ****************************
****************************************************************************

The CHI is a four-wire (clock, frame sync, data in, data out) time-division
multiplexed serial interface which the DBRI can operate in either master
(give clock/frame sync) or slave (take clock/frame sync) mode.

*/

enum master_or_slave { CHImaster, CHIslave };

/*
 * Lock must not be held before calling it.
 */
static void reset_chi(struct snd_dbri *dbri,
		      enum master_or_slave master_or_slave,
		      int bits_per_frame)
{
	s32 *cmd;
	int val;

	/* Set CHI Anchor: Pipe 16 */

	cmd = dbri_cmdlock(dbri, 4);
	val = D_DTS_VO | D_DTS_VI | D_DTS_INS
		| D_DTS_PRVIN(16) | D_PIPE(16) | D_DTS_PRVOUT(16);
	*(cmd++) = DBRI_CMD(D_DTS, 0, val);
	*(cmd++) = D_TS_ANCHOR | D_TS_NEXT(16);
	*(cmd++) = D_TS_ANCHOR | D_TS_NEXT(16);
	*(cmd++) = DBRI_CMD(D_PAUSE, 0, 0);
	dbri_cmdsend(dbri, cmd, 4);

	/* Pipe 16 is the anchor: mark it in-use and self-linked. */
	dbri->pipes[16].sdp = 1;
	dbri->pipes[16].nextpipe = 16;

	cmd = dbri_cmdlock(dbri, 4);

	if (master_or_slave == CHIslave) {
		/* Setup DBRI for CHI Slave - receive clock, frame sync (FS)
		 *
		 * CHICM  = 0 (slave mode, 8 kHz frame rate)
		 * IR     = give immediate CHI status interrupt
		 * EN     = give CHI status interrupt upon change
		 */
		*(cmd++) = DBRI_CMD(D_CHI, 0, D_CHI_CHICM(0));
	} else {
		/* Setup DBRI for CHI Master - generate clock, FS
		 *
		 * BPF			=  bits per 8 kHz frame
		 * 12.288 MHz / CHICM_divisor = clock rate
		 * FD = 1 - drive CHIFS on rising edge of CHICK
		 */
		int clockrate = bits_per_frame * 8;
		int divisor = 12288 / clockrate;

		if (divisor > 255 || divisor * clockrate != 12288)
			printk(KERN_ERR "DBRI: illegal bits_per_frame "
				"in setup_chi\n");

		*(cmd++) = DBRI_CMD(D_CHI, 0, D_CHI_CHICM(divisor)
				    | D_CHI_FD | D_CHI_BPF(bits_per_frame));
	}

	dbri->chi_bpf = bits_per_frame;

	/* CHI Data Mode
	 *
	 * RCE   =  0 - receive on falling edge of CHICK
	 * XCE   =  1 - transmit on rising edge of CHICK
	 * XEN   =  1 - enable transmitter
	 * REN   =  1 - enable receiver
	 */

	*(cmd++) = DBRI_CMD(D_PAUSE, 0, 0);
	*(cmd++) = DBRI_CMD(D_CDM, 0, D_CDM_XCE | D_CDM_XEN | D_CDM_REN);
	*(cmd++) = DBRI_CMD(D_PAUSE, 0, 0);

	dbri_cmdsend(dbri, cmd, 4);
}

/*
****************************************************************************
*********************** CS4215 audio codec management **********************
****************************************************************************

In the standard SPARC audio configuration, the CS4215 codec is attached
to the DBRI via the CHI interface and
few of the DBRI's PIO pins.

 * Lock must not be held before calling it.

*/
static __devinit void cs4215_setup_pipes(struct snd_dbri *dbri)
{
	unsigned long flags;

	spin_lock_irqsave(&dbri->lock, flags);
	/*
	 * Data mode:
	 * Pipe  4: Send timeslots 1-4 (audio data)
	 * Pipe 20: Send timeslots 5-8 (part of ctrl data)
	 * Pipe  6: Receive timeslots 1-4 (audio data)
	 * Pipe 21: Receive timeslots 6-7. We can only receive 20 bits via
	 *          interrupt, and the rest of the data (slot 5 and 8) is
	 *          not relevant for us (only for doublechecking).
	 *
	 * Control mode:
	 * Pipe 17: Send timeslots 1-4 (slots 5-8 are read only)
	 * Pipe 18: Receive timeslot 1 (clb).
	 * Pipe 19: Receive timeslot 7 (version).
	 */

	setup_pipe(dbri, 4, D_SDP_MEM | D_SDP_TO_SER | D_SDP_MSB);
	setup_pipe(dbri, 20, D_SDP_FIXED | D_SDP_TO_SER | D_SDP_MSB);
	setup_pipe(dbri, 6, D_SDP_MEM | D_SDP_FROM_SER | D_SDP_MSB);
	setup_pipe(dbri, 21, D_SDP_FIXED | D_SDP_FROM_SER | D_SDP_MSB);

	setup_pipe(dbri, 17, D_SDP_FIXED | D_SDP_TO_SER | D_SDP_MSB);
	setup_pipe(dbri, 18, D_SDP_FIXED | D_SDP_FROM_SER | D_SDP_MSB);
	setup_pipe(dbri, 19, D_SDP_FIXED | D_SDP_FROM_SER | D_SDP_MSB);
	spin_unlock_irqrestore(&dbri->lock, flags);

	dbri_cmdwait(dbri);
}

/* Fill in the driver's shadow copy of the codec's data/control time
 * slots with sane power-on defaults (no hardware access here). */
static __devinit int cs4215_init_data(struct cs4215 *mm)
{
	/*
	 * No action, memory resetting only.
	 *
	 * Data Time Slot 5-8
	 * Speaker,Line and Headphone enable. Gain set to the half.
	 * Input is mike.
	 */
	mm->data[0] = CS4215_LO(0x20) | CS4215_HE | CS4215_LE;
	mm->data[1] = CS4215_RO(0x20) | CS4215_SE;
	mm->data[2] = CS4215_LG(0x8) | CS4215_IS | CS4215_PIO0 | CS4215_PIO1;
	mm->data[3] = CS4215_RG(0x8) | CS4215_MA(0xf);

	/*
	 * Control Time Slot 1-4
	 * 0: Default I/O voltage scale
	 * 1: 8 bit ulaw, 8kHz, mono, high pass filter disabled
	 * 2: Serial enable, CHI master, 128 bits per frame, clock 1
	 * 3: Tests disabled
	 */
	mm->ctrl[0] = CS4215_RSRVD_1 | CS4215_MLB;
	mm->ctrl[1] = CS4215_DFR_ULAW | CS4215_FREQ[0].csval;
	mm->ctrl[2] = CS4215_XCLK | CS4215_BSEL_128 | CS4215_FREQ[0].xtal;
	mm->ctrl[3] = 0;

	mm->status = 0;
	mm->version = 0xff;
	mm->precision = 8;	/* For ULAW */
	mm->channels = 1;

	return 0;
}

/* Push the shadow data time slots out to the codec; 'muted' forces
 * maximum attenuation / zero gain instead of the stored settings. */
static void cs4215_setdata(struct snd_dbri *dbri, int muted)
{
	if (muted) {
		dbri->mm.data[0] |= 63;
		dbri->mm.data[1] |= 63;
		dbri->mm.data[2] &= ~15;
		dbri->mm.data[3] &= ~15;
	} else {
		/* Start by setting the playback attenuation. */
		struct dbri_streaminfo *info = &dbri->stream_info[DBRI_PLAY];
		int left_gain = info->left_gain & 0x3f;
		int right_gain = info->right_gain & 0x3f;

		dbri->mm.data[0] &= ~0x3f;	/* Reset the volume bits */
		dbri->mm.data[1] &= ~0x3f;
		dbri->mm.data[0] |= (DBRI_MAX_VOLUME - left_gain);
		dbri->mm.data[1] |= (DBRI_MAX_VOLUME - right_gain);

		/* Now set the recording gain. */
		info = &dbri->stream_info[DBRI_REC];
		left_gain = info->left_gain & 0xf;
		right_gain = info->right_gain & 0xf;
		dbri->mm.data[2] |= CS4215_LG(left_gain);
		dbri->mm.data[3] |= CS4215_RG(right_gain);
	}

	xmit_fixed(dbri, 20, *(int *)dbri->mm.data);
}

/*
 * Set the CS4215 to data mode.
 */
static void cs4215_open(struct snd_dbri *dbri)
{
	int data_width;
	u32 tmp;
	unsigned long flags;

	dprintk(D_MM, "cs4215_open: %d channels, %d bits\n",
		dbri->mm.channels, dbri->mm.precision);

	/* Temporarily mute outputs, and wait 1/8000 sec (125 us)
	 * to make sure this takes.  This avoids clicking noises.
	 */
	cs4215_setdata(dbri, 1);
	udelay(125);

	/*
	 * Data mode:
	 * Pipe  4: Send timeslots 1-4 (audio data)
	 * Pipe 20: Send timeslots 5-8 (part of ctrl data)
	 * Pipe  6: Receive timeslots 1-4 (audio data)
	 * Pipe 21: Receive timeslots 6-7. We can only receive 20 bits via
	 *          interrupt, and the rest of the data (slot 5 and 8) is
	 *          not relevant for us (only for doublechecking).
	 *
	 * Just like in control mode, the time slots are all offset by eight
	 * bits.  The CS4215, it seems, observes TSIN (the delayed signal)
	 * even if it's the CHI master.  Don't ask me...
	 */
	spin_lock_irqsave(&dbri->lock, flags);
	tmp = sbus_readl(dbri->regs + REG0);
	tmp &= ~(D_C);		/* Disable CHI */
	sbus_writel(tmp, dbri->regs + REG0);

	/* Switch CS4215 to data mode - set PIO3 to 1 */
	sbus_writel(D_ENPIO | D_PIO1 | D_PIO3 |
		    (dbri->mm.onboard ? D_PIO0 : D_PIO2), dbri->regs + REG2);

	/* In data mode the codec drives the clock, so the DBRI is slave. */
	reset_chi(dbri, CHIslave, 128);

	/* Note: this next doesn't work for 8-bit stereo, because the two
	 * channels would be on timeslots 1 and 3, with 2 and 4 idle.
	 * (See CS4215 datasheet Fig 15)
	 *
	 * DBRI non-contiguous mode would be required to make this work.
	 */
	data_width = dbri->mm.channels * dbri->mm.precision;

	link_time_slot(dbri, 4, 16, 16, data_width, dbri->mm.offset);
	link_time_slot(dbri, 20, 4, 16, 32, dbri->mm.offset + 32);
	link_time_slot(dbri, 6, 16, 16, data_width, dbri->mm.offset);
	link_time_slot(dbri, 21, 6, 16, 16, dbri->mm.offset + 40);

	/* FIXME: enable CHI after _setdata? */
	tmp = sbus_readl(dbri->regs + REG0);
	tmp |= D_C;		/* Enable CHI */
	sbus_writel(tmp, dbri->regs + REG0);
	spin_unlock_irqrestore(&dbri->lock, flags);

	cs4215_setdata(dbri, 0);
}

/*
 * Send the control information (i.e. audio format)
 */
static int cs4215_setctrl(struct snd_dbri *dbri)
{
	int i, val;
	u32 tmp;
	unsigned long flags;

	/* FIXME - let the CPU do something useful during these delays */

	/* Temporarily mute outputs, and wait 1/8000 sec (125 us)
	 * to make sure this takes.  This avoids clicking noises.
	 */
	cs4215_setdata(dbri, 1);
	udelay(125);

	/*
	 * Enable Control mode: Set DBRI's PIO3 (4215's D/~C) to 0, then wait
	 * 12 cycles <= 12/(5512.5*64) sec = 34.01 usec
	 */
	val = D_ENPIO | D_PIO1 | (dbri->mm.onboard ? D_PIO0 : D_PIO2);
	sbus_writel(val, dbri->regs + REG2);
	dprintk(D_MM, "cs4215_setctrl: reg2=0x%x\n", val);
	udelay(34);

	/* In Control mode, the CS4215 is a slave device, so the DBRI must
	 * operate as CHI master, supplying clocking and frame synchronization.
	 *
	 * In Data mode, however, the CS4215 must be CHI master to insure
	 * that its data stream is synchronous with its codec.
	 *
	 * The upshot of all this?  We start by putting the DBRI into master
	 * mode, program the CS4215 in Control mode, then switch the CS4215
	 * into Data mode and put the DBRI into slave mode.  Various timing
	 * requirements must be observed along the way.
	 *
	 * Oh, and one more thing, on a SPARCStation 20 (and maybe
	 * others?), the addressing of the CS4215's time slots is
	 * offset by eight bits, so we add eight to all the "cycle"
	 * values in the Define Time Slot (DTS) commands.  This is
	 * done in hardware by a TI 248 that delays the DBRI->4215
	 * frame sync signal by eight clock cycles.  Anybody know why?
	 */
	spin_lock_irqsave(&dbri->lock, flags);
	tmp = sbus_readl(dbri->regs + REG0);
	tmp &= ~D_C;		/* Disable CHI */
	sbus_writel(tmp, dbri->regs + REG0);

	reset_chi(dbri, CHImaster, 128);

	/*
	 * Control mode:
	 * Pipe 17: Send timeslots 1-4 (slots 5-8 are read only)
	 * Pipe 18: Receive timeslot 1 (clb).
	 * Pipe 19: Receive timeslot 7 (version).
*/ link_time_slot(dbri, 17, 16, 16, 32, dbri->mm.offset); link_time_slot(dbri, 18, 16, 16, 8, dbri->mm.offset); link_time_slot(dbri, 19, 18, 16, 8, dbri->mm.offset + 48); spin_unlock_irqrestore(&dbri->lock, flags); /* Wait for the chip to echo back CLB (Control Latch Bit) as zero */ dbri->mm.ctrl[0] &= ~CS4215_CLB; xmit_fixed(dbri, 17, *(int *)dbri->mm.ctrl); spin_lock_irqsave(&dbri->lock, flags); tmp = sbus_readl(dbri->regs + REG0); tmp |= D_C; /* Enable CHI */ sbus_writel(tmp, dbri->regs + REG0); spin_unlock_irqrestore(&dbri->lock, flags); for (i = 10; ((dbri->mm.status & 0xe4) != 0x20); --i) msleep_interruptible(1); if (i == 0) { dprintk(D_MM, "CS4215 didn't respond to CLB (0x%02x)\n", dbri->mm.status); return -1; } /* Disable changes to our copy of the version number, as we are about * to leave control mode. */ recv_fixed(dbri, 19, NULL); /* Terminate CS4215 control mode - data sheet says * "Set CLB=1 and send two more frames of valid control info" */ dbri->mm.ctrl[0] |= CS4215_CLB; xmit_fixed(dbri, 17, *(int *)dbri->mm.ctrl); /* Two frames of control info @ 8kHz frame rate = 250 us delay */ udelay(250); cs4215_setdata(dbri, 0); return 0; } /* * Setup the codec with the sampling rate, audio format and number of * channels. * As part of the process we resend the settings for the data * timeslots as well. 
 */
static int cs4215_prepare(struct snd_dbri *dbri, unsigned int rate,
			  snd_pcm_format_t format, unsigned int channels)
{
	int freq_idx;
	int ret = 0;

	/* Lookup index for this rate */
	for (freq_idx = 0; CS4215_FREQ[freq_idx].freq != 0; freq_idx++) {
		if (CS4215_FREQ[freq_idx].freq == rate)
			break;
	}

	if (CS4215_FREQ[freq_idx].freq != rate) {
		printk(KERN_WARNING "DBRI: Unsupported rate %d Hz\n", rate);
		return -1;
	}

	switch (format) {
	case SNDRV_PCM_FORMAT_MU_LAW:
		dbri->mm.ctrl[1] = CS4215_DFR_ULAW;
		dbri->mm.precision = 8;
		break;
	case SNDRV_PCM_FORMAT_A_LAW:
		dbri->mm.ctrl[1] = CS4215_DFR_ALAW;
		dbri->mm.precision = 8;
		break;
	case SNDRV_PCM_FORMAT_U8:
		dbri->mm.ctrl[1] = CS4215_DFR_LINEAR8;
		dbri->mm.precision = 8;
		break;
	case SNDRV_PCM_FORMAT_S16_BE:
		dbri->mm.ctrl[1] = CS4215_DFR_LINEAR16;
		dbri->mm.precision = 16;
		break;
	default:
		printk(KERN_WARNING "DBRI: Unsupported format %d\n", format);
		return -1;
	}

	/* Add rate parameters */
	dbri->mm.ctrl[1] |= CS4215_FREQ[freq_idx].csval;
	dbri->mm.ctrl[2] = CS4215_XCLK |
				CS4215_BSEL_128 | CS4215_FREQ[freq_idx].xtal;

	dbri->mm.channels = channels;
	if (channels == 2)
		dbri->mm.ctrl[1] |= CS4215_DFR_STEREO;

	ret = cs4215_setctrl(dbri);
	if (ret == 0)
		cs4215_open(dbri);	/* set codec to data mode */

	return ret;
}

/*
 * Probe for the CS4215 (onboard or speakerbox), set up its pipes and
 * shadow state, and run an initial control-mode handshake.
 */
static __devinit int cs4215_init(struct snd_dbri *dbri)
{
	u32 reg2 = sbus_readl(dbri->regs + REG2);
	dprintk(D_MM, "cs4215_init: reg2=0x%x\n", reg2);

	/* Look for the cs4215 chips */
	if (reg2 & D_PIO2) {
		dprintk(D_MM, "Onboard CS4215 detected\n");
		dbri->mm.onboard = 1;
	}
	if (reg2 & D_PIO0) {
		dprintk(D_MM, "Speakerbox detected\n");
		dbri->mm.onboard = 0;

		if (reg2 & D_PIO2) {
			printk(KERN_INFO "DBRI: Using speakerbox / "
			       "ignoring onboard mmcodec.\n");
			sbus_writel(D_ENPIO2, dbri->regs + REG2);
		}
	}

	if (!(reg2 & (D_PIO0 | D_PIO2))) {
		printk(KERN_ERR "DBRI: no mmcodec found.\n");
		return -EIO;
	}

	cs4215_setup_pipes(dbri);
	cs4215_init_data(&dbri->mm);

	/* Enable capture of the status & version timeslots.
	 */
	recv_fixed(dbri, 18, &dbri->mm.status);
	recv_fixed(dbri, 19, &dbri->mm.version);

	dbri->mm.offset = dbri->mm.onboard ? 0 : 8;
	if (cs4215_setctrl(dbri) == -1 || dbri->mm.version == 0xff) {
		dprintk(D_MM, "CS4215 failed probe at offset %d\n",
			dbri->mm.offset);
		return -EIO;
	}
	dprintk(D_MM, "Found CS4215 at offset %d\n", dbri->mm.offset);

	return 0;
}

/*
****************************************************************************
*************************** DBRI interrupt handler *************************
****************************************************************************

The DBRI communicates with the CPU mainly via a circular interrupt
buffer.  When an interrupt is signaled, the CPU walks through the
buffer and calls dbri_process_one_interrupt() for each interrupt word.
Complicated interrupts are handled by dedicated functions (which
appear first in this file).  Any pending interrupts can be serviced by
calling dbri_process_interrupt_buffer(), which works even if the CPU's
interrupts are disabled.

*/

/* xmit_descs()
 *
 * Starts transmitting the current TD's for recording/playing.
 * For playback, ALSA has filled the DMA memory with new data (we hope).
 */
static void xmit_descs(struct snd_dbri *dbri)
{
	struct dbri_streaminfo *info;
	s32 *cmd;
	unsigned long flags;
	int first_td;

	if (dbri == NULL)
		return;		/* Disabled */

	info = &dbri->stream_info[DBRI_REC];
	spin_lock_irqsave(&dbri->lock, flags);

	if (info->pipe >= 0) {
		first_td = dbri->pipes[info->pipe].first_desc;

		dprintk(D_DESC, "xmit_descs rec @ TD %d\n", first_td);

		/* Stream could be closed by the time we run. */
		if (first_td >= 0) {
			cmd = dbri_cmdlock(dbri, 2);
			*(cmd++) = DBRI_CMD(D_SDP, 0,
					    dbri->pipes[info->pipe].sdp
					    | D_SDP_P | D_SDP_EVERY
					    | D_SDP_C);
			*(cmd++) = dbri->dma_dvma +
				   dbri_dma_off(desc, first_td);
			dbri_cmdsend(dbri, cmd, 2);

			/* Reset our admin of the pipe.
			 */
			dbri->pipes[info->pipe].desc = first_td;
		}
	}

	info = &dbri->stream_info[DBRI_PLAY];

	if (info->pipe >= 0) {
		first_td = dbri->pipes[info->pipe].first_desc;

		dprintk(D_DESC, "xmit_descs play @ TD %d\n", first_td);

		/* Stream could be closed by the time we run. */
		if (first_td >= 0) {
			cmd = dbri_cmdlock(dbri, 2);
			*(cmd++) = DBRI_CMD(D_SDP, 0,
					    dbri->pipes[info->pipe].sdp
					    | D_SDP_P | D_SDP_EVERY
					    | D_SDP_C);
			*(cmd++) = dbri->dma_dvma +
				   dbri_dma_off(desc, first_td);
			dbri_cmdsend(dbri, cmd, 2);

			/* Reset our admin of the pipe. */
			dbri->pipes[info->pipe].desc = first_td;
		}
	}

	spin_unlock_irqrestore(&dbri->lock, flags);
}

/* transmission_complete_intr()
 *
 * Called by main interrupt handler when DBRI signals transmission complete
 * on a pipe (interrupt triggered by the B bit in a transmit descriptor).
 *
 * Walks through the pipe's list of transmit buffer descriptors and marks
 * them as available. Stops when the first descriptor is found without
 * TBC (Transmit Buffer Complete) set, or we've run through them all.
 *
 * The DMA buffers are not released. They form a ring buffer and
 * they are filled by ALSA while others are transmitted by DMA.
 *
 */
static void transmission_complete_intr(struct snd_dbri *dbri, int pipe)
{
	struct dbri_streaminfo *info = &dbri->stream_info[DBRI_PLAY];
	int td = dbri->pipes[pipe].desc;
	int status;

	while (td >= 0) {
		if (td >= DBRI_NO_DESCS) {
			printk(KERN_ERR "DBRI: invalid td on pipe %d\n", pipe);
			return;
		}

		status = DBRI_TD_STATUS(dbri->dma->desc[td].word4);
		if (!(status & DBRI_TD_TBC))
			break;

		dprintk(D_INT, "TD %d, status 0x%02x\n", td, status);

		dbri->dma->desc[td].word4 = 0;	/* Reset it for next time.
						 */
		/* TD word1 stores the byte count at the same bit position
		 * that DBRI_RD_CNT extracts, so the RD macro works here. */
		info->offset += DBRI_RD_CNT(dbri->dma->desc[td].word1);

		td = dbri->next_desc[td];
		dbri->pipes[pipe].desc = td;
	}

	/* Notify ALSA */
	/* Drop the lock around the callback: snd_pcm_period_elapsed() may
	 * re-enter this driver's PCM ops, which take dbri->lock. */
	spin_unlock(&dbri->lock);
	snd_pcm_period_elapsed(info->substream);
	spin_lock(&dbri->lock);
}

static void reception_complete_intr(struct snd_dbri *dbri, int pipe)
{
	struct dbri_streaminfo *info;
	int rd = dbri->pipes[pipe].desc;
	s32 status;

	if (rd < 0 || rd >= DBRI_NO_DESCS) {
		printk(KERN_ERR "DBRI: invalid rd on pipe %d\n", pipe);
		return;
	}

	dbri->pipes[pipe].desc = dbri->next_desc[rd];
	status = dbri->dma->desc[rd].word1;
	dbri->dma->desc[rd].word1 = 0;	/* Reset it for next time. */

	info = &dbri->stream_info[DBRI_REC];
	info->offset += DBRI_RD_CNT(status);

	/* FIXME: Check status */

	dprintk(D_INT, "Recv RD %d, status 0x%02x, len %d\n",
		rd, DBRI_RD_STATUS(status), DBRI_RD_CNT(status));

	/* Notify ALSA */
	spin_unlock(&dbri->lock);
	snd_pcm_period_elapsed(info->substream);
	spin_lock(&dbri->lock);
}

/* Decode and dispatch a single word from the interrupt ring buffer. */
static void dbri_process_one_interrupt(struct snd_dbri *dbri, int x)
{
	int val = D_INTR_GETVAL(x);
	int channel = D_INTR_GETCHAN(x);
	int command = D_INTR_GETCMD(x);
	int code = D_INTR_GETCODE(x);
#ifdef DBRI_DEBUG
	int rval = D_INTR_GETRVAL(x);
#endif

	if (channel == D_INTR_CMD) {
		dprintk(D_CMD, "INTR: Command: %-5s  Value:%d\n",
			cmds[command], val);
	} else {
		dprintk(D_INT, "INTR: Chan:%d Code:%d Val:%#x\n",
			channel, code, rval);
	}

	switch (code) {
	case D_INTR_CMDI:
		if (command != D_WAIT)
			printk(KERN_ERR "DBRI: Command read interrupt\n");
		break;
	case D_INTR_BRDY:
		reception_complete_intr(dbri, channel);
		break;
	case D_INTR_XCMP:
	case D_INTR_MINT:
		transmission_complete_intr(dbri, channel);
		break;
	case D_INTR_UNDR:
		/* UNDR - Transmission underrun
		 * resend SDP command with clear pipe bit (C) set
		 */
		{
	/* FIXME: do something useful in case of underrun */
			printk(KERN_ERR "DBRI: Underrun error\n");
#if 0
			s32 *cmd;
			int pipe = channel;
			int td = dbri->pipes[pipe].desc;

			dbri->dma->desc[td].word4 = 0;
			cmd = dbri_cmdlock(dbri, NoGetLock);
			*(cmd++) = DBRI_CMD(D_SDP, 0,
					    dbri->pipes[pipe].sdp
					    | D_SDP_P | D_SDP_C | D_SDP_2SAME);
			*(cmd++) = dbri->dma_dvma + dbri_dma_off(desc, td);
			dbri_cmdsend(dbri, cmd);
#endif
		}
		break;
	case D_INTR_FXDT:
		/* FXDT - Fixed data change */
		if (dbri->pipes[channel].sdp & D_SDP_MSB)
			val = reverse_bytes(val, dbri->pipes[channel].length);

		if (dbri->pipes[channel].recv_fixed_ptr)
			*(dbri->pipes[channel].recv_fixed_ptr) = val;
		break;
	default:
		if (channel != D_INTR_CMD)
			printk(KERN_WARNING
			       "DBRI: Ignored Interrupt: %d (0x%x)\n", code, x);
	}
}

/* dbri_process_interrupt_buffer advances through the DBRI's interrupt
 * buffer until it finds a zero word (indicating nothing more to do
 * right now).  Non-zero words require processing and are handed off
 * to dbri_process_one_interrupt AFTER advancing the pointer.
 */
static void dbri_process_interrupt_buffer(struct snd_dbri *dbri)
{
	s32 x;

	while ((x = dbri->dma->intr[dbri->dbri_irqp]) != 0) {
		dbri->dma->intr[dbri->dbri_irqp] = 0;
		dbri->dbri_irqp++;
		/* Ring wraps to index 1 - slot 0 holds the ring's address. */
		if (dbri->dbri_irqp == DBRI_INT_BLK)
			dbri->dbri_irqp = 1;

		dbri_process_one_interrupt(dbri, x);
	}
}

static irqreturn_t snd_dbri_interrupt(int irq, void *dev_id)
{
	struct snd_dbri *dbri = dev_id;
	static int errcnt = 0;
	int x;

	if (dbri == NULL)
		return IRQ_NONE;
	spin_lock(&dbri->lock);

	/*
	 * Read it, so the interrupt goes away.
	 */
	x = sbus_readl(dbri->regs + REG1);

	if (x & (D_MRR | D_MLE | D_LBG | D_MBE)) {
		u32 tmp;

		if (x & D_MRR)
			printk(KERN_ERR
			       "DBRI: Multiple Error Ack on SBus reg1=0x%x\n",
			       x);
		if (x & D_MLE)
			printk(KERN_ERR
			       "DBRI: Multiple Late Error on SBus reg1=0x%x\n",
			       x);
		if (x & D_LBG)
			printk(KERN_ERR
			       "DBRI: Lost Bus Grant on SBus reg1=0x%x\n", x);
		if (x & D_MBE)
			printk(KERN_ERR
			       "DBRI: Burst Error on SBus reg1=0x%x\n", x);

		/* Some of these SBus errors cause the chip's SBus circuitry
		 * to be disabled, so just re-enable and try to keep going.
		 *
		 * The only one I've seen is MRR, which will be triggered
		 * if you let a transmit pipe underrun, then try to CDP it.
		 *
		 * If these things persist, we reset the chip.
*/ if ((++errcnt) % 10 == 0) { dprintk(D_INT, "Interrupt errors exceeded.\n"); dbri_reset(dbri); } else { tmp = sbus_readl(dbri->regs + REG0); tmp &= ~(D_D); sbus_writel(tmp, dbri->regs + REG0); } } dbri_process_interrupt_buffer(dbri); spin_unlock(&dbri->lock); return IRQ_HANDLED; } /**************************************************************************** PCM Interface ****************************************************************************/ static struct snd_pcm_hardware snd_dbri_pcm_hw = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_BATCH, .formats = SNDRV_PCM_FMTBIT_MU_LAW | SNDRV_PCM_FMTBIT_A_LAW | SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_BE, .rates = SNDRV_PCM_RATE_8000_48000 | SNDRV_PCM_RATE_5512, .rate_min = 5512, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = 64 * 1024, .period_bytes_min = 1, .period_bytes_max = DBRI_TD_MAXCNT, .periods_min = 1, .periods_max = 1024, }; static int snd_hw_rule_format(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_interval *c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); struct snd_mask *f = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); struct snd_mask fmt; snd_mask_any(&fmt); if (c->min > 1) { fmt.bits[0] &= SNDRV_PCM_FMTBIT_S16_BE; return snd_mask_refine(f, &fmt); } return 0; } static int snd_hw_rule_channels(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_interval *c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); struct snd_mask *f = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); struct snd_interval ch; snd_interval_any(&ch); if (!(f->bits[0] & SNDRV_PCM_FMTBIT_S16_BE)) { ch.min = 1; ch.max = 1; ch.integer = 1; return snd_interval_refine(c, &ch); } return 0; } static int snd_dbri_open(struct snd_pcm_substream *substream) { struct snd_dbri *dbri = snd_pcm_substream_chip(substream); struct snd_pcm_runtime 
*runtime = substream->runtime; struct dbri_streaminfo *info = DBRI_STREAM(dbri, substream); unsigned long flags; dprintk(D_USR, "open audio output.\n"); runtime->hw = snd_dbri_pcm_hw; spin_lock_irqsave(&dbri->lock, flags); info->substream = substream; info->offset = 0; info->dvma_buffer = 0; info->pipe = -1; spin_unlock_irqrestore(&dbri->lock, flags); snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS, snd_hw_rule_format, NULL, SNDRV_PCM_HW_PARAM_FORMAT, -1); snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT, snd_hw_rule_channels, NULL, SNDRV_PCM_HW_PARAM_CHANNELS, -1); cs4215_open(dbri); return 0; } static int snd_dbri_close(struct snd_pcm_substream *substream) { struct snd_dbri *dbri = snd_pcm_substream_chip(substream); struct dbri_streaminfo *info = DBRI_STREAM(dbri, substream); dprintk(D_USR, "close audio output.\n"); info->substream = NULL; info->offset = 0; return 0; } static int snd_dbri_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_dbri *dbri = snd_pcm_substream_chip(substream); struct dbri_streaminfo *info = DBRI_STREAM(dbri, substream); int direction; int ret; /* set sampling rate, audio format and number of channels */ ret = cs4215_prepare(dbri, params_rate(hw_params), params_format(hw_params), params_channels(hw_params)); if (ret != 0) return ret; if ((ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params))) < 0) { printk(KERN_ERR "malloc_pages failed with %d\n", ret); return ret; } /* hw_params can get called multiple times. Only map the DMA once. 
*/ if (info->dvma_buffer == 0) { if (DBRI_STREAMNO(substream) == DBRI_PLAY) direction = DMA_TO_DEVICE; else direction = DMA_FROM_DEVICE; info->dvma_buffer = dma_map_single(&dbri->op->dev, runtime->dma_area, params_buffer_bytes(hw_params), direction); } direction = params_buffer_bytes(hw_params); dprintk(D_USR, "hw_params: %d bytes, dvma=%x\n", direction, info->dvma_buffer); return 0; } static int snd_dbri_hw_free(struct snd_pcm_substream *substream) { struct snd_dbri *dbri = snd_pcm_substream_chip(substream); struct dbri_streaminfo *info = DBRI_STREAM(dbri, substream); int direction; dprintk(D_USR, "hw_free.\n"); /* hw_free can get called multiple times. Only unmap the DMA once. */ if (info->dvma_buffer) { if (DBRI_STREAMNO(substream) == DBRI_PLAY) direction = DMA_TO_DEVICE; else direction = DMA_FROM_DEVICE; dma_unmap_single(&dbri->op->dev, info->dvma_buffer, substream->runtime->buffer_size, direction); info->dvma_buffer = 0; } if (info->pipe != -1) { reset_pipe(dbri, info->pipe); info->pipe = -1; } return snd_pcm_lib_free_pages(substream); } static int snd_dbri_prepare(struct snd_pcm_substream *substream) { struct snd_dbri *dbri = snd_pcm_substream_chip(substream); struct dbri_streaminfo *info = DBRI_STREAM(dbri, substream); int ret; info->size = snd_pcm_lib_buffer_bytes(substream); if (DBRI_STREAMNO(substream) == DBRI_PLAY) info->pipe = 4; /* Send pipe */ else info->pipe = 6; /* Receive pipe */ spin_lock_irq(&dbri->lock); info->offset = 0; /* Setup the all the transmit/receive descriptors to cover the * whole DMA buffer. */ ret = setup_descs(dbri, DBRI_STREAMNO(substream), snd_pcm_lib_period_bytes(substream)); spin_unlock_irq(&dbri->lock); dprintk(D_USR, "prepare audio output. 
%d bytes\n", info->size); return ret; } static int snd_dbri_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_dbri *dbri = snd_pcm_substream_chip(substream); struct dbri_streaminfo *info = DBRI_STREAM(dbri, substream); int ret = 0; switch (cmd) { case SNDRV_PCM_TRIGGER_START: dprintk(D_USR, "start audio, period is %d bytes\n", (int)snd_pcm_lib_period_bytes(substream)); /* Re-submit the TDs. */ xmit_descs(dbri); break; case SNDRV_PCM_TRIGGER_STOP: dprintk(D_USR, "stop audio.\n"); reset_pipe(dbri, info->pipe); break; default: ret = -EINVAL; } return ret; } static snd_pcm_uframes_t snd_dbri_pointer(struct snd_pcm_substream *substream) { struct snd_dbri *dbri = snd_pcm_substream_chip(substream); struct dbri_streaminfo *info = DBRI_STREAM(dbri, substream); snd_pcm_uframes_t ret; ret = bytes_to_frames(substream->runtime, info->offset) % substream->runtime->buffer_size; dprintk(D_USR, "I/O pointer: %ld frames of %ld.\n", ret, substream->runtime->buffer_size); return ret; } static struct snd_pcm_ops snd_dbri_ops = { .open = snd_dbri_open, .close = snd_dbri_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_dbri_hw_params, .hw_free = snd_dbri_hw_free, .prepare = snd_dbri_prepare, .trigger = snd_dbri_trigger, .pointer = snd_dbri_pointer, }; static int __devinit snd_dbri_pcm(struct snd_card *card) { struct snd_pcm *pcm; int err; if ((err = snd_pcm_new(card, /* ID */ "sun_dbri", /* device */ 0, /* playback count */ 1, /* capture count */ 1, &pcm)) < 0) return err; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_dbri_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_dbri_ops); pcm->private_data = card->private_data; pcm->info_flags = 0; strcpy(pcm->name, card->shortname); if ((err = snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS, snd_dma_continuous_data(GFP_KERNEL), 64 * 1024, 64 * 1024)) < 0) return err; return 0; } /***************************************************************************** Mixer interface 
*****************************************************************************/ static int snd_cs4215_info_volume(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; if (kcontrol->private_value == DBRI_PLAY) uinfo->value.integer.max = DBRI_MAX_VOLUME; else uinfo->value.integer.max = DBRI_MAX_GAIN; return 0; } static int snd_cs4215_get_volume(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_dbri *dbri = snd_kcontrol_chip(kcontrol); struct dbri_streaminfo *info; if (snd_BUG_ON(!dbri)) return -EINVAL; info = &dbri->stream_info[kcontrol->private_value]; ucontrol->value.integer.value[0] = info->left_gain; ucontrol->value.integer.value[1] = info->right_gain; return 0; } static int snd_cs4215_put_volume(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_dbri *dbri = snd_kcontrol_chip(kcontrol); struct dbri_streaminfo *info = &dbri->stream_info[kcontrol->private_value]; unsigned int vol[2]; int changed = 0; vol[0] = ucontrol->value.integer.value[0]; vol[1] = ucontrol->value.integer.value[1]; if (kcontrol->private_value == DBRI_PLAY) { if (vol[0] > DBRI_MAX_VOLUME || vol[1] > DBRI_MAX_VOLUME) return -EINVAL; } else { if (vol[0] > DBRI_MAX_GAIN || vol[1] > DBRI_MAX_GAIN) return -EINVAL; } if (info->left_gain != vol[0]) { info->left_gain = vol[0]; changed = 1; } if (info->right_gain != vol[1]) { info->right_gain = vol[1]; changed = 1; } if (changed) { /* First mute outputs, and wait 1/8000 sec (125 us) * to make sure this takes. This avoids clicking noises. */ cs4215_setdata(dbri, 1); udelay(125); cs4215_setdata(dbri, 0); } return changed; } static int snd_cs4215_info_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { int mask = (kcontrol->private_value >> 16) & 0xff; uinfo->type = (mask == 1) ? 
SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = mask; return 0; } static int snd_cs4215_get_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_dbri *dbri = snd_kcontrol_chip(kcontrol); int elem = kcontrol->private_value & 0xff; int shift = (kcontrol->private_value >> 8) & 0xff; int mask = (kcontrol->private_value >> 16) & 0xff; int invert = (kcontrol->private_value >> 24) & 1; if (snd_BUG_ON(!dbri)) return -EINVAL; if (elem < 4) ucontrol->value.integer.value[0] = (dbri->mm.data[elem] >> shift) & mask; else ucontrol->value.integer.value[0] = (dbri->mm.ctrl[elem - 4] >> shift) & mask; if (invert == 1) ucontrol->value.integer.value[0] = mask - ucontrol->value.integer.value[0]; return 0; } static int snd_cs4215_put_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_dbri *dbri = snd_kcontrol_chip(kcontrol); int elem = kcontrol->private_value & 0xff; int shift = (kcontrol->private_value >> 8) & 0xff; int mask = (kcontrol->private_value >> 16) & 0xff; int invert = (kcontrol->private_value >> 24) & 1; int changed = 0; unsigned short val; if (snd_BUG_ON(!dbri)) return -EINVAL; val = (ucontrol->value.integer.value[0] & mask); if (invert == 1) val = mask - val; val <<= shift; if (elem < 4) { dbri->mm.data[elem] = (dbri->mm.data[elem] & ~(mask << shift)) | val; changed = (val != dbri->mm.data[elem]); } else { dbri->mm.ctrl[elem - 4] = (dbri->mm.ctrl[elem - 4] & ~(mask << shift)) | val; changed = (val != dbri->mm.ctrl[elem - 4]); } dprintk(D_GEN, "put_single: mask=0x%x, changed=%d, " "mixer-value=%ld, mm-value=0x%x\n", mask, changed, ucontrol->value.integer.value[0], dbri->mm.data[elem & 3]); if (changed) { /* First mute outputs, and wait 1/8000 sec (125 us) * to make sure this takes. This avoids clicking noises. 
*/ cs4215_setdata(dbri, 1); udelay(125); cs4215_setdata(dbri, 0); } return changed; } /* Entries 0-3 map to the 4 data timeslots, entries 4-7 map to the 4 control timeslots. Shift is the bit offset in the timeslot, mask defines the number of bits. invert is a boolean for use with attenuation. */ #define CS4215_SINGLE(xname, entry, shift, mask, invert) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \ .info = snd_cs4215_info_single, \ .get = snd_cs4215_get_single, .put = snd_cs4215_put_single, \ .private_value = (entry) | ((shift) << 8) | ((mask) << 16) | \ ((invert) << 24) }, static struct snd_kcontrol_new dbri_controls[] __devinitdata = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Playback Volume", .info = snd_cs4215_info_volume, .get = snd_cs4215_get_volume, .put = snd_cs4215_put_volume, .private_value = DBRI_PLAY, }, CS4215_SINGLE("Headphone switch", 0, 7, 1, 0) CS4215_SINGLE("Line out switch", 0, 6, 1, 0) CS4215_SINGLE("Speaker switch", 1, 6, 1, 0) { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Capture Volume", .info = snd_cs4215_info_volume, .get = snd_cs4215_get_volume, .put = snd_cs4215_put_volume, .private_value = DBRI_REC, }, /* FIXME: mic/line switch */ CS4215_SINGLE("Line in switch", 2, 4, 1, 0) CS4215_SINGLE("High Pass Filter switch", 5, 7, 1, 0) CS4215_SINGLE("Monitor Volume", 3, 4, 0xf, 1) CS4215_SINGLE("Mic boost", 4, 4, 1, 1) }; static int __devinit snd_dbri_mixer(struct snd_card *card) { int idx, err; struct snd_dbri *dbri; if (snd_BUG_ON(!card || !card->private_data)) return -EINVAL; dbri = card->private_data; strcpy(card->mixername, card->shortname); for (idx = 0; idx < ARRAY_SIZE(dbri_controls); idx++) { err = snd_ctl_add(card, snd_ctl_new1(&dbri_controls[idx], dbri)); if (err < 0) return err; } for (idx = DBRI_REC; idx < DBRI_NO_STREAMS; idx++) { dbri->stream_info[idx].left_gain = 0; dbri->stream_info[idx].right_gain = 0; } return 0; } /**************************************************************************** /proc interface 
****************************************************************************/ static void dbri_regs_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_dbri *dbri = entry->private_data; snd_iprintf(buffer, "REG0: 0x%x\n", sbus_readl(dbri->regs + REG0)); snd_iprintf(buffer, "REG2: 0x%x\n", sbus_readl(dbri->regs + REG2)); snd_iprintf(buffer, "REG8: 0x%x\n", sbus_readl(dbri->regs + REG8)); snd_iprintf(buffer, "REG9: 0x%x\n", sbus_readl(dbri->regs + REG9)); } #ifdef DBRI_DEBUG static void dbri_debug_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_dbri *dbri = entry->private_data; int pipe; snd_iprintf(buffer, "debug=%d\n", dbri_debug); for (pipe = 0; pipe < 32; pipe++) { if (pipe_active(dbri, pipe)) { struct dbri_pipe *pptr = &dbri->pipes[pipe]; snd_iprintf(buffer, "Pipe %d: %s SDP=0x%x desc=%d, " "len=%d next %d\n", pipe, (pptr->sdp & D_SDP_TO_SER) ? "output" : "input", pptr->sdp, pptr->desc, pptr->length, pptr->nextpipe); } } } #endif static void __devinit snd_dbri_proc(struct snd_card *card) { struct snd_dbri *dbri = card->private_data; struct snd_info_entry *entry; if (!snd_card_proc_new(card, "regs", &entry)) snd_info_set_text_ops(entry, dbri, dbri_regs_read); #ifdef DBRI_DEBUG if (!snd_card_proc_new(card, "debug", &entry)) { snd_info_set_text_ops(entry, dbri, dbri_debug_read); entry->mode = S_IFREG | S_IRUGO; /* Readable only. 
*/ } #endif } /* **************************************************************************** **************************** Initialization ******************************** **************************************************************************** */ static void snd_dbri_free(struct snd_dbri *dbri); static int __devinit snd_dbri_create(struct snd_card *card, struct platform_device *op, int irq, int dev) { struct snd_dbri *dbri = card->private_data; int err; spin_lock_init(&dbri->lock); dbri->op = op; dbri->irq = irq; dbri->dma = dma_alloc_coherent(&op->dev, sizeof(struct dbri_dma), &dbri->dma_dvma, GFP_ATOMIC); if (!dbri->dma) return -ENOMEM; memset((void *)dbri->dma, 0, sizeof(struct dbri_dma)); dprintk(D_GEN, "DMA Cmd Block 0x%p (0x%08x)\n", dbri->dma, dbri->dma_dvma); /* Map the registers into memory. */ dbri->regs_size = resource_size(&op->resource[0]); dbri->regs = of_ioremap(&op->resource[0], 0, dbri->regs_size, "DBRI Registers"); if (!dbri->regs) { printk(KERN_ERR "DBRI: could not allocate registers\n"); dma_free_coherent(&op->dev, sizeof(struct dbri_dma), (void *)dbri->dma, dbri->dma_dvma); return -EIO; } err = request_irq(dbri->irq, snd_dbri_interrupt, IRQF_SHARED, "DBRI audio", dbri); if (err) { printk(KERN_ERR "DBRI: Can't get irq %d\n", dbri->irq); of_iounmap(&op->resource[0], dbri->regs, dbri->regs_size); dma_free_coherent(&op->dev, sizeof(struct dbri_dma), (void *)dbri->dma, dbri->dma_dvma); return err; } /* Do low level initialization of the DBRI and CS4215 chips */ dbri_initialize(dbri); err = cs4215_init(dbri); if (err) { snd_dbri_free(dbri); return err; } return 0; } static void snd_dbri_free(struct snd_dbri *dbri) { dprintk(D_GEN, "snd_dbri_free\n"); dbri_reset(dbri); if (dbri->irq) free_irq(dbri->irq, dbri); if (dbri->regs) of_iounmap(&dbri->op->resource[0], dbri->regs, dbri->regs_size); if (dbri->dma) dma_free_coherent(&dbri->op->dev, sizeof(struct dbri_dma), (void *)dbri->dma, dbri->dma_dvma); } static int __devinit dbri_probe(struct 
platform_device *op) { struct snd_dbri *dbri; struct resource *rp; struct snd_card *card; static int dev = 0; int irq; int err; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } irq = op->archdata.irqs[0]; if (irq <= 0) { printk(KERN_ERR "DBRI-%d: No IRQ.\n", dev); return -ENODEV; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, sizeof(struct snd_dbri), &card); if (err < 0) return err; strcpy(card->driver, "DBRI"); strcpy(card->shortname, "Sun DBRI"); rp = &op->resource[0]; sprintf(card->longname, "%s at 0x%02lx:0x%016Lx, irq %d", card->shortname, rp->flags & 0xffL, (unsigned long long)rp->start, irq); err = snd_dbri_create(card, op, irq, dev); if (err < 0) { snd_card_free(card); return err; } dbri = card->private_data; err = snd_dbri_pcm(card); if (err < 0) goto _err; err = snd_dbri_mixer(card); if (err < 0) goto _err; /* /proc file handling */ snd_dbri_proc(card); dev_set_drvdata(&op->dev, card); err = snd_card_register(card); if (err < 0) goto _err; printk(KERN_INFO "audio%d at %p (irq %d) is DBRI(%c)+CS4215(%d)\n", dev, dbri->regs, dbri->irq, op->dev.of_node->name[9], dbri->mm.version); dev++; return 0; _err: snd_dbri_free(dbri); snd_card_free(card); return err; } static int __devexit dbri_remove(struct platform_device *op) { struct snd_card *card = dev_get_drvdata(&op->dev); snd_dbri_free(card->private_data); snd_card_free(card); dev_set_drvdata(&op->dev, NULL); return 0; } static const struct of_device_id dbri_match[] = { { .name = "SUNW,DBRIe", }, { .name = "SUNW,DBRIf", }, {}, }; MODULE_DEVICE_TABLE(of, dbri_match); static struct platform_driver dbri_sbus_driver = { .driver = { .name = "dbri", .owner = THIS_MODULE, .of_match_table = dbri_match, }, .probe = dbri_probe, .remove = __devexit_p(dbri_remove), }; module_platform_driver(dbri_sbus_driver);
gpl-2.0
nyterage/S4_Mini_CM10.1
arch/m32r/kernel/sys_m32r.c
7850
2567
/* * linux/arch/m32r/kernel/sys_m32r.c * * This file contains various random system calls that * have a non-standard calling sequence on the Linux/M32R platform. * * Taken from i386 version. */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/smp.h> #include <linux/sem.h> #include <linux/msg.h> #include <linux/shm.h> #include <linux/stat.h> #include <linux/syscalls.h> #include <linux/mman.h> #include <linux/file.h> #include <linux/utsname.h> #include <linux/ipc.h> #include <asm/uaccess.h> #include <asm/cachectl.h> #include <asm/cacheflush.h> #include <asm/syscall.h> #include <asm/unistd.h> /* * sys_tas() - test-and-set */ asmlinkage int sys_tas(int __user *addr) { int oldval; if (!access_ok(VERIFY_WRITE, addr, sizeof (int))) return -EFAULT; /* atomic operation: * oldval = *addr; *addr = 1; */ __asm__ __volatile__ ( DCACHE_CLEAR("%0", "r4", "%1") " .fillinsn\n" "1:\n" " lock %0, @%1 -> unlock %2, @%1\n" "2:\n" /* NOTE: * The m32r processor can accept interrupts only * at the 32-bit instruction boundary. * So, in the above code, the "unlock" instruction * can be executed continuously after the "lock" * instruction execution without any interruptions. */ ".section .fixup,\"ax\"\n" " .balign 4\n" "3: ldi %0, #%3\n" " seth r14, #high(2b)\n" " or3 r14, r14, #low(2b)\n" " jmp r14\n" ".previous\n" ".section __ex_table,\"a\"\n" " .balign 4\n" " .long 1b,3b\n" ".previous\n" : "=&r" (oldval) : "r" (addr), "r" (1), "i"(-EFAULT) : "r14", "memory" #ifdef CONFIG_CHIP_M32700_TS1 , "r4" #endif /* CONFIG_CHIP_M32700_TS1 */ ); return oldval; } asmlinkage int sys_cacheflush(void *addr, int bytes, int cache) { /* This should flush more selectively ... */ _flush_cache_all(); return 0; } asmlinkage int sys_cachectl(char *addr, int nbytes, int op) { /* Not implemented yet. */ return -ENOSYS; } /* * Do a system call from kernel instead of calling sys_execve so we * end up with proper pt_regs. 
*/ int kernel_execve(const char *filename, const char *const argv[], const char *const envp[]) { register long __scno __asm__ ("r7") = __NR_execve; register long __arg3 __asm__ ("r2") = (long)(envp); register long __arg2 __asm__ ("r1") = (long)(argv); register long __res __asm__ ("r0") = (long)(filename); __asm__ __volatile__ ( "trap #" SYSCALL_VECTOR "|| nop" : "=r" (__res) : "r" (__scno), "0" (__res), "r" (__arg2), "r" (__arg3) : "memory"); return __res; }
gpl-2.0
dduval/kernel-rhel4
drivers/scsi/aic7xxx_old/aic7xxx_seq.c
12714
20747
/* * DO NOT EDIT - This file is automatically generated. */ static unsigned char seqprog[] = { 0xff, 0x6a, 0x06, 0x08, 0x7f, 0x02, 0x04, 0x08, 0x12, 0x6a, 0x00, 0x00, 0xff, 0x6a, 0xd6, 0x09, 0xff, 0x6a, 0xdc, 0x09, 0x00, 0x65, 0xca, 0x58, 0xf7, 0x01, 0x02, 0x08, 0xff, 0x4e, 0xc8, 0x08, 0xbf, 0x60, 0xc0, 0x08, 0x60, 0x0b, 0x86, 0x68, 0x40, 0x00, 0x0c, 0x68, 0x08, 0x1f, 0x3e, 0x10, 0x60, 0x0b, 0x86, 0x68, 0x40, 0x00, 0x0c, 0x68, 0x08, 0x1f, 0x3e, 0x10, 0xff, 0x3e, 0x48, 0x60, 0x40, 0xfa, 0x10, 0x78, 0xff, 0xf6, 0xd4, 0x08, 0x01, 0x4e, 0x9c, 0x18, 0x40, 0x60, 0xc0, 0x00, 0x00, 0x4d, 0x10, 0x70, 0x01, 0x4e, 0x9c, 0x18, 0xbf, 0x60, 0xc0, 0x08, 0x00, 0x6a, 0x86, 0x5c, 0xff, 0x4e, 0xc8, 0x18, 0x02, 0x6a, 0x70, 0x5b, 0xff, 0x52, 0x20, 0x09, 0x0d, 0x6a, 0x6a, 0x00, 0x00, 0x52, 0xe6, 0x5b, 0x03, 0xb0, 0x52, 0x31, 0xff, 0xb0, 0x52, 0x09, 0xff, 0xb1, 0x54, 0x09, 0xff, 0xb2, 0x56, 0x09, 0xff, 0xa3, 0x50, 0x09, 0xff, 0x3e, 0x74, 0x09, 0xff, 0x90, 0x7c, 0x08, 0xff, 0x3e, 0x20, 0x09, 0x00, 0x65, 0x4e, 0x58, 0x00, 0x65, 0x0c, 0x40, 0xf7, 0x1f, 0xca, 0x08, 0x08, 0xa1, 0xc8, 0x08, 0x00, 0x65, 0xca, 0x00, 0xff, 0x65, 0x3e, 0x08, 0xf0, 0xa1, 0xc8, 0x08, 0x0f, 0x0f, 0x1e, 0x08, 0x00, 0x0f, 0x1e, 0x00, 0xf0, 0xa1, 0xc8, 0x08, 0x0f, 0x05, 0x0a, 0x08, 0x00, 0x05, 0x0a, 0x00, 0xff, 0x6a, 0x0c, 0x08, 0x5a, 0x6a, 0x00, 0x04, 0x12, 0x65, 0x02, 0x00, 0x31, 0x6a, 0xca, 0x00, 0x80, 0x37, 0x6e, 0x68, 0xff, 0x65, 0xca, 0x18, 0xff, 0x37, 0xdc, 0x08, 0xff, 0x6e, 0xc8, 0x08, 0x00, 0x6c, 0x76, 0x78, 0x20, 0x01, 0x02, 0x00, 0x4c, 0x37, 0xc8, 0x28, 0x08, 0x1f, 0x7e, 0x78, 0x08, 0x37, 0x6e, 0x00, 0x08, 0x64, 0xc8, 0x00, 0x70, 0x64, 0xca, 0x18, 0xff, 0x6c, 0x0a, 0x08, 0x20, 0x64, 0xca, 0x18, 0xff, 0x6c, 0x08, 0x0c, 0x40, 0x0b, 0x96, 0x68, 0x20, 0x6a, 0x16, 0x00, 0xf0, 0x19, 0x6e, 0x08, 0x08, 0x6a, 0x18, 0x00, 0x08, 0x11, 0x22, 0x00, 0x08, 0x6a, 0x66, 0x58, 0x08, 0x6a, 0x68, 0x00, 0x00, 0x65, 0xaa, 0x40, 0x12, 0x6a, 0x00, 0x00, 0x40, 0x6a, 0x16, 0x00, 0xff, 0x3e, 0x20, 0x09, 0xff, 0xba, 0x7c, 0x08, 0xff, 
0xa1, 0x6e, 0x08, 0x08, 0x6a, 0x18, 0x00, 0x08, 0x11, 0x22, 0x00, 0x08, 0x6a, 0x66, 0x58, 0x80, 0x6a, 0x68, 0x00, 0x80, 0x36, 0x6c, 0x00, 0x00, 0x65, 0xba, 0x5b, 0xff, 0x3d, 0xc8, 0x08, 0xbf, 0x64, 0xe2, 0x78, 0x80, 0x64, 0xc8, 0x71, 0xa0, 0x64, 0xf8, 0x71, 0xc0, 0x64, 0xf0, 0x71, 0xe0, 0x64, 0x38, 0x72, 0x01, 0x6a, 0x22, 0x01, 0x00, 0x65, 0xaa, 0x40, 0xf7, 0x11, 0x22, 0x08, 0x00, 0x65, 0xca, 0x58, 0xff, 0x06, 0xd4, 0x08, 0xf7, 0x01, 0x02, 0x08, 0x09, 0x0c, 0xc4, 0x78, 0x08, 0x0c, 0x0c, 0x68, 0x01, 0x6a, 0x22, 0x01, 0xff, 0x6a, 0x26, 0x09, 0x02, 0x6a, 0x08, 0x30, 0xff, 0x6a, 0x08, 0x08, 0xdf, 0x01, 0x02, 0x08, 0x01, 0x6a, 0x7a, 0x00, 0xff, 0x6a, 0x6c, 0x0c, 0x04, 0x14, 0x10, 0x31, 0x03, 0xa9, 0x18, 0x31, 0x03, 0xa9, 0x10, 0x30, 0x08, 0x6a, 0xcc, 0x00, 0xa9, 0x6a, 0xd0, 0x5b, 0x00, 0x65, 0x02, 0x41, 0xa8, 0x6a, 0x6a, 0x00, 0x79, 0x6a, 0x6a, 0x00, 0x40, 0x3d, 0xea, 0x68, 0x04, 0x35, 0x6a, 0x00, 0x00, 0x65, 0x2a, 0x5b, 0x80, 0x6a, 0xd4, 0x01, 0x10, 0x36, 0xd6, 0x68, 0x10, 0x36, 0x6c, 0x00, 0x07, 0xac, 0x10, 0x31, 0x05, 0xa3, 0x70, 0x30, 0x03, 0x8c, 0x10, 0x30, 0x88, 0x6a, 0xcc, 0x00, 0xac, 0x6a, 0xc8, 0x5b, 0x00, 0x65, 0xc2, 0x5b, 0x38, 0x6a, 0xcc, 0x00, 0xa3, 0x6a, 0xcc, 0x5b, 0xff, 0x38, 0x12, 0x69, 0x80, 0x02, 0x04, 0x00, 0xe7, 0x35, 0x6a, 0x08, 0x03, 0x69, 0x18, 0x31, 0x03, 0x69, 0x10, 0x30, 0xff, 0x6a, 0x10, 0x00, 0xff, 0x6a, 0x12, 0x00, 0xff, 0x6a, 0x14, 0x00, 0x22, 0x38, 0xc8, 0x28, 0x01, 0x38, 0x1c, 0x61, 0x02, 0x64, 0xc8, 0x00, 0x01, 0x38, 0x1c, 0x61, 0xbf, 0x35, 0x6a, 0x08, 0xff, 0x64, 0xf8, 0x09, 0xff, 0x35, 0x26, 0x09, 0x80, 0x02, 0xa4, 0x69, 0x10, 0x0c, 0x7a, 0x69, 0x80, 0x94, 0x22, 0x79, 0x00, 0x35, 0x0a, 0x5b, 0x80, 0x02, 0xa4, 0x69, 0xff, 0x65, 0x94, 0x79, 0x01, 0x38, 0x70, 0x71, 0xff, 0x38, 0x70, 0x18, 0xff, 0x38, 0x94, 0x79, 0x80, 0xea, 0x4a, 0x61, 0xef, 0x38, 0xc8, 0x18, 0x80, 0x6a, 0xc8, 0x00, 0x00, 0x65, 0x3c, 0x49, 0x33, 0x38, 0xc8, 0x28, 0xff, 0x64, 0xd0, 0x09, 0x04, 0x39, 0xc0, 0x31, 0x09, 0x6a, 0xd6, 0x01, 0x80, 0xeb, 0x42, 0x79, 0xf7, 0xeb, 
0xd6, 0x09, 0x08, 0xeb, 0x46, 0x69, 0x01, 0x6a, 0xd6, 0x01, 0x08, 0xe9, 0x10, 0x31, 0x03, 0x8c, 0x10, 0x30, 0xff, 0x38, 0x70, 0x18, 0x88, 0x6a, 0xcc, 0x00, 0x39, 0x6a, 0xce, 0x5b, 0x08, 0x6a, 0x18, 0x01, 0xff, 0x6a, 0x1a, 0x09, 0xff, 0x6a, 0x1c, 0x09, 0x0d, 0x93, 0x26, 0x01, 0x00, 0x65, 0x78, 0x5c, 0x88, 0x6a, 0xcc, 0x00, 0x00, 0x65, 0x6a, 0x5c, 0x00, 0x65, 0xc2, 0x5b, 0xff, 0x6a, 0xc8, 0x08, 0x08, 0x39, 0x72, 0x18, 0x00, 0x3a, 0x74, 0x20, 0x00, 0x65, 0x02, 0x41, 0x01, 0x0c, 0x6c, 0x79, 0x10, 0x0c, 0x02, 0x79, 0x10, 0x0c, 0x7a, 0x69, 0x01, 0xfc, 0x70, 0x79, 0xff, 0x6a, 0x70, 0x08, 0x01, 0x0c, 0x76, 0x79, 0x10, 0x0c, 0x02, 0x79, 0x00, 0x65, 0xae, 0x59, 0x01, 0xfc, 0x94, 0x69, 0x40, 0x0d, 0x84, 0x69, 0xb1, 0x6a, 0x22, 0x01, 0x00, 0x65, 0x94, 0x41, 0x2e, 0xfc, 0xa2, 0x28, 0x3f, 0x38, 0xc8, 0x08, 0x00, 0x51, 0x94, 0x71, 0xff, 0x6a, 0xc8, 0x08, 0xf8, 0x39, 0x72, 0x18, 0xff, 0x3a, 0x74, 0x20, 0x01, 0x38, 0x70, 0x18, 0x00, 0x65, 0x86, 0x41, 0x03, 0x08, 0x52, 0x31, 0xff, 0x38, 0x50, 0x09, 0x12, 0x01, 0x02, 0x00, 0xff, 0x08, 0x52, 0x09, 0xff, 0x09, 0x54, 0x09, 0xff, 0x0a, 0x56, 0x09, 0xff, 0x38, 0x50, 0x09, 0x00, 0x65, 0xaa, 0x40, 0x10, 0x0c, 0xa4, 0x79, 0x00, 0x65, 0xae, 0x59, 0x7f, 0x02, 0x04, 0x08, 0xe1, 0x6a, 0x22, 0x01, 0x00, 0x65, 0xaa, 0x40, 0x04, 0x93, 0xc2, 0x69, 0xdf, 0x93, 0x26, 0x09, 0x20, 0x93, 0xb2, 0x69, 0x02, 0x93, 0x26, 0x01, 0x01, 0x94, 0xb6, 0x79, 0x01, 0x94, 0xb6, 0x79, 0x01, 0x94, 0xb6, 0x79, 0x01, 0x94, 0xb6, 0x79, 0x01, 0x94, 0xb6, 0x79, 0x10, 0x94, 0xc0, 0x69, 0xd7, 0x93, 0x26, 0x09, 0x28, 0x93, 0xc4, 0x69, 0xff, 0x6a, 0xd4, 0x0c, 0x00, 0x65, 0x2a, 0x5b, 0x05, 0xb4, 0x10, 0x31, 0x02, 0x6a, 0x1a, 0x31, 0x03, 0x8c, 0x10, 0x30, 0x88, 0x6a, 0xcc, 0x00, 0xb4, 0x6a, 0xcc, 0x5b, 0xff, 0x6a, 0x1a, 0x09, 0xff, 0x6a, 0x1c, 0x09, 0x00, 0x65, 0xc2, 0x5b, 0x3d, 0x6a, 0x0a, 0x5b, 0xac, 0x6a, 0x26, 0x01, 0x04, 0x0b, 0xde, 0x69, 0x04, 0x0b, 0xe4, 0x69, 0x10, 0x0c, 0xe0, 0x79, 0x02, 0x03, 0xe8, 0x79, 0x11, 0x0c, 0xe4, 0x79, 0xd7, 0x93, 0x26, 0x09, 0x28, 0x93, 0xea, 
0x69, 0x12, 0x01, 0x02, 0x00, 0x00, 0x65, 0xaa, 0x40, 0x00, 0x65, 0x2a, 0x5b, 0xff, 0x06, 0x44, 0x09, 0x00, 0x65, 0xaa, 0x40, 0x10, 0x3d, 0x06, 0x00, 0xff, 0x34, 0xca, 0x08, 0x80, 0x65, 0x1c, 0x62, 0x0f, 0xa1, 0xca, 0x08, 0x07, 0xa1, 0xca, 0x08, 0x40, 0xa0, 0xc8, 0x08, 0x00, 0x65, 0xca, 0x00, 0x80, 0x65, 0xca, 0x00, 0x80, 0xa0, 0x0c, 0x7a, 0xff, 0x65, 0x0c, 0x08, 0x00, 0x65, 0x1e, 0x42, 0x20, 0xa0, 0x24, 0x7a, 0xff, 0x65, 0x0c, 0x08, 0x00, 0x65, 0xba, 0x5b, 0xa0, 0x3d, 0x2c, 0x62, 0x23, 0xa0, 0x0c, 0x08, 0x00, 0x65, 0xba, 0x5b, 0xa0, 0x3d, 0x2c, 0x62, 0x00, 0xb9, 0x24, 0x42, 0xff, 0x65, 0x24, 0x62, 0xa1, 0x6a, 0x22, 0x01, 0xff, 0x6a, 0xd4, 0x08, 0x10, 0x51, 0x2c, 0x72, 0x40, 0x6a, 0x18, 0x00, 0xff, 0x65, 0x0c, 0x08, 0x00, 0x65, 0xba, 0x5b, 0xa0, 0x3d, 0xf6, 0x71, 0x40, 0x6a, 0x18, 0x00, 0xff, 0x34, 0xa6, 0x08, 0x80, 0x34, 0x34, 0x62, 0x7f, 0xa0, 0x40, 0x09, 0x08, 0x6a, 0x68, 0x00, 0x00, 0x65, 0xaa, 0x40, 0x64, 0x6a, 0x00, 0x5b, 0x80, 0x64, 0xaa, 0x6a, 0x04, 0x64, 0x8c, 0x72, 0x02, 0x64, 0x92, 0x72, 0x00, 0x6a, 0x54, 0x72, 0x03, 0x64, 0xa6, 0x72, 0x01, 0x64, 0x88, 0x72, 0x07, 0x64, 0xe8, 0x72, 0x08, 0x64, 0x50, 0x72, 0x23, 0x64, 0xec, 0x72, 0x11, 0x6a, 0x22, 0x01, 0x07, 0x6a, 0xf2, 0x5a, 0xff, 0x06, 0xd4, 0x08, 0x00, 0x65, 0xaa, 0x40, 0xff, 0xa8, 0x58, 0x6a, 0xff, 0xa2, 0x70, 0x7a, 0x01, 0x6a, 0x6a, 0x00, 0x00, 0xb9, 0xe6, 0x5b, 0xff, 0xa2, 0x70, 0x7a, 0x71, 0x6a, 0x22, 0x01, 0xff, 0x6a, 0xd4, 0x08, 0x40, 0x51, 0x70, 0x62, 0x0d, 0x6a, 0x6a, 0x00, 0x00, 0xb9, 0xe6, 0x5b, 0xff, 0x3e, 0x74, 0x09, 0xff, 0x90, 0x7c, 0x08, 0x00, 0x65, 0x4e, 0x58, 0x00, 0x65, 0xbc, 0x40, 0x20, 0xa0, 0x78, 0x6a, 0xff, 0x37, 0xc8, 0x08, 0x00, 0x6a, 0x90, 0x5b, 0xff, 0x6a, 0xa6, 0x5b, 0xff, 0xf8, 0xc8, 0x08, 0xff, 0x4f, 0xc8, 0x08, 0x01, 0x6a, 0x90, 0x5b, 0x00, 0xb9, 0xa6, 0x5b, 0x01, 0x4f, 0x9e, 0x18, 0x02, 0x6a, 0x22, 0x01, 0x00, 0x65, 0x80, 0x5c, 0x00, 0x65, 0xbc, 0x40, 0x41, 0x6a, 0x22, 0x01, 0x00, 0x65, 0xaa, 0x40, 0x04, 0xa0, 0x40, 0x01, 0x00, 0x65, 0x98, 0x5c, 0x00, 0x65, 0xbc, 0x40, 
0x10, 0x36, 0x50, 0x7a, 0x05, 0x38, 0x46, 0x31, 0x04, 0x14, 0x58, 0x31, 0x03, 0xa9, 0x60, 0x31, 0xa3, 0x6a, 0xcc, 0x00, 0x38, 0x6a, 0xcc, 0x5b, 0xac, 0x6a, 0xcc, 0x00, 0x14, 0x6a, 0xce, 0x5b, 0xa9, 0x6a, 0xd0, 0x5b, 0x00, 0x65, 0x50, 0x42, 0xef, 0x36, 0x6c, 0x08, 0x00, 0x65, 0x50, 0x42, 0x0f, 0x64, 0xc8, 0x08, 0x07, 0x64, 0xc8, 0x08, 0x00, 0x37, 0x6e, 0x00, 0xff, 0x6a, 0xa4, 0x00, 0x00, 0x65, 0x60, 0x5b, 0xff, 0x51, 0xbc, 0x72, 0x20, 0x36, 0xc6, 0x7a, 0x00, 0x90, 0x4e, 0x5b, 0x00, 0x65, 0xc8, 0x42, 0xff, 0x06, 0xd4, 0x08, 0x00, 0x65, 0xba, 0x5b, 0xe0, 0x3d, 0xe2, 0x62, 0x20, 0x12, 0xe2, 0x62, 0x51, 0x6a, 0xf6, 0x5a, 0x00, 0x65, 0x48, 0x5b, 0xff, 0x37, 0xc8, 0x08, 0x00, 0xa1, 0xda, 0x62, 0x04, 0xa0, 0xda, 0x7a, 0xfb, 0xa0, 0x40, 0x09, 0x80, 0x36, 0x6c, 0x00, 0x80, 0xa0, 0x50, 0x7a, 0x7f, 0xa0, 0x40, 0x09, 0xff, 0x6a, 0xf2, 0x5a, 0x00, 0x65, 0x50, 0x42, 0x04, 0xa0, 0xe0, 0x7a, 0x00, 0x65, 0x98, 0x5c, 0x00, 0x65, 0xe2, 0x42, 0x00, 0x65, 0x80, 0x5c, 0x31, 0x6a, 0x22, 0x01, 0x0c, 0x6a, 0xf2, 0x5a, 0x00, 0x65, 0x50, 0x42, 0x61, 0x6a, 0x22, 0x01, 0x00, 0x65, 0x50, 0x42, 0x51, 0x6a, 0xf6, 0x5a, 0x51, 0x6a, 0x22, 0x01, 0x00, 0x65, 0x50, 0x42, 0x10, 0x3d, 0x06, 0x00, 0xff, 0x65, 0x68, 0x0c, 0xff, 0x06, 0xd4, 0x08, 0x01, 0x0c, 0xf8, 0x7a, 0x04, 0x0c, 0xfa, 0x6a, 0xe0, 0x03, 0x7a, 0x08, 0xe0, 0x3d, 0x06, 0x63, 0xff, 0x65, 0xcc, 0x08, 0xff, 0x12, 0xda, 0x0c, 0xff, 0x06, 0xd4, 0x0c, 0xd1, 0x6a, 0x22, 0x01, 0x00, 0x65, 0xaa, 0x40, 0xff, 0x65, 0x26, 0x09, 0x01, 0x0b, 0x1a, 0x6b, 0x10, 0x0c, 0x0c, 0x7b, 0x04, 0x0b, 0x14, 0x6b, 0xff, 0x6a, 0xca, 0x08, 0x04, 0x93, 0x18, 0x6b, 0x01, 0x94, 0x16, 0x7b, 0x10, 0x94, 0x18, 0x6b, 0x80, 0x3d, 0x1e, 0x73, 0x0f, 0x04, 0x22, 0x6b, 0x02, 0x03, 0x22, 0x7b, 0x11, 0x0c, 0x1e, 0x7b, 0xc7, 0x93, 0x26, 0x09, 0xff, 0x99, 0xd4, 0x08, 0x38, 0x93, 0x24, 0x6b, 0xff, 0x6a, 0xd4, 0x0c, 0x80, 0x36, 0x28, 0x6b, 0x21, 0x6a, 0x22, 0x05, 0xff, 0x65, 0x20, 0x09, 0xff, 0x51, 0x36, 0x63, 0xff, 0x37, 0xc8, 0x08, 0xa1, 0x6a, 0x42, 0x43, 0xff, 0x51, 0xc8, 0x08, 0xb9, 
0x6a, 0x42, 0x43, 0xff, 0x90, 0xa4, 0x08, 0xff, 0xba, 0x46, 0x73, 0xff, 0xba, 0x20, 0x09, 0xff, 0x65, 0xca, 0x18, 0x00, 0x6c, 0x3a, 0x63, 0xff, 0x90, 0xca, 0x0c, 0xff, 0x6a, 0xca, 0x04, 0x20, 0x36, 0x5a, 0x7b, 0x00, 0x90, 0x2e, 0x5b, 0xff, 0x65, 0x5a, 0x73, 0xff, 0x52, 0x58, 0x73, 0xff, 0xba, 0xcc, 0x08, 0xff, 0x52, 0x20, 0x09, 0xff, 0x66, 0x74, 0x09, 0xff, 0x65, 0x20, 0x0d, 0xff, 0xba, 0x7e, 0x0c, 0x00, 0x6a, 0x86, 0x5c, 0x0d, 0x6a, 0x6a, 0x00, 0x00, 0x51, 0xe6, 0x43, 0xff, 0x3f, 0xb4, 0x73, 0xff, 0x6a, 0xa2, 0x00, 0x00, 0x3f, 0x2e, 0x5b, 0xff, 0x65, 0xb4, 0x73, 0x20, 0x36, 0x6c, 0x00, 0x20, 0xa0, 0x6e, 0x6b, 0xff, 0xb9, 0xa2, 0x0c, 0xff, 0x6a, 0xa2, 0x04, 0xff, 0x65, 0xa4, 0x08, 0xe0, 0x6a, 0xcc, 0x00, 0x45, 0x6a, 0xda, 0x5b, 0x01, 0x6a, 0xd0, 0x01, 0x09, 0x6a, 0xd6, 0x01, 0x80, 0xeb, 0x7a, 0x7b, 0x01, 0x6a, 0xd6, 0x01, 0x01, 0xe9, 0xa4, 0x34, 0x88, 0x6a, 0xcc, 0x00, 0x45, 0x6a, 0xda, 0x5b, 0x01, 0x6a, 0x18, 0x01, 0xff, 0x6a, 0x1a, 0x09, 0xff, 0x6a, 0x1c, 0x09, 0x0d, 0x6a, 0x26, 0x01, 0x00, 0x65, 0x78, 0x5c, 0xff, 0x99, 0xa4, 0x0c, 0xff, 0x65, 0xa4, 0x08, 0xe0, 0x6a, 0xcc, 0x00, 0x45, 0x6a, 0xda, 0x5b, 0x01, 0x6a, 0xd0, 0x01, 0x01, 0x6a, 0xdc, 0x05, 0x88, 0x6a, 0xcc, 0x00, 0x45, 0x6a, 0xda, 0x5b, 0x01, 0x6a, 0x18, 0x01, 0xff, 0x6a, 0x1a, 0x09, 0xff, 0x6a, 0x1c, 0x09, 0x01, 0x6a, 0x26, 0x05, 0x01, 0x65, 0xd8, 0x31, 0x09, 0xee, 0xdc, 0x01, 0x80, 0xee, 0xaa, 0x7b, 0xff, 0x6a, 0xdc, 0x0d, 0xff, 0x65, 0x32, 0x09, 0x0a, 0x93, 0x26, 0x01, 0x00, 0x65, 0x78, 0x44, 0xff, 0x37, 0xc8, 0x08, 0x00, 0x6a, 0x70, 0x5b, 0xff, 0x52, 0xa2, 0x0c, 0x01, 0x0c, 0xba, 0x7b, 0x04, 0x0c, 0xba, 0x6b, 0xe0, 0x03, 0x06, 0x08, 0xe0, 0x03, 0x7a, 0x0c, 0xff, 0x8c, 0x10, 0x08, 0xff, 0x8d, 0x12, 0x08, 0xff, 0x8e, 0x14, 0x0c, 0xff, 0x6c, 0xda, 0x08, 0xff, 0x6c, 0xda, 0x08, 0xff, 0x6c, 0xda, 0x08, 0xff, 0x6c, 0xda, 0x08, 0xff, 0x6c, 0xda, 0x08, 0xff, 0x6c, 0xda, 0x08, 0xff, 0x6c, 0xda, 0x0c, 0x3d, 0x64, 0xa4, 0x28, 0x55, 0x64, 0xc8, 0x28, 0x00, 0x6c, 0xda, 0x18, 0xff, 0x52, 0xc8, 0x08, 0x00, 0x6c, 
0xda, 0x20, 0xff, 0x6a, 0xc8, 0x08, 0x00, 0x6c, 0xda, 0x20, 0x00, 0x6c, 0xda, 0x24, 0xff, 0x65, 0xc8, 0x08, 0xe0, 0x6a, 0xcc, 0x00, 0x41, 0x6a, 0xd6, 0x5b, 0xff, 0x90, 0xe2, 0x09, 0x20, 0x6a, 0xd0, 0x01, 0x04, 0x35, 0xf8, 0x7b, 0x1d, 0x6a, 0xdc, 0x01, 0xdc, 0xee, 0xf4, 0x63, 0x00, 0x65, 0x0e, 0x44, 0x01, 0x6a, 0xdc, 0x01, 0x20, 0xa0, 0xd8, 0x31, 0x09, 0xee, 0xdc, 0x01, 0x80, 0xee, 0xfe, 0x7b, 0x11, 0x6a, 0xdc, 0x01, 0x50, 0xee, 0x02, 0x64, 0x20, 0x6a, 0xd0, 0x01, 0x09, 0x6a, 0xdc, 0x01, 0x88, 0xee, 0x08, 0x64, 0x19, 0x6a, 0xdc, 0x01, 0xd8, 0xee, 0x0c, 0x64, 0xff, 0x6a, 0xdc, 0x09, 0x18, 0xee, 0x10, 0x6c, 0xff, 0x6a, 0xd4, 0x0c, 0x88, 0x6a, 0xcc, 0x00, 0x41, 0x6a, 0xd6, 0x5b, 0x20, 0x6a, 0x18, 0x01, 0xff, 0x6a, 0x1a, 0x09, 0xff, 0x6a, 0x1c, 0x09, 0xff, 0x35, 0x26, 0x09, 0x04, 0x35, 0x3c, 0x6c, 0xa0, 0x6a, 0xca, 0x00, 0x20, 0x65, 0xc8, 0x18, 0xff, 0x6c, 0x32, 0x09, 0xff, 0x6c, 0x32, 0x09, 0xff, 0x6c, 0x32, 0x09, 0xff, 0x6c, 0x32, 0x09, 0xff, 0x6c, 0x32, 0x09, 0xff, 0x6c, 0x32, 0x09, 0xff, 0x6c, 0x32, 0x09, 0xff, 0x6c, 0x32, 0x09, 0x00, 0x65, 0x26, 0x64, 0x0a, 0x93, 0x26, 0x01, 0x00, 0x65, 0x78, 0x44, 0xa0, 0x6a, 0xcc, 0x00, 0xe8, 0x6a, 0xc8, 0x00, 0x01, 0x94, 0x40, 0x6c, 0x10, 0x94, 0x42, 0x6c, 0x08, 0x94, 0x54, 0x6c, 0x08, 0x94, 0x54, 0x6c, 0x08, 0x94, 0x54, 0x6c, 0x00, 0x65, 0x68, 0x5c, 0x08, 0x64, 0xc8, 0x18, 0x00, 0x8c, 0xca, 0x18, 0x00, 0x65, 0x4a, 0x4c, 0x00, 0x65, 0x40, 0x44, 0xf7, 0x93, 0x26, 0x09, 0x08, 0x93, 0x56, 0x6c, 0x00, 0x65, 0x68, 0x5c, 0x08, 0x64, 0xc8, 0x18, 0x08, 0x64, 0x58, 0x64, 0xff, 0x6a, 0xd4, 0x0c, 0x00, 0x65, 0x78, 0x5c, 0x00, 0x65, 0x68, 0x5c, 0x00, 0x65, 0x68, 0x5c, 0x00, 0x65, 0x68, 0x5c, 0xff, 0x99, 0xda, 0x08, 0xff, 0x99, 0xda, 0x08, 0xff, 0x99, 0xda, 0x08, 0xff, 0x99, 0xda, 0x08, 0xff, 0x99, 0xda, 0x08, 0xff, 0x99, 0xda, 0x08, 0xff, 0x99, 0xda, 0x08, 0xff, 0x99, 0xda, 0x0c, 0x08, 0x94, 0x78, 0x7c, 0xf7, 0x93, 0x26, 0x09, 0x08, 0x93, 0x7c, 0x6c, 0xff, 0x6a, 0xd4, 0x0c, 0xff, 0x40, 0x74, 0x09, 0xff, 0x90, 0x80, 0x08, 0xff, 0x6a, 0x72, 
0x05, 0xff, 0x40, 0x94, 0x64, 0xff, 0x3f, 0x8c, 0x64, 0xff, 0x6a, 0xca, 0x04, 0xff, 0x3f, 0x20, 0x09, 0x01, 0x6a, 0x6a, 0x00, 0x00, 0xb9, 0xe6, 0x5b, 0xff, 0xba, 0x7e, 0x0c, 0xff, 0x40, 0x20, 0x09, 0xff, 0xba, 0x80, 0x0c, 0xff, 0x3f, 0x74, 0x09, 0xff, 0x90, 0x7e, 0x0c, }; static int aic7xxx_patch15_func(struct aic7xxx_host *p); static int aic7xxx_patch15_func(struct aic7xxx_host *p) { return ((p->bugs & AHC_BUG_SCBCHAN_UPLOAD) != 0); } static int aic7xxx_patch14_func(struct aic7xxx_host *p); static int aic7xxx_patch14_func(struct aic7xxx_host *p) { return ((p->bugs & AHC_BUG_PCI_2_1_RETRY) != 0); } static int aic7xxx_patch13_func(struct aic7xxx_host *p); static int aic7xxx_patch13_func(struct aic7xxx_host *p) { return ((p->features & AHC_WIDE) != 0); } static int aic7xxx_patch12_func(struct aic7xxx_host *p); static int aic7xxx_patch12_func(struct aic7xxx_host *p) { return ((p->bugs & AHC_BUG_AUTOFLUSH) != 0); } static int aic7xxx_patch11_func(struct aic7xxx_host *p); static int aic7xxx_patch11_func(struct aic7xxx_host *p) { return ((p->features & AHC_ULTRA2) == 0); } static int aic7xxx_patch10_func(struct aic7xxx_host *p); static int aic7xxx_patch10_func(struct aic7xxx_host *p) { return ((p->features & AHC_CMD_CHAN) == 0); } static int aic7xxx_patch9_func(struct aic7xxx_host *p); static int aic7xxx_patch9_func(struct aic7xxx_host *p) { return ((p->chip & AHC_CHIPID_MASK) == AHC_AIC7895); } static int aic7xxx_patch8_func(struct aic7xxx_host *p); static int aic7xxx_patch8_func(struct aic7xxx_host *p) { return ((p->features & AHC_ULTRA) != 0); } static int aic7xxx_patch7_func(struct aic7xxx_host *p); static int aic7xxx_patch7_func(struct aic7xxx_host *p) { return ((p->features & AHC_ULTRA2) != 0); } static int aic7xxx_patch6_func(struct aic7xxx_host *p); static int aic7xxx_patch6_func(struct aic7xxx_host *p) { return ((p->flags & AHC_PAGESCBS) == 0); } static int aic7xxx_patch5_func(struct aic7xxx_host *p); static int aic7xxx_patch5_func(struct aic7xxx_host *p) { 
return ((p->flags & AHC_PAGESCBS) != 0); } static int aic7xxx_patch4_func(struct aic7xxx_host *p); static int aic7xxx_patch4_func(struct aic7xxx_host *p) { return ((p->features & AHC_QUEUE_REGS) != 0); } static int aic7xxx_patch3_func(struct aic7xxx_host *p); static int aic7xxx_patch3_func(struct aic7xxx_host *p) { return ((p->features & AHC_TWIN) != 0); } static int aic7xxx_patch2_func(struct aic7xxx_host *p); static int aic7xxx_patch2_func(struct aic7xxx_host *p) { return ((p->features & AHC_QUEUE_REGS) == 0); } static int aic7xxx_patch1_func(struct aic7xxx_host *p); static int aic7xxx_patch1_func(struct aic7xxx_host *p) { return ((p->features & AHC_CMD_CHAN) != 0); } static int aic7xxx_patch0_func(struct aic7xxx_host *p); static int aic7xxx_patch0_func(struct aic7xxx_host *p) { return (0); } struct sequencer_patch { int (*patch_func)(struct aic7xxx_host *); unsigned int begin :10, skip_instr :10, skip_patch :12; } sequencer_patches[] = { { aic7xxx_patch1_func, 3, 2, 1 }, { aic7xxx_patch2_func, 7, 1, 1 }, { aic7xxx_patch2_func, 8, 1, 1 }, { aic7xxx_patch3_func, 11, 4, 1 }, { aic7xxx_patch4_func, 16, 3, 2 }, { aic7xxx_patch0_func, 19, 4, 1 }, { aic7xxx_patch5_func, 23, 1, 1 }, { aic7xxx_patch6_func, 26, 1, 1 }, { aic7xxx_patch1_func, 29, 1, 2 }, { aic7xxx_patch0_func, 30, 3, 1 }, { aic7xxx_patch3_func, 39, 4, 1 }, { aic7xxx_patch7_func, 43, 3, 2 }, { aic7xxx_patch0_func, 46, 3, 1 }, { aic7xxx_patch8_func, 52, 7, 1 }, { aic7xxx_patch3_func, 60, 3, 1 }, { aic7xxx_patch7_func, 63, 2, 1 }, { aic7xxx_patch7_func, 102, 1, 2 }, { aic7xxx_patch0_func, 103, 2, 1 }, { aic7xxx_patch7_func, 107, 2, 1 }, { aic7xxx_patch9_func, 109, 1, 1 }, { aic7xxx_patch10_func, 110, 2, 1 }, { aic7xxx_patch7_func, 113, 1, 2 }, { aic7xxx_patch0_func, 114, 1, 1 }, { aic7xxx_patch1_func, 118, 1, 1 }, { aic7xxx_patch1_func, 121, 3, 3 }, { aic7xxx_patch11_func, 123, 1, 1 }, { aic7xxx_patch0_func, 124, 5, 1 }, { aic7xxx_patch7_func, 132, 1, 1 }, { aic7xxx_patch9_func, 133, 1, 1 }, { 
aic7xxx_patch10_func, 134, 3, 1 }, { aic7xxx_patch7_func, 137, 3, 2 }, { aic7xxx_patch0_func, 140, 2, 1 }, { aic7xxx_patch7_func, 142, 5, 2 }, { aic7xxx_patch0_func, 147, 3, 1 }, { aic7xxx_patch7_func, 150, 1, 2 }, { aic7xxx_patch0_func, 151, 2, 1 }, { aic7xxx_patch1_func, 153, 15, 4 }, { aic7xxx_patch11_func, 166, 1, 2 }, { aic7xxx_patch0_func, 167, 1, 1 }, { aic7xxx_patch0_func, 168, 10, 1 }, { aic7xxx_patch7_func, 181, 1, 2 }, { aic7xxx_patch0_func, 182, 2, 1 }, { aic7xxx_patch7_func, 184, 18, 1 }, { aic7xxx_patch1_func, 202, 3, 3 }, { aic7xxx_patch7_func, 204, 1, 1 }, { aic7xxx_patch0_func, 205, 4, 1 }, { aic7xxx_patch7_func, 210, 2, 1 }, { aic7xxx_patch7_func, 215, 13, 3 }, { aic7xxx_patch12_func, 218, 1, 1 }, { aic7xxx_patch12_func, 219, 4, 1 }, { aic7xxx_patch1_func, 229, 3, 3 }, { aic7xxx_patch11_func, 231, 1, 1 }, { aic7xxx_patch0_func, 232, 5, 1 }, { aic7xxx_patch11_func, 237, 1, 2 }, { aic7xxx_patch0_func, 238, 9, 1 }, { aic7xxx_patch13_func, 254, 1, 2 }, { aic7xxx_patch0_func, 255, 1, 1 }, { aic7xxx_patch4_func, 316, 1, 2 }, { aic7xxx_patch0_func, 317, 1, 1 }, { aic7xxx_patch2_func, 320, 1, 1 }, { aic7xxx_patch1_func, 330, 3, 2 }, { aic7xxx_patch0_func, 333, 5, 1 }, { aic7xxx_patch13_func, 341, 1, 2 }, { aic7xxx_patch0_func, 342, 1, 1 }, { aic7xxx_patch5_func, 347, 1, 1 }, { aic7xxx_patch11_func, 389, 15, 2 }, { aic7xxx_patch14_func, 402, 1, 1 }, { aic7xxx_patch1_func, 441, 7, 2 }, { aic7xxx_patch0_func, 448, 8, 1 }, { aic7xxx_patch1_func, 457, 4, 2 }, { aic7xxx_patch0_func, 461, 6, 1 }, { aic7xxx_patch1_func, 467, 4, 2 }, { aic7xxx_patch0_func, 471, 3, 1 }, { aic7xxx_patch10_func, 481, 10, 1 }, { aic7xxx_patch1_func, 500, 22, 5 }, { aic7xxx_patch11_func, 508, 4, 1 }, { aic7xxx_patch7_func, 512, 7, 3 }, { aic7xxx_patch15_func, 512, 5, 2 }, { aic7xxx_patch0_func, 517, 2, 1 }, { aic7xxx_patch10_func, 522, 50, 3 }, { aic7xxx_patch14_func, 543, 17, 2 }, { aic7xxx_patch0_func, 560, 4, 1 }, { aic7xxx_patch10_func, 572, 4, 1 }, { aic7xxx_patch5_func, 576, 2, 1 
}, { aic7xxx_patch5_func, 579, 9, 1 }, };
gpl-2.0
TaichiN/android_kernel_google_msm
drivers/video/via/vt1636.c
12970
7612
/* * Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved. * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved. * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; * either version 2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even * the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE.See the GNU General Public License * for more details. * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/via-core.h> #include <linux/via_i2c.h> #include "global.h" static const struct IODATA common_init_data[] = { /* Index, Mask, Value */ /* Set panel power sequence timing */ {0x10, 0xC0, 0x00}, /* T1: VDD on - Data on. Each increment is 1 ms. (50ms = 031h) */ {0x0B, 0xFF, 0x40}, /* T2: Data on - Backlight on. Each increment is 2 ms. (210ms = 068h) */ {0x0C, 0xFF, 0x31}, /* T3: Backlight off -Data off. Each increment is 2 ms. (210ms = 068h)*/ {0x0D, 0xFF, 0x31}, /* T4: Data off - VDD off. Each increment is 1 ms. (50ms = 031h) */ {0x0E, 0xFF, 0x68}, /* T5: VDD off - VDD on. Each increment is 100 ms. 
(500ms = 04h) */ {0x0F, 0xFF, 0x68}, /* LVDS output power up */ {0x09, 0xA0, 0xA0}, /* turn on back light */ {0x10, 0x33, 0x13} }; /* Index, Mask, Value */ static const struct IODATA dual_channel_enable_data = {0x08, 0xF0, 0xE0}; static const struct IODATA single_channel_enable_data = {0x08, 0xF0, 0x00}; static const struct IODATA dithering_enable_data = {0x0A, 0x70, 0x50}; static const struct IODATA dithering_disable_data = {0x0A, 0x70, 0x00}; static const struct IODATA vdd_on_data = {0x10, 0x20, 0x20}; static const struct IODATA vdd_off_data = {0x10, 0x20, 0x00}; u8 viafb_gpio_i2c_read_lvds(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info, u8 index) { u8 data; viafb_i2c_readbyte(plvds_chip_info->i2c_port, plvds_chip_info->lvds_chip_slave_addr, index, &data); return data; } void viafb_gpio_i2c_write_mask_lvds(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info, struct IODATA io_data) { int index, data; index = io_data.Index; data = viafb_gpio_i2c_read_lvds(plvds_setting_info, plvds_chip_info, index); data = (data & (~io_data.Mask)) | io_data.Data; viafb_i2c_writebyte(plvds_chip_info->i2c_port, plvds_chip_info->lvds_chip_slave_addr, index, data); } void viafb_init_lvds_vt1636(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info) { int reg_num, i; /* Common settings: */ reg_num = ARRAY_SIZE(common_init_data); for (i = 0; i < reg_num; i++) viafb_gpio_i2c_write_mask_lvds(plvds_setting_info, plvds_chip_info, common_init_data[i]); /* Input Data Mode Select */ if (plvds_setting_info->device_lcd_dualedge) viafb_gpio_i2c_write_mask_lvds(plvds_setting_info, plvds_chip_info, dual_channel_enable_data); else viafb_gpio_i2c_write_mask_lvds(plvds_setting_info, plvds_chip_info, single_channel_enable_data); if (plvds_setting_info->LCDDithering) viafb_gpio_i2c_write_mask_lvds(plvds_setting_info, plvds_chip_info, dithering_enable_data); else 
viafb_gpio_i2c_write_mask_lvds(plvds_setting_info, plvds_chip_info, dithering_disable_data); } void viafb_enable_lvds_vt1636(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info) { viafb_gpio_i2c_write_mask_lvds(plvds_setting_info, plvds_chip_info, vdd_on_data); } void viafb_disable_lvds_vt1636(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info) { viafb_gpio_i2c_write_mask_lvds(plvds_setting_info, plvds_chip_info, vdd_off_data); } bool viafb_lvds_identify_vt1636(u8 i2c_adapter) { u8 Buffer[2]; DEBUG_MSG(KERN_INFO "viafb_lvds_identify_vt1636.\n"); /* Sense VT1636 LVDS Transmiter */ viaparinfo->chip_info->lvds_chip_info.lvds_chip_slave_addr = VT1636_LVDS_I2C_ADDR; /* Check vendor ID first: */ if (viafb_i2c_readbyte(i2c_adapter, VT1636_LVDS_I2C_ADDR, 0x00, &Buffer[0])) return false; viafb_i2c_readbyte(i2c_adapter, VT1636_LVDS_I2C_ADDR, 0x01, &Buffer[1]); if (!((Buffer[0] == 0x06) && (Buffer[1] == 0x11))) return false; /* Check Chip ID: */ viafb_i2c_readbyte(i2c_adapter, VT1636_LVDS_I2C_ADDR, 0x02, &Buffer[0]); viafb_i2c_readbyte(i2c_adapter, VT1636_LVDS_I2C_ADDR, 0x03, &Buffer[1]); if ((Buffer[0] == 0x45) && (Buffer[1] == 0x33)) { viaparinfo->chip_info->lvds_chip_info.lvds_chip_name = VT1636_LVDS; return true; } return false; } static int get_clk_range_index(u32 Clk) { if (Clk < DPA_CLK_30M) return DPA_CLK_RANGE_30M; else if (Clk < DPA_CLK_50M) return DPA_CLK_RANGE_30_50M; else if (Clk < DPA_CLK_70M) return DPA_CLK_RANGE_50_70M; else if (Clk < DPA_CLK_100M) return DPA_CLK_RANGE_70_100M; else if (Clk < DPA_CLK_150M) return DPA_CLK_RANGE_100_150M; else return DPA_CLK_RANGE_150M; } static void set_dpa_vt1636(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info, struct VT1636_DPA_SETTING *p_vt1636_dpa_setting) { struct IODATA io_data; io_data.Index = 0x09; io_data.Mask = 0x1F; io_data.Data = p_vt1636_dpa_setting->CLK_SEL_ST1; 
viafb_gpio_i2c_write_mask_lvds(plvds_setting_info, plvds_chip_info, io_data); io_data.Index = 0x08; io_data.Mask = 0x0F; io_data.Data = p_vt1636_dpa_setting->CLK_SEL_ST2; viafb_gpio_i2c_write_mask_lvds(plvds_setting_info, plvds_chip_info, io_data); } void viafb_vt1636_patch_skew_on_vt3324( struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info) { struct VT1636_DPA_SETTING dpa = {0x00, 0x00}, dpa_16x12 = {0x0B, 0x03}, *pdpa; int index; DEBUG_MSG(KERN_INFO "viafb_vt1636_patch_skew_on_vt3324.\n"); /* Graphics DPA settings: */ index = get_clk_range_index(plvds_setting_info->vclk); viafb_set_dpa_gfx(plvds_chip_info->output_interface, &GFX_DPA_SETTING_TBL_VT3324[index]); /* LVDS Transmitter DPA settings: */ if (plvds_setting_info->lcd_panel_hres == 1600 && plvds_setting_info->lcd_panel_vres == 1200) pdpa = &dpa_16x12; else pdpa = &dpa; set_dpa_vt1636(plvds_setting_info, plvds_chip_info, pdpa); } void viafb_vt1636_patch_skew_on_vt3327( struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info) { struct VT1636_DPA_SETTING dpa = {0x00, 0x00}; int index; DEBUG_MSG(KERN_INFO "viafb_vt1636_patch_skew_on_vt3327.\n"); /* Graphics DPA settings: */ index = get_clk_range_index(plvds_setting_info->vclk); viafb_set_dpa_gfx(plvds_chip_info->output_interface, &GFX_DPA_SETTING_TBL_VT3327[index]); /* LVDS Transmitter DPA settings: */ set_dpa_vt1636(plvds_setting_info, plvds_chip_info, &dpa); } void viafb_vt1636_patch_skew_on_vt3364( struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info) { int index; DEBUG_MSG(KERN_INFO "viafb_vt1636_patch_skew_on_vt3364.\n"); /* Graphics DPA settings: */ index = get_clk_range_index(plvds_setting_info->vclk); viafb_set_dpa_gfx(plvds_chip_info->output_interface, &GFX_DPA_SETTING_TBL_VT3364[index]); }
gpl-2.0
TeamEOS/kernel_samsung_manta
arch/frv/lib/checksum.c
13994
4201
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * IP/TCP/UDP checksumming routines * * Authors: Jorge Cwik, <jorge@laser.satlink.net> * Arnt Gulbrandsen, <agulbra@nvg.unit.no> * Tom May, <ftom@netcom.com> * Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de> * Lots of code moved from tcp.c and ip.c; see those files * for more names. * * 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek: * Fixed some nasty bugs, causing some horrible crashes. * A: At some points, the sum (%0) was used as * length-counter instead of the length counter * (%1). Thanks to Roman Hodek for pointing this out. * B: GCC seems to mess up if one uses too many * data-registers to hold input values and one tries to * specify d0 and d1 as scratch registers. Letting gcc choose these * registers itself solves the problem. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ /* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access kills, so most of the assembly has to go. */ #include <net/checksum.h> #include <linux/module.h> static inline unsigned short from32to16(unsigned long x) { /* add up 16-bit and 16-bit for 16+c bit */ x = (x & 0xffff) + (x >> 16); /* add up carry.. */ x = (x & 0xffff) + (x >> 16); return x; } static unsigned long do_csum(const unsigned char * buff, int len) { int odd, count; unsigned long result = 0; if (len <= 0) goto out; odd = 1 & (unsigned long) buff; if (odd) { result = *buff; len--; buff++; } count = len >> 1; /* nr of 16-bit words.. */ if (count) { if (2 & (unsigned long) buff) { result += *(unsigned short *) buff; count--; len -= 2; buff += 2; } count >>= 1; /* nr of 32-bit words.. 
*/ if (count) { unsigned long carry = 0; do { unsigned long w = *(unsigned long *) buff; count--; buff += 4; result += carry; result += w; carry = (w > result); } while (count); result += carry; result = (result & 0xffff) + (result >> 16); } if (len & 2) { result += *(unsigned short *) buff; buff += 2; } } if (len & 1) result += (*buff << 8); result = from32to16(result); if (odd) result = ((result >> 8) & 0xff) | ((result & 0xff) << 8); out: return result; } /* * computes the checksum of a memory block at buff, length len, * and adds in "sum" (32-bit) * * returns a 32-bit number suitable for feeding into itself * or csum_tcpudp_magic * * this function must be called with even lengths, except * for the last fragment, which may be odd * * it's best to have buff aligned on a 32-bit boundary */ __wsum csum_partial(const void *buff, int len, __wsum sum) { unsigned int result = do_csum(buff, len); /* add in old sum, and carry.. */ result += (__force u32)sum; if ((__force u32)sum > result) result += 1; return (__force __wsum)result; } EXPORT_SYMBOL(csum_partial); /* * this routine is used for miscellaneous IP-like checksums, mainly * in icmp.c */ __sum16 ip_compute_csum(const void *buff, int len) { return (__force __sum16)~do_csum(buff, len); } EXPORT_SYMBOL(ip_compute_csum); /* * copy from fs while checksumming, otherwise like csum_partial */ __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *csum_err) { int rem; if (csum_err) *csum_err = 0; rem = copy_from_user(dst, src, len); if (rem != 0) { if (csum_err) *csum_err = -EFAULT; memset(dst + len - rem, 0, rem); len = rem; } return csum_partial(dst, len, sum); } EXPORT_SYMBOL(csum_partial_copy_from_user); /* * copy from ds while checksumming, otherwise like csum_partial */ __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) { memcpy(dst, src, len); return csum_partial(dst, len, sum); } EXPORT_SYMBOL(csum_partial_copy_nocheck);
gpl-2.0
Aircell/asp-kernel
drivers/video/i810/i810_gtf.c
15530
9148
/*-*- linux-c -*- * linux/drivers/video/i810_main.h -- Intel 810 Non-discrete Video Timings * (VESA GTF) * * Copyright (C) 2001 Antonino Daplas<adaplas@pol.net> * All Rights Reserved * * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/kernel.h> #include "i810_regs.h" #include "i810.h" #include "i810_main.h" /* * FIFO and Watermark tables - based almost wholly on i810_wmark.c in * XFree86 v4.03 by Precision Insight. Slightly modified for integer * operation, instead of float */ struct wm_info { u32 freq; u32 wm; }; static struct wm_info i810_wm_8_100[] = { { 15, 0x0070c000 }, { 19, 0x0070c000 }, { 25, 0x22003000 }, { 28, 0x22003000 }, { 31, 0x22003000 }, { 36, 0x22007000 }, { 40, 0x22007000 }, { 45, 0x22007000 }, { 49, 0x22008000 }, { 50, 0x22008000 }, { 56, 0x22008000 }, { 65, 0x22008000 }, { 75, 0x22008000 }, { 78, 0x22008000 }, { 80, 0x22008000 }, { 94, 0x22008000 }, { 96, 0x22107000 }, { 99, 0x22107000 }, { 108, 0x22107000 }, { 121, 0x22107000 }, { 128, 0x22107000 }, { 132, 0x22109000 }, { 135, 0x22109000 }, { 157, 0x2210b000 }, { 162, 0x2210b000 }, { 175, 0x2210b000 }, { 189, 0x2220e000 }, { 195, 0x2220e000 }, { 202, 0x2220e000 }, { 204, 0x2220e000 }, { 218, 0x2220f000 }, { 229, 0x22210000 }, { 234, 0x22210000 }, }; static struct wm_info i810_wm_16_100[] = { { 15, 0x0070c000 }, { 19, 0x0020c000 }, { 25, 0x22006000 }, { 28, 0x22006000 }, { 31, 0x22007000 }, { 36, 0x22007000 }, { 40, 0x22007000 }, { 45, 0x22007000 }, { 49, 0x22009000 }, { 50, 0x22009000 }, { 56, 0x22108000 }, { 65, 0x2210e000 }, { 75, 0x2210e000 }, { 78, 0x2210e000 }, { 80, 0x22210000 }, { 94, 0x22210000 }, { 96, 0x22210000 }, { 99, 0x22210000 }, { 108, 0x22210000 }, { 121, 0x22210000 }, { 128, 0x22210000 }, { 132, 0x22314000 }, { 135, 0x22314000 }, { 157, 0x22415000 }, { 162, 0x22416000 }, { 175, 0x22416000 }, { 189, 0x22416000 }, { 195, 0x22416000 }, { 202, 
0x22416000 }, { 204, 0x22416000 }, { 218, 0x22416000 }, { 229, 0x22416000 }, }; static struct wm_info i810_wm_24_100[] = { { 15, 0x0020c000 }, { 19, 0x0040c000 }, { 25, 0x22009000 }, { 28, 0x22009000 }, { 31, 0x2200a000 }, { 36, 0x2210c000 }, { 40, 0x2210c000 }, { 45, 0x2210c000 }, { 49, 0x22111000 }, { 50, 0x22111000 }, { 56, 0x22111000 }, { 65, 0x22214000 }, { 75, 0x22214000 }, { 78, 0x22215000 }, { 80, 0x22216000 }, { 94, 0x22218000 }, { 96, 0x22418000 }, { 99, 0x22418000 }, { 108, 0x22418000 }, { 121, 0x22418000 }, { 128, 0x22419000 }, { 132, 0x22519000 }, { 135, 0x4441d000 }, { 157, 0x44419000 }, { 162, 0x44419000 }, { 175, 0x44419000 }, { 189, 0x44419000 }, { 195, 0x44419000 }, { 202, 0x44419000 }, { 204, 0x44419000 }, }; static struct wm_info i810_wm_8_133[] = { { 15, 0x0070c000 }, { 19, 0x0070c000 }, { 25, 0x22003000 }, { 28, 0x22003000 }, { 31, 0x22003000 }, { 36, 0x22007000 }, { 40, 0x22007000 }, { 45, 0x22007000 }, { 49, 0x22008000 }, { 50, 0x22008000 }, { 56, 0x22008000 }, { 65, 0x22008000 }, { 75, 0x22008000 }, { 78, 0x22008000 }, { 80, 0x22008000 }, { 94, 0x22008000 }, { 96, 0x22107000 }, { 99, 0x22107000 }, { 108, 0x22107000 }, { 121, 0x22107000 }, { 128, 0x22107000 }, { 132, 0x22109000 }, { 135, 0x22109000 }, { 157, 0x2210b000 }, { 162, 0x2210b000 }, { 175, 0x2210b000 }, { 189, 0x2220e000 }, { 195, 0x2220e000 }, { 202, 0x2220e000 }, { 204, 0x2220e000 }, { 218, 0x2220f000 }, { 229, 0x22210000 }, { 234, 0x22210000 }, }; static struct wm_info i810_wm_16_133[] = { { 15, 0x0020c000 }, { 19, 0x0020c000 }, { 25, 0x22006000 }, { 28, 0x22006000 }, { 31, 0x22007000 }, { 36, 0x22007000 }, { 40, 0x22007000 }, { 45, 0x22007000 }, { 49, 0x22009000 }, { 50, 0x22009000 }, { 56, 0x22108000 }, { 65, 0x2210e000 }, { 75, 0x2210e000 }, { 78, 0x2210e000 }, { 80, 0x22210000 }, { 94, 0x22210000 }, { 96, 0x22210000 }, { 99, 0x22210000 }, { 108, 0x22210000 }, { 121, 0x22210000 }, { 128, 0x22210000 }, { 132, 0x22314000 }, { 135, 0x22314000 }, { 157, 0x22415000 }, { 162, 
0x22416000 }, { 175, 0x22416000 }, { 189, 0x22416000 }, { 195, 0x22416000 }, { 202, 0x22416000 }, { 204, 0x22416000 }, { 218, 0x22416000 }, { 229, 0x22416000 }, }; static struct wm_info i810_wm_24_133[] = { { 15, 0x0020c000 }, { 19, 0x00408000 }, { 25, 0x22009000 }, { 28, 0x22009000 }, { 31, 0x2200a000 }, { 36, 0x2210c000 }, { 40, 0x2210c000 }, { 45, 0x2210c000 }, { 49, 0x22111000 }, { 50, 0x22111000 }, { 56, 0x22111000 }, { 65, 0x22214000 }, { 75, 0x22214000 }, { 78, 0x22215000 }, { 80, 0x22216000 }, { 94, 0x22218000 }, { 96, 0x22418000 }, { 99, 0x22418000 }, { 108, 0x22418000 }, { 121, 0x22418000 }, { 128, 0x22419000 }, { 132, 0x22519000 }, { 135, 0x4441d000 }, { 157, 0x44419000 }, { 162, 0x44419000 }, { 175, 0x44419000 }, { 189, 0x44419000 }, { 195, 0x44419000 }, { 202, 0x44419000 }, { 204, 0x44419000 }, }; void round_off_xres(u32 *xres) { } void round_off_yres(u32 *xres, u32 *yres) { } /** * i810fb_encode_registers - encode @var to hardware register values * @var: pointer to var structure * @par: pointer to hardware par structure * * DESCRIPTION: * Timing values in @var will be converted to appropriate * register values of @par. 
*/ void i810fb_encode_registers(const struct fb_var_screeninfo *var, struct i810fb_par *par, u32 xres, u32 yres) { int n, blank_s, blank_e; u8 __iomem *mmio = par->mmio_start_virtual; u8 msr = 0; /* Horizontal */ /* htotal */ n = ((xres + var->right_margin + var->hsync_len + var->left_margin) >> 3) - 5; par->regs.cr00 = (u8) n; par->regs.cr35 = (u8) ((n >> 8) & 1); /* xres */ par->regs.cr01 = (u8) ((xres >> 3) - 1); /* hblank */ blank_e = (xres + var->right_margin + var->hsync_len + var->left_margin) >> 3; blank_e--; blank_s = blank_e - 127; if (blank_s < (xres >> 3)) blank_s = xres >> 3; par->regs.cr02 = (u8) blank_s; par->regs.cr03 = (u8) (blank_e & 0x1F); par->regs.cr05 = (u8) ((blank_e & (1 << 5)) << 2); par->regs.cr39 = (u8) ((blank_e >> 6) & 1); /* hsync */ par->regs.cr04 = (u8) ((xres + var->right_margin) >> 3); par->regs.cr05 |= (u8) (((xres + var->right_margin + var->hsync_len) >> 3) & 0x1F); /* Vertical */ /* vtotal */ n = yres + var->lower_margin + var->vsync_len + var->upper_margin - 2; par->regs.cr06 = (u8) (n & 0xFF); par->regs.cr30 = (u8) ((n >> 8) & 0x0F); /* vsync */ n = yres + var->lower_margin; par->regs.cr10 = (u8) (n & 0xFF); par->regs.cr32 = (u8) ((n >> 8) & 0x0F); par->regs.cr11 = i810_readb(CR11, mmio) & ~0x0F; par->regs.cr11 |= (u8) ((yres + var->lower_margin + var->vsync_len) & 0x0F); /* yres */ n = yres - 1; par->regs.cr12 = (u8) (n & 0xFF); par->regs.cr31 = (u8) ((n >> 8) & 0x0F); /* vblank */ blank_e = yres + var->lower_margin + var->vsync_len + var->upper_margin; blank_e--; blank_s = blank_e - 127; if (blank_s < yres) blank_s = yres; par->regs.cr15 = (u8) (blank_s & 0xFF); par->regs.cr33 = (u8) ((blank_s >> 8) & 0x0F); par->regs.cr16 = (u8) (blank_e & 0xFF); par->regs.cr09 = 0; /* sync polarity */ if (!(var->sync & FB_SYNC_HOR_HIGH_ACT)) msr |= 1 << 6; if (!(var->sync & FB_SYNC_VERT_HIGH_ACT)) msr |= 1 << 7; par->regs.msr = msr; /* interlace */ if (var->vmode & FB_VMODE_INTERLACED) par->interlace = (1 << 7) | ((u8) (var->yres >> 4)); 
else par->interlace = 0; if (var->vmode & FB_VMODE_DOUBLE) par->regs.cr09 |= 1 << 7; /* overlay */ par->ovract = ((var->xres + var->right_margin + var->hsync_len + var->left_margin - 32) | ((var->xres - 32) << 16)); } void i810fb_fill_var_timings(struct fb_var_screeninfo *var) { } /** * i810_get_watermark - gets watermark * @var: pointer to fb_var_screeninfo * @par: pointer to i810fb_par structure * * DESCRIPTION: * Gets the required watermark based on * pixelclock and RAMBUS frequency. * * RETURNS: * watermark */ u32 i810_get_watermark(const struct fb_var_screeninfo *var, struct i810fb_par *par) { struct wm_info *wmark = NULL; u32 i, size = 0, pixclock, wm_best = 0, min, diff; if (par->mem_freq == 100) { switch (var->bits_per_pixel) { case 8: wmark = i810_wm_8_100; size = ARRAY_SIZE(i810_wm_8_100); break; case 16: wmark = i810_wm_16_100; size = ARRAY_SIZE(i810_wm_16_100); break; case 24: case 32: wmark = i810_wm_24_100; size = ARRAY_SIZE(i810_wm_24_100); } } else { switch(var->bits_per_pixel) { case 8: wmark = i810_wm_8_133; size = ARRAY_SIZE(i810_wm_8_133); break; case 16: wmark = i810_wm_16_133; size = ARRAY_SIZE(i810_wm_16_133); break; case 24: case 32: wmark = i810_wm_24_133; size = ARRAY_SIZE(i810_wm_24_133); } } pixclock = 1000000/var->pixclock; min = ~0; for (i = 0; i < size; i++) { if (pixclock <= wmark[i].freq) diff = wmark[i].freq - pixclock; else diff = pixclock - wmark[i].freq; if (diff < min) { wm_best = wmark[i].wm; min = diff; } } return wm_best; }
gpl-2.0
cuviper/linux-uprobes
drivers/video/sm501fb.c
171
54733
/* linux/drivers/video/sm501fb.c
 *
 * Copyright (c) 2006 Simtec Electronics
 *	Vincent Sanders <vince@simtec.co.uk>
 *	Ben Dooks <ben@simtec.co.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Framebuffer driver for the Silicon Motion SM501
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/io.h>

#include <asm/uaccess.h>
#include <asm/div64.h>

#ifdef CONFIG_PM
#include <linux/pm.h>
#endif

#include <linux/sm501.h>
#include <linux/sm501-regs.h>

#include "edid.h"

/* default video mode string and colour depth used at probe time */
static char *fb_mode = "640x480-16@60";
static unsigned long default_bpp = 16;

/* fallback mode, used when the mode string cannot be matched */
static struct fb_videomode __devinitdata sm501_default_mode = {
	.refresh	= 60,
	.xres		= 640,
	.yres		= 480,
	.pixclock	= 20833,
	.left_margin	= 142,
	.right_margin	= 13,
	.upper_margin	= 21,
	.lower_margin	= 1,
	.hsync_len	= 69,
	.vsync_len	= 3,
	.sync		= FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
	.vmode		= FB_VMODE_NONINTERLACED
};

#define NR_PALETTE	256

/* index into sm501fb_info.fb[] for each display head */
enum sm501_controller {
	HEAD_CRT	= 0,
	HEAD_PANEL	= 1,
};

/* SM501 memory address.
 *
 * This structure is used to track memory usage within the SM501 framebuffer
 * allocation. The sm_addr field is stored as an offset as it is often used
 * against both the physical and mapped addresses.
 */
struct sm501_mem {
	unsigned long	 size;		/* length of the allocation, in bytes */
	unsigned long	 sm_addr;	/* offset from base of sm501 fb. */
	void __iomem	*k_addr;	/* kernel mapping of the same region */
};

/* private data that is shared between all framebuffers */
struct sm501fb_info {
	struct device		*dev;
	struct fb_info		*fb[2];		/* fb info for both heads */
	struct resource		*fbmem_res;	/* framebuffer resource */
	struct resource		*regs_res;	/* registers resource */
	struct resource		*regs2d_res;	/* 2d registers resource */
	struct sm501_platdata_fb *pdata;	/* our platform data */

	unsigned long		 pm_crt_ctrl;	/* pm: crt ctrl save */

	int			 irq;
	int			 swap_endian;	/* set to swap rgb=>bgr */
	void __iomem		*regs;		/* remapped registers */
	void __iomem		*regs2d;	/* 2d remapped registers */
	void __iomem		*fbmem;		/* remapped framebuffer */
	size_t			 fbmem_len;	/* length of remapped region */

	u8 *edid_data;				/* EDID block, if read */
};

/* per-framebuffer private data */
struct sm501fb_par {
	u32			 pseudo_palette[16];	/* truecolour palette */

	enum sm501_controller	 head;		/* which head this fb drives */
	struct sm501_mem	 cursor;	/* cursor image allocation */
	struct sm501_mem	 screen;	/* visible framebuffer memory */
	struct fb_ops		 ops;

	void			*store_fb;	/* pm: saved fb contents */
	void			*store_cursor;	/* pm: saved cursor contents */
	void __iomem		*cursor_regs;
	struct sm501fb_info	*info;		/* back-pointer to shared state */
};

/* Helper functions */

/* total horizontal scan length: active pixels plus margins and sync */
static inline int h_total(struct fb_var_screeninfo *var)
{
	return var->xres + var->left_margin +
		var->right_margin + var->hsync_len;
}

/* total vertical scan length: active lines plus margins and sync */
static inline int v_total(struct fb_var_screeninfo *var)
{
	return var->yres + var->upper_margin +
		var->lower_margin + var->vsync_len;
}

/* sm501fb_sync_regs()
 *
 * This call is mainly for PCI bus systems where we need to
 * ensure that any writes to the bus are completed before the
 * next phase, or after completing a function.
*/

static inline void sm501fb_sync_regs(struct sm501fb_info *info)
{
	/* the read back forces any posted writes to complete */
	smc501_readl(info->regs);
}

/* sm501_alloc_mem
 *
 * This is an attempt to lay out memory for the two framebuffers and
 * everything else
 *
 * |fbmem_res->start					       fbmem_res->end|
 * |									     |
 * |fb[0].fix.smem_start    |	      |fb[1].fix.smem_start    |     2K	     |
 * |-> fb[0].fix.smem_len <-| spare   |-> fb[1].fix.smem_len <-|-> cursors <-|
 *
 * The "spare" space is for the 2d engine data
 * the fixed is space for the cursors (2x1Kbyte)
 *
 * we need to allocate memory for the 2D acceleration engine
 * command list and the data for the engine to deal with.
 *
 * - all allocations must be 128bit aligned
 * - cursors are 64x64x2 bits (1Kbyte)
 *
 */

#define SM501_MEMF_CURSOR		(1)
#define SM501_MEMF_PANEL		(2)
#define SM501_MEMF_CRT			(4)
#define SM501_MEMF_ACCEL		(8)

/* Allocate a region of SM501 framebuffer memory for the purpose given
 * in @why (one of the SM501_MEMF_* values above); on success fills in
 * @mem with the size, offset and kernel address of the region.
 *
 * @smem_len is the length of the existing CRT allocation, used when
 * checking for overlap with panel/accelerator regions.
 *
 * Returns 0 on success, -ENOMEM when the request cannot fit, or
 * -EINVAL for an unknown @why value.
 */
static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
			   unsigned int why, size_t size, u32 smem_len)
{
	struct sm501fb_par *par;
	struct fb_info *fbi;
	unsigned int ptr;
	unsigned int end;

	switch (why) {
	case SM501_MEMF_CURSOR:
		/* cursors live at the very top of framebuffer memory */
		ptr = inf->fbmem_len - size;
		inf->fbmem_len = ptr;	/* adjust available memory. */
		break;

	case SM501_MEMF_PANEL:
		if (size > inf->fbmem_len)
			return -ENOMEM;

		/* panel memory is taken from the top of what remains */
		ptr = inf->fbmem_len - size;
		fbi = inf->fb[HEAD_CRT];

		/* round down, some programs such as directfb do not draw
		 * 0,0 correctly unless the start is aligned to a page start.
		 */

		if (ptr > 0)
			ptr &= ~(PAGE_SIZE - 1);

		/* would overlap the CRT allocation at the bottom */
		if (fbi && ptr < smem_len)
			return -ENOMEM;

		break;

	case SM501_MEMF_CRT:
		/* CRT memory always starts at the bottom */
		ptr = 0;

		/* check to see if we have panel memory allocated
		 * which would put an limit on available memory. */

		fbi = inf->fb[HEAD_PANEL];
		if (fbi) {
			par = fbi->par;
			/* only limit by the panel region if it is mapped */
			end = par->screen.k_addr ? par->screen.sm_addr :
				inf->fbmem_len;
		} else
			end = inf->fbmem_len;

		if ((ptr + size) > end)
			return -ENOMEM;

		break;

	case SM501_MEMF_ACCEL:
		/* 2d engine data sits in the "spare" gap between the CRT
		 * allocation (if any) and the panel allocation (if any) */
		fbi = inf->fb[HEAD_CRT];
		ptr = fbi ? smem_len : 0;

		fbi = inf->fb[HEAD_PANEL];
		if (fbi) {
			par = fbi->par;
			end = par->screen.sm_addr;
		} else
			end = inf->fbmem_len;

		if ((ptr + size) > end)
			return -ENOMEM;
		break;

	default:
		return -EINVAL;
	}

	mem->size    = size;
	mem->sm_addr = ptr;
	mem->k_addr  = inf->fbmem + ptr;

	dev_dbg(inf->dev, "%s: result %08lx, %p - %u, %zd\n",
		__func__, mem->sm_addr, mem->k_addr, why, size);

	return 0;
}

/* sm501fb_ps_to_hz
 *
 * Converts a period in picoseconds to Hz.
 *
 * Note, we try to keep this in Hz to minimise rounding with
 * the limited PLL settings on the SM501.
 */

static unsigned long sm501fb_ps_to_hz(unsigned long psvalue)
{
	unsigned long long numerator=1000000000000ULL;

	/* 10^12 / picosecond period gives frequency in Hz */
	do_div(numerator, psvalue);
	return (unsigned long)numerator;
}

/* sm501fb_hz_to_ps is identical to the opposite transform */

#define sm501fb_hz_to_ps(x) sm501fb_ps_to_hz(x)

/* sm501fb_setup_gamma
 *
 * Programs a linear 1.0 gamma ramp in case the gamma
 * correction is enabled without programming anything else.
*/ static void sm501fb_setup_gamma(struct sm501fb_info *fbi, unsigned long palette) { unsigned long value = 0; int offset; /* set gamma values */ for (offset = 0; offset < 256 * 4; offset += 4) { smc501_writel(value, fbi->regs + palette + offset); value += 0x010101; /* Advance RGB by 1,1,1.*/ } } /* sm501fb_check_var * * check common variables for both panel and crt */ static int sm501fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct sm501fb_par *par = info->par; struct sm501fb_info *sm = par->info; unsigned long tmp; /* check we can fit these values into the registers */ if (var->hsync_len > 255 || var->vsync_len > 63) return -EINVAL; /* hdisplay end and hsync start */ if ((var->xres + var->right_margin) > 4096) return -EINVAL; /* vdisplay end and vsync start */ if ((var->yres + var->lower_margin) > 2048) return -EINVAL; /* hard limits of device */ if (h_total(var) > 4096 || v_total(var) > 2048) return -EINVAL; /* check our line length is going to be 128 bit aligned */ tmp = (var->xres * var->bits_per_pixel) / 8; if ((tmp & 15) != 0) return -EINVAL; /* check the virtual size */ if (var->xres_virtual > 4096 || var->yres_virtual > 2048) return -EINVAL; /* can cope with 8,16 or 32bpp */ if (var->bits_per_pixel <= 8) var->bits_per_pixel = 8; else if (var->bits_per_pixel <= 16) var->bits_per_pixel = 16; else if (var->bits_per_pixel == 24) var->bits_per_pixel = 32; /* set r/g/b positions and validate bpp */ switch(var->bits_per_pixel) { case 8: var->red.length = var->bits_per_pixel; var->red.offset = 0; var->green.length = var->bits_per_pixel; var->green.offset = 0; var->blue.length = var->bits_per_pixel; var->blue.offset = 0; var->transp.length = 0; var->transp.offset = 0; break; case 16: if (sm->pdata->flags & SM501_FBPD_SWAP_FB_ENDIAN) { var->blue.offset = 11; var->green.offset = 5; var->red.offset = 0; } else { var->red.offset = 11; var->green.offset = 5; var->blue.offset = 0; } var->transp.offset = 0; var->red.length = 5; var->green.length 
= 6; var->blue.length = 5; var->transp.length = 0; break; case 32: if (sm->pdata->flags & SM501_FBPD_SWAP_FB_ENDIAN) { var->transp.offset = 0; var->red.offset = 8; var->green.offset = 16; var->blue.offset = 24; } else { var->transp.offset = 24; var->red.offset = 16; var->green.offset = 8; var->blue.offset = 0; } var->red.length = 8; var->green.length = 8; var->blue.length = 8; var->transp.length = 0; break; default: return -EINVAL; } return 0; } /* * sm501fb_check_var_crt(): * * check the parameters for the CRT head, and either bring them * back into range, or return -EINVAL. */ static int sm501fb_check_var_crt(struct fb_var_screeninfo *var, struct fb_info *info) { return sm501fb_check_var(var, info); } /* sm501fb_check_var_pnl(): * * check the parameters for the CRT head, and either bring them * back into range, or return -EINVAL. */ static int sm501fb_check_var_pnl(struct fb_var_screeninfo *var, struct fb_info *info) { return sm501fb_check_var(var, info); } /* sm501fb_set_par_common * * set common registers for framebuffers */ static int sm501fb_set_par_common(struct fb_info *info, struct fb_var_screeninfo *var) { struct sm501fb_par *par = info->par; struct sm501fb_info *fbi = par->info; unsigned long pixclock; /* pixelclock in Hz */ unsigned long sm501pixclock; /* pixelclock the 501 can achieve in Hz */ unsigned int mem_type; unsigned int clock_type; unsigned int head_addr; unsigned int smem_len; dev_dbg(fbi->dev, "%s: %dx%d, bpp = %d, virtual %dx%d\n", __func__, var->xres, var->yres, var->bits_per_pixel, var->xres_virtual, var->yres_virtual); switch (par->head) { case HEAD_CRT: mem_type = SM501_MEMF_CRT; clock_type = SM501_CLOCK_V2XCLK; head_addr = SM501_DC_CRT_FB_ADDR; break; case HEAD_PANEL: mem_type = SM501_MEMF_PANEL; clock_type = SM501_CLOCK_P2XCLK; head_addr = SM501_DC_PANEL_FB_ADDR; break; default: mem_type = 0; /* stop compiler warnings */ head_addr = 0; clock_type = 0; } switch (var->bits_per_pixel) { case 8: info->fix.visual = FB_VISUAL_PSEUDOCOLOR; 
break; case 16: info->fix.visual = FB_VISUAL_TRUECOLOR; break; case 32: info->fix.visual = FB_VISUAL_TRUECOLOR; break; } /* allocate fb memory within 501 */ info->fix.line_length = (var->xres_virtual * var->bits_per_pixel)/8; smem_len = info->fix.line_length * var->yres_virtual; dev_dbg(fbi->dev, "%s: line length = %u\n", __func__, info->fix.line_length); if (sm501_alloc_mem(fbi, &par->screen, mem_type, smem_len, smem_len)) { dev_err(fbi->dev, "no memory available\n"); return -ENOMEM; } mutex_lock(&info->mm_lock); info->fix.smem_start = fbi->fbmem_res->start + par->screen.sm_addr; info->fix.smem_len = smem_len; mutex_unlock(&info->mm_lock); info->screen_base = fbi->fbmem + par->screen.sm_addr; info->screen_size = info->fix.smem_len; /* set start of framebuffer to the screen */ smc501_writel(par->screen.sm_addr | SM501_ADDR_FLIP, fbi->regs + head_addr); /* program CRT clock */ pixclock = sm501fb_ps_to_hz(var->pixclock); sm501pixclock = sm501_set_clock(fbi->dev->parent, clock_type, pixclock); /* update fb layer with actual clock used */ var->pixclock = sm501fb_hz_to_ps(sm501pixclock); dev_dbg(fbi->dev, "%s: pixclock(ps) = %u, pixclock(Hz) = %lu, " "sm501pixclock = %lu, error = %ld%%\n", __func__, var->pixclock, pixclock, sm501pixclock, ((pixclock - sm501pixclock)*100)/pixclock); return 0; } /* sm501fb_set_par_geometry * * set the geometry registers for specified framebuffer. */ static void sm501fb_set_par_geometry(struct fb_info *info, struct fb_var_screeninfo *var) { struct sm501fb_par *par = info->par; struct sm501fb_info *fbi = par->info; void __iomem *base = fbi->regs; unsigned long reg; if (par->head == HEAD_CRT) base += SM501_DC_CRT_H_TOT; else base += SM501_DC_PANEL_H_TOT; /* set framebuffer width and display width */ reg = info->fix.line_length; reg |= ((var->xres * var->bits_per_pixel)/8) << 16; smc501_writel(reg, fbi->regs + (par->head == HEAD_CRT ? 
SM501_DC_CRT_FB_OFFSET : SM501_DC_PANEL_FB_OFFSET)); /* program horizontal total */ reg = (h_total(var) - 1) << 16; reg |= (var->xres - 1); smc501_writel(reg, base + SM501_OFF_DC_H_TOT); /* program horizontal sync */ reg = var->hsync_len << 16; reg |= var->xres + var->right_margin - 1; smc501_writel(reg, base + SM501_OFF_DC_H_SYNC); /* program vertical total */ reg = (v_total(var) - 1) << 16; reg |= (var->yres - 1); smc501_writel(reg, base + SM501_OFF_DC_V_TOT); /* program vertical sync */ reg = var->vsync_len << 16; reg |= var->yres + var->lower_margin - 1; smc501_writel(reg, base + SM501_OFF_DC_V_SYNC); } /* sm501fb_pan_crt * * pan the CRT display output within an virtual framebuffer */ static int sm501fb_pan_crt(struct fb_var_screeninfo *var, struct fb_info *info) { struct sm501fb_par *par = info->par; struct sm501fb_info *fbi = par->info; unsigned int bytes_pixel = info->var.bits_per_pixel / 8; unsigned long reg; unsigned long xoffs; xoffs = var->xoffset * bytes_pixel; reg = smc501_readl(fbi->regs + SM501_DC_CRT_CONTROL); reg &= ~SM501_DC_CRT_CONTROL_PIXEL_MASK; reg |= ((xoffs & 15) / bytes_pixel) << 4; smc501_writel(reg, fbi->regs + SM501_DC_CRT_CONTROL); reg = (par->screen.sm_addr + xoffs + var->yoffset * info->fix.line_length); smc501_writel(reg | SM501_ADDR_FLIP, fbi->regs + SM501_DC_CRT_FB_ADDR); sm501fb_sync_regs(fbi); return 0; } /* sm501fb_pan_pnl * * pan the panel display output within an virtual framebuffer */ static int sm501fb_pan_pnl(struct fb_var_screeninfo *var, struct fb_info *info) { struct sm501fb_par *par = info->par; struct sm501fb_info *fbi = par->info; unsigned long reg; reg = var->xoffset | (info->var.xres_virtual << 16); smc501_writel(reg, fbi->regs + SM501_DC_PANEL_FB_WIDTH); reg = var->yoffset | (info->var.yres_virtual << 16); smc501_writel(reg, fbi->regs + SM501_DC_PANEL_FB_HEIGHT); sm501fb_sync_regs(fbi); return 0; } /* sm501fb_set_par_crt * * Set the CRT video mode from the fb_info structure */ static int sm501fb_set_par_crt(struct 
fb_info *info) { struct sm501fb_par *par = info->par; struct sm501fb_info *fbi = par->info; struct fb_var_screeninfo *var = &info->var; unsigned long control; /* control register */ int ret; /* activate new configuration */ dev_dbg(fbi->dev, "%s(%p)\n", __func__, info); /* enable CRT DAC - note 0 is on!*/ sm501_misc_control(fbi->dev->parent, 0, SM501_MISC_DAC_POWER); control = smc501_readl(fbi->regs + SM501_DC_CRT_CONTROL); control &= (SM501_DC_CRT_CONTROL_PIXEL_MASK | SM501_DC_CRT_CONTROL_GAMMA | SM501_DC_CRT_CONTROL_BLANK | SM501_DC_CRT_CONTROL_SEL | SM501_DC_CRT_CONTROL_CP | SM501_DC_CRT_CONTROL_TVP); /* set the sync polarities before we check data source */ if ((var->sync & FB_SYNC_HOR_HIGH_ACT) == 0) control |= SM501_DC_CRT_CONTROL_HSP; if ((var->sync & FB_SYNC_VERT_HIGH_ACT) == 0) control |= SM501_DC_CRT_CONTROL_VSP; if ((control & SM501_DC_CRT_CONTROL_SEL) == 0) { /* the head is displaying panel data... */ sm501_alloc_mem(fbi, &par->screen, SM501_MEMF_CRT, 0, info->fix.smem_len); goto out_update; } ret = sm501fb_set_par_common(info, var); if (ret) { dev_err(fbi->dev, "failed to set common parameters\n"); return ret; } sm501fb_pan_crt(var, info); sm501fb_set_par_geometry(info, var); control |= SM501_FIFO_3; /* fill if >3 free slots */ switch(var->bits_per_pixel) { case 8: control |= SM501_DC_CRT_CONTROL_8BPP; break; case 16: control |= SM501_DC_CRT_CONTROL_16BPP; sm501fb_setup_gamma(fbi, SM501_DC_CRT_PALETTE); break; case 32: control |= SM501_DC_CRT_CONTROL_32BPP; sm501fb_setup_gamma(fbi, SM501_DC_CRT_PALETTE); break; default: BUG(); } control |= SM501_DC_CRT_CONTROL_SEL; /* CRT displays CRT data */ control |= SM501_DC_CRT_CONTROL_TE; /* enable CRT timing */ control |= SM501_DC_CRT_CONTROL_ENABLE; /* enable CRT plane */ out_update: dev_dbg(fbi->dev, "new control is %08lx\n", control); smc501_writel(control, fbi->regs + SM501_DC_CRT_CONTROL); sm501fb_sync_regs(fbi); return 0; } static void sm501fb_panel_power(struct sm501fb_info *fbi, int to) { unsigned long 
control; void __iomem *ctrl_reg = fbi->regs + SM501_DC_PANEL_CONTROL; struct sm501_platdata_fbsub *pd = fbi->pdata->fb_pnl; control = smc501_readl(ctrl_reg); if (to && (control & SM501_DC_PANEL_CONTROL_VDD) == 0) { /* enable panel power */ control |= SM501_DC_PANEL_CONTROL_VDD; /* FPVDDEN */ smc501_writel(control, ctrl_reg); sm501fb_sync_regs(fbi); mdelay(10); control |= SM501_DC_PANEL_CONTROL_DATA; /* DATA */ smc501_writel(control, ctrl_reg); sm501fb_sync_regs(fbi); mdelay(10); /* VBIASEN */ if (!(pd->flags & SM501FB_FLAG_PANEL_NO_VBIASEN)) { if (pd->flags & SM501FB_FLAG_PANEL_INV_VBIASEN) control &= ~SM501_DC_PANEL_CONTROL_BIAS; else control |= SM501_DC_PANEL_CONTROL_BIAS; smc501_writel(control, ctrl_reg); sm501fb_sync_regs(fbi); mdelay(10); } if (!(pd->flags & SM501FB_FLAG_PANEL_NO_FPEN)) { if (pd->flags & SM501FB_FLAG_PANEL_INV_FPEN) control &= ~SM501_DC_PANEL_CONTROL_FPEN; else control |= SM501_DC_PANEL_CONTROL_FPEN; smc501_writel(control, ctrl_reg); sm501fb_sync_regs(fbi); mdelay(10); } } else if (!to && (control & SM501_DC_PANEL_CONTROL_VDD) != 0) { /* disable panel power */ if (!(pd->flags & SM501FB_FLAG_PANEL_NO_FPEN)) { if (pd->flags & SM501FB_FLAG_PANEL_INV_FPEN) control |= SM501_DC_PANEL_CONTROL_FPEN; else control &= ~SM501_DC_PANEL_CONTROL_FPEN; smc501_writel(control, ctrl_reg); sm501fb_sync_regs(fbi); mdelay(10); } if (!(pd->flags & SM501FB_FLAG_PANEL_NO_VBIASEN)) { if (pd->flags & SM501FB_FLAG_PANEL_INV_VBIASEN) control |= SM501_DC_PANEL_CONTROL_BIAS; else control &= ~SM501_DC_PANEL_CONTROL_BIAS; smc501_writel(control, ctrl_reg); sm501fb_sync_regs(fbi); mdelay(10); } control &= ~SM501_DC_PANEL_CONTROL_DATA; smc501_writel(control, ctrl_reg); sm501fb_sync_regs(fbi); mdelay(10); control &= ~SM501_DC_PANEL_CONTROL_VDD; smc501_writel(control, ctrl_reg); sm501fb_sync_regs(fbi); mdelay(10); } sm501fb_sync_regs(fbi); } /* sm501fb_set_par_pnl * * Set the panel video mode from the fb_info structure */ static int sm501fb_set_par_pnl(struct fb_info *info) { 
struct sm501fb_par *par = info->par; struct sm501fb_info *fbi = par->info; struct fb_var_screeninfo *var = &info->var; unsigned long control; unsigned long reg; int ret; dev_dbg(fbi->dev, "%s(%p)\n", __func__, info); /* activate this new configuration */ ret = sm501fb_set_par_common(info, var); if (ret) return ret; sm501fb_pan_pnl(var, info); sm501fb_set_par_geometry(info, var); /* update control register */ control = smc501_readl(fbi->regs + SM501_DC_PANEL_CONTROL); control &= (SM501_DC_PANEL_CONTROL_GAMMA | SM501_DC_PANEL_CONTROL_VDD | SM501_DC_PANEL_CONTROL_DATA | SM501_DC_PANEL_CONTROL_BIAS | SM501_DC_PANEL_CONTROL_FPEN | SM501_DC_PANEL_CONTROL_CP | SM501_DC_PANEL_CONTROL_CK | SM501_DC_PANEL_CONTROL_HP | SM501_DC_PANEL_CONTROL_VP | SM501_DC_PANEL_CONTROL_HPD | SM501_DC_PANEL_CONTROL_VPD); control |= SM501_FIFO_3; /* fill if >3 free slots */ switch(var->bits_per_pixel) { case 8: control |= SM501_DC_PANEL_CONTROL_8BPP; break; case 16: control |= SM501_DC_PANEL_CONTROL_16BPP; sm501fb_setup_gamma(fbi, SM501_DC_PANEL_PALETTE); break; case 32: control |= SM501_DC_PANEL_CONTROL_32BPP; sm501fb_setup_gamma(fbi, SM501_DC_PANEL_PALETTE); break; default: BUG(); } smc501_writel(0x0, fbi->regs + SM501_DC_PANEL_PANNING_CONTROL); /* panel plane top left and bottom right location */ smc501_writel(0x00, fbi->regs + SM501_DC_PANEL_TL_LOC); reg = var->xres - 1; reg |= (var->yres - 1) << 16; smc501_writel(reg, fbi->regs + SM501_DC_PANEL_BR_LOC); /* program panel control register */ control |= SM501_DC_PANEL_CONTROL_TE; /* enable PANEL timing */ control |= SM501_DC_PANEL_CONTROL_EN; /* enable PANEL gfx plane */ if ((var->sync & FB_SYNC_HOR_HIGH_ACT) == 0) control |= SM501_DC_PANEL_CONTROL_HSP; if ((var->sync & FB_SYNC_VERT_HIGH_ACT) == 0) control |= SM501_DC_PANEL_CONTROL_VSP; smc501_writel(control, fbi->regs + SM501_DC_PANEL_CONTROL); sm501fb_sync_regs(fbi); /* ensure the panel interface is not tristated at this point */ sm501_modify_reg(fbi->dev->parent, SM501_SYSTEM_CONTROL, 0, 
SM501_SYSCTRL_PANEL_TRISTATE); /* power the panel up */ sm501fb_panel_power(fbi, 1); return 0; } /* chan_to_field * * convert a colour value into a field position * * from pxafb.c */ static inline unsigned int chan_to_field(unsigned int chan, struct fb_bitfield *bf) { chan &= 0xffff; chan >>= 16 - bf->length; return chan << bf->offset; } /* sm501fb_setcolreg * * set the colour mapping for modes that support palettised data */ static int sm501fb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info) { struct sm501fb_par *par = info->par; struct sm501fb_info *fbi = par->info; void __iomem *base = fbi->regs; unsigned int val; if (par->head == HEAD_CRT) base += SM501_DC_CRT_PALETTE; else base += SM501_DC_PANEL_PALETTE; switch (info->fix.visual) { case FB_VISUAL_TRUECOLOR: /* true-colour, use pseuo-palette */ if (regno < 16) { u32 *pal = par->pseudo_palette; val = chan_to_field(red, &info->var.red); val |= chan_to_field(green, &info->var.green); val |= chan_to_field(blue, &info->var.blue); pal[regno] = val; } break; case FB_VISUAL_PSEUDOCOLOR: if (regno < 256) { val = (red >> 8) << 16; val |= (green >> 8) << 8; val |= blue >> 8; smc501_writel(val, base + (regno * 4)); } break; default: return 1; /* unknown type */ } return 0; } /* sm501fb_blank_pnl * * Blank or un-blank the panel interface */ static int sm501fb_blank_pnl(int blank_mode, struct fb_info *info) { struct sm501fb_par *par = info->par; struct sm501fb_info *fbi = par->info; dev_dbg(fbi->dev, "%s(mode=%d, %p)\n", __func__, blank_mode, info); switch (blank_mode) { case FB_BLANK_POWERDOWN: sm501fb_panel_power(fbi, 0); break; case FB_BLANK_UNBLANK: sm501fb_panel_power(fbi, 1); break; case FB_BLANK_NORMAL: case FB_BLANK_VSYNC_SUSPEND: case FB_BLANK_HSYNC_SUSPEND: default: return 1; } return 0; } /* sm501fb_blank_crt * * Blank or un-blank the crt interface */ static int sm501fb_blank_crt(int blank_mode, struct fb_info *info) { struct sm501fb_par *par = 
info->par; struct sm501fb_info *fbi = par->info; unsigned long ctrl; dev_dbg(fbi->dev, "%s(mode=%d, %p)\n", __func__, blank_mode, info); ctrl = smc501_readl(fbi->regs + SM501_DC_CRT_CONTROL); switch (blank_mode) { case FB_BLANK_POWERDOWN: ctrl &= ~SM501_DC_CRT_CONTROL_ENABLE; sm501_misc_control(fbi->dev->parent, SM501_MISC_DAC_POWER, 0); case FB_BLANK_NORMAL: ctrl |= SM501_DC_CRT_CONTROL_BLANK; break; case FB_BLANK_UNBLANK: ctrl &= ~SM501_DC_CRT_CONTROL_BLANK; ctrl |= SM501_DC_CRT_CONTROL_ENABLE; sm501_misc_control(fbi->dev->parent, 0, SM501_MISC_DAC_POWER); break; case FB_BLANK_VSYNC_SUSPEND: case FB_BLANK_HSYNC_SUSPEND: default: return 1; } smc501_writel(ctrl, fbi->regs + SM501_DC_CRT_CONTROL); sm501fb_sync_regs(fbi); return 0; } /* sm501fb_cursor * * set or change the hardware cursor parameters */ static int sm501fb_cursor(struct fb_info *info, struct fb_cursor *cursor) { struct sm501fb_par *par = info->par; struct sm501fb_info *fbi = par->info; void __iomem *base = fbi->regs; unsigned long hwc_addr; unsigned long fg, bg; dev_dbg(fbi->dev, "%s(%p,%p)\n", __func__, info, cursor); if (par->head == HEAD_CRT) base += SM501_DC_CRT_HWC_BASE; else base += SM501_DC_PANEL_HWC_BASE; /* check not being asked to exceed capabilities */ if (cursor->image.width > 64) return -EINVAL; if (cursor->image.height > 64) return -EINVAL; if (cursor->image.depth > 1) return -EINVAL; hwc_addr = smc501_readl(base + SM501_OFF_HWC_ADDR); if (cursor->enable) smc501_writel(hwc_addr | SM501_HWC_EN, base + SM501_OFF_HWC_ADDR); else smc501_writel(hwc_addr & ~SM501_HWC_EN, base + SM501_OFF_HWC_ADDR); /* set data */ if (cursor->set & FB_CUR_SETPOS) { unsigned int x = cursor->image.dx; unsigned int y = cursor->image.dy; if (x >= 2048 || y >= 2048 ) return -EINVAL; dev_dbg(fbi->dev, "set position %d,%d\n", x, y); //y += cursor->image.height; smc501_writel(x | (y << 16), base + SM501_OFF_HWC_LOC); } if (cursor->set & FB_CUR_SETCMAP) { unsigned int bg_col = cursor->image.bg_color; unsigned int fg_col 
= cursor->image.fg_color; dev_dbg(fbi->dev, "%s: update cmap (%08x,%08x)\n", __func__, bg_col, fg_col); bg = ((info->cmap.red[bg_col] & 0xF8) << 8) | ((info->cmap.green[bg_col] & 0xFC) << 3) | ((info->cmap.blue[bg_col] & 0xF8) >> 3); fg = ((info->cmap.red[fg_col] & 0xF8) << 8) | ((info->cmap.green[fg_col] & 0xFC) << 3) | ((info->cmap.blue[fg_col] & 0xF8) >> 3); dev_dbg(fbi->dev, "fgcol %08lx, bgcol %08lx\n", fg, bg); smc501_writel(bg, base + SM501_OFF_HWC_COLOR_1_2); smc501_writel(fg, base + SM501_OFF_HWC_COLOR_3); } if (cursor->set & FB_CUR_SETSIZE || cursor->set & (FB_CUR_SETIMAGE | FB_CUR_SETSHAPE)) { /* SM501 cursor is a two bpp 64x64 bitmap this routine * clears it to transparent then combines the cursor * shape plane with the colour plane to set the * cursor */ int x, y; const unsigned char *pcol = cursor->image.data; const unsigned char *pmsk = cursor->mask; void __iomem *dst = par->cursor.k_addr; unsigned char dcol = 0; unsigned char dmsk = 0; unsigned int op; dev_dbg(fbi->dev, "%s: setting shape (%d,%d)\n", __func__, cursor->image.width, cursor->image.height); for (op = 0; op < (64*64*2)/8; op+=4) smc501_writel(0x0, dst + op); for (y = 0; y < cursor->image.height; y++) { for (x = 0; x < cursor->image.width; x++) { if ((x % 8) == 0) { dcol = *pcol++; dmsk = *pmsk++; } else { dcol >>= 1; dmsk >>= 1; } if (dmsk & 1) { op = (dcol & 1) ? 1 : 3; op <<= ((x % 4) * 2); op |= readb(dst + (x / 4)); writeb(op, dst + (x / 4)); } } dst += (64*2)/8; } } sm501fb_sync_regs(fbi); /* ensure cursor data flushed */ return 0; } /* sm501fb_crtsrc_show * * device attribute code to show where the crt output is sourced from */ static ssize_t sm501fb_crtsrc_show(struct device *dev, struct device_attribute *attr, char *buf) { struct sm501fb_info *info = dev_get_drvdata(dev); unsigned long ctrl; ctrl = smc501_readl(info->regs + SM501_DC_CRT_CONTROL); ctrl &= SM501_DC_CRT_CONTROL_SEL; return snprintf(buf, PAGE_SIZE, "%s\n", ctrl ? 
"crt" : "panel"); } /* sm501fb_crtsrc_show * * device attribute code to set where the crt output is sourced from */ static ssize_t sm501fb_crtsrc_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct sm501fb_info *info = dev_get_drvdata(dev); enum sm501_controller head; unsigned long ctrl; if (len < 1) return -EINVAL; if (strnicmp(buf, "crt", 3) == 0) head = HEAD_CRT; else if (strnicmp(buf, "panel", 5) == 0) head = HEAD_PANEL; else return -EINVAL; dev_info(dev, "setting crt source to head %d\n", head); ctrl = smc501_readl(info->regs + SM501_DC_CRT_CONTROL); if (head == HEAD_CRT) { ctrl |= SM501_DC_CRT_CONTROL_SEL; ctrl |= SM501_DC_CRT_CONTROL_ENABLE; ctrl |= SM501_DC_CRT_CONTROL_TE; } else { ctrl &= ~SM501_DC_CRT_CONTROL_SEL; ctrl &= ~SM501_DC_CRT_CONTROL_ENABLE; ctrl &= ~SM501_DC_CRT_CONTROL_TE; } smc501_writel(ctrl, info->regs + SM501_DC_CRT_CONTROL); sm501fb_sync_regs(info); return len; } /* Prepare the device_attr for registration with sysfs later */ static DEVICE_ATTR(crt_src, 0666, sm501fb_crtsrc_show, sm501fb_crtsrc_store); /* sm501fb_show_regs * * show the primary sm501 registers */ static int sm501fb_show_regs(struct sm501fb_info *info, char *ptr, unsigned int start, unsigned int len) { void __iomem *mem = info->regs; char *buf = ptr; unsigned int reg; for (reg = start; reg < (len + start); reg += 4) ptr += sprintf(ptr, "%08x = %08x\n", reg, smc501_readl(mem + reg)); return ptr - buf; } /* sm501fb_debug_show_crt * * show the crt control and cursor registers */ static ssize_t sm501fb_debug_show_crt(struct device *dev, struct device_attribute *attr, char *buf) { struct sm501fb_info *info = dev_get_drvdata(dev); char *ptr = buf; ptr += sm501fb_show_regs(info, ptr, SM501_DC_CRT_CONTROL, 0x40); ptr += sm501fb_show_regs(info, ptr, SM501_DC_CRT_HWC_BASE, 0x10); return ptr - buf; } static DEVICE_ATTR(fbregs_crt, 0444, sm501fb_debug_show_crt, NULL); /* sm501fb_debug_show_pnl * * show the panel control and cursor registers */ 
static ssize_t sm501fb_debug_show_pnl(struct device *dev, struct device_attribute *attr, char *buf) { struct sm501fb_info *info = dev_get_drvdata(dev); char *ptr = buf; ptr += sm501fb_show_regs(info, ptr, 0x0, 0x40); ptr += sm501fb_show_regs(info, ptr, SM501_DC_PANEL_HWC_BASE, 0x10); return ptr - buf; } static DEVICE_ATTR(fbregs_pnl, 0444, sm501fb_debug_show_pnl, NULL); /* acceleration operations */ static int sm501fb_sync(struct fb_info *info) { int count = 1000000; struct sm501fb_par *par = info->par; struct sm501fb_info *fbi = par->info; /* wait for the 2d engine to be ready */ while ((count > 0) && (smc501_readl(fbi->regs + SM501_SYSTEM_CONTROL) & SM501_SYSCTRL_2D_ENGINE_STATUS) != 0) count--; if (count <= 0) { dev_err(info->dev, "Timeout waiting for 2d engine sync\n"); return 1; } return 0; } static void sm501fb_copyarea(struct fb_info *info, const struct fb_copyarea *area) { struct sm501fb_par *par = info->par; struct sm501fb_info *fbi = par->info; int width = area->width; int height = area->height; int sx = area->sx; int sy = area->sy; int dx = area->dx; int dy = area->dy; unsigned long rtl = 0; /* source clip */ if ((sx >= info->var.xres_virtual) || (sy >= info->var.yres_virtual)) /* source Area not within virtual screen, skipping */ return; if ((sx + width) >= info->var.xres_virtual) width = info->var.xres_virtual - sx - 1; if ((sy + height) >= info->var.yres_virtual) height = info->var.yres_virtual - sy - 1; /* dest clip */ if ((dx >= info->var.xres_virtual) || (dy >= info->var.yres_virtual)) /* Destination Area not within virtual screen, skipping */ return; if ((dx + width) >= info->var.xres_virtual) width = info->var.xres_virtual - dx - 1; if ((dy + height) >= info->var.yres_virtual) height = info->var.yres_virtual - dy - 1; if ((sx < dx) || (sy < dy)) { rtl = 1 << 27; sx += width - 1; dx += width - 1; sy += height - 1; dy += height - 1; } if (sm501fb_sync(info)) return; /* set the base addresses */ smc501_writel(par->screen.sm_addr, fbi->regs2d + 
SM501_2D_SOURCE_BASE); smc501_writel(par->screen.sm_addr, fbi->regs2d + SM501_2D_DESTINATION_BASE); /* set the window width */ smc501_writel((info->var.xres << 16) | info->var.xres, fbi->regs2d + SM501_2D_WINDOW_WIDTH); /* set window stride */ smc501_writel((info->var.xres_virtual << 16) | info->var.xres_virtual, fbi->regs2d + SM501_2D_PITCH); /* set data format */ switch (info->var.bits_per_pixel) { case 8: smc501_writel(0, fbi->regs2d + SM501_2D_STRETCH); break; case 16: smc501_writel(0x00100000, fbi->regs2d + SM501_2D_STRETCH); break; case 32: smc501_writel(0x00200000, fbi->regs2d + SM501_2D_STRETCH); break; } /* 2d compare mask */ smc501_writel(0xffffffff, fbi->regs2d + SM501_2D_COLOR_COMPARE_MASK); /* 2d mask */ smc501_writel(0xffffffff, fbi->regs2d + SM501_2D_MASK); /* source and destination x y */ smc501_writel((sx << 16) | sy, fbi->regs2d + SM501_2D_SOURCE); smc501_writel((dx << 16) | dy, fbi->regs2d + SM501_2D_DESTINATION); /* w/h */ smc501_writel((width << 16) | height, fbi->regs2d + SM501_2D_DIMENSION); /* do area move */ smc501_writel(0x800000cc | rtl, fbi->regs2d + SM501_2D_CONTROL); } static void sm501fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct sm501fb_par *par = info->par; struct sm501fb_info *fbi = par->info; int width = rect->width, height = rect->height; if ((rect->dx >= info->var.xres_virtual) || (rect->dy >= info->var.yres_virtual)) /* Rectangle not within virtual screen, skipping */ return; if ((rect->dx + width) >= info->var.xres_virtual) width = info->var.xres_virtual - rect->dx - 1; if ((rect->dy + height) >= info->var.yres_virtual) height = info->var.yres_virtual - rect->dy - 1; if (sm501fb_sync(info)) return; /* set the base addresses */ smc501_writel(par->screen.sm_addr, fbi->regs2d + SM501_2D_SOURCE_BASE); smc501_writel(par->screen.sm_addr, fbi->regs2d + SM501_2D_DESTINATION_BASE); /* set the window width */ smc501_writel((info->var.xres << 16) | info->var.xres, fbi->regs2d + SM501_2D_WINDOW_WIDTH); /* set 
window stride */ smc501_writel((info->var.xres_virtual << 16) | info->var.xres_virtual, fbi->regs2d + SM501_2D_PITCH); /* set data format */ switch (info->var.bits_per_pixel) { case 8: smc501_writel(0, fbi->regs2d + SM501_2D_STRETCH); break; case 16: smc501_writel(0x00100000, fbi->regs2d + SM501_2D_STRETCH); break; case 32: smc501_writel(0x00200000, fbi->regs2d + SM501_2D_STRETCH); break; } /* 2d compare mask */ smc501_writel(0xffffffff, fbi->regs2d + SM501_2D_COLOR_COMPARE_MASK); /* 2d mask */ smc501_writel(0xffffffff, fbi->regs2d + SM501_2D_MASK); /* colour */ smc501_writel(rect->color, fbi->regs2d + SM501_2D_FOREGROUND); /* x y */ smc501_writel((rect->dx << 16) | rect->dy, fbi->regs2d + SM501_2D_DESTINATION); /* w/h */ smc501_writel((width << 16) | height, fbi->regs2d + SM501_2D_DIMENSION); /* do rectangle fill */ smc501_writel(0x800100cc, fbi->regs2d + SM501_2D_CONTROL); } static struct fb_ops sm501fb_ops_crt = { .owner = THIS_MODULE, .fb_check_var = sm501fb_check_var_crt, .fb_set_par = sm501fb_set_par_crt, .fb_blank = sm501fb_blank_crt, .fb_setcolreg = sm501fb_setcolreg, .fb_pan_display = sm501fb_pan_crt, .fb_cursor = sm501fb_cursor, .fb_fillrect = sm501fb_fillrect, .fb_copyarea = sm501fb_copyarea, .fb_imageblit = cfb_imageblit, .fb_sync = sm501fb_sync, }; static struct fb_ops sm501fb_ops_pnl = { .owner = THIS_MODULE, .fb_check_var = sm501fb_check_var_pnl, .fb_set_par = sm501fb_set_par_pnl, .fb_pan_display = sm501fb_pan_pnl, .fb_blank = sm501fb_blank_pnl, .fb_setcolreg = sm501fb_setcolreg, .fb_cursor = sm501fb_cursor, .fb_fillrect = sm501fb_fillrect, .fb_copyarea = sm501fb_copyarea, .fb_imageblit = cfb_imageblit, .fb_sync = sm501fb_sync, }; /* sm501_init_cursor * * initialise hw cursor parameters */ static int sm501_init_cursor(struct fb_info *fbi, unsigned int reg_base) { struct sm501fb_par *par; struct sm501fb_info *info; int ret; if (fbi == NULL) return 0; par = fbi->par; info = par->info; par->cursor_regs = info->regs + reg_base; ret = 
sm501_alloc_mem(info, &par->cursor, SM501_MEMF_CURSOR, 1024, fbi->fix.smem_len); if (ret < 0) return ret; /* initialise the colour registers */ smc501_writel(par->cursor.sm_addr, par->cursor_regs + SM501_OFF_HWC_ADDR); smc501_writel(0x00, par->cursor_regs + SM501_OFF_HWC_LOC); smc501_writel(0x00, par->cursor_regs + SM501_OFF_HWC_COLOR_1_2); smc501_writel(0x00, par->cursor_regs + SM501_OFF_HWC_COLOR_3); sm501fb_sync_regs(info); return 0; } /* sm501fb_info_start * * fills the par structure claiming resources and remapping etc. */ static int sm501fb_start(struct sm501fb_info *info, struct platform_device *pdev) { struct resource *res; struct device *dev = &pdev->dev; int k; int ret; info->irq = ret = platform_get_irq(pdev, 0); if (ret < 0) { /* we currently do not use the IRQ */ dev_warn(dev, "no irq for device\n"); } /* allocate, reserve and remap resources for display * controller registers */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(dev, "no resource definition for registers\n"); ret = -ENOENT; goto err_release; } info->regs_res = request_mem_region(res->start, resource_size(res), pdev->name); if (info->regs_res == NULL) { dev_err(dev, "cannot claim registers\n"); ret = -ENXIO; goto err_release; } info->regs = ioremap(res->start, resource_size(res)); if (info->regs == NULL) { dev_err(dev, "cannot remap registers\n"); ret = -ENXIO; goto err_regs_res; } /* allocate, reserve and remap resources for 2d * controller registers */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (res == NULL) { dev_err(dev, "no resource definition for 2d registers\n"); ret = -ENOENT; goto err_regs_map; } info->regs2d_res = request_mem_region(res->start, resource_size(res), pdev->name); if (info->regs2d_res == NULL) { dev_err(dev, "cannot claim registers\n"); ret = -ENXIO; goto err_regs_map; } info->regs2d = ioremap(res->start, resource_size(res)); if (info->regs2d == NULL) { dev_err(dev, "cannot remap registers\n"); ret = -ENXIO; goto 
err_regs2d_res; } /* allocate, reserve resources for framebuffer */ res = platform_get_resource(pdev, IORESOURCE_MEM, 2); if (res == NULL) { dev_err(dev, "no memory resource defined\n"); ret = -ENXIO; goto err_regs2d_map; } info->fbmem_res = request_mem_region(res->start, resource_size(res), pdev->name); if (info->fbmem_res == NULL) { dev_err(dev, "cannot claim framebuffer\n"); ret = -ENXIO; goto err_regs2d_map; } info->fbmem = ioremap(res->start, resource_size(res)); if (info->fbmem == NULL) { dev_err(dev, "cannot remap framebuffer\n"); goto err_mem_res; } info->fbmem_len = resource_size(res); /* clear framebuffer memory - avoids garbage data on unused fb */ memset(info->fbmem, 0, info->fbmem_len); /* clear palette ram - undefined at power on */ for (k = 0; k < (256 * 3); k++) smc501_writel(0, info->regs + SM501_DC_PANEL_PALETTE + (k * 4)); /* enable display controller */ sm501_unit_power(dev->parent, SM501_GATE_DISPLAY, 1); /* enable 2d controller */ sm501_unit_power(dev->parent, SM501_GATE_2D_ENGINE, 1); /* setup cursors */ sm501_init_cursor(info->fb[HEAD_CRT], SM501_DC_CRT_HWC_ADDR); sm501_init_cursor(info->fb[HEAD_PANEL], SM501_DC_PANEL_HWC_ADDR); return 0; /* everything is setup */ err_mem_res: release_mem_region(info->fbmem_res->start, resource_size(info->fbmem_res)); err_regs2d_map: iounmap(info->regs2d); err_regs2d_res: release_mem_region(info->regs2d_res->start, resource_size(info->regs2d_res)); err_regs_map: iounmap(info->regs); err_regs_res: release_mem_region(info->regs_res->start, resource_size(info->regs_res)); err_release: return ret; } static void sm501fb_stop(struct sm501fb_info *info) { /* disable display controller */ sm501_unit_power(info->dev->parent, SM501_GATE_DISPLAY, 0); iounmap(info->fbmem); release_mem_region(info->fbmem_res->start, resource_size(info->fbmem_res)); iounmap(info->regs2d); release_mem_region(info->regs2d_res->start, resource_size(info->regs2d_res)); iounmap(info->regs); release_mem_region(info->regs_res->start, 
resource_size(info->regs_res)); } static int __devinit sm501fb_init_fb(struct fb_info *fb, enum sm501_controller head, const char *fbname) { struct sm501_platdata_fbsub *pd; struct sm501fb_par *par = fb->par; struct sm501fb_info *info = par->info; unsigned long ctrl; unsigned int enable; int ret; switch (head) { case HEAD_CRT: pd = info->pdata->fb_crt; ctrl = smc501_readl(info->regs + SM501_DC_CRT_CONTROL); enable = (ctrl & SM501_DC_CRT_CONTROL_ENABLE) ? 1 : 0; /* ensure we set the correct source register */ if (info->pdata->fb_route != SM501_FB_CRT_PANEL) { ctrl |= SM501_DC_CRT_CONTROL_SEL; smc501_writel(ctrl, info->regs + SM501_DC_CRT_CONTROL); } break; case HEAD_PANEL: pd = info->pdata->fb_pnl; ctrl = smc501_readl(info->regs + SM501_DC_PANEL_CONTROL); enable = (ctrl & SM501_DC_PANEL_CONTROL_EN) ? 1 : 0; break; default: pd = NULL; /* stop compiler warnings */ ctrl = 0; enable = 0; BUG(); } dev_info(info->dev, "fb %s %sabled at start\n", fbname, enable ? "en" : "dis"); /* check to see if our routing allows this */ if (head == HEAD_CRT && info->pdata->fb_route == SM501_FB_CRT_PANEL) { ctrl &= ~SM501_DC_CRT_CONTROL_SEL; smc501_writel(ctrl, info->regs + SM501_DC_CRT_CONTROL); enable = 0; } strlcpy(fb->fix.id, fbname, sizeof(fb->fix.id)); memcpy(&par->ops, (head == HEAD_CRT) ? 
&sm501fb_ops_crt : &sm501fb_ops_pnl, sizeof(struct fb_ops)); /* update ops dependent on what we've been passed */ if ((pd->flags & SM501FB_FLAG_USE_HWCURSOR) == 0) par->ops.fb_cursor = NULL; fb->fbops = &par->ops; fb->flags = FBINFO_FLAG_DEFAULT | FBINFO_READS_FAST | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN; #if defined(CONFIG_OF) #ifdef __BIG_ENDIAN if (of_get_property(info->dev->parent->of_node, "little-endian", NULL)) fb->flags |= FBINFO_FOREIGN_ENDIAN; #else if (of_get_property(info->dev->parent->of_node, "big-endian", NULL)) fb->flags |= FBINFO_FOREIGN_ENDIAN; #endif #endif /* fixed data */ fb->fix.type = FB_TYPE_PACKED_PIXELS; fb->fix.type_aux = 0; fb->fix.xpanstep = 1; fb->fix.ypanstep = 1; fb->fix.ywrapstep = 0; fb->fix.accel = FB_ACCEL_NONE; /* screenmode */ fb->var.nonstd = 0; fb->var.activate = FB_ACTIVATE_NOW; fb->var.accel_flags = 0; fb->var.vmode = FB_VMODE_NONINTERLACED; fb->var.bits_per_pixel = 16; if (info->edid_data) { /* Now build modedb from EDID */ fb_edid_to_monspecs(info->edid_data, &fb->monspecs); fb_videomode_to_modelist(fb->monspecs.modedb, fb->monspecs.modedb_len, &fb->modelist); } if (enable && (pd->flags & SM501FB_FLAG_USE_INIT_MODE) && 0) { /* TODO read the mode from the current display */ } else { if (pd->def_mode) { dev_info(info->dev, "using supplied mode\n"); fb_videomode_to_var(&fb->var, pd->def_mode); fb->var.bits_per_pixel = pd->def_bpp ? 
pd->def_bpp : 8; fb->var.xres_virtual = fb->var.xres; fb->var.yres_virtual = fb->var.yres; } else { if (info->edid_data) { ret = fb_find_mode(&fb->var, fb, fb_mode, fb->monspecs.modedb, fb->monspecs.modedb_len, &sm501_default_mode, default_bpp); /* edid_data is no longer needed, free it */ kfree(info->edid_data); } else { ret = fb_find_mode(&fb->var, fb, NULL, NULL, 0, NULL, 8); } switch (ret) { case 1: dev_info(info->dev, "using mode specified in " "@mode\n"); break; case 2: dev_info(info->dev, "using mode specified in " "@mode with ignored refresh rate\n"); break; case 3: dev_info(info->dev, "using mode default " "mode\n"); break; case 4: dev_info(info->dev, "using mode from list\n"); break; default: dev_info(info->dev, "ret = %d\n", ret); dev_info(info->dev, "failed to find mode\n"); return -EINVAL; } } } /* initialise and set the palette */ if (fb_alloc_cmap(&fb->cmap, NR_PALETTE, 0)) { dev_err(info->dev, "failed to allocate cmap memory\n"); return -ENOMEM; } fb_set_cmap(&fb->cmap, fb); ret = (fb->fbops->fb_check_var)(&fb->var, fb); if (ret) dev_err(info->dev, "check_var() failed on initial setup?\n"); return 0; } /* default platform data if none is supplied (ie, PCI device) */ static struct sm501_platdata_fbsub sm501fb_pdata_crt = { .flags = (SM501FB_FLAG_USE_INIT_MODE | SM501FB_FLAG_USE_HWCURSOR | SM501FB_FLAG_USE_HWACCEL | SM501FB_FLAG_DISABLE_AT_EXIT), }; static struct sm501_platdata_fbsub sm501fb_pdata_pnl = { .flags = (SM501FB_FLAG_USE_INIT_MODE | SM501FB_FLAG_USE_HWCURSOR | SM501FB_FLAG_USE_HWACCEL | SM501FB_FLAG_DISABLE_AT_EXIT), }; static struct sm501_platdata_fb sm501fb_def_pdata = { .fb_route = SM501_FB_OWN, .fb_crt = &sm501fb_pdata_crt, .fb_pnl = &sm501fb_pdata_pnl, }; static char driver_name_crt[] = "sm501fb-crt"; static char driver_name_pnl[] = "sm501fb-panel"; static int __devinit sm501fb_probe_one(struct sm501fb_info *info, enum sm501_controller head) { unsigned char *name = (head == HEAD_CRT) ? 
"crt" : "panel"; struct sm501_platdata_fbsub *pd; struct sm501fb_par *par; struct fb_info *fbi; pd = (head == HEAD_CRT) ? info->pdata->fb_crt : info->pdata->fb_pnl; /* Do not initialise if we've not been given any platform data */ if (pd == NULL) { dev_info(info->dev, "no data for fb %s (disabled)\n", name); return 0; } fbi = framebuffer_alloc(sizeof(struct sm501fb_par), info->dev); if (fbi == NULL) { dev_err(info->dev, "cannot allocate %s framebuffer\n", name); return -ENOMEM; } par = fbi->par; par->info = info; par->head = head; fbi->pseudo_palette = &par->pseudo_palette; info->fb[head] = fbi; return 0; } /* Free up anything allocated by sm501fb_init_fb */ static void sm501_free_init_fb(struct sm501fb_info *info, enum sm501_controller head) { struct fb_info *fbi = info->fb[head]; fb_dealloc_cmap(&fbi->cmap); } static int __devinit sm501fb_start_one(struct sm501fb_info *info, enum sm501_controller head, const char *drvname) { struct fb_info *fbi = info->fb[head]; int ret; if (!fbi) return 0; mutex_init(&info->fb[head]->mm_lock); ret = sm501fb_init_fb(info->fb[head], head, drvname); if (ret) { dev_err(info->dev, "cannot initialise fb %s\n", drvname); return ret; } ret = register_framebuffer(info->fb[head]); if (ret) { dev_err(info->dev, "failed to register fb %s\n", drvname); sm501_free_init_fb(info, head); return ret; } dev_info(info->dev, "fb%d: %s frame buffer\n", fbi->node, fbi->fix.id); return 0; } static int __devinit sm501fb_probe(struct platform_device *pdev) { struct sm501fb_info *info; struct device *dev = &pdev->dev; int ret; /* allocate our framebuffers */ info = kzalloc(sizeof(struct sm501fb_info), GFP_KERNEL); if (!info) { dev_err(dev, "failed to allocate state\n"); return -ENOMEM; } info->dev = dev = &pdev->dev; platform_set_drvdata(pdev, info); if (dev->parent->platform_data) { struct sm501_platdata *pd = dev->parent->platform_data; info->pdata = pd->fb; } if (info->pdata == NULL) { int found = 0; #if defined(CONFIG_OF) struct device_node *np = 
pdev->dev.parent->of_node; const u8 *prop; const char *cp; int len; info->pdata = &sm501fb_def_pdata; if (np) { /* Get EDID */ cp = of_get_property(np, "mode", &len); if (cp) strcpy(fb_mode, cp); prop = of_get_property(np, "edid", &len); if (prop && len == EDID_LENGTH) { info->edid_data = kmemdup(prop, EDID_LENGTH, GFP_KERNEL); if (info->edid_data) found = 1; } } #endif if (!found) { dev_info(dev, "using default configuration data\n"); info->pdata = &sm501fb_def_pdata; } } /* probe for the presence of each panel */ ret = sm501fb_probe_one(info, HEAD_CRT); if (ret < 0) { dev_err(dev, "failed to probe CRT\n"); goto err_alloc; } ret = sm501fb_probe_one(info, HEAD_PANEL); if (ret < 0) { dev_err(dev, "failed to probe PANEL\n"); goto err_probed_crt; } if (info->fb[HEAD_PANEL] == NULL && info->fb[HEAD_CRT] == NULL) { dev_err(dev, "no framebuffers found\n"); goto err_alloc; } /* get the resources for both of the framebuffers */ ret = sm501fb_start(info, pdev); if (ret) { dev_err(dev, "cannot initialise SM501\n"); goto err_probed_panel; } ret = sm501fb_start_one(info, HEAD_CRT, driver_name_crt); if (ret) { dev_err(dev, "failed to start CRT\n"); goto err_started; } ret = sm501fb_start_one(info, HEAD_PANEL, driver_name_pnl); if (ret) { dev_err(dev, "failed to start Panel\n"); goto err_started_crt; } /* create device files */ ret = device_create_file(dev, &dev_attr_crt_src); if (ret) goto err_started_panel; ret = device_create_file(dev, &dev_attr_fbregs_pnl); if (ret) goto err_attached_crtsrc_file; ret = device_create_file(dev, &dev_attr_fbregs_crt); if (ret) goto err_attached_pnlregs_file; /* we registered, return ok */ return 0; err_attached_pnlregs_file: device_remove_file(dev, &dev_attr_fbregs_pnl); err_attached_crtsrc_file: device_remove_file(dev, &dev_attr_crt_src); err_started_panel: unregister_framebuffer(info->fb[HEAD_PANEL]); sm501_free_init_fb(info, HEAD_PANEL); err_started_crt: unregister_framebuffer(info->fb[HEAD_CRT]); sm501_free_init_fb(info, HEAD_CRT); 
err_started: sm501fb_stop(info); err_probed_panel: framebuffer_release(info->fb[HEAD_PANEL]); err_probed_crt: framebuffer_release(info->fb[HEAD_CRT]); err_alloc: kfree(info); return ret; } /* * Cleanup */ static int sm501fb_remove(struct platform_device *pdev) { struct sm501fb_info *info = platform_get_drvdata(pdev); struct fb_info *fbinfo_crt = info->fb[0]; struct fb_info *fbinfo_pnl = info->fb[1]; device_remove_file(&pdev->dev, &dev_attr_fbregs_crt); device_remove_file(&pdev->dev, &dev_attr_fbregs_pnl); device_remove_file(&pdev->dev, &dev_attr_crt_src); sm501_free_init_fb(info, HEAD_CRT); sm501_free_init_fb(info, HEAD_PANEL); unregister_framebuffer(fbinfo_crt); unregister_framebuffer(fbinfo_pnl); sm501fb_stop(info); kfree(info); framebuffer_release(fbinfo_pnl); framebuffer_release(fbinfo_crt); return 0; } #ifdef CONFIG_PM static int sm501fb_suspend_fb(struct sm501fb_info *info, enum sm501_controller head) { struct fb_info *fbi = info->fb[head]; struct sm501fb_par *par = fbi->par; if (par->screen.size == 0) return 0; /* blank the relevant interface to ensure unit power minimised */ (par->ops.fb_blank)(FB_BLANK_POWERDOWN, fbi); /* tell console/fb driver we are suspending */ console_lock(); fb_set_suspend(fbi, 1); console_unlock(); /* backup copies in case chip is powered down over suspend */ par->store_fb = vmalloc(par->screen.size); if (par->store_fb == NULL) { dev_err(info->dev, "no memory to store screen\n"); return -ENOMEM; } par->store_cursor = vmalloc(par->cursor.size); if (par->store_cursor == NULL) { dev_err(info->dev, "no memory to store cursor\n"); goto err_nocursor; } dev_dbg(info->dev, "suspending screen to %p\n", par->store_fb); dev_dbg(info->dev, "suspending cursor to %p\n", par->store_cursor); memcpy_fromio(par->store_fb, par->screen.k_addr, par->screen.size); memcpy_fromio(par->store_cursor, par->cursor.k_addr, par->cursor.size); return 0; err_nocursor: vfree(par->store_fb); par->store_fb = NULL; return -ENOMEM; } static void 
sm501fb_resume_fb(struct sm501fb_info *info, enum sm501_controller head) { struct fb_info *fbi = info->fb[head]; struct sm501fb_par *par = fbi->par; if (par->screen.size == 0) return; /* re-activate the configuration */ (par->ops.fb_set_par)(fbi); /* restore the data */ dev_dbg(info->dev, "restoring screen from %p\n", par->store_fb); dev_dbg(info->dev, "restoring cursor from %p\n", par->store_cursor); if (par->store_fb) memcpy_toio(par->screen.k_addr, par->store_fb, par->screen.size); if (par->store_cursor) memcpy_toio(par->cursor.k_addr, par->store_cursor, par->cursor.size); console_lock(); fb_set_suspend(fbi, 0); console_unlock(); vfree(par->store_fb); vfree(par->store_cursor); } /* suspend and resume support */ static int sm501fb_suspend(struct platform_device *pdev, pm_message_t state) { struct sm501fb_info *info = platform_get_drvdata(pdev); /* store crt control to resume with */ info->pm_crt_ctrl = smc501_readl(info->regs + SM501_DC_CRT_CONTROL); sm501fb_suspend_fb(info, HEAD_CRT); sm501fb_suspend_fb(info, HEAD_PANEL); /* turn off the clocks, in case the device is not powered down */ sm501_unit_power(info->dev->parent, SM501_GATE_DISPLAY, 0); return 0; } #define SM501_CRT_CTRL_SAVE (SM501_DC_CRT_CONTROL_TVP | \ SM501_DC_CRT_CONTROL_SEL) static int sm501fb_resume(struct platform_device *pdev) { struct sm501fb_info *info = platform_get_drvdata(pdev); unsigned long crt_ctrl; sm501_unit_power(info->dev->parent, SM501_GATE_DISPLAY, 1); /* restore the items we want to be saved for crt control */ crt_ctrl = smc501_readl(info->regs + SM501_DC_CRT_CONTROL); crt_ctrl &= ~SM501_CRT_CTRL_SAVE; crt_ctrl |= info->pm_crt_ctrl & SM501_CRT_CTRL_SAVE; smc501_writel(crt_ctrl, info->regs + SM501_DC_CRT_CONTROL); sm501fb_resume_fb(info, HEAD_CRT); sm501fb_resume_fb(info, HEAD_PANEL); return 0; } #else #define sm501fb_suspend NULL #define sm501fb_resume NULL #endif static struct platform_driver sm501fb_driver = { .probe = sm501fb_probe, .remove = sm501fb_remove, .suspend = 
sm501fb_suspend, .resume = sm501fb_resume, .driver = { .name = "sm501-fb", .owner = THIS_MODULE, }, }; static int __devinit sm501fb_init(void) { return platform_driver_register(&sm501fb_driver); } static void __exit sm501fb_cleanup(void) { platform_driver_unregister(&sm501fb_driver); } module_init(sm501fb_init); module_exit(sm501fb_cleanup); module_param_named(mode, fb_mode, charp, 0); MODULE_PARM_DESC(mode, "Specify resolution as \"<xres>x<yres>[-<bpp>][@<refresh>]\" "); module_param_named(bpp, default_bpp, ulong, 0); MODULE_PARM_DESC(bpp, "Specify bit-per-pixel if not specified mode"); MODULE_AUTHOR("Ben Dooks, Vincent Sanders"); MODULE_DESCRIPTION("SM501 Framebuffer driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
jthornber/linux-2.6
arch/mips/loongson/common/cs5536/cs5536_mfgpt.c
939
5542
/* * CS5536 General timer functions * * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology * Author: Yanhua, yanh@lemote.com * * Copyright (C) 2009 Lemote Inc. * Author: Wu zhangjin, wuzhangjin@gmail.com * * Reference: AMD Geode(TM) CS5536 Companion Device Data Book * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/io.h> #include <linux/init.h> #include <linux/module.h> #include <linux/jiffies.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/clockchips.h> #include <asm/time.h> #include <cs5536/cs5536_mfgpt.h> static DEFINE_RAW_SPINLOCK(mfgpt_lock); static u32 mfgpt_base; /* * Initialize the MFGPT timer. * * This is also called after resume to bring the MFGPT into operation again. */ /* disable counter */ void disable_mfgpt0_counter(void) { outw(inw(MFGPT0_SETUP) & 0x7fff, MFGPT0_SETUP); } EXPORT_SYMBOL(disable_mfgpt0_counter); /* enable counter, comparator2 to event mode, 14.318MHz clock */ void enable_mfgpt0_counter(void) { outw(0xe310, MFGPT0_SETUP); } EXPORT_SYMBOL(enable_mfgpt0_counter); static void init_mfgpt_timer(enum clock_event_mode mode, struct clock_event_device *evt) { raw_spin_lock(&mfgpt_lock); switch (mode) { case CLOCK_EVT_MODE_PERIODIC: outw(COMPARE, MFGPT0_CMP2); /* set comparator2 */ outw(0, MFGPT0_CNT); /* set counter to 0 */ enable_mfgpt0_counter(); break; case CLOCK_EVT_MODE_SHUTDOWN: case CLOCK_EVT_MODE_UNUSED: if (evt->mode == CLOCK_EVT_MODE_PERIODIC || evt->mode == CLOCK_EVT_MODE_ONESHOT) disable_mfgpt0_counter(); break; case CLOCK_EVT_MODE_ONESHOT: /* The oneshot mode have very high deviation, Not use it! 
*/ break; case CLOCK_EVT_MODE_RESUME: /* Nothing to do here */ break; } raw_spin_unlock(&mfgpt_lock); } static struct clock_event_device mfgpt_clockevent = { .name = "mfgpt", .features = CLOCK_EVT_FEAT_PERIODIC, .set_mode = init_mfgpt_timer, .irq = CS5536_MFGPT_INTR, }; static irqreturn_t timer_interrupt(int irq, void *dev_id) { u32 basehi; /* * get MFGPT base address * * NOTE: do not remove me, it's need for the value of mfgpt_base is * variable */ _rdmsr(DIVIL_MSR_REG(DIVIL_LBAR_MFGPT), &basehi, &mfgpt_base); /* ack */ outw(inw(MFGPT0_SETUP) | 0x4000, MFGPT0_SETUP); mfgpt_clockevent.event_handler(&mfgpt_clockevent); return IRQ_HANDLED; } static struct irqaction irq5 = { .handler = timer_interrupt, .flags = IRQF_NOBALANCING | IRQF_TIMER, .name = "timer" }; /* * Initialize the conversion factor and the min/max deltas of the clock event * structure and register the clock event source with the framework. */ void __init setup_mfgpt0_timer(void) { u32 basehi; struct clock_event_device *cd = &mfgpt_clockevent; unsigned int cpu = smp_processor_id(); cd->cpumask = cpumask_of(cpu); clockevent_set_clock(cd, MFGPT_TICK_RATE); cd->max_delta_ns = clockevent_delta2ns(0xffff, cd); cd->min_delta_ns = clockevent_delta2ns(0xf, cd); /* Enable MFGPT0 Comparator 2 Output to the Interrupt Mapper */ _wrmsr(DIVIL_MSR_REG(MFGPT_IRQ), 0, 0x100); /* Enable Interrupt Gate 5 */ _wrmsr(DIVIL_MSR_REG(PIC_ZSEL_LOW), 0, 0x50000); /* get MFGPT base address */ _rdmsr(DIVIL_MSR_REG(DIVIL_LBAR_MFGPT), &basehi, &mfgpt_base); clockevents_register_device(cd); setup_irq(CS5536_MFGPT_INTR, &irq5); } /* * Since the MFGPT overflows every tick, its not very useful * to just read by itself. 
So use jiffies to emulate a free * running counter: */ static cycle_t mfgpt_read(struct clocksource *cs) { unsigned long flags; int count; u32 jifs; static int old_count; static u32 old_jifs; raw_spin_lock_irqsave(&mfgpt_lock, flags); /* * Although our caller may have the read side of xtime_lock, * this is now a seqlock, and we are cheating in this routine * by having side effects on state that we cannot undo if * there is a collision on the seqlock and our caller has to * retry. (Namely, old_jifs and old_count.) So we must treat * jiffies as volatile despite the lock. We read jiffies * before latching the timer count to guarantee that although * the jiffies value might be older than the count (that is, * the counter may underflow between the last point where * jiffies was incremented and the point where we latch the * count), it cannot be newer. */ jifs = jiffies; /* read the count */ count = inw(MFGPT0_CNT); /* * It's possible for count to appear to go the wrong way for this * reason: * * The timer counter underflows, but we haven't handled the resulting * interrupt and incremented jiffies yet. * * Previous attempts to handle these cases intelligently were buggy, so * we just do the simple thing now. */ if (count < old_count && jifs == old_jifs) count = old_count; old_count = count; old_jifs = jifs; raw_spin_unlock_irqrestore(&mfgpt_lock, flags); return (cycle_t) (jifs * COMPARE) + count; } static struct clocksource clocksource_mfgpt = { .name = "mfgpt", .rating = 120, /* Functional for real use, but not desired */ .read = mfgpt_read, .mask = CLOCKSOURCE_MASK(32), }; int __init init_mfgpt_clocksource(void) { if (num_possible_cpus() > 1) /* MFGPT does not scale! */ return 0; return clocksource_register_hz(&clocksource_mfgpt, MFGPT_TICK_RATE); } arch_initcall(init_mfgpt_clocksource);
gpl-2.0
AndreiLux/ref-3.10
drivers/input/keyboard/adp5520-keys.c
2475
5135
/* * Keypad driver for Analog Devices ADP5520 MFD PMICs * * Copyright 2009 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/input.h> #include <linux/mfd/adp5520.h> #include <linux/slab.h> struct adp5520_keys { struct input_dev *input; struct notifier_block notifier; struct device *master; unsigned short keycode[ADP5520_KEYMAPSIZE]; }; static void adp5520_keys_report_event(struct adp5520_keys *dev, unsigned short keymask, int value) { int i; for (i = 0; i < ADP5520_MAXKEYS; i++) if (keymask & (1 << i)) input_report_key(dev->input, dev->keycode[i], value); input_sync(dev->input); } static int adp5520_keys_notifier(struct notifier_block *nb, unsigned long event, void *data) { struct adp5520_keys *dev; uint8_t reg_val_lo, reg_val_hi; unsigned short keymask; dev = container_of(nb, struct adp5520_keys, notifier); if (event & ADP5520_KP_INT) { adp5520_read(dev->master, ADP5520_KP_INT_STAT_1, &reg_val_lo); adp5520_read(dev->master, ADP5520_KP_INT_STAT_2, &reg_val_hi); keymask = (reg_val_hi << 8) | reg_val_lo; /* Read twice to clear */ adp5520_read(dev->master, ADP5520_KP_INT_STAT_1, &reg_val_lo); adp5520_read(dev->master, ADP5520_KP_INT_STAT_2, &reg_val_hi); keymask |= (reg_val_hi << 8) | reg_val_lo; adp5520_keys_report_event(dev, keymask, 1); } if (event & ADP5520_KR_INT) { adp5520_read(dev->master, ADP5520_KR_INT_STAT_1, &reg_val_lo); adp5520_read(dev->master, ADP5520_KR_INT_STAT_2, &reg_val_hi); keymask = (reg_val_hi << 8) | reg_val_lo; /* Read twice to clear */ adp5520_read(dev->master, ADP5520_KR_INT_STAT_1, &reg_val_lo); adp5520_read(dev->master, ADP5520_KR_INT_STAT_2, &reg_val_hi); keymask |= (reg_val_hi << 8) | reg_val_lo; adp5520_keys_report_event(dev, keymask, 0); } return 0; } static int adp5520_keys_probe(struct platform_device *pdev) { struct adp5520_keys_platform_data *pdata = pdev->dev.platform_data; struct input_dev 
*input; struct adp5520_keys *dev; int ret, i; unsigned char en_mask, ctl_mask = 0; if (pdev->id != ID_ADP5520) { dev_err(&pdev->dev, "only ADP5520 supports Keypad\n"); return -EINVAL; } if (pdata == NULL) { dev_err(&pdev->dev, "missing platform data\n"); return -EINVAL; } if (!(pdata->rows_en_mask && pdata->cols_en_mask)) return -EINVAL; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev == NULL) { dev_err(&pdev->dev, "failed to alloc memory\n"); return -ENOMEM; } input = input_allocate_device(); if (!input) { ret = -ENOMEM; goto err; } dev->master = pdev->dev.parent; dev->input = input; input->name = pdev->name; input->phys = "adp5520-keys/input0"; input->dev.parent = &pdev->dev; input_set_drvdata(input, dev); input->id.bustype = BUS_I2C; input->id.vendor = 0x0001; input->id.product = 0x5520; input->id.version = 0x0001; input->keycodesize = sizeof(dev->keycode[0]); input->keycodemax = pdata->keymapsize; input->keycode = dev->keycode; memcpy(dev->keycode, pdata->keymap, pdata->keymapsize * input->keycodesize); /* setup input device */ __set_bit(EV_KEY, input->evbit); if (pdata->repeat) __set_bit(EV_REP, input->evbit); for (i = 0; i < input->keycodemax; i++) __set_bit(dev->keycode[i], input->keybit); __clear_bit(KEY_RESERVED, input->keybit); ret = input_register_device(input); if (ret) { dev_err(&pdev->dev, "unable to register input device\n"); goto err; } en_mask = pdata->rows_en_mask | pdata->cols_en_mask; ret = adp5520_set_bits(dev->master, ADP5520_GPIO_CFG_1, en_mask); if (en_mask & ADP5520_COL_C3) ctl_mask |= ADP5520_C3_MODE; if (en_mask & ADP5520_ROW_R3) ctl_mask |= ADP5520_R3_MODE; if (ctl_mask) ret |= adp5520_set_bits(dev->master, ADP5520_LED_CONTROL, ctl_mask); ret |= adp5520_set_bits(dev->master, ADP5520_GPIO_PULLUP, pdata->rows_en_mask); if (ret) { dev_err(&pdev->dev, "failed to write\n"); ret = -EIO; goto err1; } dev->notifier.notifier_call = adp5520_keys_notifier; ret = adp5520_register_notifier(dev->master, &dev->notifier, ADP5520_KP_IEN | 
ADP5520_KR_IEN); if (ret) { dev_err(&pdev->dev, "failed to register notifier\n"); goto err1; } platform_set_drvdata(pdev, dev); return 0; err1: input_unregister_device(input); input = NULL; err: input_free_device(input); kfree(dev); return ret; } static int adp5520_keys_remove(struct platform_device *pdev) { struct adp5520_keys *dev = platform_get_drvdata(pdev); adp5520_unregister_notifier(dev->master, &dev->notifier, ADP5520_KP_IEN | ADP5520_KR_IEN); input_unregister_device(dev->input); kfree(dev); return 0; } static struct platform_driver adp5520_keys_driver = { .driver = { .name = "adp5520-keys", .owner = THIS_MODULE, }, .probe = adp5520_keys_probe, .remove = adp5520_keys_remove, }; module_platform_driver(adp5520_keys_driver); MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); MODULE_DESCRIPTION("Keys ADP5520 Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:adp5520-keys");
gpl-2.0
Snuzzo/funky_msm8960
arch/sparc/kernel/sysfs.c
2731
8927
/* sysfs.c: Toplogy sysfs support code for sparc64. * * Copyright (C) 2007 David S. Miller <davem@davemloft.net> */ #include <linux/sched.h> #include <linux/sysdev.h> #include <linux/cpu.h> #include <linux/smp.h> #include <linux/percpu.h> #include <linux/init.h> #include <asm/cpudata.h> #include <asm/hypervisor.h> #include <asm/spitfire.h> static DEFINE_PER_CPU(struct hv_mmu_statistics, mmu_stats) __attribute__((aligned(64))); #define SHOW_MMUSTAT_ULONG(NAME) \ static ssize_t show_##NAME(struct sys_device *dev, \ struct sysdev_attribute *attr, char *buf) \ { \ struct hv_mmu_statistics *p = &per_cpu(mmu_stats, dev->id); \ return sprintf(buf, "%lu\n", p->NAME); \ } \ static SYSDEV_ATTR(NAME, 0444, show_##NAME, NULL) SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_8k_tte); SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_8k_tte); SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_64k_tte); SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_64k_tte); SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_4mb_tte); SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_4mb_tte); SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_256mb_tte); SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_256mb_tte); SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_8k_tte); SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_8k_tte); SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_64k_tte); SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_64k_tte); SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_4mb_tte); SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_4mb_tte); SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_256mb_tte); SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_256mb_tte); SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_8k_tte); SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_8k_tte); SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_64k_tte); SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_64k_tte); SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_4mb_tte); SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_4mb_tte); SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_256mb_tte); SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_256mb_tte); SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_8k_tte); 
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_8k_tte); SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_64k_tte); SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_64k_tte); SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_4mb_tte); SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_4mb_tte); SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_256mb_tte); SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_256mb_tte); static struct attribute *mmu_stat_attrs[] = { &attr_immu_tsb_hits_ctx0_8k_tte.attr, &attr_immu_tsb_ticks_ctx0_8k_tte.attr, &attr_immu_tsb_hits_ctx0_64k_tte.attr, &attr_immu_tsb_ticks_ctx0_64k_tte.attr, &attr_immu_tsb_hits_ctx0_4mb_tte.attr, &attr_immu_tsb_ticks_ctx0_4mb_tte.attr, &attr_immu_tsb_hits_ctx0_256mb_tte.attr, &attr_immu_tsb_ticks_ctx0_256mb_tte.attr, &attr_immu_tsb_hits_ctxnon0_8k_tte.attr, &attr_immu_tsb_ticks_ctxnon0_8k_tte.attr, &attr_immu_tsb_hits_ctxnon0_64k_tte.attr, &attr_immu_tsb_ticks_ctxnon0_64k_tte.attr, &attr_immu_tsb_hits_ctxnon0_4mb_tte.attr, &attr_immu_tsb_ticks_ctxnon0_4mb_tte.attr, &attr_immu_tsb_hits_ctxnon0_256mb_tte.attr, &attr_immu_tsb_ticks_ctxnon0_256mb_tte.attr, &attr_dmmu_tsb_hits_ctx0_8k_tte.attr, &attr_dmmu_tsb_ticks_ctx0_8k_tte.attr, &attr_dmmu_tsb_hits_ctx0_64k_tte.attr, &attr_dmmu_tsb_ticks_ctx0_64k_tte.attr, &attr_dmmu_tsb_hits_ctx0_4mb_tte.attr, &attr_dmmu_tsb_ticks_ctx0_4mb_tte.attr, &attr_dmmu_tsb_hits_ctx0_256mb_tte.attr, &attr_dmmu_tsb_ticks_ctx0_256mb_tte.attr, &attr_dmmu_tsb_hits_ctxnon0_8k_tte.attr, &attr_dmmu_tsb_ticks_ctxnon0_8k_tte.attr, &attr_dmmu_tsb_hits_ctxnon0_64k_tte.attr, &attr_dmmu_tsb_ticks_ctxnon0_64k_tte.attr, &attr_dmmu_tsb_hits_ctxnon0_4mb_tte.attr, &attr_dmmu_tsb_ticks_ctxnon0_4mb_tte.attr, &attr_dmmu_tsb_hits_ctxnon0_256mb_tte.attr, &attr_dmmu_tsb_ticks_ctxnon0_256mb_tte.attr, NULL, }; static struct attribute_group mmu_stat_group = { .attrs = mmu_stat_attrs, .name = "mmu_stats", }; /* XXX convert to rusty's on_one_cpu */ static unsigned long run_on_cpu(unsigned long cpu, unsigned long (*func)(unsigned long), unsigned long arg) { cpumask_t 
old_affinity; unsigned long ret; cpumask_copy(&old_affinity, tsk_cpus_allowed(current)); /* should return -EINVAL to userspace */ if (set_cpus_allowed_ptr(current, cpumask_of(cpu))) return 0; ret = func(arg); set_cpus_allowed_ptr(current, &old_affinity); return ret; } static unsigned long read_mmustat_enable(unsigned long junk) { unsigned long ra = 0; sun4v_mmustat_info(&ra); return ra != 0; } static unsigned long write_mmustat_enable(unsigned long val) { unsigned long ra, orig_ra; if (val) ra = __pa(&per_cpu(mmu_stats, smp_processor_id())); else ra = 0UL; return sun4v_mmustat_conf(ra, &orig_ra); } static ssize_t show_mmustat_enable(struct sys_device *s, struct sysdev_attribute *attr, char *buf) { unsigned long val = run_on_cpu(s->id, read_mmustat_enable, 0); return sprintf(buf, "%lx\n", val); } static ssize_t store_mmustat_enable(struct sys_device *s, struct sysdev_attribute *attr, const char *buf, size_t count) { unsigned long val, err; int ret = sscanf(buf, "%ld", &val); if (ret != 1) return -EINVAL; err = run_on_cpu(s->id, write_mmustat_enable, val); if (err) return -EIO; return count; } static SYSDEV_ATTR(mmustat_enable, 0644, show_mmustat_enable, store_mmustat_enable); static int mmu_stats_supported; static int register_mmu_stats(struct sys_device *s) { if (!mmu_stats_supported) return 0; sysdev_create_file(s, &attr_mmustat_enable); return sysfs_create_group(&s->kobj, &mmu_stat_group); } #ifdef CONFIG_HOTPLUG_CPU static void unregister_mmu_stats(struct sys_device *s) { if (!mmu_stats_supported) return; sysfs_remove_group(&s->kobj, &mmu_stat_group); sysdev_remove_file(s, &attr_mmustat_enable); } #endif #define SHOW_CPUDATA_ULONG_NAME(NAME, MEMBER) \ static ssize_t show_##NAME(struct sys_device *dev, \ struct sysdev_attribute *attr, char *buf) \ { \ cpuinfo_sparc *c = &cpu_data(dev->id); \ return sprintf(buf, "%lu\n", c->MEMBER); \ } #define SHOW_CPUDATA_UINT_NAME(NAME, MEMBER) \ static ssize_t show_##NAME(struct sys_device *dev, \ struct sysdev_attribute 
*attr, char *buf) \ { \ cpuinfo_sparc *c = &cpu_data(dev->id); \ return sprintf(buf, "%u\n", c->MEMBER); \ } SHOW_CPUDATA_ULONG_NAME(clock_tick, clock_tick); SHOW_CPUDATA_UINT_NAME(l1_dcache_size, dcache_size); SHOW_CPUDATA_UINT_NAME(l1_dcache_line_size, dcache_line_size); SHOW_CPUDATA_UINT_NAME(l1_icache_size, icache_size); SHOW_CPUDATA_UINT_NAME(l1_icache_line_size, icache_line_size); SHOW_CPUDATA_UINT_NAME(l2_cache_size, ecache_size); SHOW_CPUDATA_UINT_NAME(l2_cache_line_size, ecache_line_size); static struct sysdev_attribute cpu_core_attrs[] = { _SYSDEV_ATTR(clock_tick, 0444, show_clock_tick, NULL), _SYSDEV_ATTR(l1_dcache_size, 0444, show_l1_dcache_size, NULL), _SYSDEV_ATTR(l1_dcache_line_size, 0444, show_l1_dcache_line_size, NULL), _SYSDEV_ATTR(l1_icache_size, 0444, show_l1_icache_size, NULL), _SYSDEV_ATTR(l1_icache_line_size, 0444, show_l1_icache_line_size, NULL), _SYSDEV_ATTR(l2_cache_size, 0444, show_l2_cache_size, NULL), _SYSDEV_ATTR(l2_cache_line_size, 0444, show_l2_cache_line_size, NULL), }; static DEFINE_PER_CPU(struct cpu, cpu_devices); static void register_cpu_online(unsigned int cpu) { struct cpu *c = &per_cpu(cpu_devices, cpu); struct sys_device *s = &c->sysdev; int i; for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++) sysdev_create_file(s, &cpu_core_attrs[i]); register_mmu_stats(s); } #ifdef CONFIG_HOTPLUG_CPU static void unregister_cpu_online(unsigned int cpu) { struct cpu *c = &per_cpu(cpu_devices, cpu); struct sys_device *s = &c->sysdev; int i; unregister_mmu_stats(s); for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++) sysdev_remove_file(s, &cpu_core_attrs[i]); } #endif static int __cpuinit sysfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned int)(long)hcpu; switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: register_cpu_online(cpu); break; #ifdef CONFIG_HOTPLUG_CPU case CPU_DEAD: case CPU_DEAD_FROZEN: unregister_cpu_online(cpu); break; #endif } return NOTIFY_OK; } static struct 
notifier_block __cpuinitdata sysfs_cpu_nb = { .notifier_call = sysfs_cpu_notify, }; static void __init check_mmu_stats(void) { unsigned long dummy1, err; if (tlb_type != hypervisor) return; err = sun4v_mmustat_info(&dummy1); if (!err) mmu_stats_supported = 1; } static void register_nodes(void) { #ifdef CONFIG_NUMA int i; for (i = 0; i < MAX_NUMNODES; i++) register_one_node(i); #endif } static int __init topology_init(void) { int cpu; register_nodes(); check_mmu_stats(); register_cpu_notifier(&sysfs_cpu_nb); for_each_possible_cpu(cpu) { struct cpu *c = &per_cpu(cpu_devices, cpu); register_cpu(c, cpu); if (cpu_online(cpu)) register_cpu_online(cpu); } return 0; } subsys_initcall(topology_init);
gpl-2.0
bluechiptechnology/linux-bctrx3
arch/blackfin/mach-bf527/boards/ezbrd.c
2731
21188
/* * Copyright 2004-2009 Analog Devices Inc. * 2005 National ICT Australia (NICTA) * Aidan Williams <aidan@nicta.com.au> * * Licensed under the GPL-2 or later. */ #include <linux/device.h> #include <linux/export.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/spi/spi.h> #include <linux/spi/flash.h> #include <linux/i2c.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/usb/musb.h> #include <asm/dma.h> #include <asm/bfin5xx_spi.h> #include <asm/reboot.h> #include <asm/nand.h> #include <asm/portmux.h> #include <asm/dpmc.h> #include <linux/spi/ad7877.h> /* * Name the Board for the /proc/cpuinfo */ const char bfin_board_name[] = "ADI BF526-EZBRD"; /* * Driver needs to know address, irq and flag pin. */ #if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) static struct resource musb_resources[] = { [0] = { .start = 0xffc03800, .end = 0xffc03cff, .flags = IORESOURCE_MEM, }, [1] = { /* general IRQ */ .start = IRQ_USB_INT0, .end = IRQ_USB_INT0, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, .name = "mc" }, [2] = { /* DMA IRQ */ .start = IRQ_USB_DMA, .end = IRQ_USB_DMA, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, .name = "dma" }, }; static struct musb_hdrc_config musb_config = { .multipoint = 0, .dyn_fifo = 0, .soft_con = 1, .dma = 1, .num_eps = 8, .dma_channels = 8, .gpio_vrsel = GPIO_PG13, /* Some custom boards need to be active low, just set it to "0" * if it is the case. 
*/ .gpio_vrsel_active = 1, .clkin = 24, /* musb CLKIN in MHZ */ }; static struct musb_hdrc_platform_data musb_plat = { #if defined(CONFIG_USB_MUSB_OTG) .mode = MUSB_OTG, #elif defined(CONFIG_USB_MUSB_HDRC_HCD) .mode = MUSB_HOST, #elif defined(CONFIG_USB_GADGET_MUSB_HDRC) .mode = MUSB_PERIPHERAL, #endif .config = &musb_config, }; static u64 musb_dmamask = ~(u32)0; static struct platform_device musb_device = { .name = "musb-blackfin", .id = 0, .dev = { .dma_mask = &musb_dmamask, .coherent_dma_mask = 0xffffffff, .platform_data = &musb_plat, }, .num_resources = ARRAY_SIZE(musb_resources), .resource = musb_resources, }; #endif #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) static struct mtd_partition ezbrd_partitions[] = { { .name = "bootloader(nor)", .size = 0x40000, .offset = 0, }, { .name = "linux kernel(nor)", .size = 0x1C0000, .offset = MTDPART_OFS_APPEND, }, { .name = "file system(nor)", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, } }; static struct physmap_flash_data ezbrd_flash_data = { .width = 2, .parts = ezbrd_partitions, .nr_parts = ARRAY_SIZE(ezbrd_partitions), }; static struct resource ezbrd_flash_resource = { .start = 0x20000000, .end = 0x203fffff, .flags = IORESOURCE_MEM, }; static struct platform_device ezbrd_flash_device = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &ezbrd_flash_data, }, .num_resources = 1, .resource = &ezbrd_flash_resource, }; #endif #if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE) static struct mtd_partition partition_info[] = { { .name = "bootloader(nand)", .offset = 0, .size = 0x40000, }, { .name = "linux kernel(nand)", .offset = MTDPART_OFS_APPEND, .size = 4 * 1024 * 1024, }, { .name = "file system(nand)", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, }, }; static struct bf5xx_nand_platform bf5xx_nand_platform = { .data_width = NFC_NWIDTH_8, .partitions = partition_info, .nr_partitions = ARRAY_SIZE(partition_info), .rd_dly = 3, .wr_dly = 3, 
}; static struct resource bf5xx_nand_resources[] = { { .start = NFC_CTL, .end = NFC_DATA_RD + 2, .flags = IORESOURCE_MEM, }, { .start = CH_NFC, .end = CH_NFC, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bf5xx_nand_device = { .name = "bf5xx-nand", .id = 0, .num_resources = ARRAY_SIZE(bf5xx_nand_resources), .resource = bf5xx_nand_resources, .dev = { .platform_data = &bf5xx_nand_platform, }, }; #endif #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) static struct platform_device rtc_device = { .name = "rtc-bfin", .id = -1, }; #endif #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) #include <linux/bfin_mac.h> static const unsigned short bfin_mac_peripherals[] = P_RMII0; static struct bfin_phydev_platform_data bfin_phydev_data[] = { { .addr = 1, .irq = IRQ_MAC_PHYINT, }, }; static struct bfin_mii_bus_platform_data bfin_mii_bus_data = { .phydev_number = 1, .phydev_data = bfin_phydev_data, .phy_mode = PHY_INTERFACE_MODE_RMII, .mac_peripherals = bfin_mac_peripherals, }; static struct platform_device bfin_mii_bus = { .name = "bfin_mii_bus", .dev = { .platform_data = &bfin_mii_bus_data, } }; static struct platform_device bfin_mac_device = { .name = "bfin_mac", .dev = { .platform_data = &bfin_mii_bus, } }; #endif #if defined(CONFIG_MTD_M25P80) \ || defined(CONFIG_MTD_M25P80_MODULE) static struct mtd_partition bfin_spi_flash_partitions[] = { { .name = "bootloader(spi)", .size = 0x00040000, .offset = 0, .mask_flags = MTD_CAP_ROM }, { .name = "linux kernel(spi)", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, } }; static struct flash_platform_data bfin_spi_flash_data = { .name = "m25p80", .parts = bfin_spi_flash_partitions, .nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions), .type = "sst25wf040", }; /* SPI flash chip (sst25wf040) */ static struct bfin5xx_spi_chip spi_flash_chip_info = { .enable_dma = 0, /* use dma transfer with this chip*/ }; #endif #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) static 
struct bfin5xx_spi_chip mmc_spi_chip_info = { .enable_dma = 0, }; #endif #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) static const struct ad7877_platform_data bfin_ad7877_ts_info = { .model = 7877, .vref_delay_usecs = 50, /* internal, no capacitor */ .x_plate_ohms = 419, .y_plate_ohms = 486, .pressure_max = 1000, .pressure_min = 0, .stopacq_polarity = 1, .first_conversion_delay = 3, .acquisition_time = 1, .averaging = 1, .pen_down_acc_interval = 1, }; #endif #if defined(CONFIG_TOUCHSCREEN_AD7879) || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE) #include <linux/spi/ad7879.h> static const struct ad7879_platform_data bfin_ad7879_ts_info = { .model = 7879, /* Model = AD7879 */ .x_plate_ohms = 620, /* 620 Ohm from the touch datasheet */ .pressure_max = 10000, .pressure_min = 0, .first_conversion_delay = 3, /* wait 512us before do a first conversion */ .acquisition_time = 1, /* 4us acquisition time per sample */ .median = 2, /* do 8 measurements */ .averaging = 1, /* take the average of 4 middle samples */ .pen_down_acc_interval = 255, /* 9.4 ms */ .gpio_export = 1, /* Export GPIO to gpiolib */ .gpio_base = -1, /* Dynamic allocation */ }; #endif static struct spi_board_info bfin_spi_board_info[] __initdata = { #if defined(CONFIG_MTD_M25P80) \ || defined(CONFIG_MTD_M25P80_MODULE) { /* the modalias must be the same as spi device driver name */ .modalias = "m25p80", /* Name of spi_driver for this device */ .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, /* Framework bus number */ .chip_select = 1, /* Framework chip select. 
On STAMP537 it is SPISSEL1*/ .platform_data = &bfin_spi_flash_data, .controller_data = &spi_flash_chip_info, .mode = SPI_MODE_3, }, #endif #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) { .modalias = "mmc_spi", .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 5, .controller_data = &mmc_spi_chip_info, .mode = SPI_MODE_3, }, #endif #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) { .modalias = "ad7877", .platform_data = &bfin_ad7877_ts_info, .irq = IRQ_PF8, .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 2, }, #endif #if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE) { .modalias = "ad7879", .platform_data = &bfin_ad7879_ts_info, .irq = IRQ_PG0, .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 5, .mode = SPI_CPHA | SPI_CPOL, }, #endif #if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \ && defined(CONFIG_SND_SOC_WM8731_SPI) { .modalias = "wm8731", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 5, .mode = SPI_MODE_0, }, #endif #if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) { .modalias = "spidev", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 1, }, #endif #if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) { .modalias = "bfin-lq035q1-spi", .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 1, .mode = SPI_CPHA | SPI_CPOL, }, #endif }; #if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) /* SPI controller data */ static struct bfin5xx_spi_master bfin_spi0_info = { .num_chipselect = 8, .enable_dma = 1, /* master has the ability to do dma transfer */ .pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0}, }; /* SPI (0) */ static struct 
resource bfin_spi0_resource[] = { [0] = { .start = SPI0_REGBASE, .end = SPI0_REGBASE + 0xFF, .flags = IORESOURCE_MEM, }, [1] = { .start = CH_SPI, .end = CH_SPI, .flags = IORESOURCE_DMA, }, [2] = { .start = IRQ_SPI, .end = IRQ_SPI, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bfin_spi0_device = { .name = "bfin-spi", .id = 0, /* Bus number */ .num_resources = ARRAY_SIZE(bfin_spi0_resource), .resource = bfin_spi0_resource, .dev = { .platform_data = &bfin_spi0_info, /* Passed to driver */ }, }; #endif /* spi master and devices */ #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 static struct resource bfin_uart0_resources[] = { { .start = UART0_THR, .end = UART0_GCTL+2, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_TX, .end = IRQ_UART0_TX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART0_ERROR, .end = IRQ_UART0_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART0_TX, .end = CH_UART0_TX, .flags = IORESOURCE_DMA, }, { .start = CH_UART0_RX, .end = CH_UART0_RX, .flags = IORESOURCE_DMA, }, }; static unsigned short bfin_uart0_peripherals[] = { P_UART0_TX, P_UART0_RX, 0 }; static struct platform_device bfin_uart0_device = { .name = "bfin-uart", .id = 0, .num_resources = ARRAY_SIZE(bfin_uart0_resources), .resource = bfin_uart0_resources, .dev = { .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ }, }; #endif #ifdef CONFIG_SERIAL_BFIN_UART1 static struct resource bfin_uart1_resources[] = { { .start = UART1_THR, .end = UART1_GCTL+2, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART1_TX, .end = IRQ_UART1_TX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART1_RX, .end = IRQ_UART1_RX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART1_ERROR, .end = IRQ_UART1_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART1_TX, .end = CH_UART1_TX, .flags = IORESOURCE_DMA, }, { .start = CH_UART1_RX, .end = CH_UART1_RX, .flags = 
IORESOURCE_DMA, }, #ifdef CONFIG_BFIN_UART1_CTSRTS { /* CTS pin */ .start = GPIO_PG0, .end = GPIO_PG0, .flags = IORESOURCE_IO, }, { /* RTS pin */ .start = GPIO_PF10, .end = GPIO_PF10, .flags = IORESOURCE_IO, }, #endif }; static unsigned short bfin_uart1_peripherals[] = { P_UART1_TX, P_UART1_RX, 0 }; static struct platform_device bfin_uart1_device = { .name = "bfin-uart", .id = 1, .num_resources = ARRAY_SIZE(bfin_uart1_resources), .resource = bfin_uart1_resources, .dev = { .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ }, }; #endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 static struct resource bfin_sir0_resources[] = { { .start = 0xFFC00400, .end = 0xFFC004FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX+1, .flags = IORESOURCE_IRQ, }, { .start = CH_UART0_RX, .end = CH_UART0_RX+1, .flags = IORESOURCE_DMA, }, }; static struct platform_device bfin_sir0_device = { .name = "bfin_sir", .id = 0, .num_resources = ARRAY_SIZE(bfin_sir0_resources), .resource = bfin_sir0_resources, }; #endif #ifdef CONFIG_BFIN_SIR1 static struct resource bfin_sir1_resources[] = { { .start = 0xFFC02000, .end = 0xFFC020FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART1_RX, .end = IRQ_UART1_RX+1, .flags = IORESOURCE_IRQ, }, { .start = CH_UART1_RX, .end = CH_UART1_RX+1, .flags = IORESOURCE_DMA, }, }; static struct platform_device bfin_sir1_device = { .name = "bfin_sir", .id = 1, .num_resources = ARRAY_SIZE(bfin_sir1_resources), .resource = bfin_sir1_resources, }; #endif #endif #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0}; static struct resource bfin_twi0_resource[] = { [0] = { .start = TWI0_REGBASE, .end = TWI0_REGBASE, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_TWI, .end = IRQ_TWI, .flags = IORESOURCE_IRQ, }, }; static struct platform_device i2c_bfin_twi_device = { .name = "i2c-bfin-twi", .id 
= 0, .num_resources = ARRAY_SIZE(bfin_twi0_resource), .resource = bfin_twi0_resource, .dev = { .platform_data = &bfin_twi0_pins, }, }; #endif static struct i2c_board_info __initdata bfin_i2c_board_info[] = { #if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE) { I2C_BOARD_INFO("pcf8574_lcd", 0x22), }, #endif #if defined(CONFIG_INPUT_PCF8574) || defined(CONFIG_INPUT_PCF8574_MODULE) { I2C_BOARD_INFO("pcf8574_keypad", 0x27), .irq = IRQ_PF8, }, #endif }; #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART static struct resource bfin_sport0_uart_resources[] = { { .start = SPORT0_TCR1, .end = SPORT0_MRCS3+4, .flags = IORESOURCE_MEM, }, { .start = IRQ_SPORT0_RX, .end = IRQ_SPORT0_RX+1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_SPORT0_ERROR, .end = IRQ_SPORT0_ERROR, .flags = IORESOURCE_IRQ, }, }; static unsigned short bfin_sport0_peripherals[] = { P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0 }; static struct platform_device bfin_sport0_uart_device = { .name = "bfin-sport-uart", .id = 0, .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources), .resource = bfin_sport0_uart_resources, .dev = { .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ }, }; #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART static struct resource bfin_sport1_uart_resources[] = { { .start = SPORT1_TCR1, .end = SPORT1_MRCS3+4, .flags = IORESOURCE_MEM, }, { .start = IRQ_SPORT1_RX, .end = IRQ_SPORT1_RX+1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_SPORT1_ERROR, .end = IRQ_SPORT1_ERROR, .flags = IORESOURCE_IRQ, }, }; static unsigned short bfin_sport1_peripherals[] = { P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0 }; static struct platform_device bfin_sport1_uart_device = { .name = "bfin-sport-uart", .id = 1, .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources), .resource = bfin_sport1_uart_resources, .dev = { 
.platform_data = &bfin_sport1_peripherals, /* Passed to driver */ }, }; #endif #endif #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) #include <linux/input.h> #include <linux/gpio_keys.h> static struct gpio_keys_button bfin_gpio_keys_table[] = { {BTN_0, GPIO_PG0, 1, "gpio-keys: BTN0"}, {BTN_1, GPIO_PG13, 1, "gpio-keys: BTN1"}, }; static struct gpio_keys_platform_data bfin_gpio_keys_data = { .buttons = bfin_gpio_keys_table, .nbuttons = ARRAY_SIZE(bfin_gpio_keys_table), }; static struct platform_device bfin_device_gpiokeys = { .name = "gpio-keys", .dev = { .platform_data = &bfin_gpio_keys_data, }, }; #endif static const unsigned int cclk_vlev_datasheet[] = { VRPAIR(VLEV_100, 400000000), VRPAIR(VLEV_105, 426000000), VRPAIR(VLEV_110, 500000000), VRPAIR(VLEV_115, 533000000), VRPAIR(VLEV_120, 600000000), }; static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = { .tuple_tab = cclk_vlev_datasheet, .tabsize = ARRAY_SIZE(cclk_vlev_datasheet), .vr_settling_time = 25 /* us */, }; static struct platform_device bfin_dpmc = { .name = "bfin dpmc", .dev = { .platform_data = &bfin_dmpc_vreg_data, }, }; #if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) #include <asm/bfin-lq035q1.h> static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = { .mode = LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB, .ppi_mode = USE_RGB565_16_BIT_PPI, .use_bl = 1, .gpio_bl = GPIO_PG12, }; static struct resource bfin_lq035q1_resources[] = { { .start = IRQ_PPI_ERROR, .end = IRQ_PPI_ERROR, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bfin_lq035q1_device = { .name = "bfin-lq035q1", .id = -1, .num_resources = ARRAY_SIZE(bfin_lq035q1_resources), .resource = bfin_lq035q1_resources, .dev = { .platform_data = &bfin_lq035q1_data, }, }; #endif static struct platform_device *stamp_devices[] __initdata = { &bfin_dpmc, #if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE) &bf5xx_nand_device, #endif #if 
defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) &rtc_device, #endif #if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) &musb_device, #endif #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) &bfin_mii_bus, &bfin_mac_device, #endif #if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) &bfin_spi0_device, #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #ifdef CONFIG_SERIAL_BFIN_UART1 &bfin_uart1_device, #endif #endif #if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) &bfin_lq035q1_device, #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 &bfin_sir0_device, #endif #ifdef CONFIG_BFIN_SIR1 &bfin_sir1_device, #endif #endif #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) &i2c_bfin_twi_device, #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif #endif #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) &bfin_device_gpiokeys, #endif #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) &ezbrd_flash_device, #endif }; static int __init ezbrd_init(void) { printk(KERN_INFO "%s(): registering device resources\n", __func__); i2c_register_board_info(0, bfin_i2c_board_info, ARRAY_SIZE(bfin_i2c_board_info)); platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices)); spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); return 0; } arch_initcall(ezbrd_init); static struct platform_device *ezbrd_early_devices[] __initdata = { #if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #ifdef 
CONFIG_SERIAL_BFIN_UART1 &bfin_uart1_device, #endif #endif #if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif #endif }; void __init native_machine_early_platform_add_devices(void) { printk(KERN_INFO "register early platform devices\n"); early_platform_add_devices(ezbrd_early_devices, ARRAY_SIZE(ezbrd_early_devices)); } void native_machine_restart(char *cmd) { /* workaround reboot hang when booting from SPI */ if ((bfin_read_SYSCR() & 0x7) == 0x3) bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS); } int bfin_get_ether_addr(char *addr) { /* the MAC is stored in OTP memory page 0xDF */ u32 ret; u64 otp_mac; u32 (*otp_read)(u32 page, u32 flags, u64 *page_content) = (void *)0xEF00001A; ret = otp_read(0xDF, 0x00, &otp_mac); if (!(ret & 0x1)) { char *otp_mac_p = (char *)&otp_mac; for (ret = 0; ret < 6; ++ret) addr[ret] = otp_mac_p[5 - ret]; } return 0; } EXPORT_SYMBOL(bfin_get_ether_addr);
gpl-2.0
xiaognol/android_kernel_zte_nx503a
drivers/gpu/drm/nouveau/nouveau_acpi.c
4011
11247
#include <linux/pci.h> #include <linux/acpi.h> #include <linux/slab.h> #include <acpi/acpi_drivers.h> #include <acpi/acpi_bus.h> #include <acpi/video.h> #include <acpi/acpi.h> #include <linux/mxm-wmi.h> #include "drmP.h" #include "drm.h" #include "drm_sarea.h" #include "drm_crtc_helper.h" #include "nouveau_drv.h" #include "nouveau_drm.h" #include "nv50_display.h" #include "nouveau_connector.h" #include <linux/vga_switcheroo.h> #define NOUVEAU_DSM_LED 0x02 #define NOUVEAU_DSM_LED_STATE 0x00 #define NOUVEAU_DSM_LED_OFF 0x10 #define NOUVEAU_DSM_LED_STAMINA 0x11 #define NOUVEAU_DSM_LED_SPEED 0x12 #define NOUVEAU_DSM_POWER 0x03 #define NOUVEAU_DSM_POWER_STATE 0x00 #define NOUVEAU_DSM_POWER_SPEED 0x01 #define NOUVEAU_DSM_POWER_STAMINA 0x02 #define NOUVEAU_DSM_OPTIMUS_FN 0x1A #define NOUVEAU_DSM_OPTIMUS_ARGS 0x03000001 static struct nouveau_dsm_priv { bool dsm_detected; bool optimus_detected; acpi_handle dhandle; acpi_handle rom_handle; } nouveau_dsm_priv; #define NOUVEAU_DSM_HAS_MUX 0x1 #define NOUVEAU_DSM_HAS_OPT 0x2 static const char nouveau_dsm_muid[] = { 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D, 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4, }; static const char nouveau_op_dsm_muid[] = { 0xF8, 0xD8, 0x86, 0xA4, 0xDA, 0x0B, 0x1B, 0x47, 0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0, }; static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *result) { struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_object_list input; union acpi_object params[4]; union acpi_object *obj; int i, err; char args_buff[4]; input.count = 4; input.pointer = params; params[0].type = ACPI_TYPE_BUFFER; params[0].buffer.length = sizeof(nouveau_op_dsm_muid); params[0].buffer.pointer = (char *)nouveau_op_dsm_muid; params[1].type = ACPI_TYPE_INTEGER; params[1].integer.value = 0x00000100; params[2].type = ACPI_TYPE_INTEGER; params[2].integer.value = func; params[3].type = ACPI_TYPE_BUFFER; params[3].buffer.length = 4; /* ACPI is little endian, 
AABBCCDD becomes {DD,CC,BB,AA} */ for (i = 0; i < 4; i++) args_buff[i] = (arg >> i * 8) & 0xFF; params[3].buffer.pointer = args_buff; err = acpi_evaluate_object(handle, "_DSM", &input, &output); if (err) { printk(KERN_INFO "failed to evaluate _DSM: %d\n", err); return err; } obj = (union acpi_object *)output.pointer; if (obj->type == ACPI_TYPE_INTEGER) if (obj->integer.value == 0x80000002) { return -ENODEV; } if (obj->type == ACPI_TYPE_BUFFER) { if (obj->buffer.length == 4 && result) { *result = 0; *result |= obj->buffer.pointer[0]; *result |= (obj->buffer.pointer[1] << 8); *result |= (obj->buffer.pointer[2] << 16); *result |= (obj->buffer.pointer[3] << 24); } } kfree(output.pointer); return 0; } static int nouveau_dsm(acpi_handle handle, int func, int arg, uint32_t *result) { struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_object_list input; union acpi_object params[4]; union acpi_object *obj; int err; input.count = 4; input.pointer = params; params[0].type = ACPI_TYPE_BUFFER; params[0].buffer.length = sizeof(nouveau_dsm_muid); params[0].buffer.pointer = (char *)nouveau_dsm_muid; params[1].type = ACPI_TYPE_INTEGER; params[1].integer.value = 0x00000102; params[2].type = ACPI_TYPE_INTEGER; params[2].integer.value = func; params[3].type = ACPI_TYPE_INTEGER; params[3].integer.value = arg; err = acpi_evaluate_object(handle, "_DSM", &input, &output); if (err) { printk(KERN_INFO "failed to evaluate _DSM: %d\n", err); return err; } obj = (union acpi_object *)output.pointer; if (obj->type == ACPI_TYPE_INTEGER) if (obj->integer.value == 0x80000002) return -ENODEV; if (obj->type == ACPI_TYPE_BUFFER) { if (obj->buffer.length == 4 && result) { *result = 0; *result |= obj->buffer.pointer[0]; *result |= (obj->buffer.pointer[1] << 8); *result |= (obj->buffer.pointer[2] << 16); *result |= (obj->buffer.pointer[3] << 24); } } kfree(output.pointer); return 0; } /* Returns 1 if a DSM function is usable and 0 otherwise */ static int nouveau_test_dsm(acpi_handle 
test_handle, int (*dsm_func)(acpi_handle, int, int, uint32_t *), int sfnc) { u32 result = 0; /* Function 0 returns a Buffer containing available functions. The args * parameter is ignored for function 0, so just put 0 in it */ if (dsm_func(test_handle, 0, 0, &result)) return 0; /* ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported. If * the n-th bit is enabled, function n is supported */ return result & 1 && result & (1 << sfnc); } static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id) { mxm_wmi_call_mxmx(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0); mxm_wmi_call_mxds(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0); return nouveau_dsm(handle, NOUVEAU_DSM_LED, mux_id, NULL); } static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switcheroo_state state) { int arg; if (state == VGA_SWITCHEROO_ON) arg = NOUVEAU_DSM_POWER_SPEED; else arg = NOUVEAU_DSM_POWER_STAMINA; nouveau_dsm(handle, NOUVEAU_DSM_POWER, arg, NULL); return 0; } static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id) { /* perhaps the _DSM functions are mutually exclusive, but prepare for * the future */ if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected) return 0; if (id == VGA_SWITCHEROO_IGD) return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA); else return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_SPEED); } static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id, enum vga_switcheroo_state state) { if (id == VGA_SWITCHEROO_IGD) return 0; /* Optimus laptops have the card already disabled in * nouveau_switcheroo_set_state */ if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected) return 0; return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state); } static int nouveau_dsm_init(void) { return 0; } static int nouveau_dsm_get_client_id(struct pci_dev *pdev) { /* easy option one - 
intel vendor ID means Integrated */ if (pdev->vendor == PCI_VENDOR_ID_INTEL) return VGA_SWITCHEROO_IGD; /* is this device on Bus 0? - this may need improving */ if (pdev->bus->number == 0) return VGA_SWITCHEROO_IGD; return VGA_SWITCHEROO_DIS; } static struct vga_switcheroo_handler nouveau_dsm_handler = { .switchto = nouveau_dsm_switchto, .power_state = nouveau_dsm_power_state, .init = nouveau_dsm_init, .get_client_id = nouveau_dsm_get_client_id, }; static int nouveau_dsm_pci_probe(struct pci_dev *pdev) { acpi_handle dhandle, nvidia_handle; acpi_status status; int retval = 0; dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); if (!dhandle) return false; status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle); if (ACPI_FAILURE(status)) { return false; } if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER)) retval |= NOUVEAU_DSM_HAS_MUX; if (nouveau_test_dsm(dhandle, nouveau_optimus_dsm, NOUVEAU_DSM_OPTIMUS_FN)) retval |= NOUVEAU_DSM_HAS_OPT; if (retval) nouveau_dsm_priv.dhandle = dhandle; return retval; } static bool nouveau_dsm_detect(void) { char acpi_method_name[255] = { 0 }; struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; struct pci_dev *pdev = NULL; int has_dsm = 0; int has_optimus = 0; int vga_count = 0; bool guid_valid; int retval; bool ret = false; /* lookup the MXM GUID */ guid_valid = mxm_wmi_supported(); if (guid_valid) printk("MXM: GUID detected in BIOS\n"); /* now do DSM detection */ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { vga_count++; retval = nouveau_dsm_pci_probe(pdev); if (retval & NOUVEAU_DSM_HAS_MUX) has_dsm |= 1; if (retval & NOUVEAU_DSM_HAS_OPT) has_optimus = 1; } if (vga_count == 2 && has_dsm && guid_valid) { acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", acpi_method_name); nouveau_dsm_priv.dsm_detected = true; ret = true; } if (has_optimus == 1) { 
acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n", acpi_method_name); nouveau_dsm_priv.optimus_detected = true; ret = true; } return ret; } void nouveau_register_dsm_handler(void) { bool r; r = nouveau_dsm_detect(); if (!r) return; vga_switcheroo_register_handler(&nouveau_dsm_handler); } /* Must be called for Optimus models before the card can be turned off */ void nouveau_switcheroo_optimus_dsm(void) { u32 result = 0; if (!nouveau_dsm_priv.optimus_detected) return; nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FN, NOUVEAU_DSM_OPTIMUS_ARGS, &result); } void nouveau_unregister_dsm_handler(void) { vga_switcheroo_unregister_handler(); } /* retrieve the ROM in 4k blocks */ static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios, int offset, int len) { acpi_status status; union acpi_object rom_arg_elements[2], *obj; struct acpi_object_list rom_arg; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; rom_arg.count = 2; rom_arg.pointer = &rom_arg_elements[0]; rom_arg_elements[0].type = ACPI_TYPE_INTEGER; rom_arg_elements[0].integer.value = offset; rom_arg_elements[1].type = ACPI_TYPE_INTEGER; rom_arg_elements[1].integer.value = len; status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer); if (ACPI_FAILURE(status)) { printk(KERN_INFO "failed to evaluate ROM got %s\n", acpi_format_exception(status)); return -ENODEV; } obj = (union acpi_object *)buffer.pointer; memcpy(bios+offset, obj->buffer.pointer, len); kfree(buffer.pointer); return len; } bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { acpi_status status; acpi_handle dhandle, rom_handle; if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected) return false; dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); if (!dhandle) return false; status = acpi_get_handle(dhandle, "_ROM", &rom_handle); if (ACPI_FAILURE(status)) return false; nouveau_dsm_priv.rom_handle = 
rom_handle; return true; } int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len); } int nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { struct nouveau_connector *nv_connector = nouveau_connector(connector); struct acpi_device *acpidev; acpi_handle handle; int type, ret; void *edid; switch (connector->connector_type) { case DRM_MODE_CONNECTOR_LVDS: case DRM_MODE_CONNECTOR_eDP: type = ACPI_VIDEO_DISPLAY_LCD; break; default: return -EINVAL; } handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); if (!handle) return -ENODEV; ret = acpi_bus_get_device(handle, &acpidev); if (ret) return -ENODEV; ret = acpi_video_get_edid(acpidev, type, -1, &edid); if (ret < 0) return ret; nv_connector->edid = kmemdup(edid, EDID_LENGTH, GFP_KERNEL); return 0; }
gpl-2.0
Kalashnikitty/ville-kernel
drivers/staging/usbip/userspace/src/usbip_list.c
5803
7304
/* * Copyright (C) 2011 matt mooney <mfm@muteddisk.com> * 2005-2007 Takahiro Hirofuchi * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <sys/types.h> #include <sysfs/libsysfs.h> #include <errno.h> #include <stdbool.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <getopt.h> #include <netdb.h> #include <unistd.h> #include "usbip_common.h" #include "usbip_network.h" #include "usbip.h" static const char usbip_list_usage_string[] = "usbip list [-p|--parsable] <args>\n" " -p, --parsable Parsable list format\n" " -r, --remote=<host> List the exportable USB devices on <host>\n" " -l, --local List the local USB devices\n"; void usbip_list_usage(void) { printf("usage: %s", usbip_list_usage_string); } static int get_exported_devices(char *host, int sockfd) { char product_name[100]; char class_name[100]; struct op_devlist_reply reply; uint16_t code = OP_REP_DEVLIST; struct usbip_usb_device udev; struct usbip_usb_interface uintf; unsigned int i; int j, rc; rc = usbip_net_send_op_common(sockfd, OP_REQ_DEVLIST, 0); if (rc < 0) { dbg("usbip_net_send_op_common failed"); return -1; } rc = usbip_net_recv_op_common(sockfd, &code); if (rc < 0) { dbg("usbip_net_recv_op_common failed"); return -1; } memset(&reply, 0, sizeof(reply)); rc = usbip_net_recv(sockfd, &reply, sizeof(reply)); if (rc < 0) { dbg("usbip_net_recv_op_devlist failed"); return -1; } 
PACK_OP_DEVLIST_REPLY(0, &reply); dbg("exportable devices: %d\n", reply.ndev); if (reply.ndev == 0) { info("no exportable devices found on %s", host); return 0; } printf("Exportable USB devices\n"); printf("======================\n"); printf(" - %s\n", host); for (i = 0; i < reply.ndev; i++) { memset(&udev, 0, sizeof(udev)); rc = usbip_net_recv(sockfd, &udev, sizeof(udev)); if (rc < 0) { dbg("usbip_net_recv failed: usbip_usb_device[%d]", i); return -1; } usbip_net_pack_usb_device(0, &udev); usbip_names_get_product(product_name, sizeof(product_name), udev.idVendor, udev.idProduct); usbip_names_get_class(class_name, sizeof(class_name), udev.bDeviceClass, udev.bDeviceSubClass, udev.bDeviceProtocol); printf("%11s: %s\n", udev.busid, product_name); printf("%11s: %s\n", "", udev.path); printf("%11s: %s\n", "", class_name); for (j = 0; j < udev.bNumInterfaces; j++) { rc = usbip_net_recv(sockfd, &uintf, sizeof(uintf)); if (rc < 0) { dbg("usbip_net_recv failed: usbip_usb_intf[%d]", j); return -1; } usbip_net_pack_usb_interface(0, &uintf); usbip_names_get_class(class_name, sizeof(class_name), uintf.bInterfaceClass, uintf.bInterfaceSubClass, uintf.bInterfaceProtocol); printf("%11s: %2d - %s\n", "", j, class_name); } printf("\n"); } return 0; } static int list_exported_devices(char *host) { int rc; int sockfd; sockfd = usbip_net_tcp_connect(host, USBIP_PORT_STRING); if (sockfd < 0) { err("could not connect to %s:%s: %s", host, USBIP_PORT_STRING, gai_strerror(sockfd)); return -1; } dbg("connected to %s:%s", host, USBIP_PORT_STRING); rc = get_exported_devices(host, sockfd); if (rc < 0) { err("failed to get device list from %s", host); return -1; } close(sockfd); return 0; } static void print_device(char *busid, char *vendor, char *product, bool parsable) { if (parsable) printf("busid=%s#usbid=%.4s:%.4s#", busid, vendor, product); else printf(" - busid %s (%.4s:%.4s)\n", busid, vendor, product); } static void print_interface(char *busid, char *driver, bool parsable) { if 
(parsable) printf("%s=%s#", busid, driver); else printf("%9s%s -> %s\n", "", busid, driver); } static int is_device(void *x) { struct sysfs_attribute *devpath; struct sysfs_device *dev = x; int ret = 0; devpath = sysfs_get_device_attr(dev, "devpath"); if (devpath && *devpath->value != '0') ret = 1; return ret; } static int devcmp(void *a, void *b) { return strcmp(a, b); } static int list_devices(bool parsable) { char bus_type[] = "usb"; char busid[SYSFS_BUS_ID_SIZE]; struct sysfs_bus *ubus; struct sysfs_device *dev; struct sysfs_device *intf; struct sysfs_attribute *idVendor; struct sysfs_attribute *idProduct; struct sysfs_attribute *bConfValue; struct sysfs_attribute *bNumIntfs; struct dlist *devlist; int i; int ret = -1; ubus = sysfs_open_bus(bus_type); if (!ubus) { err("could not open %s bus: %s", bus_type, strerror(errno)); return -1; } devlist = sysfs_get_bus_devices(ubus); if (!devlist) { err("could not get %s bus devices: %s", bus_type, strerror(errno)); goto err_out; } /* remove interfaces and root hubs from device list */ dlist_filter_sort(devlist, is_device, devcmp); if (!parsable) { printf("Local USB devices\n"); printf("=================\n"); } dlist_for_each_data(devlist, dev, struct sysfs_device) { idVendor = sysfs_get_device_attr(dev, "idVendor"); idProduct = sysfs_get_device_attr(dev, "idProduct"); bConfValue = sysfs_get_device_attr(dev, "bConfigurationValue"); bNumIntfs = sysfs_get_device_attr(dev, "bNumInterfaces"); if (!idVendor || !idProduct || !bConfValue || !bNumIntfs) { err("problem getting device attributes: %s", strerror(errno)); goto err_out; } print_device(dev->bus_id, idVendor->value, idProduct->value, parsable); for (i = 0; i < atoi(bNumIntfs->value); i++) { snprintf(busid, sizeof(busid), "%s:%.1s.%d", dev->bus_id, bConfValue->value, i); intf = sysfs_open_device(bus_type, busid); if (!intf) { err("could not open device interface: %s", strerror(errno)); goto err_out; } print_interface(busid, intf->driver_name, parsable); 
sysfs_close_device(intf); } printf("\n"); } ret = 0; err_out: sysfs_close_bus(ubus); return ret; } int usbip_list(int argc, char *argv[]) { static const struct option opts[] = { { "parsable", no_argument, NULL, 'p' }, { "remote", required_argument, NULL, 'r' }, { "local", no_argument, NULL, 'l' }, { NULL, 0, NULL, 0 } }; bool parsable = false; int opt; int ret = -1; if (usbip_names_init(USBIDS_FILE)) err("failed to open %s", USBIDS_FILE); for (;;) { opt = getopt_long(argc, argv, "pr:l", opts, NULL); if (opt == -1) break; switch (opt) { case 'p': parsable = true; break; case 'r': ret = list_exported_devices(optarg); goto out; case 'l': ret = list_devices(parsable); goto out; default: goto err_out; } } err_out: usbip_list_usage(); out: usbip_names_free(); return ret; }
gpl-2.0
ngxson/android_kernel_sony_msm8x27
drivers/media/dvb/frontends/lgdt330x.c
8363
22802
/* * Support for LGDT3302 and LGDT3303 - VSB/QAM * * Copyright (C) 2005 Wilson Michaels <wilsonmichaels@earthlink.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ /* * NOTES ABOUT THIS DRIVER * * This Linux driver supports: * DViCO FusionHDTV 3 Gold-Q * DViCO FusionHDTV 3 Gold-T * DViCO FusionHDTV 5 Gold * DViCO FusionHDTV 5 Lite * DViCO FusionHDTV 5 USB Gold * Air2PC/AirStar 2 ATSC 3rd generation (HD5000) * pcHDTV HD5500 * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/slab.h> #include <asm/byteorder.h> #include "dvb_frontend.h" #include "dvb_math.h" #include "lgdt330x_priv.h" #include "lgdt330x.h" /* Use Equalizer Mean Squared Error instead of Phaser Tracker MSE */ /* #define USE_EQMSE */ static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug,"Turn on/off lgdt330x frontend debugging (default:off)."); #define dprintk(args...) 
\ do { \ if (debug) printk(KERN_DEBUG "lgdt330x: " args); \ } while (0) struct lgdt330x_state { struct i2c_adapter* i2c; /* Configuration settings */ const struct lgdt330x_config* config; struct dvb_frontend frontend; /* Demodulator private data */ fe_modulation_t current_modulation; u32 snr; /* Result of last SNR calculation */ /* Tuner private data */ u32 current_frequency; }; static int i2c_write_demod_bytes (struct lgdt330x_state* state, u8 *buf, /* data bytes to send */ int len /* number of bytes to send */ ) { struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 2 }; int i; int err; for (i=0; i<len-1; i+=2){ if ((err = i2c_transfer(state->i2c, &msg, 1)) != 1) { printk(KERN_WARNING "lgdt330x: %s error (addr %02x <- %02x, err = %i)\n", __func__, msg.buf[0], msg.buf[1], err); if (err < 0) return err; else return -EREMOTEIO; } msg.buf += 2; } return 0; } /* * This routine writes the register (reg) to the demod bus * then reads the data returned for (len) bytes. 
*/ static int i2c_read_demod_bytes(struct lgdt330x_state *state, enum I2C_REG reg, u8 *buf, int len) { u8 wr [] = { reg }; struct i2c_msg msg [] = { { .addr = state->config->demod_address, .flags = 0, .buf = wr, .len = 1 }, { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = buf, .len = len }, }; int ret; ret = i2c_transfer(state->i2c, msg, 2); if (ret != 2) { printk(KERN_WARNING "lgdt330x: %s: addr 0x%02x select 0x%02x error (ret == %i)\n", __func__, state->config->demod_address, reg, ret); if (ret >= 0) ret = -EIO; } else { ret = 0; } return ret; } /* Software reset */ static int lgdt3302_SwReset(struct lgdt330x_state* state) { u8 ret; u8 reset[] = { IRQ_MASK, 0x00 /* bit 6 is active low software reset * bits 5-0 are 1 to mask interrupts */ }; ret = i2c_write_demod_bytes(state, reset, sizeof(reset)); if (ret == 0) { /* force reset high (inactive) and unmask interrupts */ reset[1] = 0x7f; ret = i2c_write_demod_bytes(state, reset, sizeof(reset)); } return ret; } static int lgdt3303_SwReset(struct lgdt330x_state* state) { u8 ret; u8 reset[] = { 0x02, 0x00 /* bit 0 is active low software reset */ }; ret = i2c_write_demod_bytes(state, reset, sizeof(reset)); if (ret == 0) { /* force reset high (inactive) */ reset[1] = 0x01; ret = i2c_write_demod_bytes(state, reset, sizeof(reset)); } return ret; } static int lgdt330x_SwReset(struct lgdt330x_state* state) { switch (state->config->demod_chip) { case LGDT3302: return lgdt3302_SwReset(state); case LGDT3303: return lgdt3303_SwReset(state); default: return -ENODEV; } } static int lgdt330x_init(struct dvb_frontend* fe) { /* Hardware reset is done using gpio[0] of cx23880x chip. * I'd like to do it here, but don't know how to find chip address. * cx88-cards.c arranges for the reset bit to be inactive (high). * Maybe there needs to be a callable function in cx88-core or * the caller of this function needs to do it. 
*/ /* * Array of byte pairs <address, value> * to initialize each different chip */ static u8 lgdt3302_init_data[] = { /* Use 50MHz parameter values from spec sheet since xtal is 50 */ /* Change the value of NCOCTFV[25:0] of carrier recovery center frequency register */ VSB_CARRIER_FREQ0, 0x00, VSB_CARRIER_FREQ1, 0x87, VSB_CARRIER_FREQ2, 0x8e, VSB_CARRIER_FREQ3, 0x01, /* Change the TPCLK pin polarity data is valid on falling clock */ DEMUX_CONTROL, 0xfb, /* Change the value of IFBW[11:0] of AGC IF/RF loop filter bandwidth register */ AGC_RF_BANDWIDTH0, 0x40, AGC_RF_BANDWIDTH1, 0x93, AGC_RF_BANDWIDTH2, 0x00, /* Change the value of bit 6, 'nINAGCBY' and 'NSSEL[1:0] of ACG function control register 2 */ AGC_FUNC_CTRL2, 0xc6, /* Change the value of bit 6 'RFFIX' of AGC function control register 3 */ AGC_FUNC_CTRL3, 0x40, /* Set the value of 'INLVTHD' register 0x2a/0x2c to 0x7fe */ AGC_DELAY0, 0x07, AGC_DELAY2, 0xfe, /* Change the value of IAGCBW[15:8] of inner AGC loop filter bandwidth */ AGC_LOOP_BANDWIDTH0, 0x08, AGC_LOOP_BANDWIDTH1, 0x9a }; static u8 lgdt3303_init_data[] = { 0x4c, 0x14 }; static u8 flip_1_lgdt3303_init_data[] = { 0x4c, 0x14, 0x87, 0xf3 }; static u8 flip_2_lgdt3303_init_data[] = { 0x4c, 0x14, 0x87, 0xda }; struct lgdt330x_state* state = fe->demodulator_priv; char *chip_name; int err; switch (state->config->demod_chip) { case LGDT3302: chip_name = "LGDT3302"; err = i2c_write_demod_bytes(state, lgdt3302_init_data, sizeof(lgdt3302_init_data)); break; case LGDT3303: chip_name = "LGDT3303"; switch (state->config->clock_polarity_flip) { case 2: err = i2c_write_demod_bytes(state, flip_2_lgdt3303_init_data, sizeof(flip_2_lgdt3303_init_data)); break; case 1: err = i2c_write_demod_bytes(state, flip_1_lgdt3303_init_data, sizeof(flip_1_lgdt3303_init_data)); break; case 0: default: err = i2c_write_demod_bytes(state, lgdt3303_init_data, sizeof(lgdt3303_init_data)); } break; default: chip_name = "undefined"; printk (KERN_WARNING "Only LGDT3302 and LGDT3303 are 
supported chips.\n"); err = -ENODEV; } dprintk("%s entered as %s\n", __func__, chip_name); if (err < 0) return err; return lgdt330x_SwReset(state); } static int lgdt330x_read_ber(struct dvb_frontend* fe, u32* ber) { *ber = 0; /* Not supplied by the demod chips */ return 0; } static int lgdt330x_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks) { struct lgdt330x_state* state = fe->demodulator_priv; int err; u8 buf[2]; *ucblocks = 0; switch (state->config->demod_chip) { case LGDT3302: err = i2c_read_demod_bytes(state, LGDT3302_PACKET_ERR_COUNTER1, buf, sizeof(buf)); break; case LGDT3303: err = i2c_read_demod_bytes(state, LGDT3303_PACKET_ERR_COUNTER1, buf, sizeof(buf)); break; default: printk(KERN_WARNING "Only LGDT3302 and LGDT3303 are supported chips.\n"); err = -ENODEV; } if (err < 0) return err; *ucblocks = (buf[0] << 8) | buf[1]; return 0; } static int lgdt330x_set_parameters(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; /* * Array of byte pairs <address, value> * to initialize 8VSB for lgdt3303 chip 50 MHz IF */ static u8 lgdt3303_8vsb_44_data[] = { 0x04, 0x00, 0x0d, 0x40, 0x0e, 0x87, 0x0f, 0x8e, 0x10, 0x01, 0x47, 0x8b }; /* * Array of byte pairs <address, value> * to initialize QAM for lgdt3303 chip */ static u8 lgdt3303_qam_data[] = { 0x04, 0x00, 0x0d, 0x00, 0x0e, 0x00, 0x0f, 0x00, 0x10, 0x00, 0x51, 0x63, 0x47, 0x66, 0x48, 0x66, 0x4d, 0x1a, 0x49, 0x08, 0x4a, 0x9b }; struct lgdt330x_state* state = fe->demodulator_priv; static u8 top_ctrl_cfg[] = { TOP_CONTROL, 0x03 }; int err = 0; /* Change only if we are actually changing the modulation */ if (state->current_modulation != p->modulation) { switch (p->modulation) { case VSB_8: dprintk("%s: VSB_8 MODE\n", __func__); /* Select VSB mode */ top_ctrl_cfg[1] = 0x03; /* Select ANT connector if supported by card */ if (state->config->pll_rf_set) state->config->pll_rf_set(fe, 1); if (state->config->demod_chip == LGDT3303) { err = i2c_write_demod_bytes(state, 
lgdt3303_8vsb_44_data, sizeof(lgdt3303_8vsb_44_data)); } break; case QAM_64: dprintk("%s: QAM_64 MODE\n", __func__); /* Select QAM_64 mode */ top_ctrl_cfg[1] = 0x00; /* Select CABLE connector if supported by card */ if (state->config->pll_rf_set) state->config->pll_rf_set(fe, 0); if (state->config->demod_chip == LGDT3303) { err = i2c_write_demod_bytes(state, lgdt3303_qam_data, sizeof(lgdt3303_qam_data)); } break; case QAM_256: dprintk("%s: QAM_256 MODE\n", __func__); /* Select QAM_256 mode */ top_ctrl_cfg[1] = 0x01; /* Select CABLE connector if supported by card */ if (state->config->pll_rf_set) state->config->pll_rf_set(fe, 0); if (state->config->demod_chip == LGDT3303) { err = i2c_write_demod_bytes(state, lgdt3303_qam_data, sizeof(lgdt3303_qam_data)); } break; default: printk(KERN_WARNING "lgdt330x: %s: Modulation type(%d) UNSUPPORTED\n", __func__, p->modulation); return -1; } if (err < 0) printk(KERN_WARNING "lgdt330x: %s: error blasting " "bytes to lgdt3303 for modulation type(%d)\n", __func__, p->modulation); /* * select serial or parallel MPEG harware interface * Serial: 0x04 for LGDT3302 or 0x40 for LGDT3303 * Parallel: 0x00 */ top_ctrl_cfg[1] |= state->config->serial_mpeg; /* Select the requested mode */ i2c_write_demod_bytes(state, top_ctrl_cfg, sizeof(top_ctrl_cfg)); if (state->config->set_ts_params) state->config->set_ts_params(fe, 0); state->current_modulation = p->modulation; } /* Tune to the specified frequency */ if (fe->ops.tuner_ops.set_params) { fe->ops.tuner_ops.set_params(fe); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } /* Keep track of the new frequency */ /* FIXME this is the wrong way to do this... 
*/ /* The tuner is shared with the video4linux analog API */ state->current_frequency = p->frequency; lgdt330x_SwReset(state); return 0; } static int lgdt330x_get_frontend(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct lgdt330x_state *state = fe->demodulator_priv; p->frequency = state->current_frequency; return 0; } static int lgdt3302_read_status(struct dvb_frontend* fe, fe_status_t* status) { struct lgdt330x_state* state = fe->demodulator_priv; u8 buf[3]; *status = 0; /* Reset status result */ /* AGC status register */ i2c_read_demod_bytes(state, AGC_STATUS, buf, 1); dprintk("%s: AGC_STATUS = 0x%02x\n", __func__, buf[0]); if ((buf[0] & 0x0c) == 0x8){ /* Test signal does not exist flag */ /* as well as the AGC lock flag. */ *status |= FE_HAS_SIGNAL; } /* * You must set the Mask bits to 1 in the IRQ_MASK in order * to see that status bit in the IRQ_STATUS register. * This is done in SwReset(); */ /* signal status */ i2c_read_demod_bytes(state, TOP_CONTROL, buf, sizeof(buf)); dprintk("%s: TOP_CONTROL = 0x%02x, IRO_MASK = 0x%02x, IRQ_STATUS = 0x%02x\n", __func__, buf[0], buf[1], buf[2]); /* sync status */ if ((buf[2] & 0x03) == 0x01) { *status |= FE_HAS_SYNC; } /* FEC error status */ if ((buf[2] & 0x0c) == 0x08) { *status |= FE_HAS_LOCK; *status |= FE_HAS_VITERBI; } /* Carrier Recovery Lock Status Register */ i2c_read_demod_bytes(state, CARRIER_LOCK, buf, 1); dprintk("%s: CARRIER_LOCK = 0x%02x\n", __func__, buf[0]); switch (state->current_modulation) { case QAM_256: case QAM_64: /* Need to understand why there are 3 lock levels here */ if ((buf[0] & 0x07) == 0x07) *status |= FE_HAS_CARRIER; break; case VSB_8: if ((buf[0] & 0x80) == 0x80) *status |= FE_HAS_CARRIER; break; default: printk(KERN_WARNING "lgdt330x: %s: Modulation set to unsupported value\n", __func__); } return 0; } static int lgdt3303_read_status(struct dvb_frontend* fe, fe_status_t* status) { struct lgdt330x_state* state = fe->demodulator_priv; int err; u8 
buf[3]; *status = 0; /* Reset status result */ /* lgdt3303 AGC status register */ err = i2c_read_demod_bytes(state, 0x58, buf, 1); if (err < 0) return err; dprintk("%s: AGC_STATUS = 0x%02x\n", __func__, buf[0]); if ((buf[0] & 0x21) == 0x01){ /* Test input signal does not exist flag */ /* as well as the AGC lock flag. */ *status |= FE_HAS_SIGNAL; } /* Carrier Recovery Lock Status Register */ i2c_read_demod_bytes(state, CARRIER_LOCK, buf, 1); dprintk("%s: CARRIER_LOCK = 0x%02x\n", __func__, buf[0]); switch (state->current_modulation) { case QAM_256: case QAM_64: /* Need to understand why there are 3 lock levels here */ if ((buf[0] & 0x07) == 0x07) *status |= FE_HAS_CARRIER; else break; i2c_read_demod_bytes(state, 0x8a, buf, 1); if ((buf[0] & 0x04) == 0x04) *status |= FE_HAS_SYNC; if ((buf[0] & 0x01) == 0x01) *status |= FE_HAS_LOCK; if ((buf[0] & 0x08) == 0x08) *status |= FE_HAS_VITERBI; break; case VSB_8: if ((buf[0] & 0x80) == 0x80) *status |= FE_HAS_CARRIER; else break; i2c_read_demod_bytes(state, 0x38, buf, 1); if ((buf[0] & 0x02) == 0x00) *status |= FE_HAS_SYNC; if ((buf[0] & 0x01) == 0x01) { *status |= FE_HAS_LOCK; *status |= FE_HAS_VITERBI; } break; default: printk(KERN_WARNING "lgdt330x: %s: Modulation set to unsupported value\n", __func__); } return 0; } /* Calculate SNR estimation (scaled by 2^24) 8-VSB SNR equations from LGDT3302 and LGDT3303 datasheets, QAM equations from LGDT3303 datasheet. VSB is the same between the '02 and '03, so maybe QAM is too? Perhaps someone with a newer datasheet that has QAM information could verify? 
For 8-VSB: (two ways, take your pick) LGDT3302: SNR_EQ = 10 * log10(25 * 24^2 / EQ_MSE) LGDT3303: SNR_EQ = 10 * log10(25 * 32^2 / EQ_MSE) LGDT3302 & LGDT3303: SNR_PT = 10 * log10(25 * 32^2 / PT_MSE) (we use this one) For 64-QAM: SNR = 10 * log10( 688128 / MSEQAM) For 256-QAM: SNR = 10 * log10( 696320 / MSEQAM) We re-write the snr equation as: SNR * 2^24 = 10*(c - intlog10(MSE)) Where for 256-QAM, c = log10(696320) * 2^24, and so on. */ static u32 calculate_snr(u32 mse, u32 c) { if (mse == 0) /* No signal */ return 0; mse = intlog10(mse); if (mse > c) { /* Negative SNR, which is possible, but realisticly the demod will lose lock before the signal gets this bad. The API only allows for unsigned values, so just return 0 */ return 0; } return 10*(c - mse); } static int lgdt3302_read_snr(struct dvb_frontend* fe, u16* snr) { struct lgdt330x_state* state = (struct lgdt330x_state*) fe->demodulator_priv; u8 buf[5]; /* read data buffer */ u32 noise; /* noise value */ u32 c; /* per-modulation SNR calculation constant */ switch(state->current_modulation) { case VSB_8: i2c_read_demod_bytes(state, LGDT3302_EQPH_ERR0, buf, 5); #ifdef USE_EQMSE /* Use Equalizer Mean-Square Error Register */ /* SNR for ranges from -15.61 to +41.58 */ noise = ((buf[0] & 7) << 16) | (buf[1] << 8) | buf[2]; c = 69765745; /* log10(25*24^2)*2^24 */ #else /* Use Phase Tracker Mean-Square Error Register */ /* SNR for ranges from -13.11 to +44.08 */ noise = ((buf[0] & 7<<3) << 13) | (buf[3] << 8) | buf[4]; c = 73957994; /* log10(25*32^2)*2^24 */ #endif break; case QAM_64: case QAM_256: i2c_read_demod_bytes(state, CARRIER_MSEQAM1, buf, 2); noise = ((buf[0] & 3) << 8) | buf[1]; c = state->current_modulation == QAM_64 ? 
97939837 : 98026066; /* log10(688128)*2^24 and log10(696320)*2^24 */ break; default: printk(KERN_ERR "lgdt330x: %s: Modulation set to unsupported value\n", __func__); return -EREMOTEIO; /* return -EDRIVER_IS_GIBBERED; */ } state->snr = calculate_snr(noise, c); *snr = (state->snr) >> 16; /* Convert from 8.24 fixed-point to 8.8 */ dprintk("%s: noise = 0x%08x, snr = %d.%02d dB\n", __func__, noise, state->snr >> 24, (((state->snr>>8) & 0xffff) * 100) >> 16); return 0; } static int lgdt3303_read_snr(struct dvb_frontend* fe, u16* snr) { struct lgdt330x_state* state = (struct lgdt330x_state*) fe->demodulator_priv; u8 buf[5]; /* read data buffer */ u32 noise; /* noise value */ u32 c; /* per-modulation SNR calculation constant */ switch(state->current_modulation) { case VSB_8: i2c_read_demod_bytes(state, LGDT3303_EQPH_ERR0, buf, 5); #ifdef USE_EQMSE /* Use Equalizer Mean-Square Error Register */ /* SNR for ranges from -16.12 to +44.08 */ noise = ((buf[0] & 0x78) << 13) | (buf[1] << 8) | buf[2]; c = 73957994; /* log10(25*32^2)*2^24 */ #else /* Use Phase Tracker Mean-Square Error Register */ /* SNR for ranges from -13.11 to +44.08 */ noise = ((buf[0] & 7) << 16) | (buf[3] << 8) | buf[4]; c = 73957994; /* log10(25*32^2)*2^24 */ #endif break; case QAM_64: case QAM_256: i2c_read_demod_bytes(state, CARRIER_MSEQAM1, buf, 2); noise = (buf[0] << 8) | buf[1]; c = state->current_modulation == QAM_64 ? 
97939837 : 98026066; /* log10(688128)*2^24 and log10(696320)*2^24 */ break; default: printk(KERN_ERR "lgdt330x: %s: Modulation set to unsupported value\n", __func__); return -EREMOTEIO; /* return -EDRIVER_IS_GIBBERED; */ } state->snr = calculate_snr(noise, c); *snr = (state->snr) >> 16; /* Convert from 8.24 fixed-point to 8.8 */ dprintk("%s: noise = 0x%08x, snr = %d.%02d dB\n", __func__, noise, state->snr >> 24, (((state->snr >> 8) & 0xffff) * 100) >> 16); return 0; } static int lgdt330x_read_signal_strength(struct dvb_frontend* fe, u16* strength) { /* Calculate Strength from SNR up to 35dB */ /* Even though the SNR can go higher than 35dB, there is some comfort */ /* factor in having a range of strong signals that can show at 100% */ struct lgdt330x_state* state = (struct lgdt330x_state*) fe->demodulator_priv; u16 snr; int ret; ret = fe->ops.read_snr(fe, &snr); if (ret != 0) return ret; /* Rather than use the 8.8 value snr, use state->snr which is 8.24 */ /* scale the range 0 - 35*2^24 into 0 - 65535 */ if (state->snr >= 8960 * 0x10000) *strength = 0xffff; else *strength = state->snr / 8960; return 0; } static int lgdt330x_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* fe_tune_settings) { /* I have no idea about this - it may not be needed */ fe_tune_settings->min_delay_ms = 500; fe_tune_settings->step_size = 0; fe_tune_settings->max_drift = 0; return 0; } static void lgdt330x_release(struct dvb_frontend* fe) { struct lgdt330x_state* state = (struct lgdt330x_state*) fe->demodulator_priv; kfree(state); } static struct dvb_frontend_ops lgdt3302_ops; static struct dvb_frontend_ops lgdt3303_ops; struct dvb_frontend* lgdt330x_attach(const struct lgdt330x_config* config, struct i2c_adapter* i2c) { struct lgdt330x_state* state = NULL; u8 buf[1]; /* Allocate memory for the internal state */ state = kzalloc(sizeof(struct lgdt330x_state), GFP_KERNEL); if (state == NULL) goto error; /* Setup the state */ state->config = config; state->i2c = 
i2c; /* Create dvb_frontend */ switch (config->demod_chip) { case LGDT3302: memcpy(&state->frontend.ops, &lgdt3302_ops, sizeof(struct dvb_frontend_ops)); break; case LGDT3303: memcpy(&state->frontend.ops, &lgdt3303_ops, sizeof(struct dvb_frontend_ops)); break; default: goto error; } state->frontend.demodulator_priv = state; /* Verify communication with demod chip */ if (i2c_read_demod_bytes(state, 2, buf, 1)) goto error; state->current_frequency = -1; state->current_modulation = -1; return &state->frontend; error: kfree(state); dprintk("%s: ERROR\n",__func__); return NULL; } static struct dvb_frontend_ops lgdt3302_ops = { .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B }, .info = { .name= "LG Electronics LGDT3302 VSB/QAM Frontend", .frequency_min= 54000000, .frequency_max= 858000000, .frequency_stepsize= 62500, .symbol_rate_min = 5056941, /* QAM 64 */ .symbol_rate_max = 10762000, /* VSB 8 */ .caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB }, .init = lgdt330x_init, .set_frontend = lgdt330x_set_parameters, .get_frontend = lgdt330x_get_frontend, .get_tune_settings = lgdt330x_get_tune_settings, .read_status = lgdt3302_read_status, .read_ber = lgdt330x_read_ber, .read_signal_strength = lgdt330x_read_signal_strength, .read_snr = lgdt3302_read_snr, .read_ucblocks = lgdt330x_read_ucblocks, .release = lgdt330x_release, }; static struct dvb_frontend_ops lgdt3303_ops = { .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B }, .info = { .name= "LG Electronics LGDT3303 VSB/QAM Frontend", .frequency_min= 54000000, .frequency_max= 858000000, .frequency_stepsize= 62500, .symbol_rate_min = 5056941, /* QAM 64 */ .symbol_rate_max = 10762000, /* VSB 8 */ .caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB }, .init = lgdt330x_init, .set_frontend = lgdt330x_set_parameters, .get_frontend = lgdt330x_get_frontend, .get_tune_settings = lgdt330x_get_tune_settings, .read_status = lgdt3303_read_status, .read_ber = lgdt330x_read_ber, .read_signal_strength = lgdt330x_read_signal_strength, .read_snr = 
lgdt3303_read_snr, .read_ucblocks = lgdt330x_read_ucblocks, .release = lgdt330x_release, }; MODULE_DESCRIPTION("LGDT330X (ATSC 8VSB & ITU-T J.83 AnnexB 64/256 QAM) Demodulator Driver"); MODULE_AUTHOR("Wilson Michaels"); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(lgdt330x_attach); /* * Local variables: * c-basic-offset: 8 * End: */
gpl-2.0
bigzz/linux-ext4
arch/parisc/math-emu/sfsub.c
12203
14757
/* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC * * File: * @(#) pa/spmath/sfsub.c $Revision: 1.1 $ * * Purpose: * Single_subtract: subtract two single precision values. * * External Interfaces: * sgl_fsub(leftptr, rightptr, dstptr, status) * * Internal Interfaces: * * Theory: * <<please update with a overview of the operation of this file>> * * END_DESC */ #include "float.h" #include "sgl_float.h" /* * Single_subtract: subtract two single precision values. */ int sgl_fsub( sgl_floating_point *leftptr, sgl_floating_point *rightptr, sgl_floating_point *dstptr, unsigned int *status) { register unsigned int left, right, result, extent; register unsigned int signless_upper_left, signless_upper_right, save; register int result_exponent, right_exponent, diff_exponent; register int sign_save, jumpsize; register boolean inexact = FALSE, underflowtrap; /* Create local copies of the numbers */ left = *leftptr; right = *rightptr; /* A zero "save" helps discover equal operands (for later), * * and is used in swapping operands (if needed). 
*/ Sgl_xortointp1(left,right,/*to*/save); /* * check first operand for NaN's or infinity */ if ((result_exponent = Sgl_exponent(left)) == SGL_INFINITY_EXPONENT) { if (Sgl_iszero_mantissa(left)) { if (Sgl_isnotnan(right)) { if (Sgl_isinfinity(right) && save==0) { /* * invalid since operands are same signed infinity's */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); Set_invalidflag(); Sgl_makequietnan(result); *dstptr = result; return(NOEXCEPTION); } /* * return infinity */ *dstptr = left; return(NOEXCEPTION); } } else { /* * is NaN; signaling or quiet? */ if (Sgl_isone_signaling(left)) { /* trap if INVALIDTRAP enabled */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); /* make NaN quiet */ Set_invalidflag(); Sgl_set_quiet(left); } /* * is second operand a signaling NaN? */ else if (Sgl_is_signalingnan(right)) { /* trap if INVALIDTRAP enabled */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); /* make NaN quiet */ Set_invalidflag(); Sgl_set_quiet(right); *dstptr = right; return(NOEXCEPTION); } /* * return quiet NaN */ *dstptr = left; return(NOEXCEPTION); } } /* End left NaN or Infinity processing */ /* * check second operand for NaN's or infinity */ if (Sgl_isinfinity_exponent(right)) { if (Sgl_iszero_mantissa(right)) { /* return infinity */ Sgl_invert_sign(right); *dstptr = right; return(NOEXCEPTION); } /* * is NaN; signaling or quiet? */ if (Sgl_isone_signaling(right)) { /* trap if INVALIDTRAP enabled */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); /* make NaN quiet */ Set_invalidflag(); Sgl_set_quiet(right); } /* * return quiet NaN */ *dstptr = right; return(NOEXCEPTION); } /* End right NaN or Infinity processing */ /* Invariant: Must be dealing with finite numbers */ /* Compare operands by removing the sign */ Sgl_copytoint_exponentmantissa(left,signless_upper_left); Sgl_copytoint_exponentmantissa(right,signless_upper_right); /* sign difference selects sub or add operation. 
*/ if(Sgl_ismagnitudeless(signless_upper_left,signless_upper_right)) { /* Set the left operand to the larger one by XOR swap * * First finish the first word using "save" */ Sgl_xorfromintp1(save,right,/*to*/right); Sgl_xorfromintp1(save,left,/*to*/left); result_exponent = Sgl_exponent(left); Sgl_invert_sign(left); } /* Invariant: left is not smaller than right. */ if((right_exponent = Sgl_exponent(right)) == 0) { /* Denormalized operands. First look for zeroes */ if(Sgl_iszero_mantissa(right)) { /* right is zero */ if(Sgl_iszero_exponentmantissa(left)) { /* Both operands are zeros */ Sgl_invert_sign(right); if(Is_rounding_mode(ROUNDMINUS)) { Sgl_or_signs(left,/*with*/right); } else { Sgl_and_signs(left,/*with*/right); } } else { /* Left is not a zero and must be the result. Trapped * underflows are signaled if left is denormalized. Result * is always exact. */ if( (result_exponent == 0) && Is_underflowtrap_enabled() ) { /* need to normalize results mantissa */ sign_save = Sgl_signextendedsign(left); Sgl_leftshiftby1(left); Sgl_normalize(left,result_exponent); Sgl_set_sign(left,/*using*/sign_save); Sgl_setwrapped_exponent(left,result_exponent,unfl); *dstptr = left; /* inexact = FALSE */ return(UNDERFLOWEXCEPTION); } } *dstptr = left; return(NOEXCEPTION); } /* Neither are zeroes */ Sgl_clear_sign(right); /* Exponent is already cleared */ if(result_exponent == 0 ) { /* Both operands are denormalized. The result must be exact * and is simply calculated. A sum could become normalized and a * difference could cancel to a true zero. 
*/ if( (/*signed*/int) save >= 0 ) { Sgl_subtract(left,/*minus*/right,/*into*/result); if(Sgl_iszero_mantissa(result)) { if(Is_rounding_mode(ROUNDMINUS)) { Sgl_setone_sign(result); } else { Sgl_setzero_sign(result); } *dstptr = result; return(NOEXCEPTION); } } else { Sgl_addition(left,right,/*into*/result); if(Sgl_isone_hidden(result)) { *dstptr = result; return(NOEXCEPTION); } } if(Is_underflowtrap_enabled()) { /* need to normalize result */ sign_save = Sgl_signextendedsign(result); Sgl_leftshiftby1(result); Sgl_normalize(result,result_exponent); Sgl_set_sign(result,/*using*/sign_save); Sgl_setwrapped_exponent(result,result_exponent,unfl); *dstptr = result; /* inexact = FALSE */ return(UNDERFLOWEXCEPTION); } *dstptr = result; return(NOEXCEPTION); } right_exponent = 1; /* Set exponent to reflect different bias * with denomalized numbers. */ } else { Sgl_clear_signexponent_set_hidden(right); } Sgl_clear_exponent_set_hidden(left); diff_exponent = result_exponent - right_exponent; /* * Special case alignment of operands that would force alignment * beyond the extent of the extension. A further optimization * could special case this but only reduces the path length for this * infrequent case. */ if(diff_exponent > SGL_THRESHOLD) { diff_exponent = SGL_THRESHOLD; } /* Align right operand by shifting to right */ Sgl_right_align(/*operand*/right,/*shifted by*/diff_exponent, /*and lower to*/extent); /* Treat sum and difference of the operands separately. */ if( (/*signed*/int) save >= 0 ) { /* * Difference of the two operands. Their can be no overflow. A * borrow can occur out of the hidden bit and force a post * normalization phase. */ Sgl_subtract_withextension(left,/*minus*/right,/*with*/extent,/*into*/result); if(Sgl_iszero_hidden(result)) { /* Handle normalization */ /* A straightforward algorithm would now shift the result * and extension left until the hidden bit becomes one. Not * all of the extension bits need participate in the shift. 
* Only the two most significant bits (round and guard) are * needed. If only a single shift is needed then the guard * bit becomes a significant low order bit and the extension * must participate in the rounding. If more than a single * shift is needed, then all bits to the right of the guard * bit are zeros, and the guard bit may or may not be zero. */ sign_save = Sgl_signextendedsign(result); Sgl_leftshiftby1_withextent(result,extent,result); /* Need to check for a zero result. The sign and exponent * fields have already been zeroed. The more efficient test * of the full object can be used. */ if(Sgl_iszero(result)) /* Must have been "x-x" or "x+(-x)". */ { if(Is_rounding_mode(ROUNDMINUS)) Sgl_setone_sign(result); *dstptr = result; return(NOEXCEPTION); } result_exponent--; /* Look to see if normalization is finished. */ if(Sgl_isone_hidden(result)) { if(result_exponent==0) { /* Denormalized, exponent should be zero. Left operand * * was normalized, so extent (guard, round) was zero */ goto underflow; } else { /* No further normalization is needed. */ Sgl_set_sign(result,/*using*/sign_save); Ext_leftshiftby1(extent); goto round; } } /* Check for denormalized, exponent should be zero. Left * * operand was normalized, so extent (guard, round) was zero */ if(!(underflowtrap = Is_underflowtrap_enabled()) && result_exponent==0) goto underflow; /* Shift extension to complete one bit of normalization and * update exponent. */ Ext_leftshiftby1(extent); /* Discover first one bit to determine shift amount. Use a * modified binary search. We have already shifted the result * one position right and still not found a one so the remainder * of the extension must be zero and simplifies rounding. 
*/ /* Scan bytes */ while(Sgl_iszero_hiddenhigh7mantissa(result)) { Sgl_leftshiftby8(result); if((result_exponent -= 8) <= 0 && !underflowtrap) goto underflow; } /* Now narrow it down to the nibble */ if(Sgl_iszero_hiddenhigh3mantissa(result)) { /* The lower nibble contains the normalizing one */ Sgl_leftshiftby4(result); if((result_exponent -= 4) <= 0 && !underflowtrap) goto underflow; } /* Select case were first bit is set (already normalized) * otherwise select the proper shift. */ if((jumpsize = Sgl_hiddenhigh3mantissa(result)) > 7) { /* Already normalized */ if(result_exponent <= 0) goto underflow; Sgl_set_sign(result,/*using*/sign_save); Sgl_set_exponent(result,/*using*/result_exponent); *dstptr = result; return(NOEXCEPTION); } Sgl_sethigh4bits(result,/*using*/sign_save); switch(jumpsize) { case 1: { Sgl_leftshiftby3(result); result_exponent -= 3; break; } case 2: case 3: { Sgl_leftshiftby2(result); result_exponent -= 2; break; } case 4: case 5: case 6: case 7: { Sgl_leftshiftby1(result); result_exponent -= 1; break; } } if(result_exponent > 0) { Sgl_set_exponent(result,/*using*/result_exponent); *dstptr = result; /* Sign bit is already set */ return(NOEXCEPTION); } /* Fixup potential underflows */ underflow: if(Is_underflowtrap_enabled()) { Sgl_set_sign(result,sign_save); Sgl_setwrapped_exponent(result,result_exponent,unfl); *dstptr = result; /* inexact = FALSE */ return(UNDERFLOWEXCEPTION); } /* * Since we cannot get an inexact denormalized result, * we can now return. */ Sgl_right_align(result,/*by*/(1-result_exponent),extent); Sgl_clear_signexponent(result); Sgl_set_sign(result,sign_save); *dstptr = result; return(NOEXCEPTION); } /* end if(hidden...)... */ /* Fall through and round */ } /* end if(save >= 0)... */ else { /* Add magnitudes */ Sgl_addition(left,right,/*to*/result); if(Sgl_isone_hiddenoverflow(result)) { /* Prenormalization required. 
*/ Sgl_rightshiftby1_withextent(result,extent,extent); Sgl_arithrightshiftby1(result); result_exponent++; } /* end if hiddenoverflow... */ } /* end else ...sub magnitudes... */ /* Round the result. If the extension is all zeros,then the result is * exact. Otherwise round in the correct direction. No underflow is * possible. If a postnormalization is necessary, then the mantissa is * all zeros so no shift is needed. */ round: if(Ext_isnotzero(extent)) { inexact = TRUE; switch(Rounding_mode()) { case ROUNDNEAREST: /* The default. */ if(Ext_isone_sign(extent)) { /* at least 1/2 ulp */ if(Ext_isnotzero_lower(extent) || Sgl_isone_lowmantissa(result)) { /* either exactly half way and odd or more than 1/2ulp */ Sgl_increment(result); } } break; case ROUNDPLUS: if(Sgl_iszero_sign(result)) { /* Round up positive results */ Sgl_increment(result); } break; case ROUNDMINUS: if(Sgl_isone_sign(result)) { /* Round down negative results */ Sgl_increment(result); } case ROUNDZERO:; /* truncate is simple */ } /* end switch... */ if(Sgl_isone_hiddenoverflow(result)) result_exponent++; } if(result_exponent == SGL_INFINITY_EXPONENT) { /* Overflow */ if(Is_overflowtrap_enabled()) { Sgl_setwrapped_exponent(result,result_exponent,ovfl); *dstptr = result; if (inexact) if (Is_inexacttrap_enabled()) return(OVERFLOWEXCEPTION | INEXACTEXCEPTION); else Set_inexactflag(); return(OVERFLOWEXCEPTION); } else { Set_overflowflag(); inexact = TRUE; Sgl_setoverflow(result); } } else Sgl_set_exponent(result,result_exponent); *dstptr = result; if(inexact) if(Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); return(NOEXCEPTION); }
gpl-2.0
durandj/devkitadv
newlib-1.11.0/newlib/libm/math/k_cos.c
172
3079
/* @(#)k_cos.c 5.1 93/09/24 */ /* * ==================================================== * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. * * Developed at SunPro, a Sun Microsystems, Inc. business. * Permission to use, copy, modify, and distribute this * software is freely granted, provided that this notice * is preserved. * ==================================================== */ /* * __kernel_cos( x, y ) * kernel cos function on [-pi/4, pi/4], pi/4 ~ 0.785398164 * Input x is assumed to be bounded by ~pi/4 in magnitude. * Input y is the tail of x. * * Algorithm * 1. Since cos(-x) = cos(x), we need only to consider positive x. * 2. if x < 2^-27 (hx<0x3e400000 0), return 1 with inexact if x!=0. * 3. cos(x) is approximated by a polynomial of degree 14 on * [0,pi/4] * 4 14 * cos(x) ~ 1 - x*x/2 + C1*x + ... + C6*x * where the remez error is * * | 2 4 6 8 10 12 14 | -58 * |cos(x)-(1-.5*x +C1*x +C2*x +C3*x +C4*x +C5*x +C6*x )| <= 2 * | | * * 4 6 8 10 12 14 * 4. let r = C1*x +C2*x +C3*x +C4*x +C5*x +C6*x , then * cos(x) = 1 - x*x/2 + r * since cos(x+y) ~ cos(x) - sin(x)*y * ~ cos(x) - x*y, * a correction term is necessary in cos(x) and hence * cos(x+y) = 1 - (x*x/2 - (r - x*y)) * For better accuracy when x > 0.3, let qx = |x|/4 with * the last 32 bits mask off, and if x > 0.78125, let qx = 0.28125. * Then * cos(x+y) = (1-qx) - ((x*x/2-qx) - (r-x*y)). * Note that 1-qx and (x*x/2-qx) is EXACT here, and the * magnitude of the latter is at least a quarter of x*x/2, * thus, reducing the rounding error in the subtraction. 
*/ #include "fdlibm.h" #ifndef _DOUBLE_IS_32BITS #ifdef __STDC__ static const double #else static double #endif one = 1.00000000000000000000e+00, /* 0x3FF00000, 0x00000000 */ C1 = 4.16666666666666019037e-02, /* 0x3FA55555, 0x5555554C */ C2 = -1.38888888888741095749e-03, /* 0xBF56C16C, 0x16C15177 */ C3 = 2.48015872894767294178e-05, /* 0x3EFA01A0, 0x19CB1590 */ C4 = -2.75573143513906633035e-07, /* 0xBE927E4F, 0x809C52AD */ C5 = 2.08757232129817482790e-09, /* 0x3E21EE9E, 0xBDB4B1C4 */ C6 = -1.13596475577881948265e-11; /* 0xBDA8FAE9, 0xBE8838D4 */ #ifdef __STDC__ double __kernel_cos(double x, double y) #else double __kernel_cos(x, y) double x,y; #endif { double a,hz,z,r,qx; __int32_t ix; GET_HIGH_WORD(ix,x); ix &= 0x7fffffff; /* ix = |x|'s high word*/ if(ix<0x3e400000) { /* if x < 2**27 */ if(((int)x)==0) return one; /* generate inexact */ } z = x*x; r = z*(C1+z*(C2+z*(C3+z*(C4+z*(C5+z*C6))))); if(ix < 0x3FD33333) /* if |x| < 0.3 */ return one - (0.5*z - (z*r - x*y)); else { if(ix > 0x3fe90000) { /* x > 0.78125 */ qx = 0.28125; } else { INSERT_WORDS(qx,ix-0x00200000,0); /* x/4 */ } hz = 0.5*z-qx; a = one-qx; return a - (hz - (z*r-x*y)); } } #endif /* defined(_DOUBLE_IS_32BITS) */
gpl-2.0
coolbho3k/galaxysii_oc
fs/cifs/file.c
428
65070
/* * fs/cifs/file.c * * vfs operations that deal with files * * Copyright (C) International Business Machines Corp., 2002,2010 * Author(s): Steve French (sfrench@us.ibm.com) * Jeremy Allison (jra@samba.org) * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/fs.h> #include <linux/backing-dev.h> #include <linux/stat.h> #include <linux/fcntl.h> #include <linux/pagemap.h> #include <linux/pagevec.h> #include <linux/writeback.h> #include <linux/task_io_accounting_ops.h> #include <linux/delay.h> #include <linux/mount.h> #include <linux/slab.h> #include <asm/div64.h> #include "cifsfs.h" #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_unicode.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" static inline int cifs_convert_flags(unsigned int flags) { if ((flags & O_ACCMODE) == O_RDONLY) return GENERIC_READ; else if ((flags & O_ACCMODE) == O_WRONLY) return GENERIC_WRITE; else if ((flags & O_ACCMODE) == O_RDWR) { /* GENERIC_ALL is too much permission to request can cause unnecessary access denied on create */ /* return GENERIC_ALL; */ return (GENERIC_READ | GENERIC_WRITE); } return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA | FILE_READ_DATA); } static inline fmode_t 
cifs_posix_convert_flags(unsigned int flags) { fmode_t posix_flags = 0; if ((flags & O_ACCMODE) == O_RDONLY) posix_flags = FMODE_READ; else if ((flags & O_ACCMODE) == O_WRONLY) posix_flags = FMODE_WRITE; else if ((flags & O_ACCMODE) == O_RDWR) { /* GENERIC_ALL is too much permission to request can cause unnecessary access denied on create */ /* return GENERIC_ALL; */ posix_flags = FMODE_READ | FMODE_WRITE; } /* can not map O_CREAT or O_EXCL or O_TRUNC flags when reopening a file. They had their effect on the original open */ if (flags & O_APPEND) posix_flags |= (fmode_t)O_APPEND; if (flags & O_DSYNC) posix_flags |= (fmode_t)O_DSYNC; if (flags & __O_SYNC) posix_flags |= (fmode_t)__O_SYNC; if (flags & O_DIRECTORY) posix_flags |= (fmode_t)O_DIRECTORY; if (flags & O_NOFOLLOW) posix_flags |= (fmode_t)O_NOFOLLOW; if (flags & O_DIRECT) posix_flags |= (fmode_t)O_DIRECT; return posix_flags; } static inline int cifs_get_disposition(unsigned int flags) { if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) return FILE_CREATE; else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC)) return FILE_OVERWRITE_IF; else if ((flags & O_CREAT) == O_CREAT) return FILE_OPEN_IF; else if ((flags & O_TRUNC) == O_TRUNC) return FILE_OVERWRITE; else return FILE_OPEN; } /* all arguments to this function must be checked for validity in caller */ static inline int cifs_posix_open_inode_helper(struct inode *inode, struct file *file, struct cifsInodeInfo *pCifsInode, __u32 oplock, u16 netfid) { write_lock(&GlobalSMBSeslock); pCifsInode = CIFS_I(file->f_path.dentry->d_inode); if (pCifsInode == NULL) { write_unlock(&GlobalSMBSeslock); return -EINVAL; } if (pCifsInode->clientCanCacheRead) { /* we have the inode open somewhere else no need to discard cache data */ goto psx_client_can_cache; } /* BB FIXME need to fix this check to move it earlier into posix_open BB fIX following section BB FIXME */ /* if not oplocked, invalidate inode pages if mtime or file size changed */ /* temp = 
cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime)); if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) && (file->f_path.dentry->d_inode->i_size == (loff_t)le64_to_cpu(buf->EndOfFile))) { cFYI(1, "inode unchanged on server"); } else { if (file->f_path.dentry->d_inode->i_mapping) { rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping); if (rc != 0) CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc; } cFYI(1, "invalidating remote inode since open detected it " "changed"); invalidate_remote_inode(file->f_path.dentry->d_inode); } */ psx_client_can_cache: if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) { pCifsInode->clientCanCacheAll = true; pCifsInode->clientCanCacheRead = true; cFYI(1, "Exclusive Oplock granted on inode %p", file->f_path.dentry->d_inode); } else if ((oplock & 0xF) == OPLOCK_READ) pCifsInode->clientCanCacheRead = true; /* will have to change the unlock if we reenable the filemap_fdatawrite (which does not seem necessary */ write_unlock(&GlobalSMBSeslock); return 0; } /* all arguments to this function must be checked for validity in caller */ static inline int cifs_open_inode_helper(struct inode *inode, struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf, char *full_path, int xid) { struct cifsInodeInfo *pCifsInode = CIFS_I(inode); struct timespec temp; int rc; if (pCifsInode->clientCanCacheRead) { /* we have the inode open somewhere else no need to discard cache data */ goto client_can_cache; } /* BB need same check in cifs_create too? */ /* if not oplocked, invalidate inode pages if mtime or file size changed */ temp = cifs_NTtimeToUnix(buf->LastWriteTime); if (timespec_equal(&inode->i_mtime, &temp) && (inode->i_size == (loff_t)le64_to_cpu(buf->EndOfFile))) { cFYI(1, "inode unchanged on server"); } else { if (inode->i_mapping) { /* BB no need to lock inode until after invalidate since namei code should already have it locked? 
*/ rc = filemap_write_and_wait(inode->i_mapping); if (rc != 0) pCifsInode->write_behind_rc = rc; } cFYI(1, "invalidating remote inode since open detected it " "changed"); invalidate_remote_inode(inode); } client_can_cache: if (pTcon->unix_ext) rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb, xid); else rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb, xid, NULL); if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) { pCifsInode->clientCanCacheAll = true; pCifsInode->clientCanCacheRead = true; cFYI(1, "Exclusive Oplock granted on inode %p", inode); } else if ((*oplock & 0xF) == OPLOCK_READ) pCifsInode->clientCanCacheRead = true; return rc; } int cifs_open(struct inode *inode, struct file *file) { int rc = -EACCES; int xid; __u32 oplock; struct cifs_sb_info *cifs_sb; struct cifsTconInfo *tcon; struct cifsFileInfo *pCifsFile = NULL; struct cifsInodeInfo *pCifsInode; char *full_path = NULL; int desiredAccess; int disposition; __u16 netfid; FILE_ALL_INFO *buf = NULL; xid = GetXid(); cifs_sb = CIFS_SB(inode->i_sb); tcon = cifs_sb->tcon; pCifsInode = CIFS_I(file->f_path.dentry->d_inode); full_path = build_path_from_dentry(file->f_path.dentry); if (full_path == NULL) { rc = -ENOMEM; FreeXid(xid); return rc; } cFYI(1, "inode = 0x%p file flags are 0x%x for %s", inode, file->f_flags, full_path); if (oplockEnabled) oplock = REQ_OPLOCK; else oplock = 0; if (!tcon->broken_posix_open && tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { int oflags = (int) cifs_posix_convert_flags(file->f_flags); oflags |= SMB_O_CREAT; /* can not refresh inode info since size could be stale */ rc = cifs_posix_open(full_path, &inode, inode->i_sb, cifs_sb->mnt_file_mode /* ignored */, oflags, &oplock, &netfid, xid); if (rc == 0) { cFYI(1, "posix open succeeded"); /* no need for special case handling of setting mode on read only files needed here */ rc = cifs_posix_open_inode_helper(inode, file, 
pCifsInode, oplock, netfid); if (rc != 0) { CIFSSMBClose(xid, tcon, netfid); goto out; } pCifsFile = cifs_new_fileinfo(inode, netfid, file, file->f_path.mnt, oflags); if (pCifsFile == NULL) { CIFSSMBClose(xid, tcon, netfid); rc = -ENOMEM; } goto out; } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { if (tcon->ses->serverNOS) cERROR(1, "server %s of type %s returned" " unexpected error on SMB posix open" ", disabling posix open support." " Check if server update available.", tcon->ses->serverName, tcon->ses->serverNOS); tcon->broken_posix_open = true; } else if ((rc != -EIO) && (rc != -EREMOTE) && (rc != -EOPNOTSUPP)) /* path not found or net err */ goto out; /* else fallthrough to retry open the old way on network i/o or DFS errors */ } desiredAccess = cifs_convert_flags(file->f_flags); /********************************************************************* * open flag mapping table: * * POSIX Flag CIFS Disposition * ---------- ---------------- * O_CREAT FILE_OPEN_IF * O_CREAT | O_EXCL FILE_CREATE * O_CREAT | O_TRUNC FILE_OVERWRITE_IF * O_TRUNC FILE_OVERWRITE * none of the above FILE_OPEN * * Note that there is not a direct match between disposition * FILE_SUPERSEDE (ie create whether or not file exists although * O_CREAT | O_TRUNC is similar but truncates the existing * file rather than creating a new file as FILE_SUPERSEDE does * (which uses the attributes / metadata passed in on open call) *? *? O_SYNC is a reasonable match to CIFS writethrough flag *? and the read write flags match reasonably. O_LARGEFILE *? is irrelevant because largefile support is always used *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY, * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation *********************************************************************/ disposition = cifs_get_disposition(file->f_flags); /* BB pass O_SYNC flag through on file attributes .. 
BB */ /* Also refresh inode by passing in file_info buf returned by SMBOpen and calling get_inode_info with returned buf (at least helps non-Unix server case) */ /* BB we can not do this if this is the second open of a file and the first handle has writebehind data, we might be able to simply do a filemap_fdatawrite/filemap_fdatawait first */ buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); if (!buf) { rc = -ENOMEM; goto out; } if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS) rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); else rc = -EIO; /* no NT SMB support fall into legacy open below */ if (rc == -EIO) { /* Old server, try legacy style OpenX */ rc = SMBLegacyOpen(xid, tcon, full_path, disposition, desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); } if (rc) { cFYI(1, "cifs_open returned 0x%x", rc); goto out; } rc = cifs_open_inode_helper(inode, tcon, &oplock, buf, full_path, xid); if (rc != 0) goto out; pCifsFile = cifs_new_fileinfo(inode, netfid, file, file->f_path.mnt, file->f_flags); if (pCifsFile == NULL) { rc = -ENOMEM; goto out; } if (oplock & CIFS_CREATE_ACTION) { /* time to set mode which we can not set earlier due to problems creating new read-only files */ if (tcon->unix_ext) { struct cifs_unix_set_info_args args = { .mode = inode->i_mode, .uid = NO_CHANGE_64, .gid = NO_CHANGE_64, .ctime = NO_CHANGE_64, .atime = NO_CHANGE_64, .mtime = NO_CHANGE_64, .device = 0, }; CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); } } out: kfree(buf); kfree(full_path); FreeXid(xid); return rc; } /* Try to reacquire byte range locks that were released when session */ /* to server was lost */ static int cifs_relock_file(struct cifsFileInfo *cifsFile) { int rc = 0; /* BB list all locks 
open on this file and relock */ return rc; } static int cifs_reopen_file(struct file *file, bool can_flush) { int rc = -EACCES; int xid; __u32 oplock; struct cifs_sb_info *cifs_sb; struct cifsTconInfo *tcon; struct cifsFileInfo *pCifsFile; struct cifsInodeInfo *pCifsInode; struct inode *inode; char *full_path = NULL; int desiredAccess; int disposition = FILE_OPEN; __u16 netfid; if (file->private_data) pCifsFile = (struct cifsFileInfo *)file->private_data; else return -EBADF; xid = GetXid(); mutex_lock(&pCifsFile->fh_mutex); if (!pCifsFile->invalidHandle) { mutex_unlock(&pCifsFile->fh_mutex); rc = 0; FreeXid(xid); return rc; } if (file->f_path.dentry == NULL) { cERROR(1, "no valid name if dentry freed"); dump_stack(); rc = -EBADF; goto reopen_error_exit; } inode = file->f_path.dentry->d_inode; if (inode == NULL) { cERROR(1, "inode not valid"); dump_stack(); rc = -EBADF; goto reopen_error_exit; } cifs_sb = CIFS_SB(inode->i_sb); tcon = cifs_sb->tcon; /* can not grab rename sem here because various ops, including those that already have the rename sem can end up causing writepage to get called and if the server was down that means we end up here, and we can never tell if the caller already has the rename_sem */ full_path = build_path_from_dentry(file->f_path.dentry); if (full_path == NULL) { rc = -ENOMEM; reopen_error_exit: mutex_unlock(&pCifsFile->fh_mutex); FreeXid(xid); return rc; } cFYI(1, "inode = 0x%p file flags 0x%x for %s", inode, file->f_flags, full_path); if (oplockEnabled) oplock = REQ_OPLOCK; else oplock = 0; if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { int oflags = (int) cifs_posix_convert_flags(file->f_flags); /* can not refresh inode info since size could be stale */ rc = cifs_posix_open(full_path, NULL, inode->i_sb, cifs_sb->mnt_file_mode /* ignored */, oflags, &oplock, &netfid, xid); if (rc == 0) { cFYI(1, "posix reopen succeeded"); goto reopen_success; } /* 
fallthrough to retry open the old way on errors, especially in the reconnect path it is important to retry hard */ } desiredAccess = cifs_convert_flags(file->f_flags); /* Can not refresh inode by passing in file_info buf to be returned by SMBOpen and then calling get_inode_info with returned buf since file might have write behind data that needs to be flushed and server version of file size can be stale. If we knew for sure that inode was not dirty locally we could do this */ rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, NULL, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); if (rc) { mutex_unlock(&pCifsFile->fh_mutex); cFYI(1, "cifs_open returned 0x%x", rc); cFYI(1, "oplock: %d", oplock); } else { reopen_success: pCifsFile->netfid = netfid; pCifsFile->invalidHandle = false; mutex_unlock(&pCifsFile->fh_mutex); pCifsInode = CIFS_I(inode); if (pCifsInode) { if (can_flush) { rc = filemap_write_and_wait(inode->i_mapping); if (rc != 0) CIFS_I(inode)->write_behind_rc = rc; /* temporarily disable caching while we go to server to get inode info */ pCifsInode->clientCanCacheAll = false; pCifsInode->clientCanCacheRead = false; if (tcon->unix_ext) rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb, xid); else rc = cifs_get_inode_info(&inode, full_path, NULL, inode->i_sb, xid, NULL); } /* else we are writing out data to server already and could deadlock if we tried to flush data, and since we do not know if we have data that would invalidate the current end of file on the server we can not go to the server to get the new inod info */ if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) { pCifsInode->clientCanCacheAll = true; pCifsInode->clientCanCacheRead = true; cFYI(1, "Exclusive Oplock granted on inode %p", file->f_path.dentry->d_inode); } else if ((oplock & 0xF) == OPLOCK_READ) { pCifsInode->clientCanCacheRead = true; pCifsInode->clientCanCacheAll = false; } else { pCifsInode->clientCanCacheRead 
= false; pCifsInode->clientCanCacheAll = false; } cifs_relock_file(pCifsFile); } } kfree(full_path); FreeXid(xid); return rc; } int cifs_close(struct inode *inode, struct file *file) { int rc = 0; int xid, timeout; struct cifs_sb_info *cifs_sb; struct cifsTconInfo *pTcon; struct cifsFileInfo *pSMBFile = (struct cifsFileInfo *)file->private_data; xid = GetXid(); cifs_sb = CIFS_SB(inode->i_sb); pTcon = cifs_sb->tcon; if (pSMBFile) { struct cifsLockInfo *li, *tmp; write_lock(&GlobalSMBSeslock); pSMBFile->closePend = true; if (pTcon) { /* no sense reconnecting to close a file that is already closed */ if (!pTcon->need_reconnect) { write_unlock(&GlobalSMBSeslock); timeout = 2; while ((atomic_read(&pSMBFile->count) != 1) && (timeout <= 2048)) { /* Give write a better chance to get to server ahead of the close. We do not want to add a wait_q here as it would increase the memory utilization as the struct would be in each open file, but this should give enough time to clear the socket */ cFYI(DBG2, "close delay, write pending"); msleep(timeout); timeout *= 4; } if (!pTcon->need_reconnect && !pSMBFile->invalidHandle) rc = CIFSSMBClose(xid, pTcon, pSMBFile->netfid); } else write_unlock(&GlobalSMBSeslock); } else write_unlock(&GlobalSMBSeslock); /* Delete any outstanding lock records. We'll lose them when the file is closed anyway. 
*/ mutex_lock(&pSMBFile->lock_mutex); list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) { list_del(&li->llist); kfree(li); } mutex_unlock(&pSMBFile->lock_mutex); write_lock(&GlobalSMBSeslock); list_del(&pSMBFile->flist); list_del(&pSMBFile->tlist); write_unlock(&GlobalSMBSeslock); cifsFileInfo_put(file->private_data); file->private_data = NULL; } else rc = -EBADF; read_lock(&GlobalSMBSeslock); if (list_empty(&(CIFS_I(inode)->openFileList))) { cFYI(1, "closing last open instance for inode %p", inode); /* if the file is not open we do not know if we can cache info on this inode, much less write behind and read ahead */ CIFS_I(inode)->clientCanCacheRead = false; CIFS_I(inode)->clientCanCacheAll = false; } read_unlock(&GlobalSMBSeslock); if ((rc == 0) && CIFS_I(inode)->write_behind_rc) rc = CIFS_I(inode)->write_behind_rc; FreeXid(xid); return rc; } int cifs_closedir(struct inode *inode, struct file *file) { int rc = 0; int xid; struct cifsFileInfo *pCFileStruct = (struct cifsFileInfo *)file->private_data; char *ptmp; cFYI(1, "Closedir inode = 0x%p", inode); xid = GetXid(); if (pCFileStruct) { struct cifsTconInfo *pTcon; struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); pTcon = cifs_sb->tcon; cFYI(1, "Freeing private data in close dir"); write_lock(&GlobalSMBSeslock); if (!pCFileStruct->srch_inf.endOfSearch && !pCFileStruct->invalidHandle) { pCFileStruct->invalidHandle = true; write_unlock(&GlobalSMBSeslock); rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid); cFYI(1, "Closing uncompleted readdir with rc %d", rc); /* not much we can do if it fails anyway, ignore rc */ rc = 0; } else write_unlock(&GlobalSMBSeslock); ptmp = pCFileStruct->srch_inf.ntwrk_buf_start; if (ptmp) { cFYI(1, "closedir free smb buf in srch struct"); pCFileStruct->srch_inf.ntwrk_buf_start = NULL; if (pCFileStruct->srch_inf.smallBuf) cifs_small_buf_release(ptmp); else cifs_buf_release(ptmp); } kfree(file->private_data); file->private_data = NULL; } /* BB can we lock the 
filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}

/*
 * Remember a byte-range lock granted by the server so it can be found
 * again at unlock time and released when the handle is closed (see the
 * llist walk in cifs_close).  Returns 0, or -ENOMEM if the tracking
 * record cannot be allocated.
 */
static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
		__u64 offset, __u8 lockType)
{
	struct cifsLockInfo *li =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (li == NULL)
		return -ENOMEM;
	li->offset = offset;
	li->length = len;
	li->type = lockType;
	/* lock_mutex serializes against the list walks in unlock/close */
	mutex_lock(&fid->lock_mutex);
	list_add(&li->llist, &fid->llist);
	mutex_unlock(&fid->lock_mutex);
	return 0;
}

/*
 * VFS ->lock method: translate a POSIX/flock request on this file into
 * SMB byte-range lock calls - POSIX-style locks when the server supports
 * the CIFS Unix extensions, Windows LockingAndX otherwise.
 */
int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
{
	int rc, xid;
	__u32 numLock = 0;
	__u32 numUnlock = 0;
	__u64 length;
	bool wait_flag = false;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	__u16 netfid;
	__u8 lockType = LOCKING_ANDX_LARGE_FILES;
	bool posix_locking = 0;

	/* fl_end is inclusive, hence the "1 +" */
	length = 1 + pfLock->fl_end - pfLock->fl_start;
	rc = -EACCES;
	xid = GetXid();

	cFYI(1, "Lock parm: 0x%x flockflags: "
		 "0x%x flocktype: 0x%x start: %lld end: %lld",
		cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
		pfLock->fl_end);

	/* log (but otherwise ignore) flag bits we do not implement */
	if (pfLock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (pfLock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (pfLock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		wait_flag = true;
	}
	if (pfLock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			 "not implemented yet");
	if (pfLock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (pfLock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags);

	/* map the VFS lock type to SMB lock/unlock counts and share mode */
	if (pfLock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK ");
		numLock = 1;
	} else if (pfLock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		numUnlock = 1;
		/* Check if unlock includes more than
		   one lock range */
	} else if (pfLock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else if (pfLock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		numLock = 1;
	} else if (pfLock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else
		cFYI(1,
"Unknown type of lock"); cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); tcon = cifs_sb->tcon; if (file->private_data == NULL) { rc = -EBADF; FreeXid(xid); return rc; } netfid = ((struct cifsFileInfo *)file->private_data)->netfid; if ((tcon->ses->capabilities & CAP_UNIX) && (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) posix_locking = 1; /* BB add code here to normalize offset and length to account for negative length which we can not accept over the wire */ if (IS_GETLK(cmd)) { if (posix_locking) { int posix_lock_type; if (lockType & LOCKING_ANDX_SHARED_LOCK) posix_lock_type = CIFS_RDLCK; else posix_lock_type = CIFS_WRLCK; rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */, length, pfLock, posix_lock_type, wait_flag); FreeXid(xid); return rc; } /* BB we could chain these into one lock request BB */ rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start, 0, 1, lockType, 0 /* wait flag */ ); if (rc == 0) { rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start, 1 /* numUnlock */ , 0 /* numLock */ , lockType, 0 /* wait flag */ ); pfLock->fl_type = F_UNLCK; if (rc != 0) cERROR(1, "Error unlocking previously locked " "range %d during test of lock", rc); rc = 0; } else { /* if rc == ERR_SHARING_VIOLATION ? 
*/ rc = 0; if (lockType & LOCKING_ANDX_SHARED_LOCK) { pfLock->fl_type = F_WRLCK; } else { rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start, 0, 1, lockType | LOCKING_ANDX_SHARED_LOCK, 0 /* wait flag */); if (rc == 0) { rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start, 1, 0, lockType | LOCKING_ANDX_SHARED_LOCK, 0 /* wait flag */); pfLock->fl_type = F_RDLCK; if (rc != 0) cERROR(1, "Error unlocking " "previously locked range %d " "during test of lock", rc); rc = 0; } else { pfLock->fl_type = F_WRLCK; rc = 0; } } } FreeXid(xid); return rc; } if (!numLock && !numUnlock) { /* if no lock or unlock then nothing to do since we do not know what it is */ FreeXid(xid); return -EOPNOTSUPP; } if (posix_locking) { int posix_lock_type; if (lockType & LOCKING_ANDX_SHARED_LOCK) posix_lock_type = CIFS_RDLCK; else posix_lock_type = CIFS_WRLCK; if (numUnlock == 1) posix_lock_type = CIFS_UNLCK; rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */, length, pfLock, posix_lock_type, wait_flag); } else { struct cifsFileInfo *fid = (struct cifsFileInfo *)file->private_data; if (numLock) { rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start, 0, numLock, lockType, wait_flag); if (rc == 0) { /* For Windows locks we must store them. */ rc = store_file_lock(fid, length, pfLock->fl_start, lockType); } } else if (numUnlock) { /* For each stored lock that this unlock overlaps completely, unlock it. 
*/ int stored_rc = 0; struct cifsLockInfo *li, *tmp; rc = 0; mutex_lock(&fid->lock_mutex); list_for_each_entry_safe(li, tmp, &fid->llist, llist) { if (pfLock->fl_start <= li->offset && (pfLock->fl_start + length) >= (li->offset + li->length)) { stored_rc = CIFSSMBLock(xid, tcon, netfid, li->length, li->offset, 1, 0, li->type, false); if (stored_rc) rc = stored_rc; else { list_del(&li->llist); kfree(li); } } } mutex_unlock(&fid->lock_mutex); } } if (pfLock->fl_flags & FL_POSIX) posix_lock_file_wait(file, pfLock); FreeXid(xid); return rc; } /* * Set the timeout on write requests past EOF. For some servers (Windows) * these calls can be very long. * * If we're writing >10M past the EOF we give a 180s timeout. Anything less * than that gets a 45s timeout. Writes not past EOF get 15s timeouts. * The 10M cutoff is totally arbitrary. A better scheme for this would be * welcome if someone wants to suggest one. * * We may be able to do a better job with this if there were some way to * declare that a file should be sparse. 
*/
/*
 * Pick an SMB write timeout class based on how far past the server's
 * known EOF this write lands (rationale in the comment block above).
 */
static int cifs_write_timeout(struct cifsInodeInfo *cifsi, loff_t offset)
{
	if (offset <= cifsi->server_eof)
		return CIFS_STD_OP;
	else if (offset > (cifsi->server_eof + (10 * 1024 * 1024)))
		return CIFS_VLONG_OP;
	else
		return CIFS_LONG_OP;
}

/* update the file size (if needed) after a write */
static void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
			    unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}

/*
 * Write data from a user-space buffer to the file, looping (in chunks of
 * at most wsize) until write_size bytes are sent or an unrecoverable
 * error occurs.  Reopens an invalidated handle after a reconnect and
 * retries on -EAGAIN.  Returns bytes written, or a negative errno only
 * when nothing at all was written.
 */
ssize_t cifs_user_write(struct file *file, const char __user *write_data,
	size_t write_size, loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid, long_op;
	struct cifsFileInfo *open_file;
	struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	pTcon = cifs_sb->tcon;

	/* cFYI(1, " write %d bytes to offset %lld of %s", write_size,
	   *poffset, file->f_path.dentry->d_name.name); */

	if (file->private_data == NULL)
		return -EBADF;
	open_file = (struct cifsFileInfo *) file->private_data;

	rc = generic_write_checks(file, poffset, &write_size, 0);
	if (rc)
		return rc;

	xid = GetXid();

	long_op = cifs_write_timeout(cifsi, *poffset);
	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (file->private_data == NULL) {
				/* file has been closed on us */
				FreeXid(xid);
			/* if we have gotten here we have
			   written some data and blocked, and the file
			   has been freed on us while we blocked so
			   return what we managed to write */
				return total_written;
			}
			if (open_file->closePend) {
				FreeXid(xid);
				if (total_written)
					return total_written;
				else
					return -EBADF;
			}
			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to server
				   now */
				rc = cifs_reopen_file(file, false);
				if (rc != 0)
					break;
			}

			rc =
CIFSSMBWrite(xid, pTcon, open_file->netfid, min_t(const int, cifs_sb->wsize, write_size - total_written), *poffset, &bytes_written, NULL, write_data + total_written, long_op); } if (rc || (bytes_written == 0)) { if (total_written) break; else { FreeXid(xid); return rc; } } else { cifs_update_eof(cifsi, *poffset, bytes_written); *poffset += bytes_written; } long_op = CIFS_STD_OP; /* subsequent writes fast - 15 seconds is plenty */ } cifs_stats_bytes_written(pTcon, total_written); /* since the write may have blocked check these pointers again */ if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) { struct inode *inode = file->f_path.dentry->d_inode; /* Do not update local mtime - server will set its actual value on write * inode->i_ctime = inode->i_mtime = * current_fs_time(inode->i_sb);*/ if (total_written > 0) { spin_lock(&inode->i_lock); if (*poffset > file->f_path.dentry->d_inode->i_size) i_size_write(file->f_path.dentry->d_inode, *poffset); spin_unlock(&inode->i_lock); } mark_inode_dirty_sync(file->f_path.dentry->d_inode); } FreeXid(xid); return total_written; } static ssize_t cifs_write(struct file *file, const char *write_data, size_t write_size, loff_t *poffset) { int rc = 0; unsigned int bytes_written = 0; unsigned int total_written; struct cifs_sb_info *cifs_sb; struct cifsTconInfo *pTcon; int xid, long_op; struct cifsFileInfo *open_file; struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode); cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); pTcon = cifs_sb->tcon; cFYI(1, "write %zd bytes to offset %lld of %s", write_size, *poffset, file->f_path.dentry->d_name.name); if (file->private_data == NULL) return -EBADF; open_file = (struct cifsFileInfo *)file->private_data; xid = GetXid(); long_op = cifs_write_timeout(cifsi, *poffset); for (total_written = 0; write_size > total_written; total_written += bytes_written) { rc = -EAGAIN; while (rc == -EAGAIN) { if (file->private_data == NULL) { /* file has been closed on us */ FreeXid(xid); /* if we 
have gotten here we have written some data and blocked, and the file has been freed on us while we blocked so return what we managed to write */ return total_written; } if (open_file->closePend) { FreeXid(xid); if (total_written) return total_written; else return -EBADF; } if (open_file->invalidHandle) { /* we could deadlock if we called filemap_fdatawait from here so tell reopen_file not to flush data to server now */ rc = cifs_reopen_file(file, false); if (rc != 0) break; } if (experimEnabled || (pTcon->ses->server && ((pTcon->ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) == 0))) { struct kvec iov[2]; unsigned int len; len = min((size_t)cifs_sb->wsize, write_size - total_written); /* iov[0] is reserved for smb header */ iov[1].iov_base = (char *)write_data + total_written; iov[1].iov_len = len; rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid, len, *poffset, &bytes_written, iov, 1, long_op); } else rc = CIFSSMBWrite(xid, pTcon, open_file->netfid, min_t(const int, cifs_sb->wsize, write_size - total_written), *poffset, &bytes_written, write_data + total_written, NULL, long_op); } if (rc || (bytes_written == 0)) { if (total_written) break; else { FreeXid(xid); return rc; } } else { cifs_update_eof(cifsi, *poffset, bytes_written); *poffset += bytes_written; } long_op = CIFS_STD_OP; /* subsequent writes fast - 15 seconds is plenty */ } cifs_stats_bytes_written(pTcon, total_written); /* since the write may have blocked check these pointers again */ if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) { /*BB We could make this contingent on superblock ATIME flag too */ /* file->f_path.dentry->d_inode->i_ctime = file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/ if (total_written > 0) { spin_lock(&file->f_path.dentry->d_inode->i_lock); if (*poffset > file->f_path.dentry->d_inode->i_size) i_size_write(file->f_path.dentry->d_inode, *poffset); spin_unlock(&file->f_path.dentry->d_inode->i_lock); } 
mark_inode_dirty_sync(file->f_path.dentry->d_inode); } FreeXid(xid); return total_written; } #ifdef CONFIG_CIFS_EXPERIMENTAL struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode) { struct cifsFileInfo *open_file = NULL; read_lock(&GlobalSMBSeslock); /* we could simply get the first_list_entry since write-only entries are always at the end of the list but since the first entry might have a close pending, we go through the whole list */ list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { if (open_file->closePend) continue; if (open_file->pfile && ((open_file->pfile->f_flags & O_RDWR) || (open_file->pfile->f_flags & O_RDONLY))) { if (!open_file->invalidHandle) { /* found a good file */ /* lock it so it will not be closed on us */ cifsFileInfo_get(open_file); read_unlock(&GlobalSMBSeslock); return open_file; } /* else might as well continue, and look for another, or simply have the caller reopen it again rather than trying to fix this handle */ } else /* write only file */ break; /* write only files are last so must be done */ } read_unlock(&GlobalSMBSeslock); return NULL; } #endif struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode) { struct cifsFileInfo *open_file; bool any_available = false; int rc; /* Having a null inode here (because mapping->host was set to zero by the VFS or MM) should not happen but we had reports of on oops (due to it being zero) during stress testcases so we need to check for it */ if (cifs_inode == NULL) { cERROR(1, "Null inode passed to cifs_writeable_file"); dump_stack(); return NULL; } read_lock(&GlobalSMBSeslock); refind_writable: list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { if (open_file->closePend || (!any_available && open_file->pid != current->tgid)) continue; if (open_file->pfile && ((open_file->pfile->f_flags & O_RDWR) || (open_file->pfile->f_flags & O_WRONLY))) { cifsFileInfo_get(open_file); if (!open_file->invalidHandle) { /* found a good writable file 
*/ read_unlock(&GlobalSMBSeslock); return open_file; } read_unlock(&GlobalSMBSeslock); /* Had to unlock since following call can block */ rc = cifs_reopen_file(open_file->pfile, false); if (!rc) { if (!open_file->closePend) return open_file; else { /* start over in case this was deleted */ /* since the list could be modified */ read_lock(&GlobalSMBSeslock); cifsFileInfo_put(open_file); goto refind_writable; } } /* if it fails, try another handle if possible - (we can not do this if closePending since loop could be modified - in which case we have to start at the beginning of the list again. Note that it would be bad to hold up writepages here (rather than in caller) with continuous retries */ cFYI(1, "wp failed on reopen file"); read_lock(&GlobalSMBSeslock); /* can not use this handle, no write pending on this one after all */ cifsFileInfo_put(open_file); if (open_file->closePend) /* list could have changed */ goto refind_writable; /* else we simply continue to the next entry. Thus we do not loop on reopen errors. If we can not reopen the file, for example if we reconnected to a server with another client racing to delete or lock the file we would not make progress if we restarted before the beginning of the loop here. 
*/ } } /* couldn't find useable FH with same pid, try any available */ if (!any_available) { any_available = true; goto refind_writable; } read_unlock(&GlobalSMBSeslock); return NULL; } static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) { struct address_space *mapping = page->mapping; loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT; char *write_data; int rc = -EFAULT; int bytes_written = 0; struct cifs_sb_info *cifs_sb; struct cifsTconInfo *pTcon; struct inode *inode; struct cifsFileInfo *open_file; if (!mapping || !mapping->host) return -EFAULT; inode = page->mapping->host; cifs_sb = CIFS_SB(inode->i_sb); pTcon = cifs_sb->tcon; offset += (loff_t)from; write_data = kmap(page); write_data += from; if ((to > PAGE_CACHE_SIZE) || (from > to)) { kunmap(page); return -EIO; } /* racing with truncate? */ if (offset > mapping->host->i_size) { kunmap(page); return 0; /* don't care */ } /* check to make sure that we are not extending the file */ if (mapping->host->i_size - offset < (loff_t)to) to = (unsigned)(mapping->host->i_size - offset); open_file = find_writable_file(CIFS_I(mapping->host)); if (open_file) { bytes_written = cifs_write(open_file->pfile, write_data, to-from, &offset); cifsFileInfo_put(open_file); /* Does mm or vfs already set times? 
*/ inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb); if ((bytes_written > 0) && (offset)) rc = 0; else if (bytes_written < 0) rc = bytes_written; } else { cFYI(1, "No writeable filehandles for inode"); rc = -EIO; } kunmap(page); return rc; } static int cifs_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct backing_dev_info *bdi = mapping->backing_dev_info; unsigned int bytes_to_write; unsigned int bytes_written; struct cifs_sb_info *cifs_sb; int done = 0; pgoff_t end; pgoff_t index; int range_whole = 0; struct kvec *iov; int len; int n_iov = 0; pgoff_t next; int nr_pages; __u64 offset = 0; struct cifsFileInfo *open_file; struct cifsInodeInfo *cifsi = CIFS_I(mapping->host); struct page *page; struct pagevec pvec; int rc = 0; int scanned = 0; int xid, long_op; cifs_sb = CIFS_SB(mapping->host->i_sb); /* * If wsize is smaller that the page cache size, default to writing * one page at a time via cifs_writepage */ if (cifs_sb->wsize < PAGE_CACHE_SIZE) return generic_writepages(mapping, wbc); if ((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server)) if (cifs_sb->tcon->ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) if (!experimEnabled) return generic_writepages(mapping, wbc); iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL); if (iov == NULL) return generic_writepages(mapping, wbc); /* * BB: Is this meaningful for a non-block-device file system? 
* If it is, we should test it again after we do I/O */ if (wbc->nonblocking && bdi_write_congested(bdi)) { wbc->encountered_congestion = 1; kfree(iov); return 0; } xid = GetXid(); pagevec_init(&pvec, 0); if (wbc->range_cyclic) { index = mapping->writeback_index; /* Start from prev offset */ end = -1; } else { index = wbc->range_start >> PAGE_CACHE_SHIFT; end = wbc->range_end >> PAGE_CACHE_SHIFT; if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = 1; scanned = 1; } retry: while (!done && (index <= end) && (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY, min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) { int first; unsigned int i; first = -1; next = 0; n_iov = 0; bytes_to_write = 0; for (i = 0; i < nr_pages; i++) { page = pvec.pages[i]; /* * At this point we hold neither mapping->tree_lock nor * lock on the page itself: the page may be truncated or * invalidated (changing page->mapping to NULL), or even * swizzled back from swapper_space to tmpfs file * mapping */ if (first < 0) lock_page(page); else if (!trylock_page(page)) break; if (unlikely(page->mapping != mapping)) { unlock_page(page); break; } if (!wbc->range_cyclic && page->index > end) { done = 1; unlock_page(page); break; } if (next && (page->index != next)) { /* Not next consecutive page */ unlock_page(page); break; } if (wbc->sync_mode != WB_SYNC_NONE) wait_on_page_writeback(page); if (PageWriteback(page) || !clear_page_dirty_for_io(page)) { unlock_page(page); break; } /* * This actually clears the dirty bit in the radix tree. * See cifs_writepage() for more commentary. */ set_page_writeback(page); if (page_offset(page) >= mapping->host->i_size) { done = 1; unlock_page(page); end_page_writeback(page); break; } /* * BB can we get rid of this? 
pages are held by pvec */ page_cache_get(page); len = min(mapping->host->i_size - page_offset(page), (loff_t)PAGE_CACHE_SIZE); /* reserve iov[0] for the smb header */ n_iov++; iov[n_iov].iov_base = kmap(page); iov[n_iov].iov_len = len; bytes_to_write += len; if (first < 0) { first = i; offset = page_offset(page); } next = page->index + 1; if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize) break; } if (n_iov) { /* Search for a writable handle every time we call * CIFSSMBWrite2. We can't rely on the last handle * we used to still be valid */ open_file = find_writable_file(CIFS_I(mapping->host)); if (!open_file) { cERROR(1, "No writable handles for inode"); rc = -EBADF; } else { long_op = cifs_write_timeout(cifsi, offset); rc = CIFSSMBWrite2(xid, cifs_sb->tcon, open_file->netfid, bytes_to_write, offset, &bytes_written, iov, n_iov, long_op); cifsFileInfo_put(open_file); cifs_update_eof(cifsi, offset, bytes_written); if (rc || bytes_written < bytes_to_write) { cERROR(1, "Write2 ret %d, wrote %d", rc, bytes_written); /* BB what if continued retry is requested via mount flags? */ if (rc == -ENOSPC) set_bit(AS_ENOSPC, &mapping->flags); else set_bit(AS_EIO, &mapping->flags); } else { cifs_stats_bytes_written(cifs_sb->tcon, bytes_written); } } for (i = 0; i < n_iov; i++) { page = pvec.pages[first + i]; /* Should we also set page error on success rc but too little data written? 
*/ /* BB investigate retry logic on temporary server crash cases and how recovery works when page marked as error */ if (rc) SetPageError(page); kunmap(page); unlock_page(page); end_page_writeback(page); page_cache_release(page); } if ((wbc->nr_to_write -= n_iov) <= 0) done = 1; index = next; } else /* Need to re-find the pages we skipped */ index = pvec.pages[0]->index + 1; pagevec_release(&pvec); } if (!scanned && !done) { /* * We hit the last page and there is more work to be done: wrap * back to the start of the file */ scanned = 1; index = 0; goto retry; } if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) mapping->writeback_index = index; FreeXid(xid); kfree(iov); return rc; } static int cifs_writepage(struct page *page, struct writeback_control *wbc) { int rc = -EFAULT; int xid; xid = GetXid(); /* BB add check for wbc flags */ page_cache_get(page); if (!PageUptodate(page)) cFYI(1, "ppw - page not up to date"); /* * Set the "writeback" flag, and clear "dirty" in the radix tree. * * A writepage() implementation always needs to do either this, * or re-dirty the page with "redirty_page_for_writepage()" in * the case of a failure. * * Just unlocking the page will cause the radix tree tag-bits * to fail to update with the state of the page correctly. */ set_page_writeback(page); rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE); SetPageUptodate(page); /* BB add check for error and Clearuptodate? 
*/ unlock_page(page); end_page_writeback(page); page_cache_release(page); FreeXid(xid); return rc; } static int cifs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { int rc; struct inode *inode = mapping->host; cFYI(1, "write_end for page %p from pos %lld with %d bytes", page, pos, copied); if (PageChecked(page)) { if (copied == len) SetPageUptodate(page); ClearPageChecked(page); } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE) SetPageUptodate(page); if (!PageUptodate(page)) { char *page_data; unsigned offset = pos & (PAGE_CACHE_SIZE - 1); int xid; xid = GetXid(); /* this is probably better than directly calling partialpage_write since in this function the file handle is known which we might as well leverage */ /* BB check if anything else missing out of ppw such as updating last write time */ page_data = kmap(page); rc = cifs_write(file, page_data + offset, copied, &pos); /* if (rc < 0) should we set writebehind rc? 
*/
		kunmap(page);
		FreeXid(xid);
	} else {
		/* page was already up to date: just accept the copy */
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		/* grow the cached file size if this write extended it */
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}

/*
 * VFS ->fsync method: push cached write-behind data to the server,
 * report (and clear) any stored write-behind error, then ask the server
 * to flush the handle unless the nossync mount flag is set.
 */
int cifs_fsync(struct file *file, int datasync)
{
	int xid;
	int rc = 0;
	struct cifsTconInfo *tcon;
	struct cifsFileInfo *smbfile =
		(struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;

	xid = GetXid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
		file->f_path.dentry->d_name.name, datasync);

	rc = filemap_write_and_wait(inode->i_mapping);
	if (rc == 0) {
		/* pick up any error left behind by async writepages */
		rc = CIFS_I(inode)->write_behind_rc;
		CIFS_I(inode)->write_behind_rc = 0;
		tcon = CIFS_SB(inode->i_sb)->tcon;
		if (!rc && tcon && smbfile &&
		   !(CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
			rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
	}

	FreeXid(xid);
	return rc;
}

/* NOTE: dead code kept commented out by the original authors */
/* static void cifs_sync_page(struct page *page)
{
	struct address_space *mapping;
	struct inode *inode;
	unsigned long index = page->index;
	unsigned int rpages = 0;
	int rc = 0;

	cFYI(1, "sync page %p", page);
	mapping = page->mapping;
	if (!mapping)
		return 0;
	inode = mapping->host;
	if (!inode)
		return; */

/*	fill in rpages then
	result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */

/*	cFYI(1, "rpages is %d for sync page of Index %ld", rpages, index);

#if 0
	if (rc < 0)
		return rc;

	return 0;
#endif
} */

/*
 * As file closes, flush all cached write data for this inode checking
 * for write behind errors.
*/ int cifs_flush(struct file *file, fl_owner_t id) { struct inode *inode = file->f_path.dentry->d_inode; int rc = 0; /* Rather than do the steps manually: lock the inode for writing loop through pages looking for write behind data (dirty pages) coalesce into contiguous 16K (or smaller) chunks to write to server send to server (prefer in parallel) deal with writebehind errors unlock inode for writing filemapfdatawrite appears easier for the time being */ rc = filemap_fdatawrite(inode->i_mapping); /* reset wb rc if we were able to write out dirty pages */ if (!rc) { rc = CIFS_I(inode)->write_behind_rc; CIFS_I(inode)->write_behind_rc = 0; } cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc); return rc; } ssize_t cifs_user_read(struct file *file, char __user *read_data, size_t read_size, loff_t *poffset) { int rc = -EACCES; unsigned int bytes_read = 0; unsigned int total_read = 0; unsigned int current_read_size; struct cifs_sb_info *cifs_sb; struct cifsTconInfo *pTcon; int xid; struct cifsFileInfo *open_file; char *smb_read_data; char __user *current_offset; struct smb_com_read_rsp *pSMBr; xid = GetXid(); cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); pTcon = cifs_sb->tcon; if (file->private_data == NULL) { rc = -EBADF; FreeXid(xid); return rc; } open_file = (struct cifsFileInfo *)file->private_data; if ((file->f_flags & O_ACCMODE) == O_WRONLY) cFYI(1, "attempting read on write only file instance"); for (total_read = 0, current_offset = read_data; read_size > total_read; total_read += bytes_read, current_offset += bytes_read) { current_read_size = min_t(const int, read_size - total_read, cifs_sb->rsize); rc = -EAGAIN; smb_read_data = NULL; while (rc == -EAGAIN) { int buf_type = CIFS_NO_BUFFER; if ((open_file->invalidHandle) && (!open_file->closePend)) { rc = cifs_reopen_file(file, true); if (rc != 0) break; } rc = CIFSSMBRead(xid, pTcon, open_file->netfid, current_read_size, *poffset, &bytes_read, &smb_read_data, &buf_type); pSMBr = (struct smb_com_read_rsp 
*)smb_read_data; if (smb_read_data) { if (copy_to_user(current_offset, smb_read_data + 4 /* RFC1001 length field */ + le16_to_cpu(pSMBr->DataOffset), bytes_read)) rc = -EFAULT; if (buf_type == CIFS_SMALL_BUFFER) cifs_small_buf_release(smb_read_data); else if (buf_type == CIFS_LARGE_BUFFER) cifs_buf_release(smb_read_data); smb_read_data = NULL; } } if (rc || (bytes_read == 0)) { if (total_read) { break; } else { FreeXid(xid); return rc; } } else { cifs_stats_bytes_read(pTcon, bytes_read); *poffset += bytes_read; } } FreeXid(xid); return total_read; } static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *poffset) { int rc = -EACCES; unsigned int bytes_read = 0; unsigned int total_read; unsigned int current_read_size; struct cifs_sb_info *cifs_sb; struct cifsTconInfo *pTcon; int xid; char *current_offset; struct cifsFileInfo *open_file; int buf_type = CIFS_NO_BUFFER; xid = GetXid(); cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); pTcon = cifs_sb->tcon; if (file->private_data == NULL) { rc = -EBADF; FreeXid(xid); return rc; } open_file = (struct cifsFileInfo *)file->private_data; if ((file->f_flags & O_ACCMODE) == O_WRONLY) cFYI(1, "attempting read on write only file instance"); for (total_read = 0, current_offset = read_data; read_size > total_read; total_read += bytes_read, current_offset += bytes_read) { current_read_size = min_t(const int, read_size - total_read, cifs_sb->rsize); /* For windows me and 9x we do not want to request more than it negotiated since it will refuse the read then */ if ((pTcon->ses) && !(pTcon->ses->capabilities & CAP_LARGE_FILES)) { current_read_size = min_t(const int, current_read_size, pTcon->ses->server->maxBuf - 128); } rc = -EAGAIN; while (rc == -EAGAIN) { if ((open_file->invalidHandle) && (!open_file->closePend)) { rc = cifs_reopen_file(file, true); if (rc != 0) break; } rc = CIFSSMBRead(xid, pTcon, open_file->netfid, current_read_size, *poffset, &bytes_read, &current_offset, &buf_type); } if (rc || 
(bytes_read == 0)) { if (total_read) { break; } else { FreeXid(xid); return rc; } } else { cifs_stats_bytes_read(pTcon, total_read); *poffset += bytes_read; } } FreeXid(xid); return total_read; } int cifs_file_mmap(struct file *file, struct vm_area_struct *vma) { int rc, xid; xid = GetXid(); rc = cifs_revalidate_file(file); if (rc) { cFYI(1, "Validation prior to mmap failed, error=%d", rc); FreeXid(xid); return rc; } rc = generic_file_mmap(file, vma); FreeXid(xid); return rc; } static void cifs_copy_cache_pages(struct address_space *mapping, struct list_head *pages, int bytes_read, char *data) { struct page *page; char *target; while (bytes_read > 0) { if (list_empty(pages)) break; page = list_entry(pages->prev, struct page, lru); list_del(&page->lru); if (add_to_page_cache_lru(page, mapping, page->index, GFP_KERNEL)) { page_cache_release(page); cFYI(1, "Add page cache failed"); data += PAGE_CACHE_SIZE; bytes_read -= PAGE_CACHE_SIZE; continue; } page_cache_release(page); target = kmap_atomic(page, KM_USER0); if (PAGE_CACHE_SIZE > bytes_read) { memcpy(target, data, bytes_read); /* zero the tail end of this partial page */ memset(target + bytes_read, 0, PAGE_CACHE_SIZE - bytes_read); bytes_read = 0; } else { memcpy(target, data, PAGE_CACHE_SIZE); bytes_read -= PAGE_CACHE_SIZE; } kunmap_atomic(target, KM_USER0); flush_dcache_page(page); SetPageUptodate(page); unlock_page(page); data += PAGE_CACHE_SIZE; } return; } static int cifs_readpages(struct file *file, struct address_space *mapping, struct list_head *page_list, unsigned num_pages) { int rc = -EACCES; int xid; loff_t offset; struct page *page; struct cifs_sb_info *cifs_sb; struct cifsTconInfo *pTcon; unsigned int bytes_read = 0; unsigned int read_size, i; char *smb_read_data = NULL; struct smb_com_read_rsp *pSMBr; struct cifsFileInfo *open_file; int buf_type = CIFS_NO_BUFFER; xid = GetXid(); if (file->private_data == NULL) { rc = -EBADF; FreeXid(xid); return rc; } open_file = (struct cifsFileInfo 
*)file->private_data; cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); pTcon = cifs_sb->tcon; cFYI(DBG2, "rpages: num pages %d", num_pages); for (i = 0; i < num_pages; ) { unsigned contig_pages; struct page *tmp_page; unsigned long expected_index; if (list_empty(page_list)) break; page = list_entry(page_list->prev, struct page, lru); offset = (loff_t)page->index << PAGE_CACHE_SHIFT; /* count adjacent pages that we will read into */ contig_pages = 0; expected_index = list_entry(page_list->prev, struct page, lru)->index; list_for_each_entry_reverse(tmp_page, page_list, lru) { if (tmp_page->index == expected_index) { contig_pages++; expected_index++; } else break; } if (contig_pages + i > num_pages) contig_pages = num_pages - i; /* for reads over a certain size could initiate async read ahead */ read_size = contig_pages * PAGE_CACHE_SIZE; /* Read size needs to be in multiples of one page */ read_size = min_t(const unsigned int, read_size, cifs_sb->rsize & PAGE_CACHE_MASK); cFYI(DBG2, "rpages: read size 0x%x contiguous pages %d", read_size, contig_pages); rc = -EAGAIN; while (rc == -EAGAIN) { if ((open_file->invalidHandle) && (!open_file->closePend)) { rc = cifs_reopen_file(file, true); if (rc != 0) break; } rc = CIFSSMBRead(xid, pTcon, open_file->netfid, read_size, offset, &bytes_read, &smb_read_data, &buf_type); /* BB more RC checks ? 
*/ if (rc == -EAGAIN) { if (smb_read_data) { if (buf_type == CIFS_SMALL_BUFFER) cifs_small_buf_release(smb_read_data); else if (buf_type == CIFS_LARGE_BUFFER) cifs_buf_release(smb_read_data); smb_read_data = NULL; } } } if ((rc < 0) || (smb_read_data == NULL)) { cFYI(1, "Read error in readpages: %d", rc); break; } else if (bytes_read > 0) { task_io_account_read(bytes_read); pSMBr = (struct smb_com_read_rsp *)smb_read_data; cifs_copy_cache_pages(mapping, page_list, bytes_read, smb_read_data + 4 /* RFC1001 hdr */ + le16_to_cpu(pSMBr->DataOffset)); i += bytes_read >> PAGE_CACHE_SHIFT; cifs_stats_bytes_read(pTcon, bytes_read); if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) { i++; /* account for partial page */ /* server copy of file can have smaller size than client */ /* BB do we need to verify this common case ? this case is ok - if we are at server EOF we will hit it on next read */ /* break; */ } } else { cFYI(1, "No bytes read (%d) at offset %lld . " "Cleaning remaining pages from readahead list", bytes_read, offset); /* BB turn off caching and do new lookup on file size at server? 
*/ break; } if (smb_read_data) { if (buf_type == CIFS_SMALL_BUFFER) cifs_small_buf_release(smb_read_data); else if (buf_type == CIFS_LARGE_BUFFER) cifs_buf_release(smb_read_data); smb_read_data = NULL; } bytes_read = 0; } /* need to free smb_read_data buf before exit */ if (smb_read_data) { if (buf_type == CIFS_SMALL_BUFFER) cifs_small_buf_release(smb_read_data); else if (buf_type == CIFS_LARGE_BUFFER) cifs_buf_release(smb_read_data); smb_read_data = NULL; } FreeXid(xid); return rc; } static int cifs_readpage_worker(struct file *file, struct page *page, loff_t *poffset) { char *read_data; int rc; page_cache_get(page); read_data = kmap(page); /* for reads over a certain size could initiate async read ahead */ rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset); if (rc < 0) goto io_error; else cFYI(1, "Bytes read %d", rc); file->f_path.dentry->d_inode->i_atime = current_fs_time(file->f_path.dentry->d_inode->i_sb); if (PAGE_CACHE_SIZE > rc) memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc); flush_dcache_page(page); SetPageUptodate(page); rc = 0; io_error: kunmap(page); page_cache_release(page); return rc; } static int cifs_readpage(struct file *file, struct page *page) { loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT; int rc = -EACCES; int xid; xid = GetXid(); if (file->private_data == NULL) { rc = -EBADF; FreeXid(xid); return rc; } cFYI(1, "readpage %p at offset %d 0x%x\n", page, (int)offset, (int)offset); rc = cifs_readpage_worker(file, page, &offset); unlock_page(page); FreeXid(xid); return rc; } static int is_inode_writable(struct cifsInodeInfo *cifs_inode) { struct cifsFileInfo *open_file; read_lock(&GlobalSMBSeslock); list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { if (open_file->closePend) continue; if (open_file->pfile && ((open_file->pfile->f_flags & O_RDWR) || (open_file->pfile->f_flags & O_WRONLY))) { read_unlock(&GlobalSMBSeslock); return 1; } } read_unlock(&GlobalSMBSeslock); return 0; } /* We do not want to update the file 
size from server for inodes open for write - to avoid races with writepage extending the file - in the future we could consider allowing refreshing the inode only on increases in the file size but this is tricky to do without racing with writebehind page caching in the current Linux kernel design */ bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file) { if (!cifsInode) return true; if (is_inode_writable(cifsInode)) { /* This inode is open for write at least once */ struct cifs_sb_info *cifs_sb; cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) { /* since no page cache to corrupt on directio we can change size safely */ return true; } if (i_size_read(&cifsInode->vfs_inode) < end_of_file) return true; return false; } else return true; } static int cifs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { pgoff_t index = pos >> PAGE_CACHE_SHIFT; loff_t offset = pos & (PAGE_CACHE_SIZE - 1); loff_t page_start = pos & PAGE_MASK; loff_t i_size; struct page *page; int rc = 0; cFYI(1, "write_begin from %lld len %d", (long long)pos, len); page = grab_cache_page_write_begin(mapping, index, flags); if (!page) { rc = -ENOMEM; goto out; } if (PageUptodate(page)) goto out; /* * If we write a full page it will be up to date, no need to read from * the server. If the write is short, we'll end up doing a sync write * instead. */ if (len == PAGE_CACHE_SIZE) goto out; /* * optimize away the read when we have an oplock, and we're not * expecting to use any of the data we'd be reading in. That * is, when the page lies beyond the EOF, or straddles the EOF * and the write will cover all of the existing data. 
*/ if (CIFS_I(mapping->host)->clientCanCacheRead) { i_size = i_size_read(mapping->host); if (page_start >= i_size || (offset == 0 && (pos + len) >= i_size)) { zero_user_segments(page, 0, offset, offset + len, PAGE_CACHE_SIZE); /* * PageChecked means that the parts of the page * to which we're not writing are considered up * to date. Once the data is copied to the * page, it can be set uptodate. */ SetPageChecked(page); goto out; } } if ((file->f_flags & O_ACCMODE) != O_WRONLY) { /* * might as well read a page, it is fast enough. If we get * an error, we don't need to return it. cifs_write_end will * do a sync write instead since PG_uptodate isn't set. */ cifs_readpage_worker(file, page, &page_start); } else { /* we could try using another file handle if there is one - but how would we lock it to prevent close of that handle racing with this read? In any case this will be written out by write_end so is fine */ } out: *pagep = page; return rc; } static void cifs_oplock_break(struct slow_work *work) { struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo, oplock_break); struct inode *inode = cfile->pInode; struct cifsInodeInfo *cinode = CIFS_I(inode); struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->mnt->mnt_sb); int rc, waitrc = 0; if (inode && S_ISREG(inode->i_mode)) { if (cinode->clientCanCacheRead) break_lease(inode, O_RDONLY); else break_lease(inode, O_WRONLY); rc = filemap_fdatawrite(inode->i_mapping); if (cinode->clientCanCacheRead == 0) { waitrc = filemap_fdatawait(inode->i_mapping); invalidate_remote_inode(inode); } if (!rc) rc = waitrc; if (rc) cinode->write_behind_rc = rc; cFYI(1, "Oplock flush inode %p rc %d", inode, rc); } /* * releasing stale oplock after recent reconnect of smb session using * a now incorrect file handle is not a data integrity issue but do * not bother sending an oplock release if session to server still is * disconnected since oplock already released by the server */ if (!cfile->closePend && !cfile->oplock_break_cancelled) 
{ rc = CIFSSMBLock(0, cifs_sb->tcon, cfile->netfid, 0, 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false); cFYI(1, "Oplock release rc = %d", rc); } } static int cifs_oplock_break_get(struct slow_work *work) { struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo, oplock_break); mntget(cfile->mnt); cifsFileInfo_get(cfile); return 0; } static void cifs_oplock_break_put(struct slow_work *work) { struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo, oplock_break); mntput(cfile->mnt); cifsFileInfo_put(cfile); } const struct slow_work_ops cifs_oplock_break_ops = { .get_ref = cifs_oplock_break_get, .put_ref = cifs_oplock_break_put, .execute = cifs_oplock_break, }; const struct address_space_operations cifs_addr_ops = { .readpage = cifs_readpage, .readpages = cifs_readpages, .writepage = cifs_writepage, .writepages = cifs_writepages, .write_begin = cifs_write_begin, .write_end = cifs_write_end, .set_page_dirty = __set_page_dirty_nobuffers, /* .sync_page = cifs_sync_page, */ /* .direct_IO = */ }; /* * cifs_readpages requires the server to support a buffer large enough to * contain the header plus one complete page of data. Otherwise, we need * to leave cifs_readpages out of the address space operations. */ const struct address_space_operations cifs_addr_ops_smallbuf = { .readpage = cifs_readpage, .writepage = cifs_writepage, .writepages = cifs_writepages, .write_begin = cifs_write_begin, .write_end = cifs_write_end, .set_page_dirty = __set_page_dirty_nobuffers, /* .sync_page = cifs_sync_page, */ /* .direct_IO = */ };
gpl-2.0
EZchip/linux
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
940
5367
/******************************************************************************* PTP 1588 clock using the STMMAC. Copyright (C) 2013 Vayavya Labs Pvt Ltd This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Author: Rayagond Kokatanur <rayagond@vayavyalabs.com> *******************************************************************************/ #include "stmmac.h" #include "stmmac_ptp.h" /** * stmmac_adjust_freq * * @ptp: pointer to ptp_clock_info structure * @ppb: desired period change in parts ber billion * * Description: this function will adjust the frequency of hardware clock. */ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb) { struct stmmac_priv *priv = container_of(ptp, struct stmmac_priv, ptp_clock_ops); unsigned long flags; u32 diff, addend; int neg_adj = 0; u64 adj; if (ppb < 0) { neg_adj = 1; ppb = -ppb; } addend = priv->default_addend; adj = addend; adj *= ppb; diff = div_u64(adj, 1000000000ULL); addend = neg_adj ? 
(addend - diff) : (addend + diff); spin_lock_irqsave(&priv->ptp_lock, flags); priv->hw->ptp->config_addend(priv->ioaddr, addend); spin_unlock_irqrestore(&priv->ptp_lock, flags); return 0; } /** * stmmac_adjust_time * * @ptp: pointer to ptp_clock_info structure * @delta: desired change in nanoseconds * * Description: this function will shift/adjust the hardware clock time. */ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) { struct stmmac_priv *priv = container_of(ptp, struct stmmac_priv, ptp_clock_ops); unsigned long flags; u32 sec, nsec; u32 quotient, reminder; int neg_adj = 0; if (delta < 0) { neg_adj = 1; delta = -delta; } quotient = div_u64_rem(delta, 1000000000ULL, &reminder); sec = quotient; nsec = reminder; spin_lock_irqsave(&priv->ptp_lock, flags); priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj); spin_unlock_irqrestore(&priv->ptp_lock, flags); return 0; } /** * stmmac_get_time * * @ptp: pointer to ptp_clock_info structure * @ts: pointer to hold time/result * * Description: this function will read the current time from the * hardware clock and store it in @ts. */ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts) { struct stmmac_priv *priv = container_of(ptp, struct stmmac_priv, ptp_clock_ops); unsigned long flags; u64 ns; spin_lock_irqsave(&priv->ptp_lock, flags); ns = priv->hw->ptp->get_systime(priv->ioaddr); spin_unlock_irqrestore(&priv->ptp_lock, flags); *ts = ns_to_timespec64(ns); return 0; } /** * stmmac_set_time * * @ptp: pointer to ptp_clock_info structure * @ts: time value to set * * Description: this function will set the current time on the * hardware clock. 
*/ static int stmmac_set_time(struct ptp_clock_info *ptp, const struct timespec64 *ts) { struct stmmac_priv *priv = container_of(ptp, struct stmmac_priv, ptp_clock_ops); unsigned long flags; spin_lock_irqsave(&priv->ptp_lock, flags); priv->hw->ptp->init_systime(priv->ioaddr, ts->tv_sec, ts->tv_nsec); spin_unlock_irqrestore(&priv->ptp_lock, flags); return 0; } static int stmmac_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) { return -EOPNOTSUPP; } /* structure describing a PTP hardware clock */ static struct ptp_clock_info stmmac_ptp_clock_ops = { .owner = THIS_MODULE, .name = "stmmac_ptp_clock", .max_adj = 62500000, .n_alarm = 0, .n_ext_ts = 0, .n_per_out = 0, .n_pins = 0, .pps = 0, .adjfreq = stmmac_adjust_freq, .adjtime = stmmac_adjust_time, .gettime64 = stmmac_get_time, .settime64 = stmmac_set_time, .enable = stmmac_enable, }; /** * stmmac_ptp_register * @priv: driver private structure * Description: this function will register the ptp clock driver * to kernel. It also does some house keeping work. */ int stmmac_ptp_register(struct stmmac_priv *priv) { spin_lock_init(&priv->ptp_lock); priv->ptp_clock_ops = stmmac_ptp_clock_ops; priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops, priv->device); if (IS_ERR(priv->ptp_clock)) { priv->ptp_clock = NULL; pr_err("ptp_clock_register() failed on %s\n", priv->dev->name); } else pr_debug("Added PTP HW clock successfully on %s\n", priv->dev->name); return 0; } /** * stmmac_ptp_unregister * @priv: driver private structure * Description: this function will remove/unregister the ptp clock driver * from the kernel. */ void stmmac_ptp_unregister(struct stmmac_priv *priv) { if (priv->ptp_clock) { ptp_clock_unregister(priv->ptp_clock); priv->ptp_clock = NULL; pr_debug("Removed PTP HW clock successfully on %s\n", priv->dev->name); } }
gpl-2.0
XCage15/linux-1
drivers/input/keyboard/sun4i-lradc-keys.c
1196
7763
/* * Allwinner sun4i low res adc attached tablet keys driver * * Copyright (C) 2014 Hans de Goede <hdegoede@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * Allwinnner sunxi SoCs have a lradc which is specifically designed to have * various (tablet) keys (ie home, back, search, etc). attached to it using * a resistor network. This driver is for the keys on such boards. * * There are 2 channels, currently this driver only supports channel 0 since * there are no boards known to use channel 1. */ #include <linux/err.h> #include <linux/init.h> #include <linux/input.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> #define LRADC_CTRL 0x00 #define LRADC_INTC 0x04 #define LRADC_INTS 0x08 #define LRADC_DATA0 0x0c #define LRADC_DATA1 0x10 /* LRADC_CTRL bits */ #define FIRST_CONVERT_DLY(x) ((x) << 24) /* 8 bits */ #define CHAN_SELECT(x) ((x) << 22) /* 2 bits */ #define CONTINUE_TIME_SEL(x) ((x) << 16) /* 4 bits */ #define KEY_MODE_SEL(x) ((x) << 12) /* 2 bits */ #define LEVELA_B_CNT(x) ((x) << 8) /* 4 bits */ #define HOLD_EN(x) ((x) << 6) #define LEVELB_VOL(x) ((x) << 4) /* 2 bits */ #define SAMPLE_RATE(x) ((x) << 2) /* 2 bits */ #define ENABLE(x) ((x) << 0) /* LRADC_INTC and LRADC_INTS bits */ #define CHAN1_KEYUP_IRQ BIT(12) #define CHAN1_ALRDY_HOLD_IRQ BIT(11) #define CHAN1_HOLD_IRQ BIT(10) #define CHAN1_KEYDOWN_IRQ BIT(9) #define CHAN1_DATA_IRQ BIT(8) #define 
CHAN0_KEYUP_IRQ BIT(4) #define CHAN0_ALRDY_HOLD_IRQ BIT(3) #define CHAN0_HOLD_IRQ BIT(2) #define CHAN0_KEYDOWN_IRQ BIT(1) #define CHAN0_DATA_IRQ BIT(0) struct sun4i_lradc_keymap { u32 voltage; u32 keycode; }; struct sun4i_lradc_data { struct device *dev; struct input_dev *input; void __iomem *base; struct regulator *vref_supply; struct sun4i_lradc_keymap *chan0_map; u32 chan0_map_count; u32 chan0_keycode; u32 vref; }; static irqreturn_t sun4i_lradc_irq(int irq, void *dev_id) { struct sun4i_lradc_data *lradc = dev_id; u32 i, ints, val, voltage, diff, keycode = 0, closest = 0xffffffff; ints = readl(lradc->base + LRADC_INTS); /* * lradc supports only one keypress at a time, release does not give * any info as to which key was released, so we cache the keycode. */ if (ints & CHAN0_KEYUP_IRQ) { input_report_key(lradc->input, lradc->chan0_keycode, 0); lradc->chan0_keycode = 0; } if ((ints & CHAN0_KEYDOWN_IRQ) && lradc->chan0_keycode == 0) { val = readl(lradc->base + LRADC_DATA0) & 0x3f; voltage = val * lradc->vref / 63; for (i = 0; i < lradc->chan0_map_count; i++) { diff = abs(lradc->chan0_map[i].voltage - voltage); if (diff < closest) { closest = diff; keycode = lradc->chan0_map[i].keycode; } } lradc->chan0_keycode = keycode; input_report_key(lradc->input, lradc->chan0_keycode, 1); } input_sync(lradc->input); writel(ints, lradc->base + LRADC_INTS); return IRQ_HANDLED; } static int sun4i_lradc_open(struct input_dev *dev) { struct sun4i_lradc_data *lradc = input_get_drvdata(dev); int error; error = regulator_enable(lradc->vref_supply); if (error) return error; /* lradc Vref internally is divided by 2/3 */ lradc->vref = regulator_get_voltage(lradc->vref_supply) * 2 / 3; /* * Set sample time to 4 ms / 250 Hz. 
Wait 2 * 4 ms for key to * stabilize on press, wait (1 + 1) * 4 ms for key release */ writel(FIRST_CONVERT_DLY(2) | LEVELA_B_CNT(1) | HOLD_EN(1) | SAMPLE_RATE(0) | ENABLE(1), lradc->base + LRADC_CTRL); writel(CHAN0_KEYUP_IRQ | CHAN0_KEYDOWN_IRQ, lradc->base + LRADC_INTC); return 0; } static void sun4i_lradc_close(struct input_dev *dev) { struct sun4i_lradc_data *lradc = input_get_drvdata(dev); /* Disable lradc, leave other settings unchanged */ writel(FIRST_CONVERT_DLY(2) | LEVELA_B_CNT(1) | HOLD_EN(1) | SAMPLE_RATE(2), lradc->base + LRADC_CTRL); writel(0, lradc->base + LRADC_INTC); regulator_disable(lradc->vref_supply); } static int sun4i_lradc_load_dt_keymap(struct device *dev, struct sun4i_lradc_data *lradc) { struct device_node *np, *pp; int i; int error; np = dev->of_node; if (!np) return -EINVAL; lradc->chan0_map_count = of_get_child_count(np); if (lradc->chan0_map_count == 0) { dev_err(dev, "keymap is missing in device tree\n"); return -EINVAL; } lradc->chan0_map = devm_kmalloc_array(dev, lradc->chan0_map_count, sizeof(struct sun4i_lradc_keymap), GFP_KERNEL); if (!lradc->chan0_map) return -ENOMEM; i = 0; for_each_child_of_node(np, pp) { struct sun4i_lradc_keymap *map = &lradc->chan0_map[i]; u32 channel; error = of_property_read_u32(pp, "channel", &channel); if (error || channel != 0) { dev_err(dev, "%s: Inval channel prop\n", pp->name); return -EINVAL; } error = of_property_read_u32(pp, "voltage", &map->voltage); if (error) { dev_err(dev, "%s: Inval voltage prop\n", pp->name); return -EINVAL; } error = of_property_read_u32(pp, "linux,code", &map->keycode); if (error) { dev_err(dev, "%s: Inval linux,code prop\n", pp->name); return -EINVAL; } i++; } return 0; } static int sun4i_lradc_probe(struct platform_device *pdev) { struct sun4i_lradc_data *lradc; struct device *dev = &pdev->dev; int i; int error; lradc = devm_kzalloc(dev, sizeof(struct sun4i_lradc_data), GFP_KERNEL); if (!lradc) return -ENOMEM; error = sun4i_lradc_load_dt_keymap(dev, lradc); if (error) 
return error; lradc->vref_supply = devm_regulator_get(dev, "vref"); if (IS_ERR(lradc->vref_supply)) return PTR_ERR(lradc->vref_supply); lradc->dev = dev; lradc->input = devm_input_allocate_device(dev); if (!lradc->input) return -ENOMEM; lradc->input->name = pdev->name; lradc->input->phys = "sun4i_lradc/input0"; lradc->input->open = sun4i_lradc_open; lradc->input->close = sun4i_lradc_close; lradc->input->id.bustype = BUS_HOST; lradc->input->id.vendor = 0x0001; lradc->input->id.product = 0x0001; lradc->input->id.version = 0x0100; __set_bit(EV_KEY, lradc->input->evbit); for (i = 0; i < lradc->chan0_map_count; i++) __set_bit(lradc->chan0_map[i].keycode, lradc->input->keybit); input_set_drvdata(lradc->input, lradc); lradc->base = devm_ioremap_resource(dev, platform_get_resource(pdev, IORESOURCE_MEM, 0)); if (IS_ERR(lradc->base)) return PTR_ERR(lradc->base); error = devm_request_irq(dev, platform_get_irq(pdev, 0), sun4i_lradc_irq, 0, "sun4i-a10-lradc-keys", lradc); if (error) return error; error = input_register_device(lradc->input); if (error) return error; platform_set_drvdata(pdev, lradc); return 0; } static const struct of_device_id sun4i_lradc_of_match[] = { { .compatible = "allwinner,sun4i-a10-lradc-keys", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sun4i_lradc_of_match); static struct platform_driver sun4i_lradc_driver = { .driver = { .name = "sun4i-a10-lradc-keys", .of_match_table = of_match_ptr(sun4i_lradc_of_match), }, .probe = sun4i_lradc_probe, }; module_platform_driver(sun4i_lradc_driver); MODULE_DESCRIPTION("Allwinner sun4i low res adc attached tablet keys driver"); MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); MODULE_LICENSE("GPL");
gpl-2.0
OMFGBKANG/nk2
kernel/user-return-notifier.c
1196
1352
#include <linux/user-return-notifier.h> #include <linux/percpu.h> #include <linux/sched.h> #include <linux/module.h> static DEFINE_PER_CPU(struct hlist_head, return_notifier_list); /* * Request a notification when the current cpu returns to userspace. Must be * called in atomic context. The notifier will also be called in atomic * context. */ void user_return_notifier_register(struct user_return_notifier *urn) { set_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY); hlist_add_head(&urn->link, &__get_cpu_var(return_notifier_list)); } EXPORT_SYMBOL_GPL(user_return_notifier_register); /* * Removes a registered user return notifier. Must be called from atomic * context, and from the same cpu registration occured in. */ void user_return_notifier_unregister(struct user_return_notifier *urn) { hlist_del(&urn->link); if (hlist_empty(&__get_cpu_var(return_notifier_list))) clear_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY); } EXPORT_SYMBOL_GPL(user_return_notifier_unregister); /* Calls registered user return notifiers */ void fire_user_return_notifiers(void) { struct user_return_notifier *urn; struct hlist_node *tmp1, *tmp2; struct hlist_head *head; head = &get_cpu_var(return_notifier_list); hlist_for_each_entry_safe(urn, tmp1, tmp2, head, link) urn->on_user_return(urn); put_cpu_var(return_notifier_list); }
gpl-2.0
ivanich/android_kernel_oneplus_msm8996
arch/arm/mach-axxia/platsmp.c
1452
2189
/* * linux/arch/arm/mach-axxia/platsmp.c * * Copyright (C) 2012 LSI Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/io.h> #include <linux/smp.h> #include <linux/of.h> #include <linux/of_address.h> #include <asm/cacheflush.h> /* Syscon register offsets for releasing cores from reset */ #define SC_CRIT_WRITE_KEY 0x1000 #define SC_RST_CPU_HOLD 0x1010 /* * Write the kernel entry point for secondary CPUs to the specified address */ static void write_release_addr(u32 release_phys) { u32 *virt = (u32 *) phys_to_virt(release_phys); writel_relaxed(virt_to_phys(secondary_startup), virt); /* Make sure this store is visible to other CPUs */ smp_wmb(); __cpuc_flush_dcache_area(virt, sizeof(u32)); } static int axxia_boot_secondary(unsigned int cpu, struct task_struct *idle) { struct device_node *syscon_np; void __iomem *syscon; u32 tmp; syscon_np = of_find_compatible_node(NULL, NULL, "lsi,axxia-syscon"); if (!syscon_np) return -ENOENT; syscon = of_iomap(syscon_np, 0); if (!syscon) return -ENOMEM; tmp = readl(syscon + SC_RST_CPU_HOLD); writel(0xab, syscon + SC_CRIT_WRITE_KEY); tmp &= ~(1 << cpu); writel(tmp, syscon + SC_RST_CPU_HOLD); return 0; } static void __init axxia_smp_prepare_cpus(unsigned int max_cpus) { int cpu_count = 0; int cpu; /* * Initialise the present map, which describes the set of CPUs actually * populated at the present time. 
*/ for_each_possible_cpu(cpu) { struct device_node *np; u32 release_phys; np = of_get_cpu_node(cpu, NULL); if (!np) continue; if (of_property_read_u32(np, "cpu-release-addr", &release_phys)) continue; if (cpu_count < max_cpus) { set_cpu_present(cpu, true); cpu_count++; } if (release_phys != 0) write_release_addr(release_phys); } } static struct smp_operations axxia_smp_ops __initdata = { .smp_prepare_cpus = axxia_smp_prepare_cpus, .smp_boot_secondary = axxia_boot_secondary, }; CPU_METHOD_OF_DECLARE(axxia_smp, "lsi,syscon-release", &axxia_smp_ops);
gpl-2.0
shrike1978/ermahgerd_kernel_vigor
arch/arm/plat-s3c24xx/gpiolib.c
2988
4854
/* linux/arch/arm/plat-s3c24xx/gpiolib.c * * Copyright (c) 2008-2010 Simtec Electronics * http://armlinux.simtec.co.uk/ * Ben Dooks <ben@simtec.co.uk> * * S3C24XX GPIOlib support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/sysdev.h> #include <linux/ioport.h> #include <linux/io.h> #include <linux/gpio.h> #include <plat/gpio-core.h> #include <plat/gpio-cfg.h> #include <plat/gpio-cfg-helpers.h> #include <mach/hardware.h> #include <asm/irq.h> #include <plat/pm.h> #include <mach/regs-gpio.h> static int s3c24xx_gpiolib_banka_input(struct gpio_chip *chip, unsigned offset) { return -EINVAL; } static int s3c24xx_gpiolib_banka_output(struct gpio_chip *chip, unsigned offset, int value) { struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip); void __iomem *base = ourchip->base; unsigned long flags; unsigned long dat; unsigned long con; local_irq_save(flags); con = __raw_readl(base + 0x00); dat = __raw_readl(base + 0x04); dat &= ~(1 << offset); if (value) dat |= 1 << offset; __raw_writel(dat, base + 0x04); con &= ~(1 << offset); __raw_writel(con, base + 0x00); __raw_writel(dat, base + 0x04); local_irq_restore(flags); return 0; } static int s3c24xx_gpiolib_bankf_toirq(struct gpio_chip *chip, unsigned offset) { if (offset < 4) return IRQ_EINT0 + offset; if (offset < 8) return IRQ_EINT4 + offset - 4; return -EINVAL; } static struct s3c_gpio_cfg s3c24xx_gpiocfg_banka = { .set_config = s3c_gpio_setcfg_s3c24xx_a, .get_config = s3c_gpio_getcfg_s3c24xx_a, }; struct s3c_gpio_cfg s3c24xx_gpiocfg_default = { .set_config = s3c_gpio_setcfg_s3c24xx, .get_config = s3c_gpio_getcfg_s3c24xx, }; struct s3c_gpio_chip s3c24xx_gpios[] = { [0] = { .base = S3C2410_GPACON, .pm = __gpio_pm(&s3c_gpio_pm_1bit), .config = 
&s3c24xx_gpiocfg_banka, .chip = { .base = S3C2410_GPA(0), .owner = THIS_MODULE, .label = "GPIOA", .ngpio = 24, .direction_input = s3c24xx_gpiolib_banka_input, .direction_output = s3c24xx_gpiolib_banka_output, }, }, [1] = { .base = S3C2410_GPBCON, .pm = __gpio_pm(&s3c_gpio_pm_2bit), .chip = { .base = S3C2410_GPB(0), .owner = THIS_MODULE, .label = "GPIOB", .ngpio = 16, }, }, [2] = { .base = S3C2410_GPCCON, .pm = __gpio_pm(&s3c_gpio_pm_2bit), .chip = { .base = S3C2410_GPC(0), .owner = THIS_MODULE, .label = "GPIOC", .ngpio = 16, }, }, [3] = { .base = S3C2410_GPDCON, .pm = __gpio_pm(&s3c_gpio_pm_2bit), .chip = { .base = S3C2410_GPD(0), .owner = THIS_MODULE, .label = "GPIOD", .ngpio = 16, }, }, [4] = { .base = S3C2410_GPECON, .pm = __gpio_pm(&s3c_gpio_pm_2bit), .chip = { .base = S3C2410_GPE(0), .label = "GPIOE", .owner = THIS_MODULE, .ngpio = 16, }, }, [5] = { .base = S3C2410_GPFCON, .pm = __gpio_pm(&s3c_gpio_pm_2bit), .chip = { .base = S3C2410_GPF(0), .owner = THIS_MODULE, .label = "GPIOF", .ngpio = 8, .to_irq = s3c24xx_gpiolib_bankf_toirq, }, }, [6] = { .base = S3C2410_GPGCON, .pm = __gpio_pm(&s3c_gpio_pm_2bit), .irq_base = IRQ_EINT8, .chip = { .base = S3C2410_GPG(0), .owner = THIS_MODULE, .label = "GPIOG", .ngpio = 16, .to_irq = samsung_gpiolib_to_irq, }, }, { .base = S3C2410_GPHCON, .pm = __gpio_pm(&s3c_gpio_pm_2bit), .chip = { .base = S3C2410_GPH(0), .owner = THIS_MODULE, .label = "GPIOH", .ngpio = 11, }, }, /* GPIOS for the S3C2443 and later devices. 
*/ { .base = S3C2440_GPJCON, .pm = __gpio_pm(&s3c_gpio_pm_2bit), .chip = { .base = S3C2410_GPJ(0), .owner = THIS_MODULE, .label = "GPIOJ", .ngpio = 16, }, }, { .base = S3C2443_GPKCON, .pm = __gpio_pm(&s3c_gpio_pm_2bit), .chip = { .base = S3C2410_GPK(0), .owner = THIS_MODULE, .label = "GPIOK", .ngpio = 16, }, }, { .base = S3C2443_GPLCON, .pm = __gpio_pm(&s3c_gpio_pm_2bit), .chip = { .base = S3C2410_GPL(0), .owner = THIS_MODULE, .label = "GPIOL", .ngpio = 15, }, }, { .base = S3C2443_GPMCON, .pm = __gpio_pm(&s3c_gpio_pm_2bit), .chip = { .base = S3C2410_GPM(0), .owner = THIS_MODULE, .label = "GPIOM", .ngpio = 2, }, }, }; static __init int s3c24xx_gpiolib_init(void) { struct s3c_gpio_chip *chip = s3c24xx_gpios; int gpn; for (gpn = 0; gpn < ARRAY_SIZE(s3c24xx_gpios); gpn++, chip++) { if (!chip->config) chip->config = &s3c24xx_gpiocfg_default; s3c_gpiolib_add(chip); } return 0; } core_initcall(s3c24xx_gpiolib_init);
gpl-2.0
usb-bullhead-ubuntu-touch/kernel_msm
arch/mips/dec/wbflush.c
4268
2095
/* * Setup the right wbflush routine for the different DECstations. * * Created with information from: * DECstation 3100 Desktop Workstation Functional Specification * DECstation 5000/200 KN02 System Module Functional Specification * mipsel-linux-objdump --disassemble vmunix | grep "wbflush" :-) * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1998 Harald Koerfgen * Copyright (C) 2002 Maciej W. Rozycki */ #include <linux/init.h> #include <asm/bootinfo.h> #include <asm/wbflush.h> #include <asm/barrier.h> static void wbflush_kn01(void); static void wbflush_kn210(void); static void wbflush_mips(void); void (*__wbflush) (void); void __init wbflush_setup(void) { switch (mips_machtype) { case MACH_DS23100: case MACH_DS5000_200: /* DS5000 3max */ __wbflush = wbflush_kn01; break; case MACH_DS5100: /* DS5100 MIPSMATE */ __wbflush = wbflush_kn210; break; case MACH_DS5000_1XX: /* DS5000/100 3min */ case MACH_DS5000_XX: /* Personal DS5000/2x */ case MACH_DS5000_2X0: /* DS5000/240 3max+ */ case MACH_DS5900: /* DS5900 bigmax */ default: __wbflush = wbflush_mips; break; } } /* * For the DS3100 and DS5000/200 the R2020/R3220 writeback buffer functions * as part of Coprocessor 0. */ static void wbflush_kn01(void) { asm(".set\tpush\n\t" ".set\tnoreorder\n\t" "1:\tbc0f\t1b\n\t" "nop\n\t" ".set\tpop"); } /* * For the DS5100 the writeback buffer seems to be a part of Coprocessor 3. * But CP3 has to enabled first. */ static void wbflush_kn210(void) { asm(".set\tpush\n\t" ".set\tnoreorder\n\t" "mfc0\t$2,$12\n\t" "lui\t$3,0x8000\n\t" "or\t$3,$2,$3\n\t" "mtc0\t$3,$12\n\t" "nop\n" "1:\tbc3f\t1b\n\t" "nop\n\t" "mtc0\t$2,$12\n\t" "nop\n\t" ".set\tpop" : : : "$2", "$3"); } /* * I/O ASIC systems use a standard writeback buffer that gets flushed * upon an uncached read. 
*/ static void wbflush_mips(void) { __fast_iob(); } #include <linux/module.h> EXPORT_SYMBOL(__wbflush);
gpl-2.0
geekzoo/linux
linux-3.18.5/arch/mips/dec/wbflush.c
4268
2095
/* * Setup the right wbflush routine for the different DECstations. * * Created with information from: * DECstation 3100 Desktop Workstation Functional Specification * DECstation 5000/200 KN02 System Module Functional Specification * mipsel-linux-objdump --disassemble vmunix | grep "wbflush" :-) * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1998 Harald Koerfgen * Copyright (C) 2002 Maciej W. Rozycki */ #include <linux/init.h> #include <asm/bootinfo.h> #include <asm/wbflush.h> #include <asm/barrier.h> static void wbflush_kn01(void); static void wbflush_kn210(void); static void wbflush_mips(void); void (*__wbflush) (void); void __init wbflush_setup(void) { switch (mips_machtype) { case MACH_DS23100: case MACH_DS5000_200: /* DS5000 3max */ __wbflush = wbflush_kn01; break; case MACH_DS5100: /* DS5100 MIPSMATE */ __wbflush = wbflush_kn210; break; case MACH_DS5000_1XX: /* DS5000/100 3min */ case MACH_DS5000_XX: /* Personal DS5000/2x */ case MACH_DS5000_2X0: /* DS5000/240 3max+ */ case MACH_DS5900: /* DS5900 bigmax */ default: __wbflush = wbflush_mips; break; } } /* * For the DS3100 and DS5000/200 the R2020/R3220 writeback buffer functions * as part of Coprocessor 0. */ static void wbflush_kn01(void) { asm(".set\tpush\n\t" ".set\tnoreorder\n\t" "1:\tbc0f\t1b\n\t" "nop\n\t" ".set\tpop"); } /* * For the DS5100 the writeback buffer seems to be a part of Coprocessor 3. * But CP3 has to enabled first. */ static void wbflush_kn210(void) { asm(".set\tpush\n\t" ".set\tnoreorder\n\t" "mfc0\t$2,$12\n\t" "lui\t$3,0x8000\n\t" "or\t$3,$2,$3\n\t" "mtc0\t$3,$12\n\t" "nop\n" "1:\tbc3f\t1b\n\t" "nop\n\t" "mtc0\t$2,$12\n\t" "nop\n\t" ".set\tpop" : : : "$2", "$3"); } /* * I/O ASIC systems use a standard writeback buffer that gets flushed * upon an uncached read. 
*/ static void wbflush_mips(void) { __fast_iob(); } #include <linux/module.h> EXPORT_SYMBOL(__wbflush);
gpl-2.0
MaxiCM-Test/android_kernel_lge_msm8226
drivers/net/can/cc770/cc770_isa.c
5036
9956
/* * Driver for CC770 and AN82527 CAN controllers on the legacy ISA bus * * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the version 2 of the GNU General Public License * as published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * Bosch CC770 and Intel AN82527 CAN controllers on the ISA or PC-104 bus. * The I/O port or memory address and the IRQ number must be specified via * module parameters: * * insmod cc770_isa.ko port=0x310,0x380 irq=7,11 * * for ISA devices using I/O ports or: * * insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 * * for memory mapped ISA devices. * * Indirect access via address and data port is supported as well: * * insmod cc770_isa.ko port=0x310,0x380 indirect=1 irq=7,11 * * Furthermore, the following mode parameter can be defined: * * clk: External oscillator clock frequency (default=16000000 [16 MHz]) * cir: CPU interface register (default=0x40 [DSC]) * bcr: Bus configuration register (default=0x40 [CBY]) * cor: Clockout register (default=0x00) * * Note: for clk, cir, bcr and cor, the first argument re-defines the * default for all other devices, e.g.: * * insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 clk=24000000 * * is equivalent to * * insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 clk=24000000,24000000 */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/platform/cc770.h> #include "cc770.h" #define MAXDEV 8 
MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>"); MODULE_DESCRIPTION("Socket-CAN driver for CC770 on the ISA bus"); MODULE_LICENSE("GPL v2"); #define CLK_DEFAULT 16000000 /* 16 MHz */ #define COR_DEFAULT 0x00 #define BCR_DEFAULT BUSCFG_CBY static unsigned long port[MAXDEV]; static unsigned long mem[MAXDEV]; static int __devinitdata irq[MAXDEV]; static int __devinitdata clk[MAXDEV]; static u8 __devinitdata cir[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; static u8 __devinitdata cor[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; static u8 __devinitdata bcr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; static int __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1}; module_param_array(port, ulong, NULL, S_IRUGO); MODULE_PARM_DESC(port, "I/O port number"); module_param_array(mem, ulong, NULL, S_IRUGO); MODULE_PARM_DESC(mem, "I/O memory address"); module_param_array(indirect, int, NULL, S_IRUGO); MODULE_PARM_DESC(indirect, "Indirect access via address and data port"); module_param_array(irq, int, NULL, S_IRUGO); MODULE_PARM_DESC(irq, "IRQ number"); module_param_array(clk, int, NULL, S_IRUGO); MODULE_PARM_DESC(clk, "External oscillator clock frequency " "(default=16000000 [16 MHz])"); module_param_array(cir, byte, NULL, S_IRUGO); MODULE_PARM_DESC(cir, "CPU interface register (default=0x40 [DSC])"); module_param_array(cor, byte, NULL, S_IRUGO); MODULE_PARM_DESC(cor, "Clockout register (default=0x00)"); module_param_array(bcr, byte, NULL, S_IRUGO); MODULE_PARM_DESC(bcr, "Bus configuration register (default=0x40 [CBY])"); #define CC770_IOSIZE 0x20 #define CC770_IOSIZE_INDIRECT 0x02 /* Spinlock for cc770_isa_port_write_reg_indirect * and cc770_isa_port_read_reg_indirect */ static DEFINE_SPINLOCK(cc770_isa_port_lock); static struct platform_device *cc770_isa_devs[MAXDEV]; static u8 cc770_isa_mem_read_reg(const struct cc770_priv *priv, int reg) { return readb(priv->reg_base + reg); } static void cc770_isa_mem_write_reg(const struct cc770_priv *priv, int reg, u8 val) { 
writeb(val, priv->reg_base + reg); } static u8 cc770_isa_port_read_reg(const struct cc770_priv *priv, int reg) { return inb((unsigned long)priv->reg_base + reg); } static void cc770_isa_port_write_reg(const struct cc770_priv *priv, int reg, u8 val) { outb(val, (unsigned long)priv->reg_base + reg); } static u8 cc770_isa_port_read_reg_indirect(const struct cc770_priv *priv, int reg) { unsigned long base = (unsigned long)priv->reg_base; unsigned long flags; u8 val; spin_lock_irqsave(&cc770_isa_port_lock, flags); outb(reg, base); val = inb(base + 1); spin_unlock_irqrestore(&cc770_isa_port_lock, flags); return val; } static void cc770_isa_port_write_reg_indirect(const struct cc770_priv *priv, int reg, u8 val) { unsigned long base = (unsigned long)priv->reg_base; unsigned long flags; spin_lock_irqsave(&cc770_isa_port_lock, flags); outb(reg, base); outb(val, base + 1); spin_unlock_irqrestore(&cc770_isa_port_lock, flags); } static int __devinit cc770_isa_probe(struct platform_device *pdev) { struct net_device *dev; struct cc770_priv *priv; void __iomem *base = NULL; int iosize = CC770_IOSIZE; int idx = pdev->id; int err; u32 clktmp; dev_dbg(&pdev->dev, "probing idx=%d: port=%#lx, mem=%#lx, irq=%d\n", idx, port[idx], mem[idx], irq[idx]); if (mem[idx]) { if (!request_mem_region(mem[idx], iosize, KBUILD_MODNAME)) { err = -EBUSY; goto exit; } base = ioremap_nocache(mem[idx], iosize); if (!base) { err = -ENOMEM; goto exit_release; } } else { if (indirect[idx] > 0 || (indirect[idx] == -1 && indirect[0] > 0)) iosize = CC770_IOSIZE_INDIRECT; if (!request_region(port[idx], iosize, KBUILD_MODNAME)) { err = -EBUSY; goto exit; } } dev = alloc_cc770dev(0); if (!dev) { err = -ENOMEM; goto exit_unmap; } priv = netdev_priv(dev); dev->irq = irq[idx]; priv->irq_flags = IRQF_SHARED; if (mem[idx]) { priv->reg_base = base; dev->base_addr = mem[idx]; priv->read_reg = cc770_isa_mem_read_reg; priv->write_reg = cc770_isa_mem_write_reg; } else { priv->reg_base = (void __iomem *)port[idx]; 
dev->base_addr = port[idx]; if (iosize == CC770_IOSIZE_INDIRECT) { priv->read_reg = cc770_isa_port_read_reg_indirect; priv->write_reg = cc770_isa_port_write_reg_indirect; } else { priv->read_reg = cc770_isa_port_read_reg; priv->write_reg = cc770_isa_port_write_reg; } } if (clk[idx]) clktmp = clk[idx]; else if (clk[0]) clktmp = clk[0]; else clktmp = CLK_DEFAULT; priv->can.clock.freq = clktmp; if (cir[idx] != 0xff) { priv->cpu_interface = cir[idx]; } else if (cir[0] != 0xff) { priv->cpu_interface = cir[0]; } else { /* The system clock may not exceed 10 MHz */ if (clktmp > 10000000) { priv->cpu_interface |= CPUIF_DSC; clktmp /= 2; } /* The memory clock may not exceed 8 MHz */ if (clktmp > 8000000) priv->cpu_interface |= CPUIF_DMC; } if (priv->cpu_interface & CPUIF_DSC) priv->can.clock.freq /= 2; if (bcr[idx] != 0xff) priv->bus_config = bcr[idx]; else if (bcr[0] != 0xff) priv->bus_config = bcr[0]; else priv->bus_config = BCR_DEFAULT; if (cor[idx] != 0xff) priv->clkout = cor[idx]; else if (cor[0] != 0xff) priv->clkout = cor[0]; else priv->clkout = COR_DEFAULT; dev_set_drvdata(&pdev->dev, dev); SET_NETDEV_DEV(dev, &pdev->dev); err = register_cc770dev(dev); if (err) { dev_err(&pdev->dev, "couldn't register device (err=%d)\n", err); goto exit_unmap; } dev_info(&pdev->dev, "device registered (reg_base=0x%p, irq=%d)\n", priv->reg_base, dev->irq); return 0; exit_unmap: if (mem[idx]) iounmap(base); exit_release: if (mem[idx]) release_mem_region(mem[idx], iosize); else release_region(port[idx], iosize); exit: return err; } static int __devexit cc770_isa_remove(struct platform_device *pdev) { struct net_device *dev = dev_get_drvdata(&pdev->dev); struct cc770_priv *priv = netdev_priv(dev); int idx = pdev->id; unregister_cc770dev(dev); dev_set_drvdata(&pdev->dev, NULL); if (mem[idx]) { iounmap(priv->reg_base); release_mem_region(mem[idx], CC770_IOSIZE); } else { if (priv->read_reg == cc770_isa_port_read_reg_indirect) release_region(port[idx], CC770_IOSIZE_INDIRECT); else 
release_region(port[idx], CC770_IOSIZE); } free_cc770dev(dev); return 0; } static struct platform_driver cc770_isa_driver = { .probe = cc770_isa_probe, .remove = __devexit_p(cc770_isa_remove), .driver = { .name = KBUILD_MODNAME, .owner = THIS_MODULE, }, }; static int __init cc770_isa_init(void) { int idx, err; for (idx = 0; idx < ARRAY_SIZE(cc770_isa_devs); idx++) { if ((port[idx] || mem[idx]) && irq[idx]) { cc770_isa_devs[idx] = platform_device_alloc(KBUILD_MODNAME, idx); if (!cc770_isa_devs[idx]) { err = -ENOMEM; goto exit_free_devices; } err = platform_device_add(cc770_isa_devs[idx]); if (err) { platform_device_put(cc770_isa_devs[idx]); goto exit_free_devices; } pr_debug("platform device %d: port=%#lx, mem=%#lx, " "irq=%d\n", idx, port[idx], mem[idx], irq[idx]); } else if (idx == 0 || port[idx] || mem[idx]) { pr_err("insufficient parameters supplied\n"); err = -EINVAL; goto exit_free_devices; } } err = platform_driver_register(&cc770_isa_driver); if (err) goto exit_free_devices; pr_info("driver for max. %d devices registered\n", MAXDEV); return 0; exit_free_devices: while (--idx >= 0) { if (cc770_isa_devs[idx]) platform_device_unregister(cc770_isa_devs[idx]); } return err; } module_init(cc770_isa_init); static void __exit cc770_isa_exit(void) { int idx; platform_driver_unregister(&cc770_isa_driver); for (idx = 0; idx < ARRAY_SIZE(cc770_isa_devs); idx++) { if (cc770_isa_devs[idx]) platform_device_unregister(cc770_isa_devs[idx]); } } module_exit(cc770_isa_exit);
gpl-2.0
xdajog/kernel_fx3q_aosp
drivers/net/ethernet/natsemi/xtsonic.c
5036
8392
/* * xtsonic.c * * (C) 2001 - 2007 Tensilica Inc. * Kevin Chea <kchea@yahoo.com> * Marc Gauthier <marc@linux-xtensa.org> * Chris Zankel <chris@zankel.net> * * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de) * * This driver is based on work from Andreas Busse, but most of * the code is rewritten. * * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de) * * A driver for the onboard Sonic ethernet controller on the XT2000. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/gfp.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/dma.h> static char xtsonic_string[] = "xtsonic"; extern unsigned xtboard_nvram_valid(void); extern void xtboard_get_ether_addr(unsigned char *buf); #include "sonic.h" /* * According to the documentation for the Sonic ethernet controller, * EOBC should be 760 words (1520 bytes) for 32-bit applications, and, * as such, 2 words less than the buffer size. The value for RBSIZE * defined in sonic.h, however is only 1520. * * (Note that in 16-bit configurations, EOBC is 759 words (1518 bytes) and * RBSIZE 1520 bytes) */ #undef SONIC_RBSIZE #define SONIC_RBSIZE 1524 /* * The chip provides 256 byte register space. 
*/ #define SONIC_MEM_SIZE 0x100 /* * Macros to access SONIC registers */ #define SONIC_READ(reg) \ (0xffff & *((volatile unsigned int *)dev->base_addr+reg)) #define SONIC_WRITE(reg,val) \ *((volatile unsigned int *)dev->base_addr+reg) = val /* Use 0 for production, 1 for verification, and >2 for debug */ #ifdef SONIC_DEBUG static unsigned int sonic_debug = SONIC_DEBUG; #else static unsigned int sonic_debug = 1; #endif /* * We cannot use station (ethernet) address prefixes to detect the * sonic controller since these are board manufacturer depended. * So we check for known Silicon Revision IDs instead. */ static unsigned short known_revisions[] = { 0x101, /* SONIC 83934 */ 0xffff /* end of list */ }; static int xtsonic_open(struct net_device *dev) { int retval; retval = request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED, "sonic", dev); if (retval) { printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq); return -EAGAIN; } retval = sonic_open(dev); if (retval) free_irq(dev->irq, dev); return retval; } static int xtsonic_close(struct net_device *dev) { int err; err = sonic_close(dev); free_irq(dev->irq, dev); return err; } static const struct net_device_ops xtsonic_netdev_ops = { .ndo_open = xtsonic_open, .ndo_stop = xtsonic_close, .ndo_start_xmit = sonic_send_packet, .ndo_get_stats = sonic_get_stats, .ndo_set_rx_mode = sonic_multicast_list, .ndo_tx_timeout = sonic_tx_timeout, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, }; static int __init sonic_probe1(struct net_device *dev) { static unsigned version_printed = 0; unsigned int silicon_revision; struct sonic_local *lp = netdev_priv(dev); unsigned int base_addr = dev->base_addr; int i; int err = 0; if (!request_mem_region(base_addr, 0x100, xtsonic_string)) return -EBUSY; /* * get the Silicon Revision ID. If this is one of the known * one assume that we found a SONIC ethernet controller at * the expected location. 
*/ silicon_revision = SONIC_READ(SONIC_SR); if (sonic_debug > 1) printk("SONIC Silicon Revision = 0x%04x\n",silicon_revision); i = 0; while ((known_revisions[i] != 0xffff) && (known_revisions[i] != silicon_revision)) i++; if (known_revisions[i] == 0xffff) { printk("SONIC ethernet controller not found (0x%4x)\n", silicon_revision); return -ENODEV; } if (sonic_debug && version_printed++ == 0) printk(version); /* * Put the sonic into software reset, then retrieve ethernet address. * Note: we are assuming that the boot-loader has initialized the cam. */ SONIC_WRITE(SONIC_CMD,SONIC_CR_RST); SONIC_WRITE(SONIC_DCR, SONIC_DCR_WC0|SONIC_DCR_DW|SONIC_DCR_LBR|SONIC_DCR_SBUS); SONIC_WRITE(SONIC_CEP,0); SONIC_WRITE(SONIC_IMR,0); SONIC_WRITE(SONIC_CMD,SONIC_CR_RST); SONIC_WRITE(SONIC_CEP,0); for (i=0; i<3; i++) { unsigned int val = SONIC_READ(SONIC_CAP0-i); dev->dev_addr[i*2] = val; dev->dev_addr[i*2+1] = val >> 8; } /* Initialize the device structure. */ lp->dma_bitmode = SONIC_BITMODE32; /* * Allocate local private descriptor areas in uncached space. * The entire structure must be located within the same 64kb segment. * A simple way to ensure this is to allocate twice the * size of the structure -- given that the structure is * much less than 64 kB, at least one of the halves of * the allocated area will be contained entirely in 64 kB. * We also allocate extra space for a pointer to allow freeing * this structure later on (in xtsonic_cleanup_module()). 
*/ lp->descriptors = dma_alloc_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), &lp->descriptors_laddr, GFP_KERNEL); if (lp->descriptors == NULL) { printk(KERN_ERR "%s: couldn't alloc DMA memory for " " descriptors.\n", dev_name(lp->device)); goto out; } lp->cda = lp->descriptors; lp->tda = lp->cda + (SIZEOF_SONIC_CDA * SONIC_BUS_SCALE(lp->dma_bitmode)); lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS * SONIC_BUS_SCALE(lp->dma_bitmode)); lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS * SONIC_BUS_SCALE(lp->dma_bitmode)); /* get the virtual dma address */ lp->cda_laddr = lp->descriptors_laddr; lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA * SONIC_BUS_SCALE(lp->dma_bitmode)); lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS * SONIC_BUS_SCALE(lp->dma_bitmode)); lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS * SONIC_BUS_SCALE(lp->dma_bitmode)); dev->netdev_ops = &xtsonic_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; /* * clear tally counter */ SONIC_WRITE(SONIC_CRCT,0xffff); SONIC_WRITE(SONIC_FAET,0xffff); SONIC_WRITE(SONIC_MPT,0xffff); return 0; out: release_region(dev->base_addr, SONIC_MEM_SIZE); return err; } /* * Probe for a SONIC ethernet controller on an XT2000 board. * Actually probing is superfluous but we're paranoid. 
*/ int __devinit xtsonic_probe(struct platform_device *pdev) { struct net_device *dev; struct sonic_local *lp; struct resource *resmem, *resirq; int err = 0; if ((resmem = platform_get_resource(pdev, IORESOURCE_MEM, 0)) == NULL) return -ENODEV; if ((resirq = platform_get_resource(pdev, IORESOURCE_IRQ, 0)) == NULL) return -ENODEV; if ((dev = alloc_etherdev(sizeof(struct sonic_local))) == NULL) return -ENOMEM; lp = netdev_priv(dev); lp->device = &pdev->dev; SET_NETDEV_DEV(dev, &pdev->dev); netdev_boot_setup_check(dev); dev->base_addr = resmem->start; dev->irq = resirq->start; if ((err = sonic_probe1(dev))) goto out; if ((err = register_netdev(dev))) goto out1; printk("%s: SONIC ethernet @%08lx, MAC %pM, IRQ %d\n", dev->name, dev->base_addr, dev->dev_addr, dev->irq); return 0; out1: release_region(dev->base_addr, SONIC_MEM_SIZE); out: free_netdev(dev); return err; } MODULE_DESCRIPTION("Xtensa XT2000 SONIC ethernet driver"); module_param(sonic_debug, int, 0); MODULE_PARM_DESC(sonic_debug, "xtsonic debug level (1-4)"); #include "sonic.c" static int __devexit xtsonic_device_remove (struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct sonic_local *lp = netdev_priv(dev); unregister_netdev(dev); dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), lp->descriptors, lp->descriptors_laddr); release_region (dev->base_addr, SONIC_MEM_SIZE); free_netdev(dev); return 0; } static struct platform_driver xtsonic_driver = { .probe = xtsonic_probe, .remove = __devexit_p(xtsonic_device_remove), .driver = { .name = xtsonic_string, }, }; module_platform_driver(xtsonic_driver);
gpl-2.0
jawad6233/android_kernel_bq_aquaris5
arch/m68k/platform/68360/ints.c
7340
4302
/* * linux/arch/$(ARCH)/platform/$(PLATFORM)/ints.c * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. * * Copyright (c) 2000 Michael Leslie <mleslie@lineo.com> * Copyright (c) 1996 Roman Zippel * Copyright (c) 1999 D. Jeff Dionne <jeff@uclinux.org> */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <asm/traps.h> #include <asm/machdep.h> #include <asm/m68360.h> /* from quicc/commproc.c: */ extern QUICC *pquicc; extern void cpm_interrupt_init(void); #define INTERNAL_IRQS (96) /* assembler routines */ asmlinkage void system_call(void); asmlinkage void buserr(void); asmlinkage void trap(void); asmlinkage void bad_interrupt(void); asmlinkage void inthandler(void); static void intc_irq_unmask(struct irq_data *d) { pquicc->intr_cimr |= (1 << d->irq); } static void intc_irq_mask(struct irq_data *d) { pquicc->intr_cimr &= ~(1 << d->irq); } static void intc_irq_ack(struct irq_data *d) { pquicc->intr_cisr = (1 << d->irq); } static struct irq_chip intc_irq_chip = { .name = "M68K-INTC", .irq_mask = intc_irq_mask, .irq_unmask = intc_irq_unmask, .irq_ack = intc_irq_ack, }; /* * This function should be called during kernel startup to initialize * the vector table. 
*/ void __init trap_init(void) { int vba = (CPM_VECTOR_BASE<<4); /* set up the vectors */ _ramvec[2] = buserr; _ramvec[3] = trap; _ramvec[4] = trap; _ramvec[5] = trap; _ramvec[6] = trap; _ramvec[7] = trap; _ramvec[8] = trap; _ramvec[9] = trap; _ramvec[10] = trap; _ramvec[11] = trap; _ramvec[12] = trap; _ramvec[13] = trap; _ramvec[14] = trap; _ramvec[15] = trap; _ramvec[32] = system_call; _ramvec[33] = trap; cpm_interrupt_init(); /* set up CICR for vector base address and irq level */ /* irl = 4, hp = 1f - see MC68360UM p 7-377 */ pquicc->intr_cicr = 0x00e49f00 | vba; /* CPM interrupt vectors: (p 7-376) */ _ramvec[vba+CPMVEC_ERROR] = bad_interrupt; /* Error */ _ramvec[vba+CPMVEC_PIO_PC11] = inthandler; /* pio - pc11 */ _ramvec[vba+CPMVEC_PIO_PC10] = inthandler; /* pio - pc10 */ _ramvec[vba+CPMVEC_SMC2] = inthandler; /* smc2/pip */ _ramvec[vba+CPMVEC_SMC1] = inthandler; /* smc1 */ _ramvec[vba+CPMVEC_SPI] = inthandler; /* spi */ _ramvec[vba+CPMVEC_PIO_PC9] = inthandler; /* pio - pc9 */ _ramvec[vba+CPMVEC_TIMER4] = inthandler; /* timer 4 */ _ramvec[vba+CPMVEC_RESERVED1] = inthandler; /* reserved */ _ramvec[vba+CPMVEC_PIO_PC8] = inthandler; /* pio - pc8 */ _ramvec[vba+CPMVEC_PIO_PC7] = inthandler; /* pio - pc7 */ _ramvec[vba+CPMVEC_PIO_PC6] = inthandler; /* pio - pc6 */ _ramvec[vba+CPMVEC_TIMER3] = inthandler; /* timer 3 */ _ramvec[vba+CPMVEC_PIO_PC5] = inthandler; /* pio - pc5 */ _ramvec[vba+CPMVEC_PIO_PC4] = inthandler; /* pio - pc4 */ _ramvec[vba+CPMVEC_RESERVED2] = inthandler; /* reserved */ _ramvec[vba+CPMVEC_RISCTIMER] = inthandler; /* timer table */ _ramvec[vba+CPMVEC_TIMER2] = inthandler; /* timer 2 */ _ramvec[vba+CPMVEC_RESERVED3] = inthandler; /* reserved */ _ramvec[vba+CPMVEC_IDMA2] = inthandler; /* idma 2 */ _ramvec[vba+CPMVEC_IDMA1] = inthandler; /* idma 1 */ _ramvec[vba+CPMVEC_SDMA_CB_ERR] = inthandler; /* sdma channel bus error */ _ramvec[vba+CPMVEC_PIO_PC3] = inthandler; /* pio - pc3 */ _ramvec[vba+CPMVEC_PIO_PC2] = inthandler; /* pio - pc2 */ /* 
_ramvec[vba+CPMVEC_TIMER1] = cpm_isr_timer1; */ /* timer 1 */ _ramvec[vba+CPMVEC_TIMER1] = inthandler; /* timer 1 */ _ramvec[vba+CPMVEC_PIO_PC1] = inthandler; /* pio - pc1 */ _ramvec[vba+CPMVEC_SCC4] = inthandler; /* scc 4 */ _ramvec[vba+CPMVEC_SCC3] = inthandler; /* scc 3 */ _ramvec[vba+CPMVEC_SCC2] = inthandler; /* scc 2 */ _ramvec[vba+CPMVEC_SCC1] = inthandler; /* scc 1 */ _ramvec[vba+CPMVEC_PIO_PC0] = inthandler; /* pio - pc0 */ /* turn off all CPM interrupts */ pquicc->intr_cimr = 0x00000000; } void init_IRQ(void) { int i; for (i = 0; (i < NR_IRQS); i++) { irq_set_chip(i, &intc_irq_chip); irq_set_handler(i, handle_level_irq); } }
gpl-2.0
maxwen/android_kernel_oppo_n1
arch/s390/kernel/ftrace.c
7852
5313
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>

/*
 * Byte distance from the start of the mcount block to the instruction
 * the traced function returns to; used to recover the callee's entry
 * address in prepare_ftrace_return().
 */
#ifdef CONFIG_64BIT
#define MCOUNT_OFFSET_RET 12
#else
#define MCOUNT_OFFSET_RET 22
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

/*
 * These are labels inside the asm templates below, not callable
 * functions; declared as functions only so their addresses can be
 * passed to probe_kernel_write().
 */
void ftrace_disable_code(void);
void ftrace_enable_insn(void);

#ifdef CONFIG_64BIT
/*
 * The 64-bit mcount code looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 * >	larl	%r1,<&counter>		# offset 6
 * >	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * Total length is 24 bytes. The middle two instructions of the mcount
 * block get overwritten by ftrace_make_nop / ftrace_make_call.
 * The 64-bit enabled ftrace code block looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 * >	lg	%r1,__LC_FTRACE_FUNC	# offset 6
 * >	lgr	%r0,%r0			# offset 12
 * >	basr	%r14,%r1		# offset 16
 *	lg	%r14,8(%r15)		# offset 18
 * The return points of the mcount/ftrace function have the same offset 18.
 * The 64-bit disable ftrace code block looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 * >	jg	.+18			# offset 6
 * >	lgr	%r0,%r0			# offset 12
 * >	basr	%r14,%r1		# offset 16
 *	lg	%r14,8(%r15)		# offset 18
 * The jg instruction branches to offset 24 to skip as many instructions
 * as possible.
 */
asm(
" .align 4\n"
"ftrace_disable_code:\n"
" jg 0f\n"
" lgr %r0,%r0\n"
" basr %r14,%r1\n"
"0:\n"
" .align 4\n"
"ftrace_enable_insn:\n"
" lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n");

/* Size of the single "lg" patched in by ftrace_make_call(). */
#define FTRACE_INSN_SIZE 6

#else /* CONFIG_64BIT */
/*
 * The 31-bit mcount code looks like this:
 *	st	%r14,4(%r15)		# offset 0
 * >	bras	%r1,0f			# offset 4
 * >	.long	_mcount			# offset 8
 * >	.long	<&counter>		# offset 12
 * > 0:	l	%r14,0(%r1)		# offset 16
 * >	l	%r1,4(%r1)		# offset 20
 *	basr	%r14,%r14		# offset 24
 *	l	%r14,4(%r15)		# offset 26
 * Total length is 30 bytes. The twenty bytes starting from offset 4
 * to offset 24 get overwritten by ftrace_make_nop / ftrace_make_call.
 * The 31-bit enabled ftrace code block looks like this:
 *	st	%r14,4(%r15)		# offset 0
 * >	l	%r14,__LC_FTRACE_FUNC	# offset 4
 * >	j	0f			# offset 8
 * >	.fill	12,1,0x07		# offset 12
 *   0:	basr	%r14,%r14		# offset 24
 *	l	%r14,4(%r15)		# offset 26
 * The return points of the mcount/ftrace function have the same offset 26.
 * The 31-bit disabled ftrace code block looks like this:
 *	st	%r14,4(%r15)		# offset 0
 * >	j	.+26			# offset 4
 * >	j	0f			# offset 8
 * >	.fill	12,1,0x07		# offset 12
 *   0:	basr	%r14,%r14		# offset 24
 *	l	%r14,4(%r15)		# offset 26
 * The j instruction branches to offset 30 to skip as many instructions
 * as possible.
 */
asm(
" .align 4\n"
"ftrace_disable_code:\n"
" j 1f\n"
" j 0f\n"
" .fill 12,1,0x07\n"
"0: basr %r14,%r14\n"
"1:\n"
" .align 4\n"
"ftrace_enable_insn:\n"
" l %r14,"__stringify(__LC_FTRACE_FUNC)"\n");

/* Size of the single "l" patched in by ftrace_make_call(). */
#define FTRACE_INSN_SIZE 4

#endif /* CONFIG_64BIT */

/*
 * Turn the mcount call site at rec->ip into a nop by overwriting the
 * whole patchable region with the disable template above.
 * Returns 0 on success, -EPERM if the text could not be written.
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
			       MCOUNT_INSN_SIZE))
		return -EPERM;
	return 0;
}

/*
 * Re-enable tracing at rec->ip: only the first instruction of the
 * patchable region needs to change (the load of the tracer address),
 * hence FTRACE_INSN_SIZE rather than MCOUNT_INSN_SIZE.
 * Returns 0 on success, -EPERM if the text could not be written.
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn,
			       FTRACE_INSN_SIZE))
		return -EPERM;
	return 0;
}

/*
 * Nothing to patch here: the enabled code block loads the tracer
 * address indirectly from lowcore (__LC_FTRACE_FUNC) on every call,
 * so no call site embeds the function pointer.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	return 0;
}

/* Arch init hook for dynamic ftrace; reports success via *data. */
int __init ftrace_dyn_arch_init(void *data)
{
	*(unsigned long *) data = 0;
	return 0;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it in the stack of return addresses
 * in current thread info.
 *
 * Called with the traced function's original return address in
 * 'parent' and the mcount return address in 'ip'; returns the address
 * the traced function should actually return to (return_to_handler on
 * success, the unmodified parent otherwise).
 */
unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
					      unsigned long ip)
{
	struct ftrace_graph_ent trace;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
		goto out;
	/* Recover the callee entry: strip PSW mode bits, back off mcount. */
	trace.func = (ip & PSW_ADDR_INSN) - MCOUNT_OFFSET_RET;
	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		goto out;
	}
	parent = (unsigned long) return_to_handler;
out:
	return parent;
}

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Patch the kernel code at ftrace_graph_caller location. The instruction
 * there is branch relative and save to prepare_ftrace_return. To disable
 * the call to prepare_ftrace_return we patch the bras offset to point
 * directly after the instructions. To enable the call we calculate
 * the original offset to prepare_ftrace_return and put it back.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned short offset;

	/* bras offsets are in halfwords, hence the division by 2 */
	offset = ((void *) prepare_ftrace_return -
		  (void *) ftrace_graph_caller) / 2;
	/* +2 skips the opcode/register byte pair of the bras insn */
	return probe_kernel_write(ftrace_graph_caller + 2, &offset,
				  sizeof(offset));
}

int ftrace_disable_ftrace_graph_caller(void)
{
	/* halfword offset 2 == branch to the next instruction (no-op) */
	static unsigned short offset = 0x0002;

	return probe_kernel_write(ftrace_graph_caller + 2, &offset,
				  sizeof(offset));
}

#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
gpl-2.0
slz/delidded-kernel-5.1.1-g900t-note3
drivers/scsi/sym53c8xx_2/sym_hipd.c
8620
147387
/* * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family * of PCI-SCSI IO processors. * * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr> * Copyright (c) 2003-2005 Matthew Wilcox <matthew@wil.cx> * * This driver is derived from the Linux sym53c8xx driver. * Copyright (C) 1998-2000 Gerard Roudier * * The sym53c8xx driver is derived from the ncr53c8xx driver that had been * a port of the FreeBSD ncr driver to Linux-1.2.13. * * The original ncr driver has been written for 386bsd and FreeBSD by * Wolfgang Stanglmeier <wolf@cologne.de> * Stefan Esser <se@mi.Uni-Koeln.de> * Copyright (C) 1994 Wolfgang Stanglmeier * * Other major contributions: * * NVRAM detection and reading. * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk> * *----------------------------------------------------------------------------- * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/slab.h> #include <asm/param.h> /* for timeouts in units of HZ */ #include "sym_glue.h" #include "sym_nvram.h" #if 0 #define SYM_DEBUG_GENERIC_SUPPORT #endif /* * Needed function prototypes. 
 */
static void sym_int_ma (struct sym_hcb *np);
static void sym_int_sir(struct sym_hcb *);
static struct sym_ccb *sym_alloc_ccb(struct sym_hcb *np);
static struct sym_ccb *sym_ccb_from_dsa(struct sym_hcb *np, u32 dsa);
static void sym_alloc_lcb_tags (struct sym_hcb *np, u_char tn, u_char ln);
static void sym_complete_error (struct sym_hcb *np, struct sym_ccb *cp);
static void sym_complete_ok (struct sym_hcb *np, struct sym_ccb *cp);
static int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp);

/*
 * Print a buffer in hexadecimal format with a ".\n" at end.
 */
static void sym_printl_hex(u_char *p, int n)
{
	while (n-- > 0)
		printf (" %x", *p++);
	printf (".\n");
}

/*
 * Print a SCSI message, prefixed with the command's address and the
 * given label.
 */
static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg)
{
	sym_print_addr(cp->cmd, "%s: ", label);

	spi_print_msg(msg);
	printf("\n");
}

/*
 * Print a negotiation message for the given target, prefixed with the
 * target device name and the given label.
 */
static void sym_print_nego_msg(struct sym_hcb *np, int target, char *label, u_char *msg)
{
	struct sym_tcb *tp = &np->target[target];
	dev_info(&tp->starget->dev, "%s: ", label);

	spi_print_msg(msg);
	printf("\n");
}

/*
 * Print something that tells about extended errors.
 *
 * x_status is a bit mask of XE_* extended-error conditions; one line
 * is printed per set bit.
 */
void sym_print_xerr(struct scsi_cmnd *cmd, int x_status)
{
	if (x_status & XE_PARITY_ERR) {
		sym_print_addr(cmd, "unrecovered SCSI parity error.\n");
	}
	if (x_status & XE_EXTRA_DATA) {
		sym_print_addr(cmd, "extraneous data discarded.\n");
	}
	if (x_status & XE_BAD_PHASE) {
		sym_print_addr(cmd, "illegal scsi phase (4/5).\n");
	}
	if (x_status & XE_SODL_UNRUN) {
		sym_print_addr(cmd, "ODD transfer in DATA OUT phase.\n");
	}
	if (x_status & XE_SWIDE_OVRUN) {
		sym_print_addr(cmd, "ODD transfer in DATA IN phase.\n");
	}
}

/*
 * Return a string for SCSI BUS mode.  Unknown modes map to "??".
 */
static char *sym_scsi_bus_mode(int mode)
{
	switch(mode) {
	case SMODE_HVD:	return "HVD";
	case SMODE_SE:	return "SE";
	case SMODE_LVD: return "LVD";
	}
	return "??";
}

/*
 * Soft reset the chip.
 *
 * Raising SRST when the chip is running may cause
 * problems on dual function chips (see below).
 * On the other hand, LVD devices need some delay
 * to settle and report actual BUS mode in STEST4.
 */
static void sym_chip_reset (struct sym_hcb *np)
{
	OUTB(np, nc_istat, SRST);
	INB(np, nc_mbox1);	/* flush posted write before the delay */
	udelay(10);
	OUTB(np, nc_istat, 0);
	INB(np, nc_mbox1);
	udelay(2000);	/* For BUS MODE to settle */
}

/*
 * Really soft reset the chip.:)
 *
 * Some 896 and 876 chip revisions may hang-up if we set
 * the SRST (soft reset) bit at the wrong time when SCRIPTS
 * are running.
 * So, we need to abort the current operation prior to
 * soft resetting the chip.
 */
static void sym_soft_reset (struct sym_hcb *np)
{
	u_char istat = 0;
	int i;

	/* Only chips with ISTAT1 can report a running SCRIPTS engine. */
	if (!(np->features & FE_ISTAT1) || !(INB(np, nc_istat1) & SCRUN))
		goto do_chip_reset;

	OUTB(np, nc_istat, CABRT);
	/* Poll up to 100000 * 5us for the abort to complete. */
	for (i = 100000 ; i ; --i) {
		istat = INB(np, nc_istat);
		if (istat & SIP) {
			INW(np, nc_sist);	/* ack SCSI interrupt */
		}
		else if (istat & DIP) {
			if (INB(np, nc_dstat) & ABRT)
				break;
		}
		udelay(5);
	}
	OUTB(np, nc_istat, 0);
	if (!i)
		printf("%s: unable to abort current chip operation, "
		       "ISTAT=0x%02x.\n", sym_name(np), istat);
do_chip_reset:
	sym_chip_reset(np);
}

/*
 * Start reset process.
 *
 * The interrupt handler will reinitialize the chip.
 */
static void sym_start_reset(struct sym_hcb *np)
{
	sym_reset_scsi_bus(np, 1);
}

/*
 * Reset the SCSI bus (optionally re-enabling the RST interrupt) and,
 * if configured, sanity-check the bus lines afterwards.
 * Returns 0 on success, 1 if the bus check found suspicious signals.
 */
int sym_reset_scsi_bus(struct sym_hcb *np, int enab_int)
{
	u32 term;
	int retv = 0;

	sym_soft_reset(np);	/* Soft reset the chip */
	if (enab_int)
		OUTW(np, nc_sien, RST);
	/*
	 * Enable Tolerant, reset IRQD if present and
	 * properly set IRQ mode, prior to resetting the bus.
	 */
	OUTB(np, nc_stest3, TE);
	OUTB(np, nc_dcntl, (np->rv_dcntl & IRQM));
	OUTB(np, nc_scntl1, CRST);
	INB(np, nc_mbox1);
	udelay(200);

	if (!SYM_SETUP_SCSI_BUS_CHECK)
		goto out;
	/*
	 * Check for no terminators or SCSI bus shorts to ground.
	 * Read SCSI data bus, data parity bits and control signals.
	 * We are expecting RESET to be TRUE and other signals to be
	 * FALSE.
	 */
	term =	INB(np, nc_sstat0);
	term =	((term & 2) << 7) + ((term & 1) << 17);	/* rst sdp0 */
	term |=	((INB(np, nc_sstat2) & 0x01) << 26) |	/* sdp1     */
		((INW(np, nc_sbdl) & 0xff)   << 9)  |	/* d7-0     */
		((INW(np, nc_sbdl) & 0xff00) << 10) |	/* d15-8    */
		INB(np, nc_sbcl);	/* req ack bsy sel atn msg cd io */

	if (!np->maxwide)
		term &= 0x3ffff;	/* narrow bus: ignore upper byte lines */

	if (term != (2<<7)) {	/* only RST should be asserted */
		printf("%s: suspicious SCSI data while resetting the BUS.\n",
			sym_name(np));
		printf("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = "
			"0x%lx, expecting 0x%lx\n",
			sym_name(np),
			(np->features & FE_WIDE) ? "dp1,d15-8," : "",
			(u_long)term, (u_long)(2<<7));
		if (SYM_SETUP_SCSI_BUS_CHECK == 1)
			retv = 1;
	}
out:
	OUTB(np, nc_scntl1, 0);
	return retv;
}

/*
 * Select SCSI clock frequency
 */
static void sym_selectclock(struct sym_hcb *np, u_char scntl3)
{
	/*
	 * If multiplier not present or not selected, leave here.
	 */
	if (np->multiplier <= 1) {
		OUTB(np, nc_scntl3, scntl3);
		return;
	}

	if (sym_verbose >= 2)
		printf ("%s: enabling clock multiplier\n", sym_name(np));

	OUTB(np, nc_stest1, DBLEN);	   /* Enable clock multiplier */
	/*
	 * Wait for the LCKFRQ bit to be set if supported by the chip.
	 * Otherwise wait 50 micro-seconds (at least).
	 */
	if (np->features & FE_LCKFRQ) {
		int i = 20;
		while (!(INB(np, nc_stest4) & LCKFRQ) && --i > 0)
			udelay(20);
		if (!i)
			printf("%s: the chip cannot lock the frequency\n",
				sym_name(np));
	} else {
		INB(np, nc_mbox1);
		udelay(50+10);
	}
	OUTB(np, nc_stest3, HSC);		/* Halt the scsi clock	*/
	OUTB(np, nc_scntl3, scntl3);
	OUTB(np, nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier	*/
	OUTB(np, nc_stest3, 0x00);		/* Restart scsi clock 	*/
}

/*
 * Determine the chip's clock frequency.
 *
 * This is essential for the negotiation of the synchronous
 * transfer rate.
 *
 * Note: we have to return the correct value.
 * THERE IS NO SAFE DEFAULT VALUE.
 *
 * Most NCR/SYMBIOS boards are delivered with a 40 Mhz clock.
 * 53C860 and 53C875 rev.
 1 support fast20 transfers but
 * do not have a clock doubler and so are provided with a
 * 80 MHz clock. All other fast20 boards incorporate a doubler
 * and so should be delivered with a 40 MHz clock.
 * The recent fast40 chips (895/896/895A/1010) use a 40 Mhz base
 * clock and provide a clock quadrupler (160 Mhz).
 */

/*
 * calculate SCSI clock frequency (in KHz)
 */
static unsigned getfreq (struct sym_hcb *np, int gen)
{
	unsigned int ms = 0;
	unsigned int f;

	/*
	 * Measure GEN timer delay in order 
	 * to calculate SCSI clock frequency
	 *
	 * This code will never execute too
	 * many loop iterations (if DELAY is 
	 * reasonably correct). It could get
	 * too low a delay (too high a freq.)
	 * if the CPU is slow executing the 
	 * loop for some reason (an NMI, for
	 * example). For this reason we will
	 * if multiple measurements are to be 
	 * performed trust the higher delay 
	 * (lower frequency returned).
	 */
	OUTW(np, nc_sien, 0);	/* mask all scsi interrupts */
	INW(np, nc_sist);	/* clear pending scsi interrupt */
	OUTB(np, nc_dien, 0);	/* mask all dma interrupts */
	INW(np, nc_sist);	/* another one, just to be sure :) */
	/*
	 * The C1010-33 core does not report GEN in SIST,
	 * if this interrupt is masked in SIEN.
	 * I don't know yet if the C1010-66 behaves the same way.
	 */
	if (np->features & FE_C10) {
		OUTW(np, nc_sien, GEN);
		OUTB(np, nc_istat1, SIRQD);
	}
	OUTB(np, nc_scntl3, 4);	   /* set pre-scaler to divide by 3 */
	OUTB(np, nc_stime1, 0);	   /* disable general purpose timer */
	OUTB(np, nc_stime1, gen);  /* set to nominal delay of 1<<gen * 125us */
	while (!(INW(np, nc_sist) & GEN) && ms++ < 100000)
		udelay(1000/4);    /* count in 1/4 of ms */
	OUTB(np, nc_stime1, 0);    /* disable general purpose timer */
	/*
	 * Undo C1010-33 specific settings.
	 */
	if (np->features & FE_C10) {
		OUTW(np, nc_sien, 0);
		OUTB(np, nc_istat1, 0);
	}
 	/*
 	 * set prescaler to divide by whatever 0 means
 	 * 0 ought to choose divide by 2, but appears
 	 * to set divide by 3.5 mode in my 53c810 ...
 	 */
 	OUTB(np, nc_scntl3, 0);

  	/*
 	 * adjust for prescaler, and convert into KHz 
  	 */
	f = ms ? ((1 << gen) * (4340*4)) / ms : 0;

	/*
	 * The C1010-33 result is biased by a factor 
	 * of 2/3 compared to earlier chips.
	 */
	if (np->features & FE_C10)
		f = (f * 2) / 3;

	if (sym_verbose >= 2)
		printf ("%s: Delay (GEN=%d): %u msec, %u KHz\n",
			sym_name(np), gen, ms/4, f);

	return f;
}

/*
 * Measure the clock three times and keep the lowest frequency seen
 * (i.e. the highest measured delay) -- see the rationale in getfreq().
 */
static unsigned sym_getfreq (struct sym_hcb *np)
{
	u_int f1, f2;
	int gen = 8;	/* nominal timer delay of 1<<8 * 125us */

	getfreq (np, gen);	/* throw away first result */
	f1 = getfreq (np, gen);
	f2 = getfreq (np, gen);
	if (f1 > f2) f1 = f2;		/* trust lower result	*/
	return f1;
}

/*
 * Get/probe chip SCSI clock frequency
 */
static void sym_getclock (struct sym_hcb *np, int mult)
{
	unsigned char scntl3 = np->sv_scntl3;
	unsigned char stest1 = np->sv_stest1;
	unsigned f1;

	np->multiplier = 1;
	f1 = 40000;
	/*
	 * True with 875/895/896/895A with clock multiplier selected
	 */
	if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) {
		if (sym_verbose >= 2)
			printf ("%s: clock multiplier found\n", sym_name(np));
		np->multiplier = mult;
	}

	/*
	 * If multiplier not found or scntl3 not 7,5,3,
	 * reset chip and get frequency from general purpose timer.
	 * Otherwise trust scntl3 BIOS setting.
	 */
	if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) {
		OUTB(np, nc_stest1, 0);		/* make sure doubler is OFF */
		f1 = sym_getfreq (np);

		if (sym_verbose)
			printf ("%s: chip clock is %uKHz\n", sym_name(np), f1);

		/* snap the measurement to the nearest nominal frequency */
		if	(f1 <	45000)		f1 =  40000;
		else if (f1 <	55000)		f1 =  50000;
		else				f1 =  80000;

		if (f1 < 80000 && mult > 1) {
			if (sym_verbose >= 2)
				printf ("%s: clock multiplier assumed\n",
					sym_name(np));
			np->multiplier	= mult;
		}
	} else {
		if	((scntl3 & 7) == 3)	f1 =  40000;
		else if	((scntl3 & 7) == 5)	f1 =  80000;
		else 				f1 = 160000;

		f1 /= np->multiplier;
	}

	/*
	 * Compute controller synchronous parameters.
	 */
	f1		*= np->multiplier;
	np->clock_khz	= f1;
}

/*
 * Get/probe PCI clock frequency
 */
static int sym_getpciclock (struct sym_hcb *np)
{
	int f = 0;

	/*
	 * For now, we only need to know about the actual
	 * PCI BUS clock frequency for C1010-66 chips.
	 */
#if 1
	if (np->features & FE_66MHZ) {
#else
	if (1) {
#endif
		OUTB(np, nc_stest1, SCLK); /* Use the PCI clock as SCSI clock */
		f = sym_getfreq(np);
		OUTB(np, nc_stest1, 0);
	}
	np->pciclk_khz = f;

	return f;
}

/*
 * SYMBIOS chip clock divisor table.
 *
 * Divisors are multiplied by 10,000,000 in order to make
 * calculations more simple.
 */
#define _5M 5000000
static const u32 div_10M[] = {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M};

/*
 * Get clock factor and sync divisor for a given
 * synchronous factor period.
 *
 * On success returns the actual period achieved (tenths of ns) and
 * fills in *divp/*fakp; returns -1 if the requested period cannot be
 * honoured within the chip's divisor/clock-factor limits.
 */
static int
sym_getsync(struct sym_hcb *np, u_char dt, u_char sfac, u_char *divp, u_char *fakp)
{
	u32	clk = np->clock_khz;	/* SCSI clock frequency in kHz	*/
	int	div = np->clock_divn;	/* Number of divisors supported	*/
	u32	fak;			/* Sync factor in sxfer		*/
	u32	per;			/* Period in tenths of ns	*/
	u32	kpc;			/* (per * clk)			*/
	int	ret;

	/*
	 * Compute the synchronous period in tenths of nano-seconds
	 */
	if (dt && sfac <= 9)	per = 125;
	else if	(sfac <= 10)	per = 250;
	else if	(sfac == 11)	per = 303;
	else if	(sfac == 12)	per = 500;
	else			per = 40 * sfac;
	ret = per;

	kpc = per * clk;
	if (dt)
		kpc <<= 1;	/* DT transfers clock both edges */

	/*
	 * For earliest C10 revision 0, we cannot use extra
	 * clocks for the setting of the SCSI clocking.
	 * Note that this limits the lowest sync data transfer
	 * to 5 Mega-transfers per second and may result in
	 * using higher clock divisors.
	 */
#if	1
	if ((np->features & (FE_C10|FE_U3EN)) == FE_C10) {
		/*
		 * Look for the lowest clock divisor that allows an
		 * output speed not faster than the period.
		 */
		while (div > 0) {
			--div;
			if (kpc > (div_10M[div] << 2)) {
				++div;
				break;
			}
		}
		fak = 0;			/* No extra clocks */
		if (div == np->clock_divn) {	/* Are we too fast ?
 */
			ret = -1;
		}
		*divp = div;
		*fakp = fak;
		return ret;
	}
#endif

	/*
	 * Look for the greatest clock divisor that allows an
	 * input speed faster than the period.
	 */
	while (div-- > 0)
		if (kpc >= (div_10M[div] << 2)) break;

	/*
	 * Calculate the lowest clock factor that allows an output
	 * speed not faster than the period, and the max output speed.
	 * If fak >= 1 we will set both XCLKH_ST and XCLKH_DT.
	 * If fak >= 2 we will also set XCLKS_ST and XCLKS_DT.
	 */
	if (dt) {
		fak = (kpc - 1) / (div_10M[div] << 1) + 1 - 2;
		/* ret = ((2+fak)*div_10M[div])/np->clock_khz; */
	} else {
		fak = (kpc - 1) / div_10M[div] + 1 - 4;
		/* ret = ((4+fak)*div_10M[div])/np->clock_khz; */
	}

	/*
	 * Check against our hardware limits, or bugs :).
	 */
	if (fak > 2) {
		fak = 2;
		ret = -1;
	}

	/*
	 * Compute and return sync parameters.
	 */
	*divp = div;
	*fakp = fak;

	return ret;
}

/*
 * SYMBIOS chips allow burst lengths of 2, 4, 8, 16, 32, 64,
 * 128 transfers. All chips support at least 16 transfers
 * bursts. The 825A, 875 and 895 chips support bursts of up
 * to 128 transfers and the 895A and 896 support bursts of up
 * to 64 transfers. All other chips support up to 16
 * transfers bursts.
 *
 * For PCI 32 bit data transfers each transfer is a DWORD.
 * It is a QUADWORD (8 bytes) for PCI 64 bit data transfers.
 *
 * We use log base 2 (burst length) as internal code, with
 * value 0 meaning "burst disabled".
 */

/*
 * Burst length from burst code.
 */
#define burst_length(bc) (!(bc))? 0 : 1 << (bc)

/*
 * Burst code from io register bits.
 */
#define burst_code(dmode, ctest4, ctest5) \
	(ctest4) & 0x80? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1

/*
 * Set initial io register bits from burst code.
 */
static inline void sym_init_burst(struct sym_hcb *np, u_char bc)
{
	np->rv_ctest4	&= ~0x80;
	np->rv_dmode	&= ~(0x3 << 6);
	np->rv_ctest5	&= ~0x4;

	if (!bc) {
		np->rv_ctest4	|= 0x80;	/* burst disabled */
	}
	else {
		--bc;
		np->rv_dmode	|= ((bc & 0x3) << 6);
		np->rv_ctest5	|= (bc & 0x4);
	}
}

/*
 * Save initial settings of some IO registers.
 * Assumed to have been set by BIOS.
 * We cannot reset the chip prior to reading the
 * IO registers, since informations will be lost.
 * Since the SCRIPTS processor may be running, this
 * is not safe on paper, but it seems to work quite
 * well. :)
 */
static void sym_save_initial_setting (struct sym_hcb *np)
{
	/* masks keep only the bits this driver later re-applies */
	np->sv_scntl0	= INB(np, nc_scntl0) & 0x0a;
	np->sv_scntl3	= INB(np, nc_scntl3) & 0x07;
	np->sv_dmode	= INB(np, nc_dmode)  & 0xce;
	np->sv_dcntl	= INB(np, nc_dcntl)  & 0xa8;
	np->sv_ctest3	= INB(np, nc_ctest3) & 0x01;
	np->sv_ctest4	= INB(np, nc_ctest4) & 0x80;
	np->sv_gpcntl	= INB(np, nc_gpcntl);
	np->sv_stest1	= INB(np, nc_stest1);
	np->sv_stest2	= INB(np, nc_stest2) & 0x20;
	np->sv_stest4	= INB(np, nc_stest4);
	if (np->features & FE_C10) {	/* Always large DMA fifo + ultra3 */
		np->sv_scntl4	= INB(np, nc_scntl4);
		np->sv_ctest5	= INB(np, nc_ctest5) & 0x04;
	}
	else
		np->sv_ctest5	= INB(np, nc_ctest5) & 0x24;
}

/*
 * Set SCSI BUS mode.
 * - LVD capable chips (895/895A/896/1010) report the current BUS mode
 *   through the STEST4 IO register.
 * - For previous generation chips (825/825A/875), the user has to tell us
 *   how to check against HVD, since a 100% safe algorithm is not possible.
 */
static void sym_set_bus_mode(struct sym_hcb *np, struct sym_nvram *nvram)
{
	if (np->scsi_mode)	/* already determined (e.g. by NVRAM) */
		return;

	np->scsi_mode = SMODE_SE;
	if (np->features & (FE_ULTRA2|FE_ULTRA3))
		np->scsi_mode = (np->sv_stest4 & SMODE);
	else if	(np->features & FE_DIFF) {
		if (SYM_SETUP_SCSI_DIFF == 1) {
			if (np->sv_scntl3) {
				if (np->sv_stest2 & 0x20)
					np->scsi_mode = SMODE_HVD;
			}
			else if (nvram->type == SYM_SYMBIOS_NVRAM) {
				if (!(INB(np, nc_gpreg) & 0x08))
					np->scsi_mode = SMODE_HVD;
			}
		}
		else if	(SYM_SETUP_SCSI_DIFF == 2)
			np->scsi_mode = SMODE_HVD;
	}
	if (np->scsi_mode == SMODE_HVD)
		np->rv_stest2 |= 0x20;
}

/*
 * Prepare io register values used by sym_start_up()
 * according to selected and supported features.
 */
static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram)
{
	struct sym_data *sym_data = shost_priv(shost);
	struct pci_dev *pdev = sym_data->pdev;
	u_char	burst_max;
	u32	period;
	int i;

	np->maxwide = (np->features & FE_WIDE) ? 1 : 0;

	/*
	 * Guess the frequency of the chip's clock.
	 */
	if (np->features & (FE_ULTRA3 | FE_ULTRA2))
		np->clock_khz = 160000;
	else if	(np->features & FE_ULTRA)
		np->clock_khz = 80000;
	else
		np->clock_khz = 40000;

	/*
	 * Get the clock multiplier factor.
 	 */
	if	(np->features & FE_QUAD)
		np->multiplier	= 4;
	else if	(np->features & FE_DBLR)
		np->multiplier	= 2;
	else
		np->multiplier	= 1;

	/*
	 * Measure SCSI clock frequency for chips
	 * it may vary from assumed one.
	 */
	if (np->features & FE_VARCLK)
		sym_getclock(np, np->multiplier);

	/*
	 * Divisor to be used for async (timer pre-scaler).
	 */
	i = np->clock_divn - 1;
	while (--i >= 0) {
		if (10ul * SYM_CONF_MIN_ASYNC * np->clock_khz > div_10M[i]) {
			++i;
			break;
		}
	}
	np->rv_scntl3 = i+1;

	/*
	 * The C1010 uses hardwired divisors for async.
	 * So, we just throw away, the async. divisor.:-)
	 */
	if (np->features & FE_C10)
		np->rv_scntl3 = 0;

	/*
	 * Minimum synchronous period factor supported by the chip.
	 * Btw, 'period' is in tenths of nanoseconds.
	 */
	period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz;

	if	(period <= 250)		np->minsync = 10;
	else if	(period <= 303)		np->minsync = 11;
	else if	(period <= 500)		np->minsync = 12;
	else				np->minsync = (period + 40 - 1) / 40;

	/*
	 * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2).
	 */
	if	(np->minsync < 25 &&
		 !(np->features & (FE_ULTRA|FE_ULTRA2|FE_ULTRA3)))
		np->minsync = 25;
	else if	(np->minsync < 12 &&
		 !(np->features & (FE_ULTRA2|FE_ULTRA3)))
		np->minsync = 12;

	/*
	 * Maximum synchronous period factor supported by the chip.
	 */
	period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz);
	np->maxsync = period > 2540 ? 254 : period / 10;

	/*
	 * If chip is a C1010, guess the sync limits in DT mode.
	 */
	if ((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) {
		if (np->clock_khz == 160000) {
			np->minsync_dt = 9;
			np->maxsync_dt = 50;
			np->maxoffs_dt = nvram->type ? 62 : 31;
		}
	}

	/*
	 * 64 bit addressing  (895A/896/1010) ?
	 */
	if (np->features & FE_DAC) {
		if (!use_dac(np))
			np->rv_ccntl1 |= (DDAC);
		else if (SYM_CONF_DMA_ADDRESSING_MODE == 1)
			np->rv_ccntl1 |= (XTIMOD | EXTIBMV);
		else if (SYM_CONF_DMA_ADDRESSING_MODE == 2)
			np->rv_ccntl1 |= (0 | EXTIBMV);
	}

	/*
	 * Phase mismatch handled by SCRIPTS (895A/896/1010) ?
  	 */
	if (np->features & FE_NOPM)
		np->rv_ccntl0	|= (ENPMJ);

 	/*
	 *  C1010-33 Errata: Part Number:609-039638 (rev. 1) is fixed.
	 *  In dual channel mode, contention occurs if internal cycles
	 *  are used. Disable internal cycles.
	 */
	if (pdev->device == PCI_DEVICE_ID_LSI_53C1010_33 &&
	    pdev->revision < 0x1)
		np->rv_ccntl0	|=  DILS;

	/*
	 * Select burst length (dwords)
	 */
	burst_max	= SYM_SETUP_BURST_ORDER;
	if (burst_max == 255)	/* 255 == "use BIOS setting" */
		burst_max = burst_code(np->sv_dmode, np->sv_ctest4,
				       np->sv_ctest5);
	if (burst_max > 7)
		burst_max = 7;
	if (burst_max > np->maxburst)
		burst_max = np->maxburst;

	/*
	 * DEL 352 - 53C810 Rev x11 - Part Number 609-0392140 - ITEM 2.
	 * This chip and the 860 Rev 1 may wrongly use PCI cache line
	 * based transactions on LOAD/STORE instructions. So we have
	 * to prevent these chips from using such PCI transactions in
	 * this driver. The generic ncr driver that does not use
	 * LOAD/STORE instructions does not need this work-around.
	 */
	if ((pdev->device == PCI_DEVICE_ID_NCR_53C810 &&
	     pdev->revision >= 0x10 && pdev->revision <= 0x11) ||
	    (pdev->device == PCI_DEVICE_ID_NCR_53C860 &&
	     pdev->revision <= 0x1))
		np->features &= ~(FE_WRIE|FE_ERL|FE_ERMP);

	/*
	 * Select all supported special features.
	 * If we are using on-board RAM for scripts, prefetch (PFEN)
	 * does not help, but burst op fetch (BOF) does.
	 * Disabling PFEN makes sure BOF will be used.
	 */
	if (np->features & FE_ERL)
		np->rv_dmode	|= ERL;		/* Enable Read Line */
	if (np->features & FE_BOF)
		np->rv_dmode	|= BOF;		/* Burst Opcode Fetch */
	if (np->features & FE_ERMP)
		np->rv_dmode	|= ERMP;	/* Enable Read Multiple */
#if 1
	if ((np->features & FE_PFEN) && !np->ram_ba)
#else
	if (np->features & FE_PFEN)
#endif
		np->rv_dcntl	|= PFEN;	/* Prefetch Enable */
	if (np->features & FE_CLSE)
		np->rv_dcntl	|= CLSE;	/* Cache Line Size Enable */
	if (np->features & FE_WRIE)
		np->rv_ctest3	|= WRIE;	/* Write and Invalidate */
	if (np->features & FE_DFS)
		np->rv_ctest5	|= DFS;		/* Dma Fifo Size */

	/*
	 * Select some other
	 */
	np->rv_ctest4	|= MPEE; /* Master parity checking */
	np->rv_scntl0	|= 0x0a; /*  full arb., ena parity, par->ATN  */

	/*
	 * Get parity checking, host ID and verbose mode from NVRAM
	 */
	np->myaddr = 255;
	np->scsi_mode = 0;
	sym_nvram_setup_host(shost, np, nvram);

	/*
	 * Get SCSI addr of host adapter (set by bios?).
	 */
	if (np->myaddr == 255) {
		np->myaddr = INB(np, nc_scid) & 0x07;
		if (!np->myaddr)
			np->myaddr = SYM_SETUP_HOST_ID;
	}

	/*
	 * Prepare initial io register bits for burst length
	 */
	sym_init_burst(np, burst_max);

	sym_set_bus_mode(np, nvram);

	/*
	 * Set LED support from SCRIPTS.
	 * Ignore this feature for boards known to use a
	 * specific GPIO wiring and for the 895A, 896
	 * and 1010 that drive the LED directly.
	 */
	if ((SYM_SETUP_SCSI_LED ||
	     (nvram->type == SYM_SYMBIOS_NVRAM ||
	      (nvram->type == SYM_TEKRAM_NVRAM &&
	       pdev->device == PCI_DEVICE_ID_NCR_53C895))) &&
	    !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01))
		np->features |= FE_LED0;

	/*
	 * Set irq mode.
	 */
	switch(SYM_SETUP_IRQ_MODE & 3) {
	case 2:
		np->rv_dcntl	|= IRQM;
		break;
	case 1:
		np->rv_dcntl	|= (np->sv_dcntl & IRQM);
		break;
	default:
		break;
	}

	/*
	 * Configure targets according to driver setup.
	 * If NVRAM present get targets setup from NVRAM.
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
		struct sym_tcb *tp = &np->target[i];

		tp->usrflags |= (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);
		tp->usrtags = SYM_SETUP_MAX_TAG;
		tp->usr_width = np->maxwide;
		tp->usr_period = 9;

		sym_nvram_setup_target(tp, i, nvram);

		if (!tp->usrtags)
			tp->usrflags &= ~SYM_TAGS_ENABLED;
	}

	/*
	 * Let user know about the settings.
	 */
	printf("%s: %s, ID %d, Fast-%d, %s, %s\n", sym_name(np),
		sym_nvram_type(nvram), np->myaddr,
		(np->features & FE_ULTRA3) ? 80 :
		(np->features & FE_ULTRA2) ? 40 :
		(np->features & FE_ULTRA)  ? 20 : 10,
		sym_scsi_bus_mode(np->scsi_mode),
		(np->rv_scntl0 & 0xa)	? "parity checking" : "NO parity");
	/*
	 * Tell him more on demand.
	 */
	if (sym_verbose) {
		printf("%s: %s IRQ line driver%s\n",
			sym_name(np),
			np->rv_dcntl & IRQM ? "totem pole" : "open drain",
			np->ram_ba ? ", using on-chip SRAM" : "");
		printf("%s: using %s firmware.\n", sym_name(np), np->fw_name);
		if (np->features & FE_NOPM)
			printf("%s: handling phase mismatch from SCRIPTS.\n",
			       sym_name(np));
	}
	/*
	 * And still more.
	 */
	if (sym_verbose >= 2) {
		printf ("%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
			"(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
			sym_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl,
			np->sv_ctest3, np->sv_ctest4, np->sv_ctest5);

		printf ("%s: final   SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
			"(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
			sym_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl,
			np->rv_ctest3, np->rv_ctest4, np->rv_ctest5);
	}

	return 0;
}

/*
 * Test the pci bus snoop logic :-(
 *
 * Has to be called with interrupts disabled.
 */
#ifdef CONFIG_SCSI_SYM53C8XX_MMIO
static int sym_regtest(struct sym_hcb *np)
{
	register volatile u32 data;
	/*
	 * chip registers may NOT be cached.
	 * write 0xffffffff to a read only register area,
	 * and try to read it back.
	 */
	data = 0xffffffff;
	OUTL(np, nc_dstat, data);
	data = INL(np, nc_dstat);
#if 1
	if (data == 0xffffffff) {
#else
	if ((data & 0xe2f0fffd) != 0x02000080) {
#endif
		printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n",
			(unsigned) data);
		return 0x10;
	}
	return 0;
}
#else
static inline int sym_regtest(struct sym_hcb *np)
{
	return 0;
}
#endif

/*
 * Exchange values between host memory and chip registers via a small
 * SCRIPTS program, then verify both directions saw the expected data.
 * Returns 0 on success, or a non-zero error code / bit mask.
 */
static int sym_snooptest(struct sym_hcb *np)
{
	u32 sym_rd, sym_wr, sym_bk, host_rd, host_wr, pc, dstat;
	int i, err;

	err = sym_regtest(np);
	if (err)
		return err;
restart_test:
	/*
	 * Enable Master Parity Checking as we intend
	 * to enable it for normal operations.
	 */
	OUTB(np, nc_ctest4, (np->rv_ctest4 & MPEE));
	/*
	 * init
	 */
	pc  = SCRIPTZ_BA(np, snooptest);
	host_wr = 1;
	sym_wr  = 2;
	/*
	 * Set memory and register.
	 */
	np->scratch = cpu_to_scr(host_wr);
	OUTL(np, nc_temp, sym_wr);
	/*
	 * Start script (exchange values)
	 */
	OUTL(np, nc_dsa, np->hcb_ba);
	OUTL_DSP(np, pc);
	/*
	 * Wait 'til done (with timeout)
	 */
	for (i=0; i<SYM_SNOOP_TIMEOUT; i++)
		if (INB(np, nc_istat) & (INTF|SIP|DIP))
			break;
	if (i>=SYM_SNOOP_TIMEOUT) {
		printf ("CACHE TEST FAILED: timeout.\n");
		return (0x20);
	}
	/*
	 * Check for fatal DMA errors.
	 */
	dstat = INB(np, nc_dstat);
#if 1	/* Band aiding for broken hardwares that fail PCI parity */
	if ((dstat & MDPE) && (np->rv_ctest4 & MPEE)) {
		printf ("%s: PCI DATA PARITY ERROR DETECTED - "
			"DISABLING MASTER DATA PARITY CHECKING.\n",
			sym_name(np));
		np->rv_ctest4 &= ~MPEE;
		goto restart_test;
	}
#endif
	if (dstat & (MDPE|BF|IID)) {
		printf ("CACHE TEST FAILED: DMA error (dstat=0x%02x).", dstat);
		return (0x80);
	}
	/*
	 * Save termination position.
	 */
	pc = INL(np, nc_dsp);
	/*
	 * Read memory and register.
	 */
	host_rd = scr_to_cpu(np->scratch);
	sym_rd  = INL(np, nc_scratcha);
	sym_bk  = INL(np, nc_temp);
	/*
	 * Check termination position.
 *
 * First 24 register of the chip:
 *	r0..rf
 */
static void sym_log_hard_error(struct Scsi_Host *shost, u_short sist, u_char dstat)
{
	struct sym_hcb *np = sym_get_hcb(shost);
	u32	dsp;
	int	script_ofs;
	int	script_size;
	char	*script_name;
	u_char	*script_base;
	int	i;

	dsp	= INL(np, nc_dsp);

	/* classify the failing address: scripta, scriptb or plain memory */
	if	(dsp > np->scripta_ba &&
		 dsp <= np->scripta_ba + np->scripta_sz) {
		script_ofs	= dsp - np->scripta_ba;
		script_size	= np->scripta_sz;
		script_base	= (u_char *) np->scripta0;
		script_name	= "scripta";
	}
	else if (np->scriptb_ba < dsp &&
		 dsp <= np->scriptb_ba + np->scriptb_sz) {
		script_ofs	= dsp - np->scriptb_ba;
		script_size	= np->scriptb_sz;
		script_base	= (u_char *) np->scriptb0;
		script_name	= "scriptb";
	} else {
		script_ofs	= dsp;
		script_size	= 0;
		script_base	= NULL;
		script_name	= "mem";
	}

	printf ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x/%x) @ (%s %x:%08x).\n",
		sym_name(np), (unsigned)INB(np, nc_sdid)&0x0f, dstat, sist,
		(unsigned)INB(np, nc_socl), (unsigned)INB(np, nc_sbcl),
		(unsigned)INB(np, nc_sbdl), (unsigned)INB(np, nc_sxfer),
		(unsigned)INB(np, nc_scntl3),
		(np->features & FE_C10) ?  (unsigned)INB(np, nc_scntl4) : 0,
		script_name, script_ofs,   (unsigned)INL(np, nc_dbc));

	if (((script_ofs & 3) == 0) &&
	    (unsigned)script_ofs < script_size) {
		printf ("%s: script cmd = %08x\n", sym_name(np),
			scr_to_cpu((int) *(u32 *)(script_base + script_ofs)));
	}

	printf("%s: regdump:", sym_name(np));
	for (i = 0; i < 24; i++)
		printf(" %02x", (unsigned)INB_OFF(np, i));
	printf(".\n");

	/*
	 * PCI BUS error.
	 */
	if (dstat & (MDPE|BF))
		sym_log_bus_error(shost);
}

/* Read and log the current interrupt/error state of the chip. */
void sym_dump_registers(struct Scsi_Host *shost)
{
	struct sym_hcb *np = sym_get_hcb(shost);
	u_short sist;
	u_char dstat;

	sist = INW(np, nc_sist);
	dstat = INB(np, nc_dstat);
	sym_log_hard_error(shost, sist, dstat);
}

/*
 * Chip capability table, keyed by (PCI device id, max revision).
 * Fields: device id, max revision, name, burst_max, offset_max,
 * nr_divisor, ram size and feature flags.
 */
static struct sym_chip sym_dev_table[] = {
 {PCI_DEVICE_ID_NCR_53C810, 0x0f, "810", 4, 8, 4, 64,
 FE_ERL}
 ,
#ifdef SYM_DEBUG_GENERIC_SUPPORT
 {PCI_DEVICE_ID_NCR_53C810, 0xff, "810a", 4,  8, 4, 1,
 FE_BOF}
 ,
#else
 {PCI_DEVICE_ID_NCR_53C810, 0xff, "810a", 4,  8, 4, 1,
 FE_CACHE_SET|FE_LDSTR|FE_PFEN|FE_BOF}
 ,
#endif
 {PCI_DEVICE_ID_NCR_53C815, 0xff, "815", 4,  8, 4, 64,
 FE_BOF|FE_ERL}
 ,
 {PCI_DEVICE_ID_NCR_53C825, 0x0f, "825", 6,  8, 4, 64,
 FE_WIDE|FE_BOF|FE_ERL|FE_DIFF}
 ,
 {PCI_DEVICE_ID_NCR_53C825, 0xff, "825a", 6,  8, 4, 2,
 FE_WIDE|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM|FE_DIFF}
 ,
 {PCI_DEVICE_ID_NCR_53C860, 0xff, "860", 4,  8, 5, 1,
 FE_ULTRA|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN}
 ,
 {PCI_DEVICE_ID_NCR_53C875, 0x01, "875", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF|FE_VARCLK}
 ,
 {PCI_DEVICE_ID_NCR_53C875, 0xff, "875", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF|FE_VARCLK}
 ,
 {PCI_DEVICE_ID_NCR_53C875J, 0xff, "875J", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF|FE_VARCLK}
 ,
 {PCI_DEVICE_ID_NCR_53C885, 0xff, "885", 6, 16, 5, 2,
 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DIFF|FE_VARCLK}
 ,
#ifdef SYM_DEBUG_GENERIC_SUPPORT
 {PCI_DEVICE_ID_NCR_53C895, 0xff, "895", 6, 31, 7, 2,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|
 FE_RAM|FE_LCKFRQ}
 ,
#else
 {PCI_DEVICE_ID_NCR_53C895, 0xff, "895", 6, 31, 7, 2,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_LCKFRQ}
 ,
#endif
 {PCI_DEVICE_ID_NCR_53C896, 0xff, "896", 6, 31, 7, 4,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
 ,
 {PCI_DEVICE_ID_LSI_53C895A, 0xff, "895a", 6, 31, 7, 4,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
 ,
 {PCI_DEVICE_ID_LSI_53C875A, 0xff, "875a", 6, 31, 7, 4,
 FE_WIDE|FE_ULTRA|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
 ,
 {PCI_DEVICE_ID_LSI_53C1010_33, 0x00, "1010-33", 6, 31, 7, 8,
 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC|
 FE_C10}
 ,
 {PCI_DEVICE_ID_LSI_53C1010_33, 0xff, "1010-33", 6, 31, 7, 8,
 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC|
 FE_C10|FE_U3EN}
 ,
 {PCI_DEVICE_ID_LSI_53C1010_66, 0xff, "1010-66", 6, 31, 7, 8,
 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_66MHZ|FE_CRC|
 FE_C10|FE_U3EN}
 ,
 {PCI_DEVICE_ID_LSI_53C1510, 0xff, "1510d", 6, 31, 7, 4,
 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
 FE_RAM|FE_IO256|FE_LEDC}
};

#define sym_num_devs (ARRAY_SIZE(sym_dev_table))

/*
 * Look up the chip table.
 *
 * Return a pointer to the chip entry if found,
 * zero otherwise.
 */
struct sym_chip *
sym_lookup_chip_table (u_short device_id, u_char revision)
{
	struct	sym_chip *chip;
	int	i;

	for (i = 0; i < sym_num_devs; i++) {
		chip = &sym_dev_table[i];
		if (device_id != chip->device_id)
			continue;
		if (revision > chip->revision_id)
			continue;
		return chip;
	}

	return NULL;
}

#if SYM_CONF_DMA_ADDRESSING_MODE == 2
/*
 * Lookup the 64 bit DMA segments map.
 * This is only used if the direct mapping
 * has been unsuccessful.
 */
/*
 *  Find (or allocate) a segment register slot for the value 'h'
 *  (presumably the high order 32 bits of a 64 bit DMA address —
 *  confirm against dmap_bah users), preferring slot 's'.
 *  Returns the slot index; panics when all slots are taken.
 *  Note: the search loops stop at i > 0 / s > 0, so slot 0 is
 *  never handed out by the collision fallback.
 */
int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s)
{
	int i;

	if (!use_dac(np))
		goto weird;

	/* Look up existing mappings */
	for (i = SYM_DMAP_SIZE-1; i > 0; i--) {
		if (h == np->dmap_bah[i])
			return i;
	}
	/* If direct mapping is free, get it */
	if (!np->dmap_bah[s])
		goto new;
	/* Collision -> lookup free mappings */
	for (s = SYM_DMAP_SIZE-1; s > 0; s--) {
		if (!np->dmap_bah[s])
			goto new;
	}
weird:
	panic("sym: ran out of 64 bit DMA segment registers");
	return -1;
new:
	np->dmap_bah[s] = h;
	/* mark map dirty so the chip registers get resynced */
	np->dmap_dirty = 1;
	return s;
}

/*
 *  Update IO registers scratch C..R so they will be 
 *  in sync. with queued CCB expectations.
 */
static void sym_update_dmap_regs(struct sym_hcb *np)
{
	int o, i;

	if (!np->dmap_dirty)
		return;
	/* scratch registers are contiguous, 4 bytes apart */
	o = offsetof(struct sym_reg, nc_scrx[0]);
	for (i = 0; i < SYM_DMAP_SIZE; i++) {
		OUTL_OFF(np, o, np->dmap_bah[i]);
		o += 4;
	}
	np->dmap_dirty = 0;
}
#endif

/*
 *  Enforce all the fiddly SPI rules and the chip limitations,
 *  clamping the negotiation goal to what both the target (per the
 *  SPI transport flags) and the chip (np->maxoffs*/minsync*/maxsync*)
 *  can actually do.
 */
static void sym_check_goals(struct sym_hcb *np, struct scsi_target *starget,
		struct sym_trans *goal)
{
	if (!spi_support_wide(starget))
		goal->width = 0;

	if (!spi_support_sync(starget)) {
		goal->iu = 0;
		goal->dt = 0;
		goal->qas = 0;
		goal->offset = 0;
		return;
	}

	if (spi_support_dt(starget)) {
		if (spi_support_dt_only(starget))
			goal->dt = 1;

		/* DT is meaningless without a sync offset */
		if (goal->offset == 0)
			goal->dt = 0;
	} else {
		goal->dt = 0;
	}

	/* Some targets fail to properly negotiate DT in SE mode */
	if ((np->scsi_mode != SMODE_LVD) || !(np->features & FE_U3EN))
		goal->dt = 0;

	if (goal->dt) {
		/* all DT transfers must be wide */
		goal->width = 1;
		if (goal->offset > np->maxoffs_dt)
			goal->offset = np->maxoffs_dt;
		if (goal->period < np->minsync_dt)
			goal->period = np->minsync_dt;
		if (goal->period > np->maxsync_dt)
			goal->period = np->maxsync_dt;
	} else {
		/* IU and QAS require DT */
		goal->iu = goal->qas = 0;
		if (goal->offset > np->maxoffs)
			goal->offset = np->maxoffs;
		if (goal->period < np->minsync)
			goal->period = np->minsync;
		if (goal->period > np->maxsync)
			goal->period = np->maxsync;
	}
}

/*
 *  Prepare the next negotiation message if 
 *  needed.
 *
 *  Fill in the part of message buffer that contains the 
 *  negotiation and the nego_status field of the CCB.
 *  Returns the size of the message in bytes.
 *
 *  Negotiation preference order is PPR, then WIDE, then SYNC;
 *  PPR is only chosen when an option that requires it (IU/DT/QAS
 *  or a fast period) is wanted, or when a PPR renegotiation is due.
 */
static int sym_prepare_nego(struct sym_hcb *np, struct sym_ccb *cp, u_char *msgptr)
{
	struct sym_tcb *tp = &np->target[cp->target];
	struct scsi_target *starget = tp->starget;
	struct sym_trans *goal = &tp->tgoal;
	int msglen = 0;
	int nego;

	sym_check_goals(np, starget, goal);

	/*
	 * Many devices implement PPR in a buggy way, so only use it if we
	 * really want to.
	 */
	if (goal->renego == NS_PPR || (goal->offset &&
	    (goal->iu || goal->dt || goal->qas || (goal->period < 0xa)))) {
		nego = NS_PPR;
	} else if (goal->renego == NS_WIDE || goal->width) {
		nego = NS_WIDE;
	} else if (goal->renego == NS_SYNC || goal->offset) {
		nego = NS_SYNC;
	} else {
		/* nothing to negotiate */
		goal->check_nego = 0;
		nego = 0;
	}

	switch (nego) {
	case NS_SYNC:
		msglen += spi_populate_sync_msg(msgptr + msglen, goal->period,
				goal->offset);
		break;
	case NS_WIDE:
		msglen += spi_populate_width_msg(msgptr + msglen, goal->width);
		break;
	case NS_PPR:
		msglen += spi_populate_ppr_msg(msgptr + msglen, goal->period,
				goal->offset, goal->width,
				(goal->iu ? PPR_OPT_IU : 0) |
				(goal->dt ? PPR_OPT_DT : 0) |
				(goal->qas ? PPR_OPT_QAS : 0));
		break;
	}

	cp->nego_status = nego;

	if (nego) {
		tp->nego_cp = cp; /* Keep track a nego will be performed */
		if (DEBUG_FLAGS & DEBUG_NEGO) {
			sym_print_nego_msg(np, cp->target,
					  nego == NS_SYNC ? "sync msgout" :
					  nego == NS_WIDE ? "wide msgout" :
					  "ppr msgout", msgptr);
		}
	}

	return msglen;
}

/*
 *  Insert a job into the start queue.
 */
void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp)
{
	u_short	qidx;

#ifdef SYM_CONF_IARB_SUPPORT
	/*
	 *  If the previously queued CCB is not yet done, 
	 *  set the IARB hint. The SCRIPTS will go with IARB 
	 *  for this job when starting the previous one.
	 *  We leave devices a chance to win arbitration by 
	 *  not using more than 'iarb_max' consecutive 
	 *  immediate arbitrations.
	 */
	if (np->last_cp && np->iarb_count < np->iarb_max) {
		np->last_cp->host_flags |= HF_HINT_IARB;
		++np->iarb_count;
	}
	else
		np->iarb_count = 0;
	np->last_cp = cp;
#endif

#if SYM_CONF_DMA_ADDRESSING_MODE == 2
	/*
	 *  Make SCRIPTS aware of the 64 bit DMA 
	 *  segment registers not being up-to-date.
	 */
	if (np->dmap_dirty)
		cp->host_xflags |= HX_DMAP_DIRTY;
#endif

	/*
	 *  Insert first the idle task and then our job.
	 *  The MBs should ensure proper ordering.
	 */
	qidx = np->squeueput + 2;	/* ring advances two words per job */
	if (qidx >= MAX_QUEUE*2) qidx = 0;	/* wrap around */

	np->squeue [qidx]          = cpu_to_scr(np->idletask_ba);
	MEMORY_WRITE_BARRIER();
	/* publish our CCB only after the idle-task sentinel is visible */
	np->squeue [np->squeueput] = cpu_to_scr(cp->ccb_ba);

	np->squeueput = qidx;

	if (DEBUG_FLAGS & DEBUG_QUEUE)
		scmd_printk(KERN_DEBUG, cp->cmd, "queuepos=%d\n",
							np->squeueput);

	/*
	 *  Script processor may be waiting for reselect.
	 *  Wake it up.
	 */
	MEMORY_WRITE_BARRIER();
	OUTB(np, nc_istat, SIGP|np->istat_sem);
}

#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
/*
 *  Start next ready-to-start CCBs.
 */
void sym_start_next_ccbs(struct sym_hcb *np, struct sym_lcb *lp, int maxn)
{
	SYM_QUEHEAD *qp;
	struct sym_ccb *cp;

	/*
	 *  Paranoia, as usual. :-)
	 */
	assert(!lp->started_tags || !lp->started_no_tag);

	/*
	 *  Try to start as many commands as asked by caller.
	 *  Prevent from having both tagged and untagged 
	 *  commands queued to the device at the same time.
	 */
	while (maxn--) {
		qp = sym_remque_head(&lp->waiting_ccbq);
		if (!qp)
			break;
		cp = sym_que_entry(qp, struct sym_ccb, link2_ccbq);
		if (cp->tag != NO_TAG) {
			/* a tagged job cannot start while an untagged one
			   is outstanding, or above the tag limit */
			if (lp->started_no_tag ||
			    lp->started_tags >= lp->started_max) {
				sym_insque_head(qp, &lp->waiting_ccbq);
				break;
			}
			lp->itlq_tbl[cp->tag] = cpu_to_scr(cp->ccb_ba);
			lp->head.resel_sa =
				cpu_to_scr(SCRIPTA_BA(np, resel_tag));
			++lp->started_tags;
		} else {
			/* only one untagged job at a time, and none while
			   tagged jobs are outstanding */
			if (lp->started_no_tag || lp->started_tags) {
				sym_insque_head(qp, &lp->waiting_ccbq);
				break;
			}
			lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba);
			lp->head.resel_sa =
			      cpu_to_scr(SCRIPTA_BA(np, resel_no_tag));
			++lp->started_no_tag;
		}
		cp->started = 1;
		sym_insque_tail(qp, &lp->started_ccbq);
		sym_put_start_queue(np, cp);
	}
}
#endif /* SYM_OPT_HANDLE_DEVICE_QUEUEING */

/*
 *  The chip may have completed jobs. Look at the DONE QUEUE.
 *
 *  On paper, memory read barriers may be needed here to 
 *  prevent out of order LOADs by the CPU from having 
 *  prefetched stale data prior to DMA having occurred.
 *
 *  Returns the number of completed CCBs.  A zero DSA entry
 *  terminates the scan; consumed entries are cleared so the
 *  chip can reuse them.
 */
static int sym_wakeup_done (struct sym_hcb *np)
{
	struct sym_ccb *cp;
	int i, n;
	u32 dsa;

	n = 0;
	i = np->dqueueget;

	/* MEMORY_READ_BARRIER(); */
	while (1) {
		dsa = scr_to_cpu(np->dqueue[i]);
		if (!dsa)
			break;
		np->dqueue[i] = 0;
		if ((i = i+2) >= MAX_QUEUE*2)
			i = 0;

		cp = sym_ccb_from_dsa(np, dsa);
		if (cp) {
			MEMORY_READ_BARRIER();
			sym_complete_ok (np, cp);
			++n;
		}
		else
			printf ("%s: bad DSA (%x) in done queue.\n",
				sym_name(np), (u_int) dsa);
	}
	np->dqueueget = i;

	return n;
}

/*
 *  Complete all CCBs queued to the COMP queue.
 *
 *  These CCBs are assumed:
 *  - Not to be referenced either by devices or 
 *    SCRIPTS-related queues and datas.
 *  - To have to be completed with an error condition 
 *    or requeued.
 *
 *  The device queue freeze count is incremented 
 *  for each CCB that does not prevent this.
 *  This function is called when all CCBs involved 
 *  in error handling/recovery have been reaped.
 */
static void sym_flush_comp_queue(struct sym_hcb *np, int cam_status)
{
	SYM_QUEHEAD *qp;
	struct sym_ccb *cp;

	while ((qp = sym_remque_head(&np->comp_ccbq)) != NULL) {
		struct scsi_cmnd *cmd;
		cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
		/* Leave quiet CCBs waiting for resources */
		if (cp->host_status == HS_WAIT)
			continue;
		cmd = cp->cmd;
		/* cam_status == 0 keeps whatever status is already set */
		if (cam_status)
			sym_set_cam_status(cmd, cam_status);
#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
		if (sym_get_cam_status(cmd) == DID_SOFT_ERROR) {
			struct sym_tcb *tp = &np->target[cp->target];
			struct sym_lcb *lp = sym_lp(tp, cp->lun);
			if (lp) {
				/* requeue instead of completing, and undo
				   the started accounting for this CCB */
				sym_remque(&cp->link2_ccbq);
				sym_insque_tail(&cp->link2_ccbq,
				                &lp->waiting_ccbq);
				if (cp->started) {
					if (cp->tag != NO_TAG)
						--lp->started_tags;
					else
						--lp->started_no_tag;
				}
			}
			cp->started = 0;
			continue;
		}
#endif
		sym_free_ccb(np, cp);
		sym_xpt_done(np, cmd);
	}
}

/*
 *  Complete all active CCBs with error.
 *  Used on CHIP/SCSI RESET.
 */
static void sym_flush_busy_queue (struct sym_hcb *np, int cam_status)
{
	/*
	 *  Move all active CCBs to the COMP queue 
	 *  and flush this queue.
	 */
	sym_que_splice(&np->busy_ccbq, &np->comp_ccbq);
	sym_que_init(&np->busy_ccbq);
	sym_flush_comp_queue(np, cam_status);
}

/*
 *  Start chip.
 *
 *  'reason' means:
 *     0: initialisation.
 *     1: SCSI BUS RESET delivered or received.
 *     2: SCSI BUS MODE changed.
 */
void sym_start_up(struct Scsi_Host *shost, int reason)
{
	struct sym_data *sym_data = shost_priv(shost);
	struct pci_dev *pdev = sym_data->pdev;
	struct sym_hcb *np = sym_data->ncb;
	int	i;
	u32	phys;

	/*
	 *  Reset chip if asked, otherwise just clear fifos.
	 */
	if (reason == 1)
		sym_soft_reset(np);
	else {
		OUTB(np, nc_stest3, TE|CSF);
		OUTONB(np, nc_ctest3, CLF);
	}

	/*
	 *  Clear Start Queue.
	 *  Even entries hold the task pointer (reset to the idle task),
	 *  odd entries link to the next pair; the last entry closes
	 *  the ring back to the start.
	 */
	phys = np->squeue_ba;
	for (i = 0; i < MAX_QUEUE*2; i += 2) {
		np->squeue[i]   = cpu_to_scr(np->idletask_ba);
		np->squeue[i+1] = cpu_to_scr(phys + (i+2)*4);
	}
	np->squeue[MAX_QUEUE*2-1] = cpu_to_scr(phys);

	/*
	 *  Start at first entry.
	 */
	np->squeueput = 0;

	/*
	 *  Clear Done Queue (same ring layout as the start queue).
	 */
	phys = np->dqueue_ba;
	for (i = 0; i < MAX_QUEUE*2; i += 2) {
		np->dqueue[i]   = 0;
		np->dqueue[i+1] = cpu_to_scr(phys + (i+2)*4);
	}
	np->dqueue[MAX_QUEUE*2-1] = cpu_to_scr(phys);

	/*
	 *  Start at first entry.
	 */
	np->dqueueget = 0;

	/*
	 *  Install patches in scripts.
	 *  This also let point to first position the start 
	 *  and done queue pointers used from SCRIPTS.
	 */
	np->fw_patch(shost);

	/*
	 *  Wakeup all pending jobs.
	 */
	sym_flush_busy_queue(np, DID_RESET);

	/*
	 *  Init chip.
	 */
	OUTB(np, nc_istat,  0x00);			/*  Remove Reset, abort */
	INB(np, nc_mbox1);	/* flush posted write before the delay */
	udelay(2000); /* The 895 needs time for the bus mode to settle */

	OUTB(np, nc_scntl0, np->rv_scntl0 | 0xc0);
					/*  full arb., ena parity, par->ATN  */
	OUTB(np, nc_scntl1, 0x00);		/*  odd parity, and remove CRST!! */

	sym_selectclock(np, np->rv_scntl3);	/* Select SCSI clock */

	OUTB(np, nc_scid  , RRE|np->myaddr);	/* Adapter SCSI address */
	OUTW(np, nc_respid, 1ul<<np->myaddr);	/* Id to respond to */
	OUTB(np, nc_istat , SIGP	);		/*  Signal Process */
	OUTB(np, nc_dmode , np->rv_dmode);	/* Burst length, dma mode */
	OUTB(np, nc_ctest5, np->rv_ctest5);	/* Large fifo + large burst */

	OUTB(np, nc_dcntl , NOCOM|np->rv_dcntl);	/* Protect SFBR */
	OUTB(np, nc_ctest3, np->rv_ctest3);	/* Write and invalidate */
	OUTB(np, nc_ctest4, np->rv_ctest4);	/* Master parity checking */

	/* Extended Sreq/Sack filtering not supported on the C10 */
	if (np->features & FE_C10)
		OUTB(np, nc_stest2, np->rv_stest2);
	else
		OUTB(np, nc_stest2, EXT|np->rv_stest2);

	OUTB(np, nc_stest3, TE);			/* TolerANT enable */
	OUTB(np, nc_stime0, 0x0c);			/* HTH disabled  STO 0.25 sec */

	/*
	 *  For now, disable AIP generation on C1010-66.
	 */
	if (pdev->device == PCI_DEVICE_ID_LSI_53C1010_66)
		OUTB(np, nc_aipcntl1, DISAIP);

	/*
	 *  C10101 rev. 0 errata.
	 *  Errant SGE's when in narrow. Write bits 4 & 5 of
	 *  STEST1 register to disable SGE. We probably should do 
	 *  that from SCRIPTS for each selection/reselection, but 
	 *  I just don't want.
	 :)
	 */
	if (pdev->device == PCI_DEVICE_ID_LSI_53C1010_33 &&
	    pdev->revision < 1)
		OUTB(np, nc_stest1, INB(np, nc_stest1) | 0x30);

	/*
	 *  DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2.
	 *  Disable overlapped arbitration for some dual function devices, 
	 *  regardless revision id (kind of post-chip-design feature. ;-))
	 */
	if (pdev->device == PCI_DEVICE_ID_NCR_53C875)
		OUTB(np, nc_ctest0, (1<<5));
	else if (pdev->device == PCI_DEVICE_ID_NCR_53C896)
		np->rv_ccntl0 |= DPR;

	/*
	 *  Write CCNTL0/CCNTL1 for chips capable of 64 bit addressing 
	 *  and/or hardware phase mismatch, since only such chips 
	 *  seem to support those IO registers.
	 */
	if (np->features & (FE_DAC|FE_NOPM)) {
		OUTB(np, nc_ccntl0, np->rv_ccntl0);
		OUTB(np, nc_ccntl1, np->rv_ccntl1);
	}

#if	SYM_CONF_DMA_ADDRESSING_MODE == 2
	/*
	 *  Set up scratch C and DRS IO registers to map the 32 bit 
	 *  DMA address range our data structures are located in.
	 */
	if (use_dac(np)) {
		np->dmap_bah[0] = 0;	/* ??? */
		OUTL(np, nc_scrx[0], np->dmap_bah[0]);
		OUTL(np, nc_drs, np->dmap_bah[0]);
	}
#endif

	/*
	 *  If phase mismatch handled by scripts (895A/896/1010),
	 *  set PM jump addresses.
	 */
	if (np->features & FE_NOPM) {
		OUTL(np, nc_pmjad1, SCRIPTB_BA(np, pm_handle));
		OUTL(np, nc_pmjad2, SCRIPTB_BA(np, pm_handle));
	}

	/*
	 *  Enable GPIO0 pin for writing if LED support from SCRIPTS.
	 *  Also set GPIO5 and clear GPIO6 if hardware LED control.
	 */
	if (np->features & FE_LED0)
		OUTB(np, nc_gpcntl, INB(np, nc_gpcntl) & ~0x01);
	else if (np->features & FE_LEDC)
		OUTB(np, nc_gpcntl, (INB(np, nc_gpcntl) & ~0x41) | 0x20);

	/*
	 *  enable ints
	 */
	OUTW(np, nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR);
	OUTB(np, nc_dien , MDPE|BF|SSI|SIR|IID);

	/*
	 *  For 895/6 enable SBMC interrupt and save current SCSI bus mode.
	 *  Try to eat the spurious SBMC interrupt that may occur when 
	 *  we reset the chip but not the SCSI BUS (at initialization).
	 */
	if (np->features & (FE_ULTRA2|FE_ULTRA3)) {
		OUTONW(np, nc_sien, SBMC);
		if (reason == 0) {
			INB(np, nc_mbox1);
			mdelay(100);
			INW(np, nc_sist);	/* eat the spurious SBMC */
		}
		np->scsi_mode = INB(np, nc_stest4) & SMODE;
	}

	/*
	 *  Fill in target structure.
	 *  Reinitialize usrsync.
	 *  Reinitialize usrwide.
	 *  Prepare sync negotiation according to actual SCSI bus mode.
	 */
	for (i=0;i<SYM_CONF_MAX_TARGET;i++) {
		struct sym_tcb *tp = &np->target[i];

		tp->to_reset  = 0;
		tp->head.sval = 0;
		tp->head.wval = np->rv_scntl3;
		tp->head.uval = 0;
		if (tp->lun0p)
			tp->lun0p->to_clear = 0;
		if (tp->lunmp) {
			int ln;

			for (ln = 1; ln < SYM_CONF_MAX_LUN; ln++)
				if (tp->lunmp[ln])
					tp->lunmp[ln]->to_clear = 0;
		}
	}

	/*
	 *  Download SCSI SCRIPTS to on-chip RAM if present,
	 *  and start script processor.
	 *  We do the download preferently from the CPU.
	 *  For platforms that may not support PCI memory mapping,
	 *  we use simple SCRIPTS that performs MEMORY MOVEs.
	 */
	phys = SCRIPTA_BA(np, init);
	if (np->ram_ba) {
		if (sym_verbose >= 2)
			printf("%s: Downloading SCSI SCRIPTS.\n", sym_name(np));
		memcpy_toio(np->s.ramaddr, np->scripta0, np->scripta_sz);
		if (np->features & FE_RAM8K) {
			/* second script bank lives in the upper 4K of the
			   8K on-chip RAM; start from the 64 bit entry */
			memcpy_toio(np->s.ramaddr + 4096, np->scriptb0, np->scriptb_sz);
			phys = scr_to_cpu(np->scr_ram_seg);
			OUTL(np, nc_mmws, phys);
			OUTL(np, nc_mmrs, phys);
			OUTL(np, nc_sfs,  phys);
			phys = SCRIPTB_BA(np, start64);
		}
	}

	np->istat_sem = 0;

	OUTL(np, nc_dsa, np->hcb_ba);
	OUTL_DSP(np, phys);

	/*
	 *  Notify the XPT about the RESET condition.
	 */
	if (reason != 0)
		sym_xpt_async_bus_reset(np);
}

/*
 *  Switch trans mode for current job and its target.
 */
static void sym_settrans(struct sym_hcb *np, int target, u_char opts, u_char ofs,
			 u_char per, u_char wide, u_char div, u_char fak)
{
	SYM_QUEHEAD *qp;
	u_char sval, wval, uval;
	struct sym_tcb *tp = &np->target[target];

	assert(target == (INB(np, nc_sdid) & 0x0f));

	sval = tp->head.sval;
	wval = tp->head.wval;
	uval = tp->head.uval;

#if 0
	printf("XXXX sval=%x wval=%x uval=%x (%x)\n", 
		sval, wval, uval, np->rv_scntl3);
#endif
	/*
	 *  Set the offset.
	 */
	/* C10 chips have a 6 bit offset field, older chips 5 bits */
	if (!(np->features & FE_C10))
		sval = (sval & ~0x1f) | ofs;
	else
		sval = (sval & ~0x3f) | ofs;

	/*
	 *  Set the sync divisor and extra clock factor.
	 *  On non-C10 chips the extra clock factor goes into SXFER;
	 *  on C10 chips it is encoded as XCLKH/XCLKS bits in uval.
	 */
	if (ofs != 0) {
		wval = (wval & ~0x70) | ((div+1) << 4);
		if (!(np->features & FE_C10))
			sval = (sval & ~0xe0) | (fak << 5);
		else {
			uval = uval & ~(XCLKH_ST|XCLKH_DT|XCLKS_ST|XCLKS_DT);
			if (fak >= 1)
				uval |= (XCLKH_ST|XCLKH_DT);
			if (fak >= 2)
				uval |= (XCLKS_ST|XCLKS_DT);
		}
	}

	/*
	 *  Set the bus width.
	 */
	wval = wval & ~EWS;
	if (wide != 0)
		wval |= EWS;

	/*
	 *  Set misc. ultra enable bits.
	 */
	if (np->features & FE_C10) {
		uval = uval & ~(U3EN|AIPCKEN);
		if (opts)	{
			assert(np->features & FE_U3EN);
			uval |= U3EN;
		}
	} else {
		wval = wval & ~ULTRA;
		if (per <= 12)
			wval |= ULTRA;
	}

	/*
	 *  Stop there if sync parameters are unchanged.
	 */
	if (tp->head.sval == sval && 
	    tp->head.wval == wval &&
	    tp->head.uval == uval)
		return;
	tp->head.sval = sval;
	tp->head.wval = wval;
	tp->head.uval = uval;

	/*
	 *  Disable extended Sreq/Sack filtering if per < 50.
	 *  Not supported on the C1010.
	 */
	if (per < 50 && !(np->features & FE_C10))
		OUTOFFB(np, nc_stest2, EXT);

	/*
	 *  set actual value and sync_status
	 */
	OUTB(np, nc_sxfer,  tp->head.sval);
	OUTB(np, nc_scntl3, tp->head.wval);

	if (np->features & FE_C10) {
		OUTB(np, nc_scntl4, tp->head.uval);
	}

	/*
	 *  patch ALL busy ccbs of this target.
	 */
	FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
		struct sym_ccb *cp;
		cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		if (cp->target != target)
			continue;
		cp->phys.select.sel_scntl3 = tp->head.wval;
		cp->phys.select.sel_sxfer  = tp->head.sval;
		if (np->features & FE_C10) {
			cp->phys.select.sel_scntl4 = tp->head.uval;
		}
	}
}

/*
 *  Print the current transfer agreement via the SPI transport
 *  class, but only when it differs from what was last printed
 *  (tracked in tp->tprint) to avoid log spam.
 */
static void sym_announce_transfer_rate(struct sym_tcb *tp)
{
	struct scsi_target *starget = tp->starget;

	if (tp->tprint.period != spi_period(starget) ||
	    tp->tprint.offset != spi_offset(starget) ||
	    tp->tprint.width != spi_width(starget) ||
	    tp->tprint.iu != spi_iu(starget) ||
	    tp->tprint.dt != spi_dt(starget) ||
	    tp->tprint.qas != spi_qas(starget) ||
	    !tp->tprint.check_nego) {
		tp->tprint.period = spi_period(starget);
		tp->tprint.offset = spi_offset(starget);
		tp->tprint.width = spi_width(starget);
		tp->tprint.iu = spi_iu(starget);
		tp->tprint.dt = spi_dt(starget);
		tp->tprint.qas = spi_qas(starget);
		tp->tprint.check_nego = 1;

		spi_display_xfer_agreement(starget);
	}
}

/*
 *  We received a WDTR.
 *  Let everything be aware of the changes.
 *  WDTR resets sync parameters, so offset/period go back to zero.
 */
static void sym_setwide(struct sym_hcb *np, int target, u_char wide)
{
	struct sym_tcb *tp = &np->target[target];
	struct scsi_target *starget = tp->starget;

	sym_settrans(np, target, 0, 0, 0, wide, 0, 0);

	if (wide)
		tp->tgoal.renego = NS_WIDE;
	else
		tp->tgoal.renego = 0;
	tp->tgoal.check_nego = 0;
	tp->tgoal.width = wide;
	spi_offset(starget) = 0;
	spi_period(starget) = 0;
	spi_width(starget) = wide;
	spi_iu(starget) = 0;
	spi_dt(starget) = 0;
	spi_qas(starget) = 0;

	if (sym_verbose >= 3)
		sym_announce_transfer_rate(tp);
}

/*
 *  We received a SDTR.
 *  Let everything be aware of the changes.
 */
static void
sym_setsync(struct sym_hcb *np, int target,
            u_char ofs, u_char per, u_char div, u_char fak)
{
	struct sym_tcb *tp = &np->target[target];
	struct scsi_target *starget = tp->starget;
	/* keep the currently agreed bus width (EWS bit) */
	u_char wide = (tp->head.wval & EWS) ? BUS_16_BIT : BUS_8_BIT;

	sym_settrans(np, target, 0, ofs, per, wide, div, fak);

	if (wide)
		tp->tgoal.renego = NS_WIDE;
	else if (ofs)
		tp->tgoal.renego = NS_SYNC;
	else
		tp->tgoal.renego = 0;
	spi_period(starget) = per;
	spi_offset(starget) = ofs;
	spi_iu(starget) = spi_dt(starget) = spi_qas(starget) = 0;

	/* only adopt the goal when no PPR-only option is wanted */
	if (!tp->tgoal.dt && !tp->tgoal.iu && !tp->tgoal.qas) {
		tp->tgoal.period = per;
		tp->tgoal.offset = ofs;
		tp->tgoal.check_nego = 0;
	}

	sym_announce_transfer_rate(tp);
}

/*
 *  We received a PPR.
 *  Let everything be aware of the changes.
 */
static void 
sym_setpprot(struct sym_hcb *np, int target, u_char opts, u_char ofs,
             u_char per, u_char wide, u_char div, u_char fak)
{
	struct sym_tcb *tp = &np->target[target];
	struct scsi_target *starget = tp->starget;

	sym_settrans(np, target, opts, ofs, per, wide, div, fak);

	if (wide || ofs)
		tp->tgoal.renego = NS_PPR;
	else
		tp->tgoal.renego = 0;
	spi_width(starget) = tp->tgoal.width = wide;
	spi_period(starget) = tp->tgoal.period = per;
	spi_offset(starget) = tp->tgoal.offset = ofs;
	spi_iu(starget) = tp->tgoal.iu = !!(opts & PPR_OPT_IU);
	spi_dt(starget) = tp->tgoal.dt = !!(opts & PPR_OPT_DT);
	spi_qas(starget) = tp->tgoal.qas = !!(opts & PPR_OPT_QAS);
	tp->tgoal.check_nego = 0;

	sym_announce_transfer_rate(tp);
}

/*
 *  generic recovery from scsi interrupt
 *
 *  The doc says that when the chip gets an SCSI interrupt,
 *  it tries to stop in an orderly fashion, by completing 
 *  an instruction fetch that had started or by flushing 
 *  the DMA fifo for a write to memory that was executing.
 *  Such a fashion is not enough to know if the instruction 
 *  that was just before the current DSP value has been 
 *  executed or not.
 *
 *  There are some small SCRIPTS sections that deal with 
 *  the start queue and the done queue that may break any 
 *  assomption from the C code if we are interrupted 
 *  inside, so we reset if this happens. Btw, since these 
 *  SCRIPTS sections are executed while the SCRIPTS hasn't 
 *  started SCSI operations, it is very unlikely to happen.
 *
 *  All the driver data structures are supposed to be 
 *  allocated from the same 4 GB memory window, so there 
 *  is a 1 to 1 relationship between DSA and driver data 
 *  structures. Since we are careful :) to invalidate the 
 *  DSA when we complete a command or when the SCRIPTS 
 *  pushes a DSA into a queue, we can trust it when it 
 *  points to a CCB.
 */
static void sym_recover_scsi_int (struct sym_hcb *np, u_char hsts)
{
	u32	dsp	= INL(np, nc_dsp);
	u32	dsa	= INL(np, nc_dsa);
	struct sym_ccb *cp	= sym_ccb_from_dsa(np, dsa);

	/*
	 *  If we haven't been interrupted inside the SCRIPTS 
	 *  critical pathes, we can safely restart the SCRIPTS 
	 *  and trust the DSA value if it matches a CCB.
	 *  (each range below is one of those critical sections)
	 */
	if ((!(dsp > SCRIPTA_BA(np, getjob_begin) &&
	       dsp < SCRIPTA_BA(np, getjob_end) + 1)) &&
	    (!(dsp > SCRIPTA_BA(np, ungetjob) &&
	       dsp < SCRIPTA_BA(np, reselect) + 1)) &&
	    (!(dsp > SCRIPTB_BA(np, sel_for_abort) &&
	       dsp < SCRIPTB_BA(np, sel_for_abort_1) + 1)) &&
	    (!(dsp > SCRIPTA_BA(np, done) &&
	       dsp < SCRIPTA_BA(np, done_end) + 1))) {
		OUTB(np, nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo  */
		OUTB(np, nc_stest3, TE|CSF);		/* clear scsi fifo */
		/*
		 *  If we have a CCB, let the SCRIPTS call us back for 
		 *  the handling of the error with SCRATCHA filled with 
		 *  STARTPOS. This way, we will be able to freeze the 
		 *  device queue and requeue awaiting IOs.
		 */
		if (cp) {
			cp->host_status = hsts;
			OUTL_DSP(np, SCRIPTA_BA(np, complete_error));
		}
		/*
		 *  Otherwise just restart the SCRIPTS.
		 */
		else {
			/* invalidate DSA so stale values are never trusted */
			OUTL(np, nc_dsa, 0xffffff);
			OUTL_DSP(np, SCRIPTA_BA(np, start));
		}
	}
	else
		goto reset_all;

	return;

reset_all:
	sym_start_reset(np);
}

/*
 *  chip exception handler for selection timeout
 *
 *  Only attempt soft recovery when the DSP sits exactly at the
 *  instruction following wf_sel_done; anywhere else the chip
 *  state is unknown and we reset.
 */
static void sym_int_sto (struct sym_hcb *np)
{
	u32 dsp	= INL(np, nc_dsp);

	if (DEBUG_FLAGS & DEBUG_TINY) printf ("T");

	if (dsp == SCRIPTA_BA(np, wf_sel_done) + 8)
		sym_recover_scsi_int(np, HS_SEL_TIMEOUT);
	else
		sym_start_reset(np);
}

/*
 *  chip exception handler for unexpected disconnect
 */
static void sym_int_udc (struct sym_hcb *np)
{
	printf ("%s: unexpected disconnect\n", sym_name(np));
	sym_recover_scsi_int(np, HS_UNEXPECTED);
}

/*
 *  chip exception handler for SCSI bus mode change
 *
 *  spi2-r12 11.2.3 says a transceiver mode change must 
 *  generate a reset event and a device that detects a reset 
 *  event shall initiate a hard reset. It says also that a 
 *  device that detects a mode change shall set data transfer 
 *  mode to eight bit asynchronous, etc...
 *  So, just reinitializing all except chip should be enough.
 */
static void sym_int_sbmc(struct Scsi_Host *shost)
{
	struct sym_hcb *np = sym_get_hcb(shost);
	u_char scsi_mode = INB(np, nc_stest4) & SMODE;

	/*
	 *  Notify user.
	 */
	printf("%s: SCSI BUS mode change from %s to %s.\n", sym_name(np),
		sym_scsi_bus_mode(np->scsi_mode), sym_scsi_bus_mode(scsi_mode));

	/*
	 *  Should suspend command processing for a few seconds and 
	 *  reinitialize all except the chip.
	 */
	sym_start_up(shost, 2);
}

/*
 *  chip exception handler for SCSI parity error.
 *
 *  When the chip detects a SCSI parity error and is 
 *  currently executing a (CH)MOV instruction, it does 
 *  not interrupt immediately, but tries to finish the 
 *  transfer of the current scatter entry before 
 *  interrupting. The following situations may occur:
 *
 *  - The complete scatter entry has been transferred 
 *    without the device having changed phase.
 *    The chip will then interrupt with the DSP pointing 
 *    to the instruction that follows the MOV.
 *
 *  - A phase mismatch occurs before the MOV finished 
 *    and phase errors are to be handled by the C code.
 *    The chip will then interrupt with both PAR and MA 
 *    conditions set.
 *
 *  - A phase mismatch occurs before the MOV finished and 
 *    phase errors are to be handled by SCRIPTS.
 *    The chip will load the DSP with the phase mismatch 
 *    JUMP address and interrupt the host processor.
 */
static void sym_int_par (struct sym_hcb *np, u_short sist)
{
	u_char	hsts	= INB(np, HS_PRT);
	u32	dsp	= INL(np, nc_dsp);
	u32	dbc	= INL(np, nc_dbc);
	u32	dsa	= INL(np, nc_dsa);
	u_char	sbcl	= INB(np, nc_sbcl);
	u_char	cmd	= dbc >> 24;	/* interrupted SCRIPTS opcode byte */
	int phase	= cmd & 7;	/* SCSI phase encoded in the opcode */
	struct sym_ccb *cp	= sym_ccb_from_dsa(np, dsa);

	if (printk_ratelimit())
		printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n",
			sym_name(np), hsts, dbc, sbcl);

	/*
	 *  Check that the chip is connected to the SCSI BUS.
	 */
	if (!(INB(np, nc_scntl1) & ISCON)) {
		sym_recover_scsi_int(np, HS_UNEXPECTED);
		return;
	}

	/*
	 *  If the nexus is not clearly identified, reset the bus.
	 *  We will try to do better later.
	 */
	if (!cp)
		goto reset_all;

	/*
	 *  Check instruction was a MOV, direction was INPUT and 
	 *  ATN is asserted.
	 */
	if ((cmd & 0xc0) || !(phase & 1) || !(sbcl & 0x8))
		goto reset_all;

	/*
	 *  Keep track of the parity error.
	 */
	OUTONB(np, HF_PRT, HF_EXT_ERR);
	cp->xerr_status |= XE_PARITY_ERR;

	/*
	 *  Prepare the message to send to the device.
	 *  (phase 7 is MSG IN, which uses MSG PARITY ERROR)
	 */
	np->msgout[0] = (phase == 7) ? M_PARITY : M_ID_ERROR;

	/*
	 *  If the old phase was DATA IN phase, we have to deal with
	 *  the 3 situations described above.
	 *  For other input phases (MSG IN and STATUS), the device 
	 *  must resend the whole thing that failed parity checking 
	 *  or signal error. So, jumping to dispatcher should be OK.
	 */
	if (phase == 1 || phase == 5) {
		/* Phase mismatch handled by SCRIPTS */
		if (dsp == SCRIPTB_BA(np, pm_handle))
			OUTL_DSP(np, dsp);
		/* Phase mismatch handled by the C code */
		else if (sist & MA)
			sym_int_ma (np);
		/* No phase mismatch occurred */
		else {
			sym_set_script_dp (np, cp, dsp);
			OUTL_DSP(np, SCRIPTA_BA(np, dispatch));
		}
	}
	else if (phase == 7)	/* We definitely cannot handle parity errors */
#if 1				/* in message-in phase due to the relection  */
		goto reset_all; /* path and various message anticipations.   */
#else
		OUTL_DSP(np, SCRIPTA_BA(np, clrack));
#endif
	else
		OUTL_DSP(np, SCRIPTA_BA(np, dispatch));
	return;

reset_all:
	sym_start_reset(np);
	return;
}

/*
 *  chip exception handler for phase errors.
 *
 *  We have to construct a new transfer descriptor,
 *  to transfer the rest of the current block.
 */
static void sym_int_ma (struct sym_hcb *np)
{
	u32	dbc;
	u32	rest;
	u32	dsp;
	u32	dsa;
	u32	nxtdsp;
	u32	*vdsp;
	u32	oadr, olen;
	u32	*tblp;
	u32	newcmd;
	u_int	delta;
	u_char	cmd;
	u_char	hflags, hflags0;
	struct	sym_pmc *pm;
	struct	sym_ccb *cp;

	dsp	= INL(np, nc_dsp);
	dbc	= INL(np, nc_dbc);
	dsa	= INL(np, nc_dsa);

	cmd	= dbc >> 24;		/* interrupted SCRIPTS opcode byte */
	rest	= dbc & 0xffffff;	/* untransferred byte count */
	delta	= 0;

	/*
	 *  locate matching cp if any.
	 */
	cp = sym_ccb_from_dsa(np, dsa);

	/*
	 *  Donnot take into account dma fifo and various buffers in 
	 *  INPUT phase since the chip flushes everything before 
	 *  raising the MA interrupt for interrupted INPUT phases.
	 *  For DATA IN phase, we will check for the SWIDE later.
	 */
	if ((cmd & 7) != 1 && (cmd & 7) != 5) {
		u_char ss0, ss2;

		if (np->features & FE_DFBC)
			delta = INW(np, nc_dfbc);
		else {
			u32 dfifo;

			/*
			 *  Read DFIFO, CTEST[4-6] using 1 PCI bus ownership.
			 */
			dfifo = INL(np, nc_dfifo);

			/*
			 *  Calculate remaining bytes in DMA fifo.
			 *  (CTEST5 = dfifo >> 16)
			 */
			if (dfifo & (DFS << 16))
				/* large fifo: 10 bit counter */
				delta = ((((dfifo >> 8) & 0x300) |
				          (dfifo & 0xff)) - rest) & 0x3ff;
			else
				/* small fifo: 7 bit counter */
				delta = ((dfifo & 0xff) - rest) & 0x7f;
		}

		/*
		 *  The data in the dma fifo has not been transferred to 
		 *  the target -> add the amount to the rest 
		 *  and clear the data.
		 *  Check the sstat2 register in case of wide transfer.
		 */
		rest += delta;
		ss0  = INB(np, nc_sstat0);
		if (ss0 & OLF)
			rest++;
		if (!(np->features & FE_C10))
			if (ss0 & ORF)
				rest++;
		if (cp && (cp->phys.select.sel_scntl3 & EWS)) {
			ss2 = INB(np, nc_sstat2);
			if (ss2 & OLF1)
				rest++;
			if (!(np->features & FE_C10))
				if (ss2 & ORF1)
					rest++;
		}

		/*
		 *  Clear fifos.
		 */
		OUTB(np, nc_ctest3, np->rv_ctest3 | CLF);	/* dma fifo  */
		OUTB(np, nc_stest3, TE|CSF);		/* scsi fifo */
	}

	/*
	 *  log the information
	 */
	if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE))
		printf ("P%x%x RL=%d D=%d ", cmd&7, INB(np, nc_sbcl)&7,
			(unsigned) rest, (unsigned) delta);

	/*
	 *  try to find the interrupted script command,
	 *  and the address at which to continue.
	 *  (DSP points past the interrupted 8 byte instruction,
	 *  hence the -8 when computing the virtual address)
	 */
	vdsp	= NULL;
	nxtdsp	= 0;
	if	(dsp >  np->scripta_ba &&
		 dsp <= np->scripta_ba + np->scripta_sz) {
		vdsp = (u32 *)((char*)np->scripta0 + (dsp-np->scripta_ba-8));
		nxtdsp = dsp;
	}
	else if (dsp >  np->scriptb_ba &&
		 dsp <= np->scriptb_ba + np->scriptb_sz) {
		vdsp = (u32 *)((char*)np->scriptb0 + (dsp-np->scriptb_ba-8));
		nxtdsp = dsp;
	}

	/*
	 *  log the information
	 */
	if (DEBUG_FLAGS & DEBUG_PHASE) {
		printf ("\nCP=%p DSP=%x NXT=%x VDSP=%p CMD=%x ",
			cp, (unsigned)dsp, (unsigned)nxtdsp, vdsp, cmd);
	}

	if (!vdsp) {
		printf ("%s: interrupted SCRIPT address not found.\n", 
			sym_name (np));
		goto reset_all;
	}

	if (!cp) {
		printf ("%s: SCSI phase error fixup: CCB already dequeued.\n", 
			sym_name (np));
		goto reset_all;
	}

	/*
	 *  get old startaddress and old length.
	 */
	oadr = scr_to_cpu(vdsp[1]);

	if (cmd & 0x10) {	/* Table indirect */
		tblp = (u32 *) ((char*) &cp->phys + oadr);
		olen = scr_to_cpu(tblp[0]);
		oadr = scr_to_cpu(tblp[1]);
	} else {
		tblp = (u32 *) 0;
		olen = scr_to_cpu(vdsp[0]) & 0xffffff;
	}

	if (DEBUG_FLAGS & DEBUG_PHASE) {
		printf ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n",
			(unsigned) (scr_to_cpu(vdsp[0]) >> 24),
			tblp,
			(unsigned) olen,
			(unsigned) oadr);
	}

	/*
	 *  check cmd against assumed interrupted script command.
	 *  If dt data phase, the MOVE instruction hasn't bit 4 of 
	 *  the phase.
	 */
	if (((cmd & 2) ? cmd : (cmd & ~4)) != (scr_to_cpu(vdsp[0]) >> 24)) {
		sym_print_addr(cp->cmd,
			"internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n",
			cmd, scr_to_cpu(vdsp[0]) >> 24);

		goto reset_all;
	}

	/*
	 *  if old phase not dataphase, leave here.
	 */
	if (cmd & 2) {
		sym_print_addr(cp->cmd,
			"phase change %x-%x %d@%08x resid=%d.\n",
			cmd&7, INB(np, nc_sbcl)&7, (unsigned)olen,
			(unsigned)oadr, (unsigned)rest);
		goto unexpected_phase;
	}

	/*
	 *  Choose the correct PM save area.
	 *
	 *  Look at the PM_SAVE SCRIPT if you want to understand 
	 *  this stuff. The equivalent code is implemented in 
	 *  SCRIPTS for the 895A, 896 and 1010 that are able to 
	 *  handle PM from the SCRIPTS processor.
	 */
	hflags0 = INB(np, HF_PRT);
	hflags = hflags0;

	if (hflags & (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED)) {
		/* a PM context is already active: resume at its return
		   address and, if the DP was saved, swap the active area */
		if (hflags & HF_IN_PM0)
			nxtdsp = scr_to_cpu(cp->phys.pm0.ret);
		else if	(hflags & HF_IN_PM1)
			nxtdsp = scr_to_cpu(cp->phys.pm1.ret);

		if (hflags & HF_DP_SAVED)
			hflags ^= HF_ACT_PM;
	}

	if (!(hflags & HF_ACT_PM)) {
		pm = &cp->phys.pm0;
		newcmd = SCRIPTA_BA(np, pm0_data);
	}
	else {
		pm = &cp->phys.pm1;
		newcmd = SCRIPTA_BA(np, pm1_data);
	}

	hflags &= ~(HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED);
	if (hflags != hflags0)
		OUTB(np, HF_PRT, hflags);

	/*
	 *  fillin the phase mismatch context
	 *  (remaining bytes start where the interrupted move stopped)
	 */
	pm->sg.addr = cpu_to_scr(oadr + olen - rest);
	pm->sg.size = cpu_to_scr(rest);
	pm->ret     = cpu_to_scr(nxtdsp);

	/*
	 *  If we have a SWIDE,
	 *  - prepare the address to write the SWIDE from SCRIPTS,
	 *  - compute the SCRIPTS address to restart from,
	 *  - move current data pointer context by one byte.
	 */
	nxtdsp = SCRIPTA_BA(np, dispatch);
	if ((cmd & 7) == 1 && cp && (cp->phys.select.sel_scntl3 & EWS) &&
	    (INB(np, nc_scntl2) & WSR)) {
		u32 tmp;

		/*
		 *  Set up the table indirect for the MOVE
		 *  of the residual byte and adjust the data 
		 *  pointer context.
		 */
		tmp = scr_to_cpu(pm->sg.addr);
		cp->phys.wresid.addr = cpu_to_scr(tmp);
		pm->sg.addr = cpu_to_scr(tmp + 1);
		tmp = scr_to_cpu(pm->sg.size);
		cp->phys.wresid.size = cpu_to_scr((tmp&0xff000000) | 1);
		pm->sg.size = cpu_to_scr(tmp - 1);

		/*
		 *  If only the residual byte is to be moved, 
		 *  no PM context is needed.
		 */
		if ((tmp&0xffffff) == 1)
			newcmd = pm->ret;

		/*
		 *  Prepare the address of SCRIPTS that will 
		 *  move the residual byte to memory.
		 */
		nxtdsp = SCRIPTB_BA(np, wsr_ma_helper);
	}

	if (DEBUG_FLAGS & DEBUG_PHASE) {
		sym_print_addr(cp->cmd, "PM %x %x %x / %x %x %x.\n",
			hflags0, hflags, newcmd,
			(unsigned)scr_to_cpu(pm->sg.addr),
			(unsigned)scr_to_cpu(pm->sg.size),
			(unsigned)scr_to_cpu(pm->ret));
	}

	/*
	 *  Restart the SCRIPTS processor.
*/ sym_set_script_dp (np, cp, newcmd); OUTL_DSP(np, nxtdsp); return; /* * Unexpected phase changes that occurs when the current phase * is not a DATA IN or DATA OUT phase are due to error conditions. * Such event may only happen when the SCRIPTS is using a * multibyte SCSI MOVE. * * Phase change Some possible cause * * COMMAND --> MSG IN SCSI parity error detected by target. * COMMAND --> STATUS Bad command or refused by target. * MSG OUT --> MSG IN Message rejected by target. * MSG OUT --> COMMAND Bogus target that discards extended * negotiation messages. * * The code below does not care of the new phase and so * trusts the target. Why to annoy it ? * If the interrupted phase is COMMAND phase, we restart at * dispatcher. * If a target does not get all the messages after selection, * the code assumes blindly that the target discards extended * messages and clears the negotiation status. * If the target does not want all our response to negotiation, * we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids * bloat for such a should_not_happen situation). * In all other situation, we reset the BUS. * Are these assumptions reasonable ? (Wait and see ...) */ unexpected_phase: dsp -= 8; nxtdsp = 0; switch (cmd & 7) { case 2: /* COMMAND phase */ nxtdsp = SCRIPTA_BA(np, dispatch); break; #if 0 case 3: /* STATUS phase */ nxtdsp = SCRIPTA_BA(np, dispatch); break; #endif case 6: /* MSG OUT phase */ /* * If the device may want to use untagged when we want * tagged, we prepare an IDENTIFY without disc. granted, * since we will not be able to handle reselect. * Otherwise, we just don't care. 
*/ if (dsp == SCRIPTA_BA(np, send_ident)) { if (cp->tag != NO_TAG && olen - rest <= 3) { cp->host_status = HS_BUSY; np->msgout[0] = IDENTIFY(0, cp->lun); nxtdsp = SCRIPTB_BA(np, ident_break_atn); } else nxtdsp = SCRIPTB_BA(np, ident_break); } else if (dsp == SCRIPTB_BA(np, send_wdtr) || dsp == SCRIPTB_BA(np, send_sdtr) || dsp == SCRIPTB_BA(np, send_ppr)) { nxtdsp = SCRIPTB_BA(np, nego_bad_phase); if (dsp == SCRIPTB_BA(np, send_ppr)) { struct scsi_device *dev = cp->cmd->device; dev->ppr = 0; } } break; #if 0 case 7: /* MSG IN phase */ nxtdsp = SCRIPTA_BA(np, clrack); break; #endif } if (nxtdsp) { OUTL_DSP(np, nxtdsp); return; } reset_all: sym_start_reset(np); } /* * chip interrupt handler * * In normal situations, interrupt conditions occur one at * a time. But when something bad happens on the SCSI BUS, * the chip may raise several interrupt flags before * stopping and interrupting the CPU. The additionnal * interrupt flags are stacked in some extra registers * after the SIP and/or DIP flag has been raised in the * ISTAT. After the CPU has read the interrupt condition * flag from SIST or DSTAT, the chip unstacks the other * interrupt flags and sets the corresponding bits in * SIST or DSTAT. Since the chip starts stacking once the * SIP or DIP flag is set, there is a small window of time * where the stacking does not occur. * * Typically, multiple interrupt conditions may happen in * the following situations: * * - SCSI parity error + Phase mismatch (PAR|MA) * When an parity error is detected in input phase * and the device switches to msg-in phase inside a * block MOV. * - SCSI parity error + Unexpected disconnect (PAR|UDC) * When a stupid device does not want to handle the * recovery of an SCSI parity error. * - Some combinations of STO, PAR, UDC, ... * When using non compliant SCSI stuff, when user is * doing non compliant hot tampering on the BUS, when * something really bad happens to a device, etc ... 
 * The heuristic suggested by SYMBIOS to handle
 * multiple interrupts is to try unstacking all
 * interrupts conditions and to handle them on some
 * priority based on error severity.
 * This will work when the unstacking has been
 * successful, but we cannot be 100 % sure of that,
 * since the CPU may have been faster to unstack than
 * the chip is able to stack. Hmmm ... But it seems that
 * such a situation is very unlikely to happen.
 *
 * If this happen, for example STO caught by the CPU
 * then UDC happenning before the CPU have restarted
 * the SCRIPTS, the driver may wrongly complete the
 * same command on UDC, since the SCRIPTS didn't restart
 * and the DSA still points to the same command.
 * We avoid this situation by setting the DSA to an
 * invalid value when the CCB is completed and before
 * restarting the SCRIPTS.
 *
 * Another issue is that we need some section of our
 * recovery procedures to be somehow uninterruptible but
 * the SCRIPTS processor does not provides such a
 * feature. For this reason, we handle recovery preferently
 * from the C code and check against some SCRIPTS critical
 * sections from the C code.
 *
 * Hopefully, the interrupt handling of the driver is now
 * able to resist to weird BUS error conditions, but donnot
 * ask me for any guarantee that it will never fail. :-)
 * Use at your own decision and risk.
 */

/*
 * sym_interrupt - top level chip interrupt service routine
 * @shost: SCSI host owning this controller
 *
 * Acknowledges "interrupt on the fly" completions, unstacks every
 * pending SCSI (SIST) and DMA (DSTAT) interrupt condition, then
 * dispatches each condition to its handler, most benign first.
 * Returns IRQ_HANDLED when a condition was serviced, IRQ_NONE when
 * the interrupt was not ours (or the device is gone).
 */
irqreturn_t sym_interrupt(struct Scsi_Host *shost)
{
    struct sym_data *sym_data = shost_priv(shost);
    struct sym_hcb *np = sym_data->ncb;
    struct pci_dev *pdev = sym_data->pdev;
    u_char istat, istatc;
    u_char dstat;
    u_short sist;

    /*
     * interrupt on the fly ?
     * (SCRIPTS may still be running)
     *
     * A `dummy read' is needed to ensure that the
     * clear of the INTF flag reaches the device
     * and that posted writes are flushed to memory
     * before the scanning of the DONE queue.
     * Note that SCRIPTS also (dummy) read to memory
     * prior to deliver the INTF interrupt condition.
     */
    istat = INB(np, nc_istat);
    if (istat & INTF) {
        OUTB(np, nc_istat, (istat & SIGP) | INTF | np->istat_sem);
        istat |= INB(np, nc_istat);        /* DUMMY READ */
        if (DEBUG_FLAGS & DEBUG_TINY)
            printf ("F ");
        sym_wakeup_done(np);
    }

    if (!(istat & (SIP|DIP)))
        return (istat & INTF) ? IRQ_HANDLED : IRQ_NONE;

#if 0    /* We should never get this one */
    if (istat & CABRT)
        OUTB(np, nc_istat, CABRT);
#endif

    /*
     * PAR and MA interrupts may occur at the same time,
     * and we need to know of both in order to handle
     * this situation properly. We try to unstack SCSI
     * interrupts for that reason. BTW, I dislike a LOT
     * such a loop inside the interrupt routine.
     * Even if DMA interrupt stacking is very unlikely to
     * happen, we also try unstacking these ones, since
     * this has no performance impact.
     */
    sist = 0;
    dstat = 0;
    istatc = istat;
    do {
        if (istatc & SIP)
            sist |= INW(np, nc_sist);
        if (istatc & DIP)
            dstat |= INB(np, nc_dstat);
        istatc = INB(np, nc_istat);
        istat |= istatc;

        /* Prevent deadlock waiting on a condition that may
         * never clear.  All-ones reads suggest the device has
         * dropped off the bus (e.g. surprise removal). */
        if (unlikely(sist == 0xffff && dstat == 0xff)) {
            if (pci_channel_offline(pdev))
                return IRQ_NONE;
        }
    } while (istatc & (SIP|DIP));

    if (DEBUG_FLAGS & DEBUG_TINY)
        printf ("<%d|%x:%x|%x:%x>",
            (int)INB(np, nc_scr0),
            dstat,sist,
            (unsigned)INL(np, nc_dsp),
            (unsigned)INL(np, nc_dbc));
    /*
     * On paper, a memory read barrier may be needed here to
     * prevent out of order LOADs by the CPU from having
     * prefetched stale data prior to DMA having occurred.
     * And since we are paranoid ... :)
     */
    MEMORY_READ_BARRIER();

    /*
     * First, interrupts we want to service cleanly.
     *
     * Phase mismatch (MA) is the most frequent interrupt
     * for chip earlier than the 896 and so we have to service
     * it as quickly as possible.
     * A SCSI parity error (PAR) may be combined with a phase
     * mismatch condition (MA).
     * Programmed interrupts (SIR) are used to call the C code
     * from SCRIPTS.
     * The single step interrupt (SSI) is not used in this
     * driver.
     */
    if (!(sist & (STO|GEN|HTH|SGE|UDC|SBMC|RST)) &&
        !(dstat & (MDPE|BF|ABRT|IID))) {
        if (sist & PAR)
            sym_int_par (np, sist);
        else if (sist & MA)
            sym_int_ma (np);
        else if (dstat & SIR)
            sym_int_sir(np);
        else if (dstat & SSI)
            OUTONB_STD();
        else
            goto unknown_int;
        return IRQ_HANDLED;
    }

    /*
     * Now, interrupts that donnot happen in normal
     * situations and that we may need to recover from.
     *
     * On SCSI RESET (RST), we reset everything.
     * On SCSI BUS MODE CHANGE (SBMC), we complete all
     * active CCBs with RESET status, prepare all devices
     * for negotiating again and restart the SCRIPTS.
     * On STO and UDC, we complete the CCB with the corres-
     * ponding status and restart the SCRIPTS.
     */
    if (sist & RST) {
        printf("%s: SCSI BUS reset detected.\n", sym_name(np));
        sym_start_up(shost, 1);
        return IRQ_HANDLED;
    }

    OUTB(np, nc_ctest3, np->rv_ctest3 | CLF);    /* clear dma fifo  */
    OUTB(np, nc_stest3, TE|CSF);        /* clear scsi fifo */

    if (!(sist & (GEN|HTH|SGE)) &&
        !(dstat & (MDPE|BF|ABRT|IID))) {
        if (sist & SBMC)
            sym_int_sbmc(shost);
        else if (sist & STO)
            sym_int_sto (np);
        else if (sist & UDC)
            sym_int_udc (np);
        else
            goto unknown_int;
        return IRQ_HANDLED;
    }

    /*
     * Now, interrupts we are not able to recover cleanly.
     *
     * Log message for hard errors.
     * Reset everything.
     */
    sym_log_hard_error(shost, sist, dstat);

    if ((sist & (GEN|HTH|SGE)) ||
        (dstat & (MDPE|BF|ABRT|IID))) {
        sym_start_reset(np);
        return IRQ_HANDLED;
    }

unknown_int:
    /*
     * We just miss the cause of the interrupt. :(
     * Print a message. The timeout will do the real work.
     */
    printf(    "%s: unknown interrupt(s) ignored, "
        "ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n",
        sym_name(np), istat, dstat, sist);
    return IRQ_NONE;
}

/*
 * Dequeue from the START queue all CCBs that match
 * a given target/lun/task condition (-1 means all),
 * and move them from the BUSY queue to the COMP queue
 * with DID_SOFT_ERROR status condition.
 * This function is used during error handling/recovery.
 * It is called with SCRIPTS not running.
*/
static int
sym_dequeue_from_squeue(struct sym_hcb *np, int i, int target, int lun, int task)
{
    struct sym_ccb *cp;
    int rd, wr;

    /* The caller must hand us a start queue index in range. */
    assert((i >= 0) && (i < 2*MAX_QUEUE));

    /*
     * Compact the START queue in place from 'i' up to the current
     * put position: 'rd' visits every queued job, 'wr' marks the
     * slot where the next surviving job is written back.
     */
    rd = wr = i;
    while (rd != np->squeueput) {
        cp = sym_ccb_from_dsa(np, scr_to_cpu(np->squeue[rd]));
        assert(cp);
#ifdef SYM_CONF_IARB_SUPPORT
        /* Forget hints for IARB, they may be no longer relevant */
        cp->host_flags &= ~HF_HINT_IARB;
#endif
        if ((target != -1 && cp->target != target) ||
            (lun    != -1 && cp->lun    != lun)    ||
            (task   != -1 && cp->tag    != task)) {
            /* Not a match: keep the job, sliding it down if holes opened. */
            if (rd != wr)
                np->squeue[wr] = np->squeue[rd];
            wr += 2;
            if (wr >= MAX_QUEUE*2)
                wr = 0;
        } else {
            /* Match: fail the job and move it to the COMP queue. */
            sym_set_cam_status(cp->cmd, DID_SOFT_ERROR);
            sym_remque(&cp->link_ccbq);
            sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
        }
        rd += 2;
        if (rd >= MAX_QUEUE*2)
            rd = 0;
    }
    /* Copy back the idle task if any entries were removed. */
    if (rd != wr)
        np->squeue[wr] = np->squeue[rd];
    np->squeueput = wr;    /* Update our current start queue pointer */

    /* Number of jobs removed from the START queue. */
    return (rd - wr) / 2;
}

/*
 * chip handler for bad SCSI status condition
 *
 * In case of bad SCSI status, we unqueue all the tasks
 * currently queued to the controller but not yet started
 * and then restart the SCRIPTS processor immediately.
 *
 * QUEUE FULL and BUSY conditions are handled the same way.
 * Basically all the not yet started tasks are requeued in
 * device queue and the queue is frozen until a completion.
 *
 * For CHECK CONDITION and COMMAND TERMINATED status, we use
 * the CCB of the failed command to prepare a REQUEST SENSE
 * SCSI command and queue it to the controller queue.
 *
 * SCRATCHA is assumed to have been loaded with STARTPOS
 * before the SCRIPTS called the C code.
*/ static void sym_sir_bad_scsi_status(struct sym_hcb *np, int num, struct sym_ccb *cp) { u32 startp; u_char s_status = cp->ssss_status; u_char h_flags = cp->host_flags; int msglen; int i; /* * Compute the index of the next job to start from SCRIPTS. */ i = (INL(np, nc_scratcha) - np->squeue_ba) / 4; /* * The last CCB queued used for IARB hint may be * no longer relevant. Forget it. */ #ifdef SYM_CONF_IARB_SUPPORT if (np->last_cp) np->last_cp = 0; #endif /* * Now deal with the SCSI status. */ switch(s_status) { case S_BUSY: case S_QUEUE_FULL: if (sym_verbose >= 2) { sym_print_addr(cp->cmd, "%s\n", s_status == S_BUSY ? "BUSY" : "QUEUE FULL\n"); } default: /* S_INT, S_INT_COND_MET, S_CONFLICT */ sym_complete_error (np, cp); break; case S_TERMINATED: case S_CHECK_COND: /* * If we get an SCSI error when requesting sense, give up. */ if (h_flags & HF_SENSE) { sym_complete_error (np, cp); break; } /* * Dequeue all queued CCBs for that device not yet started, * and restart the SCRIPTS processor immediately. */ sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1); OUTL_DSP(np, SCRIPTA_BA(np, start)); /* * Save some info of the actual IO. * Compute the data residual. */ cp->sv_scsi_status = cp->ssss_status; cp->sv_xerr_status = cp->xerr_status; cp->sv_resid = sym_compute_residual(np, cp); /* * Prepare all needed data structures for * requesting sense data. */ cp->scsi_smsg2[0] = IDENTIFY(0, cp->lun); msglen = 1; /* * If we are currently using anything different from * async. 8 bit data transfers with that target, * start a negotiation, since the device may want * to report us a UNIT ATTENTION condition due to * a cause we currently ignore, and we donnot want * to be stuck with WIDE and/or SYNC data transfer. * * cp->nego_status is filled by sym_prepare_nego(). */ cp->nego_status = 0; msglen += sym_prepare_nego(np, cp, &cp->scsi_smsg2[msglen]); /* * Message table indirect structure. 
*/ cp->phys.smsg.addr = CCB_BA(cp, scsi_smsg2); cp->phys.smsg.size = cpu_to_scr(msglen); /* * sense command */ cp->phys.cmd.addr = CCB_BA(cp, sensecmd); cp->phys.cmd.size = cpu_to_scr(6); /* * patch requested size into sense command */ cp->sensecmd[0] = REQUEST_SENSE; cp->sensecmd[1] = 0; if (cp->cmd->device->scsi_level <= SCSI_2 && cp->lun <= 7) cp->sensecmd[1] = cp->lun << 5; cp->sensecmd[4] = SYM_SNS_BBUF_LEN; cp->data_len = SYM_SNS_BBUF_LEN; /* * sense data */ memset(cp->sns_bbuf, 0, SYM_SNS_BBUF_LEN); cp->phys.sense.addr = CCB_BA(cp, sns_bbuf); cp->phys.sense.size = cpu_to_scr(SYM_SNS_BBUF_LEN); /* * requeue the command. */ startp = SCRIPTB_BA(np, sdata_in); cp->phys.head.savep = cpu_to_scr(startp); cp->phys.head.lastp = cpu_to_scr(startp); cp->startp = cpu_to_scr(startp); cp->goalp = cpu_to_scr(startp + 16); cp->host_xflags = 0; cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; cp->ssss_status = S_ILLEGAL; cp->host_flags = (HF_SENSE|HF_DATA_IN); cp->xerr_status = 0; cp->extra_bytes = 0; cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, select)); /* * Requeue the command. */ sym_put_start_queue(np, cp); /* * Give back to upper layer everything we have dequeued. */ sym_flush_comp_queue(np, 0); break; } } /* * After a device has accepted some management message * as BUS DEVICE RESET, ABORT TASK, etc ..., or when * a device signals a UNIT ATTENTION condition, some * tasks are thrown away by the device. We are required * to reflect that on our tasks list since the device * will never complete these tasks. * * This function move from the BUSY queue to the COMP * queue all disconnected CCBs for a given target that * match the following criteria: * - lun=-1 means any logical UNIT otherwise a given one. * - task=-1 means any task, otherwise a given one. */ int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int task) { SYM_QUEHEAD qtmp, *qp; int i = 0; struct sym_ccb *cp; /* * Move the entire BUSY queue to our temporary queue. 
*/ sym_que_init(&qtmp); sym_que_splice(&np->busy_ccbq, &qtmp); sym_que_init(&np->busy_ccbq); /* * Put all CCBs that matches our criteria into * the COMP queue and put back other ones into * the BUSY queue. */ while ((qp = sym_remque_head(&qtmp)) != NULL) { struct scsi_cmnd *cmd; cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); cmd = cp->cmd; if (cp->host_status != HS_DISCONNECT || cp->target != target || (lun != -1 && cp->lun != lun) || (task != -1 && (cp->tag != NO_TAG && cp->scsi_smsg[2] != task))) { sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); continue; } sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); /* Preserve the software timeout condition */ if (sym_get_cam_status(cmd) != DID_TIME_OUT) sym_set_cam_status(cmd, cam_status); ++i; #if 0 printf("XXXX TASK @%p CLEARED\n", cp); #endif } return i; } /* * chip handler for TASKS recovery * * We cannot safely abort a command, while the SCRIPTS * processor is running, since we just would be in race * with it. * * As long as we have tasks to abort, we keep the SEM * bit set in the ISTAT. When this bit is set, the * SCRIPTS processor interrupts (SIR_SCRIPT_STOPPED) * each time it enters the scheduler. * * If we have to reset a target, clear tasks of a unit, * or to perform the abort of a disconnected job, we * restart the SCRIPTS for selecting the target. Once * selected, the SCRIPTS interrupts (SIR_TARGET_SELECTED). * If it loses arbitration, the SCRIPTS will interrupt again * the next time it will enter its scheduler, and so on ... * * On SIR_TARGET_SELECTED, we scan for the more * appropriate thing to do: * * - If nothing, we just sent a M_ABORT message to the * target to get rid of the useless SCSI bus ownership. * According to the specs, no tasks shall be affected. * - If the target is to be reset, we send it a M_RESET * message. * - If a logical UNIT is to be cleared , we send the * IDENTIFY(lun) + M_ABORT. * - If an untagged task is to be aborted, we send the * IDENTIFY(lun) + M_ABORT. 
 * - If a tagged task is to be aborted, we send the
 *   IDENTIFY(lun) + task attributes + M_ABORT_TAG.
 *
 * Once our 'kiss of death' :) message has been accepted
 * by the target, the SCRIPTS interrupts again
 * (SIR_ABORT_SENT). On this interrupt, we complete
 * all the CCBs that should have been aborted by the
 * target according to our message.
 */
static void sym_sir_task_recovery(struct sym_hcb *np, int num)
{
    SYM_QUEHEAD *qp;
    struct sym_ccb *cp;
    struct sym_tcb *tp = NULL; /* gcc isn't quite smart enough yet */
    struct scsi_target *starget;
    int target=-1, lun=-1, task;
    int i, k;

    switch(num) {
    /*
     * The SCRIPTS processor stopped before starting
     * the next command in order to allow us to perform
     * some task recovery.
     */
    case SIR_SCRIPT_STOPPED:
        /*
         * Do we have any target to reset or unit to clear ?
         */
        for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
            tp = &np->target[i];
            if (tp->to_reset ||
                (tp->lun0p && tp->lun0p->to_clear)) {
                target = i;
                break;
            }
            if (!tp->lunmp)
                continue;
            for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
                if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
                    target = i;
                    break;
                }
            }
            if (target != -1)
                break;
        }

        /*
         * If not, walk the busy queue for any
         * disconnected CCB to be aborted.
         */
        if (target == -1) {
            FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
                cp = sym_que_entry(qp,struct sym_ccb,link_ccbq);
                if (cp->host_status != HS_DISCONNECT)
                    continue;
                if (cp->to_abort) {
                    target = cp->target;
                    break;
                }
            }
        }

        /*
         * If some target is to be selected,
         * prepare and start the selection.
         */
        if (target != -1) {
            tp = &np->target[target];
            np->abrt_sel.sel_id = target;
            np->abrt_sel.sel_scntl3 = tp->head.wval;
            np->abrt_sel.sel_sxfer = tp->head.sval;
            OUTL(np, nc_dsa, np->hcb_ba);
            OUTL_DSP(np, SCRIPTB_BA(np, sel_for_abort));
            return;
        }

        /*
         * Now look for a CCB to abort that haven't started yet.
         * Btw, the SCRIPTS processor is still stopped, so
         * we are not in race.
         */
        i = 0;
        cp = NULL;
        FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
            cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
            if (cp->host_status != HS_BUSY &&
                cp->host_status != HS_NEGOTIATE)
                continue;
            if (!cp->to_abort)
                continue;
#ifdef SYM_CONF_IARB_SUPPORT
            /*
             * If we are using IMMEDIATE ARBITRATION, we donnot
             * want to cancel the last queued CCB, since the
             * SCRIPTS may have anticipated the selection.
             */
            if (cp == np->last_cp) {
                cp->to_abort = 0;
                continue;
            }
#endif
            i = 1;    /* Means we have found some */
            break;
        }
        if (!i) {
            /*
             * We are done, so we donnot need
             * to synchronize with the SCRIPTS anylonger.
             * Remove the SEM flag from the ISTAT.
             */
            np->istat_sem = 0;
            OUTB(np, nc_istat, SIGP);
            break;
        }
        /*
         * Compute index of next position in the start
         * queue the SCRIPTS intends to start and dequeue
         * all CCBs for that device that haven't been started.
         */
        i = (INL(np, nc_scratcha) - np->squeue_ba) / 4;
        i = sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);

        /*
         * Make sure at least our IO to abort has been dequeued.
         */
#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
        assert(i && sym_get_cam_status(cp->cmd) == DID_SOFT_ERROR);
#else
        sym_remque(&cp->link_ccbq);
        sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
#endif
        /*
         * Keep track in cam status of the reason of the abort.
         */
        if (cp->to_abort == 2)
            sym_set_cam_status(cp->cmd, DID_TIME_OUT);
        else
            sym_set_cam_status(cp->cmd, DID_ABORT);

        /*
         * Complete with error everything that we have dequeued.
         */
        sym_flush_comp_queue(np, 0);
        break;
    /*
     * The SCRIPTS processor has selected a target
     * we may have some manual recovery to perform for.
     */
    case SIR_TARGET_SELECTED:
        target = INB(np, nc_sdid) & 0xf;
        tp = &np->target[target];

        np->abrt_tbl.addr = cpu_to_scr(vtobus(np->abrt_msg));

        /*
         * If the target is to be reset, prepare a
         * M_RESET message and clear the to_reset flag
         * since we donnot expect this operation to fail.
         */
        if (tp->to_reset) {
            np->abrt_msg[0] = M_RESET;
            np->abrt_tbl.size = 1;
            tp->to_reset = 0;
            break;
        }

        /*
         * Otherwise, look for some logical unit to be cleared.
         */
        if (tp->lun0p && tp->lun0p->to_clear)
            lun = 0;
        else if (tp->lunmp) {
            for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
                if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
                    lun = k;
                    break;
                }
            }
        }

        /*
         * If a logical unit is to be cleared, prepare
         * an IDENTIFY(lun) + ABORT MESSAGE.
         */
        if (lun != -1) {
            struct sym_lcb *lp = sym_lp(tp, lun);
            lp->to_clear = 0; /* We don't expect to fail here */
            np->abrt_msg[0] = IDENTIFY(0, lun);
            np->abrt_msg[1] = M_ABORT;
            np->abrt_tbl.size = 2;
            break;
        }

        /*
         * Otherwise, look for some disconnected job to
         * abort for this target.
         */
        i = 0;
        cp = NULL;
        FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
            cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
            if (cp->host_status != HS_DISCONNECT)
                continue;
            if (cp->target != target)
                continue;
            if (!cp->to_abort)
                continue;
            i = 1;    /* Means we have some */
            break;
        }

        /*
         * If we have none, probably since the device has
         * completed the command before we won abitration,
         * send a M_ABORT message without IDENTIFY.
         * According to the specs, the device must just
         * disconnect the BUS and not abort any task.
         */
        if (!i) {
            np->abrt_msg[0] = M_ABORT;
            np->abrt_tbl.size = 1;
            break;
        }

        /*
         * We have some task to abort.
         * Set the IDENTIFY(lun)
         */
        np->abrt_msg[0] = IDENTIFY(0, cp->lun);

        /*
         * If we want to abort an untagged command, we
         * will send a IDENTIFY + M_ABORT.
         * Otherwise (tagged command), we will send
         * a IDENTITFY + task attributes + ABORT TAG.
         */
        if (cp->tag == NO_TAG) {
            np->abrt_msg[1] = M_ABORT;
            np->abrt_tbl.size = 2;
        } else {
            np->abrt_msg[1] = cp->scsi_smsg[1];
            np->abrt_msg[2] = cp->scsi_smsg[2];
            np->abrt_msg[3] = M_ABORT_TAG;
            np->abrt_tbl.size = 4;
        }
        /*
         * Keep track of software timeout condition, since the
         * peripheral driver may not count retries on abort
         * conditions not due to timeout.
         */
        if (cp->to_abort == 2)
            sym_set_cam_status(cp->cmd, DID_TIME_OUT);
        cp->to_abort = 0; /* We donnot expect to fail here */
        break;

    /*
     * The target has accepted our message and switched
     * to BUS FREE phase as we expected.
     */
    case SIR_ABORT_SENT:
        target = INB(np, nc_sdid) & 0xf;
        tp = &np->target[target];
        starget = tp->starget;

        /*
         * If we didn't abort anything, leave here.
         */
        if (np->abrt_msg[0] == M_ABORT)
            break;

        /*
         * If we sent a M_RESET, then a hardware reset has
         * been performed by the target.
         * - Reset everything to async 8 bit
         * - Tell ourself to negotiate next time :-)
         * - Prepare to clear all disconnected CCBs for
         *   this target from our task list (lun=task=-1)
         */
        lun = -1;
        task = -1;
        if (np->abrt_msg[0] == M_RESET) {
            tp->head.sval = 0;
            tp->head.wval = np->rv_scntl3;
            tp->head.uval = 0;
            spi_period(starget) = 0;
            spi_offset(starget) = 0;
            spi_width(starget) = 0;
            spi_iu(starget) = 0;
            spi_dt(starget) = 0;
            spi_qas(starget) = 0;
            tp->tgoal.check_nego = 1;
            tp->tgoal.renego = 0;
        }
        /*
         * Otherwise, check for the LUN and TASK(s)
         * concerned by the cancelation.
         * If it is not ABORT_TAG then it is CLEAR_QUEUE
         * or an ABORT message :-)
         */
        else {
            lun = np->abrt_msg[0] & 0x3f;
            if (np->abrt_msg[1] == M_ABORT_TAG)
                task = np->abrt_msg[2];
        }

        /*
         * Complete all the CCBs the device should have
         * aborted due to our 'kiss of death' message.
         */
        i = (INL(np, nc_scratcha) - np->squeue_ba) / 4;
        sym_dequeue_from_squeue(np, i, target, lun, -1);
        sym_clear_tasks(np, DID_ABORT, target, lun, task);
        sym_flush_comp_queue(np, 0);

        /*
         * If we sent a BDR, make upper layer aware of that.
         */
        if (np->abrt_msg[0] == M_RESET)
            starget_printk(KERN_NOTICE, starget,
                            "has been reset\n");
        break;
    }

    /*
     * Print to the log the message we intend to send.
     */
    if (num == SIR_TARGET_SELECTED) {
        dev_info(&tp->starget->dev, "control msgout:");
        sym_printl_hex(np->abrt_msg, np->abrt_tbl.size);
        np->abrt_tbl.size = cpu_to_scr(np->abrt_tbl.size);
    }

    /*
     * Let the SCRIPTS processor continue.
     */
    OUTONB_STD();
}

/*
 * Gerard's alchemy:) that deals with the data
 * pointer for both MDP and the residual calculation.
 *
 * I didn't want to bloat the code by more than 200
 * lines for the handling of both MDP and the residual.
 * This has been achieved by using a data pointer
 * representation consisting in an index in the data
 * array (dp_sg) and a negative offset (dp_ofs) that
 * have the following meaning:
 *
 * - dp_sg = SYM_CONF_MAX_SG
 *   we are at the end of the data script.
 * - dp_sg < SYM_CONF_MAX_SG
 *   dp_sg points to the next entry of the scatter array
 *   we want to transfer.
 * - dp_ofs < 0
 *   dp_ofs represents the residual of bytes of the
 *   previous entry scatter entry we will send first.
 * - dp_ofs = 0
 *   no residual to send first.
 *
 * The function sym_evaluate_dp() accepts an arbitray
 * offset (basically from the MDP message) and returns
 * the corresponding values of dp_sg and dp_ofs.
 */
static int sym_evaluate_dp(struct sym_hcb *np, struct sym_ccb *cp, u32 scr, int *ofs)
{
    u32 dp_scr;
    int dp_ofs, dp_sg, dp_sgmin;
    int tmp;
    struct sym_pmc *pm;

    /*
     * Compute the resulted data pointer in term of a script
     * address within some DATA script and a signed byte offset.
     */
    dp_scr = scr;
    dp_ofs = *ofs;
    if (dp_scr == SCRIPTA_BA(np, pm0_data))
        pm = &cp->phys.pm0;
    else if (dp_scr == SCRIPTA_BA(np, pm1_data))
        pm = &cp->phys.pm1;
    else
        pm = NULL;

    if (pm) {
        dp_scr = scr_to_cpu(pm->ret);
        dp_ofs -= scr_to_cpu(pm->sg.size) & 0x00ffffff;
    }

    /*
     * If we are auto-sensing, then we are done.
     */
    if (cp->host_flags & HF_SENSE) {
        *ofs = dp_ofs;
        return 0;
    }

    /*
     * Deduce the index of the sg entry.
     * Keep track of the index of the first valid entry.
     * If result is dp_sg = SYM_CONF_MAX_SG, then we are at the
     * end of the data.
     */
    tmp = scr_to_cpu(cp->goalp);
    dp_sg = SYM_CONF_MAX_SG;
    if (dp_scr != tmp)
        dp_sg -= (tmp - 8 - (int)dp_scr) / (2*4);
    dp_sgmin = SYM_CONF_MAX_SG - cp->segments;

    /*
     * Move to the sg entry the data pointer belongs to.
     *
     * If we are inside the data area, we expect result to be:
     *
     * Either,
     *     dp_ofs = 0 and dp_sg is the index of the sg entry
     *     the data pointer belongs to (or the end of the data)
     * Or,
     *     dp_ofs < 0 and dp_sg is the index of the sg entry
     *     the data pointer belongs to + 1.
     */
    if (dp_ofs < 0) {
        int n;
        while (dp_sg > dp_sgmin) {
            --dp_sg;
            tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
            n = dp_ofs + (tmp & 0xffffff);
            if (n > 0) {
                ++dp_sg;
                break;
            }
            dp_ofs = n;
        }
    }
    else if (dp_ofs > 0) {
        while (dp_sg < SYM_CONF_MAX_SG) {
            tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
            dp_ofs -= (tmp & 0xffffff);
            ++dp_sg;
            if (dp_ofs <= 0)
                break;
        }
    }

    /*
     * Make sure the data pointer is inside the data area.
     * If not, return some error.
     */
    if (dp_sg < dp_sgmin || (dp_sg == dp_sgmin && dp_ofs < 0))
        goto out_err;
    else if (dp_sg > SYM_CONF_MAX_SG ||
             (dp_sg == SYM_CONF_MAX_SG && dp_ofs > 0))
        goto out_err;

    /*
     * Save the extreme pointer if needed.
     */
    if (dp_sg > cp->ext_sg ||
        (dp_sg == cp->ext_sg && dp_ofs > cp->ext_ofs)) {
        cp->ext_sg = dp_sg;
        cp->ext_ofs = dp_ofs;
    }

    /*
     * Return data.
     */
    *ofs = dp_ofs;
    return dp_sg;

out_err:
    return -1;
}

/*
 * chip handler for MODIFY DATA POINTER MESSAGE
 *
 * We also call this function on IGNORE WIDE RESIDUE
 * messages that do not match a SWIDE full condition.
 * Btw, we assume in that situation that such a message
 * is equivalent to a MODIFY DATA POINTER (offset=-1).
 */
static void sym_modify_dp(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp, int ofs)
{
    int dp_ofs = ofs;
    u32 dp_scr = sym_get_script_dp (np, cp);
    u32 dp_ret;
    u32 tmp;
    u_char hflags;
    int dp_sg;
    struct sym_pmc *pm;

    /*
     * Not supported for auto-sense.
     */
    if (cp->host_flags & HF_SENSE)
        goto out_reject;

    /*
     * Apply our alchemy:) (see comments in sym_evaluate_dp()),
     * to the resulted data pointer.
     */
    dp_sg = sym_evaluate_dp(np, cp, dp_scr, &dp_ofs);
    if (dp_sg < 0)
        goto out_reject;

    /*
     * And our alchemy:) allows to easily calculate the data
     * script address we want to return for the next data phase.
     */
    dp_ret = cpu_to_scr(cp->goalp);
    dp_ret = dp_ret - 8 - (SYM_CONF_MAX_SG - dp_sg) * (2*4);

    /*
     * If offset / scatter entry is zero we donnot need
     * a context for the new current data pointer.
     */
    if (dp_ofs == 0) {
        dp_scr = dp_ret;
        goto out_ok;
    }

    /*
     * Get a context for the new current data pointer.
     */
    hflags = INB(np, HF_PRT);

    if (hflags & HF_DP_SAVED)
        hflags ^= HF_ACT_PM;

    if (!(hflags & HF_ACT_PM)) {
        pm = &cp->phys.pm0;
        dp_scr = SCRIPTA_BA(np, pm0_data);
    }
    else {
        pm = &cp->phys.pm1;
        dp_scr = SCRIPTA_BA(np, pm1_data);
    }

    hflags &= ~(HF_DP_SAVED);

    OUTB(np, HF_PRT, hflags);

    /*
     * Set up the new current data pointer.
     * ofs < 0 there, and for the next data phase, we
     * want to transfer part of the data of the sg entry
     * corresponding to index dp_sg-1 prior to returning
     * to the main data script.
     */
    pm->ret = cpu_to_scr(dp_ret);
    tmp = scr_to_cpu(cp->phys.data[dp_sg-1].addr);
    tmp += scr_to_cpu(cp->phys.data[dp_sg-1].size) + dp_ofs;
    pm->sg.addr = cpu_to_scr(tmp);
    pm->sg.size = cpu_to_scr(-dp_ofs);

out_ok:
    sym_set_script_dp (np, cp, dp_scr);
    OUTL_DSP(np, SCRIPTA_BA(np, clrack));
    return;

out_reject:
    OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
}

/*
 * chip calculation of the data residual.
 *
 * As I used to say, the requirement of data residual
 * in SCSI is broken, useless and cannot be achieved
 * without huge complexity.
 * But most OSes and even the official CAM require it.
 * When stupidity happens to be so widely spread inside
 * a community, it gets hard to convince.
 *
 * Anyway, I don't care, since I am not going to use
 * any software that considers this data residual as
 * a relevant information. :)
 */
int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp)
{
    int dp_sg, dp_sgmin, resid = 0;
    int dp_ofs = 0;

    /*
     * Check for some data lost or just thrown away.
     * We are not required to be quite accurate in this
     * situation. Btw, if we are odd for output and the
     * device claims some more data, it may well happen
     * than our residual be zero. :-)
     */
    if (cp->xerr_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) {
        if (cp->xerr_status & XE_EXTRA_DATA)
            resid -= cp->extra_bytes;
        if (cp->xerr_status & XE_SODL_UNRUN)
            ++resid;
        if (cp->xerr_status & XE_SWIDE_OVRUN)
            --resid;
    }

    /*
     * If all data has been transferred,
     * there is no residual.
     */
    if (cp->phys.head.lastp == cp->goalp)
        return resid;

    /*
     * If no data transfer occurs, or if the data
     * pointer is weird, return full residual.
     */
    if (cp->startp == cp->phys.head.lastp ||
        sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp),
                        &dp_ofs) < 0) {
        return cp->data_len - cp->odd_byte_adjustment;
    }

    /*
     * If we were auto-sensing, then we are done.
     */
    if (cp->host_flags & HF_SENSE) {
        return -dp_ofs;
    }

    /*
     * We are now full comfortable in the computation
     * of the data residual (2's complement).
     */
    dp_sgmin = SYM_CONF_MAX_SG - cp->segments;
    resid = -cp->ext_ofs;
    for (dp_sg = cp->ext_sg; dp_sg < SYM_CONF_MAX_SG; ++dp_sg) {
        u_int tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
        resid += (tmp & 0xffffff);
    }

    resid -= cp->odd_byte_adjustment;

    /*
     * Hopefully, the result is not too wrong.
     */
    return resid;
}

/*
 * Negotiation for WIDE and SYNCHRONOUS DATA TRANSFER.
 *
 * When we try to negotiate, we append the negotiation message
 * to the identify and (maybe) simple tag message.
 * The host status field is set to HS_NEGOTIATE to mark this
 * situation.
 *
 * If the target doesn't answer this message immediately
 * (as required by the standard), the SIR_NEGO_FAILED interrupt
 * will be raised eventually.
 * The handler removes the HS_NEGOTIATE status, and sets the
 * negotiated value to the default (async / nowide).
 *
 * If we receive a matching answer immediately, we check it
 * for validity, and set the values.
 *
 * If we receive a Reject message immediately, we assume the
 * negotiation has failed, and fall back to standard values.
 *
 * If we receive a negotiation message while not in HS_NEGOTIATE
 * state, it's a target initiated negotiation.
 *  We prepare a
 *  (hopefully) valid answer, set our parameters, and send back
 *  this answer to the target.
 *
 *  If the target doesn't fetch the answer (no message out phase),
 *  we assume the negotiation has failed, and fall back to default
 *  settings (SIR_NEGO_PROTO interrupt).
 *
 *  When we set the values, we adjust them in all ccbs belonging
 *  to this target, in the controller's register, and in the "phys"
 *  field of the controller's struct sym_hcb.
 */

/*
 *  chip handler for SYNCHRONOUS DATA TRANSFER REQUEST (SDTR) message.
 *
 *  'req' is non-zero for a target request, zero for an answer to
 *  our own request.  Returns 0 when accepted, -1 when rejected
 *  (in which case async transfer is forced via sym_setsync()).
 */
static int
sym_sync_nego_check(struct sym_hcb *np, int req, struct sym_ccb *cp)
{
	int target = cp->target;
	u_char	chg, ofs, per, fak, div;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_nego_msg(np, target, "sync msgin", np->msgin);
	}

	/*
	 *  Get requested values (period and offset from the SDTR message).
	 */
	chg = 0;
	per = np->msgin[3];
	ofs = np->msgin[4];

	/*
	 *  Check values against our limits.
	 *  The period is only relevant when an offset was requested
	 *  (ofs == 0 means async transfer).
	 */
	if (ofs) {
		if (ofs > np->maxoffs)
			{chg = 1; ofs = np->maxoffs;}
	}

	if (ofs) {
		if (per < np->minsync)
			{chg = 1; per = np->minsync;}
	}

	/*
	 *  Get new chip synchronous parameters value.
	 */
	div = fak = 0;
	if (ofs && sym_getsync(np, 0, per, &div, &fak) < 0)
		goto reject_it;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_addr(cp->cmd,
				"sdtr: ofs=%d per=%d div=%d fak=%d chg=%d.\n",
				ofs, per, div, fak, chg);
	}

	/*
	 *  If it was an answer we want to change,
	 *  then it isn't acceptable. Reject it.
	 */
	if (!req && chg)
		goto reject_it;

	/*
	 *  Apply new values.
	 */
	sym_setsync (np, target, ofs, per, div, fak);

	/*
	 *  It was an answer. We are done.
	 */
	if (!req)
		return 0;

	/*
	 *  It was a request. Prepare an answer message.
	 */
	spi_populate_sync_msg(np->msgout, per, ofs);

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_nego_msg(np, target, "sync msgout", np->msgout);
	}

	np->msgin [0] = M_NOOP;

	return 0;

reject_it:
	sym_setsync (np, target, 0, 0, 0, 0);
	return -1;
}

/*
 *  SDTR negotiation dispatcher: decide whether the incoming SDTR is
 *  a target-initiated request or the answer to our own, validate it,
 *  then restart the SCRIPTS at the proper entry.
 */
static void sym_sync_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
{
	int req = 1;
	int result;

	/*
	 *  Request or answer ?
	 */
	if (INB(np, HS_PRT) == HS_NEGOTIATE) {
		OUTB(np, HS_PRT, HS_BUSY);
		if (cp->nego_status && cp->nego_status != NS_SYNC)
			goto reject_it;
		req = 0;
	}

	/*
	 *  Check and apply new values.
	 */
	result = sym_sync_nego_check(np, req, cp);
	if (result)	/* Not acceptable, reject it */
		goto reject_it;
	if (req) {	/* Was a request, send response. */
		cp->nego_status = NS_SYNC;
		OUTL_DSP(np, SCRIPTB_BA(np, sdtr_resp));
	}
	else		/* Was a response, we are done. */
		OUTL_DSP(np, SCRIPTA_BA(np, clrack));
	return;

reject_it:
	OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
}

/*
 *  chip handler for PARALLEL PROTOCOL REQUEST (PPR) message.
 *
 *  Same request/answer convention as sym_sync_nego_check(); also
 *  validates the wide and protocol-option (DT) fields of the PPR.
 */
static int
sym_ppr_nego_check(struct sym_hcb *np, int req, int target)
{
	struct sym_tcb *tp = &np->target[target];
	unsigned char fak, div;
	int dt, chg = 0;

	unsigned char per = np->msgin[3];
	unsigned char ofs = np->msgin[5];
	unsigned char wide = np->msgin[6];
	unsigned char opts = np->msgin[7] & PPR_OPT_MASK;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_nego_msg(np, target, "ppr msgin", np->msgin);
	}

	/*
	 *  Check values against our limits.
	 *  DT options require a wide bus and Ultra3 support (FE_U3EN).
	 */
	if (wide > np->maxwide) {
		chg = 1;
		wide = np->maxwide;
	}
	if (!wide || !(np->features & FE_U3EN))
		opts = 0;

	if (opts != (np->msgin[7] & PPR_OPT_MASK))
		chg = 1;

	dt = opts & PPR_OPT_DT;

	/* Offset and minimum period limits differ between DT and ST mode. */
	if (ofs) {
		unsigned char maxoffs = dt ? np->maxoffs_dt : np->maxoffs;
		if (ofs > maxoffs) {
			chg = 1;
			ofs = maxoffs;
		}
	}

	if (ofs) {
		unsigned char minsync = dt ? np->minsync_dt : np->minsync;
		if (per < minsync) {
			chg = 1;
			per = minsync;
		}
	}

	/*
	 *  Get new chip synchronous parameters value.
	 */
	div = fak = 0;
	if (ofs && sym_getsync(np, dt, per, &div, &fak) < 0)
		goto reject_it;

	/*
	 *  If it was an answer we want to change,
	 *  then it isn't acceptable. Reject it.
	 */
	if (!req && chg)
		goto reject_it;

	/*
	 *  Apply new values.
	 */
	sym_setpprot(np, target, opts, ofs, per, wide, div, fak);

	/*
	 *  It was an answer. We are done.
	 */
	if (!req)
		return 0;

	/*
	 *  It was a request. Prepare an answer message.
	 */
	spi_populate_ppr_msg(np->msgout, per, ofs, wide, opts);

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_nego_msg(np, target, "ppr msgout", np->msgout);
	}

	np->msgin [0] = M_NOOP;

	return 0;

reject_it:
	sym_setpprot (np, target, 0, 0, 0, 0, 0, 0);
	/*
	 *  If it is a device response that should result in
	 *  ST, we may want to try a legacy negotiation later.
	 */
	if (!req && !opts) {
		tp->tgoal.period = per;
		tp->tgoal.offset = ofs;
		tp->tgoal.width = wide;
		tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;
		tp->tgoal.check_nego = 1;
	}
	return -1;
}

/*
 *  PPR negotiation dispatcher (same structure as sym_sync_nego()).
 */
static void sym_ppr_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
{
	int req = 1;
	int result;

	/*
	 *  Request or answer ?
	 */
	if (INB(np, HS_PRT) == HS_NEGOTIATE) {
		OUTB(np, HS_PRT, HS_BUSY);
		if (cp->nego_status && cp->nego_status != NS_PPR)
			goto reject_it;
		req = 0;
	}

	/*
	 *  Check and apply new values.
	 */
	result = sym_ppr_nego_check(np, req, cp->target);
	if (result)	/* Not acceptable, reject it */
		goto reject_it;
	if (req) {	/* Was a request, send response. */
		cp->nego_status = NS_PPR;
		OUTL_DSP(np, SCRIPTB_BA(np, ppr_resp));
	}
	else		/* Was a response, we are done. */
		OUTL_DSP(np, SCRIPTA_BA(np, clrack));
	return;

reject_it:
	OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
}

/*
 *  chip handler for WIDE DATA TRANSFER REQUEST (WDTR) message.
 *
 *  Same request/answer convention as sym_sync_nego_check().
 */
static int
sym_wide_nego_check(struct sym_hcb *np, int req, struct sym_ccb *cp)
{
	int target = cp->target;
	u_char	chg, wide;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_nego_msg(np, target, "wide msgin", np->msgin);
	}

	/*
	 *  Get requested values.
	 */
	chg  = 0;
	wide = np->msgin[3];

	/*
	 *  Check values against our limits.
	 */
	if (wide > np->maxwide) {
		chg = 1;
		wide = np->maxwide;
	}

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_addr(cp->cmd, "wdtr: wide=%d chg=%d.\n",
				wide, chg);
	}

	/*
	 *  If it was an answer we want to change,
	 *  then it isn't acceptable. Reject it.
	 */
	if (!req && chg)
		goto reject_it;

	/*
	 *  Apply new values.
	 */
	sym_setwide (np, target, wide);

	/*
	 *  It was an answer. We are done.
	 */
	if (!req)
		return 0;

	/*
	 *  It was a request. Prepare an answer message.
	 */
	spi_populate_width_msg(np->msgout, wide);

	np->msgin [0] = M_NOOP;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_nego_msg(np, target, "wide msgout", np->msgout);
	}

	return 0;

reject_it:
	return -1;
}

/*
 *  WDTR negotiation dispatcher.  After answering a target's WDTR it
 *  chains directly into an SDTR negotiation when a sync offset goal
 *  is pending (see comment below).
 */
static void sym_wide_nego(struct sym_hcb *np, struct sym_tcb *tp,
		struct sym_ccb *cp)
{
	int req = 1;
	int result;

	/*
	 *  Request or answer ?
	 */
	if (INB(np, HS_PRT) == HS_NEGOTIATE) {
		OUTB(np, HS_PRT, HS_BUSY);
		if (cp->nego_status && cp->nego_status != NS_WIDE)
			goto reject_it;
		req = 0;
	}

	/*
	 *  Check and apply new values.
	 */
	result = sym_wide_nego_check(np, req, cp);
	if (result)	/* Not acceptable, reject it */
		goto reject_it;
	if (req) {	/* Was a request, send response. */
		cp->nego_status = NS_WIDE;
		OUTL_DSP(np, SCRIPTB_BA(np, wdtr_resp));
	} else {	/* Was a response. */
		/*
		 *  Negotiate for SYNC immediately after WIDE response.
		 *  This allows to negotiate for both WIDE and SYNC on
		 *  a single SCSI command (Suggested by Justin Gibbs).
		 */
		if (tp->tgoal.offset) {
			spi_populate_sync_msg(np->msgout, tp->tgoal.period,
					tp->tgoal.offset);

			if (DEBUG_FLAGS & DEBUG_NEGO) {
				sym_print_nego_msg(np, cp->target,
				                   "sync msgout", np->msgout);
			}

			cp->nego_status = NS_SYNC;
			OUTB(np, HS_PRT, HS_NEGOTIATE);
			OUTL_DSP(np, SCRIPTB_BA(np, sdtr_resp));
			return;
		} else
			OUTL_DSP(np, SCRIPTA_BA(np, clrack));
	}

	return;

reject_it:
	OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
}

/*
 *  Reset DT, SYNC or WIDE to default settings.
 *
 *  Called when a negotiation does not succeed either
 *  on rejection or on protocol error.
 *
 *  A target that understands a PPR message should never
 *  reject it, and messing with it is very unlikely.
 *  So, if a PPR makes problems, we may just want to
 *  try a legacy negotiation later.
 */
static void sym_nego_default(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
{
	switch (cp->nego_status) {
	case NS_PPR:
#if 0
		sym_setpprot (np, cp->target, 0, 0, 0, 0, 0, 0);
#else
		/*
		 *  Keep sync/wide goals but clamp them to our limits and
		 *  drop the Ultra3 options, then schedule a legacy
		 *  (SDTR/WDTR) negotiation attempt.
		 */
		if (tp->tgoal.period < np->minsync)
			tp->tgoal.period = np->minsync;
		if (tp->tgoal.offset > np->maxoffs)
			tp->tgoal.offset = np->maxoffs;
		tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;
		tp->tgoal.check_nego = 1;
#endif
		break;
	case NS_SYNC:
		sym_setsync (np, cp->target, 0, 0, 0, 0);
		break;
	case NS_WIDE:
		sym_setwide (np, cp->target, 0);
		break;
	}
	np->msgin [0] = M_NOOP;
	np->msgout[0] = M_NOOP;
	cp->nego_status = 0;
}

/*
 *  chip handler for MESSAGE REJECT received in response to
 *  PPR, WIDE or SYNCHRONOUS negotiation.
 */
static void sym_nego_rejected(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
{
	sym_nego_default(np, tp, cp);
	OUTB(np, HS_PRT, HS_BUSY);
}

/*
 *  chip exception handler for programmed interrupts.
 *
 *  Dispatches on the interrupt number raised by the SCRIPTS
 *  processor (read from the DSPS register).  Cases that fall to
 *  'out' resume the SCRIPTS; 'out_reject'/'out_clrack' restart it
 *  at the corresponding script entry; 'out_stuck' leaves it stopped.
 */
static void sym_int_sir(struct sym_hcb *np)
{
	u_char	num	= INB(np, nc_dsps);
	u32	dsa	= INL(np, nc_dsa);
	struct sym_ccb *cp	= sym_ccb_from_dsa(np, dsa);
	u_char	target	= INB(np, nc_sdid) & 0x0f;
	struct sym_tcb *tp	= &np->target[target];
	int	tmp;

	if (DEBUG_FLAGS & DEBUG_TINY) printf ("I#%d", num);

	switch (num) {
#if   SYM_CONF_DMA_ADDRESSING_MODE == 2
	/*
	 *  SCRIPTS tell us that we may have to update
	 *  64 bit DMA segment registers.
	 */
	case SIR_DMAP_DIRTY:
		sym_update_dmap_regs(np);
		goto out;
#endif
	/*
	 *  Command has been completed with error condition
	 *  or has been auto-sensed.
	 */
	case SIR_COMPLETE_ERROR:
		sym_complete_error(np, cp);
		return;
	/*
	 *  The C code is currently trying to recover from something.
	 *  Typically, user want to abort some command.
	 */
	case SIR_SCRIPT_STOPPED:
	case SIR_TARGET_SELECTED:
	case SIR_ABORT_SENT:
		sym_sir_task_recovery(np, num);
		return;
	/*
	 *  The device didn't go to MSG OUT phase after having
	 *  been selected with ATN.  We do not want to handle that.
	 */
	case SIR_SEL_ATN_NO_MSG_OUT:
		scmd_printk(KERN_WARNING, cp->cmd,
				"No MSG OUT phase after selection with ATN\n");
		goto out_stuck;
	/*
	 *  The device didn't switch to MSG IN phase after
	 *  having reselected the initiator.
	 */
	case SIR_RESEL_NO_MSG_IN:
		scmd_printk(KERN_WARNING, cp->cmd,
				"No MSG IN phase after reselection\n");
		goto out_stuck;
	/*
	 *  After reselection, the device sent a message that wasn't
	 *  an IDENTIFY.
	 */
	case SIR_RESEL_NO_IDENTIFY:
		scmd_printk(KERN_WARNING, cp->cmd,
				"No IDENTIFY after reselection\n");
		goto out_stuck;
	/*
	 *  The device reselected a LUN we do not know about.
	 */
	case SIR_RESEL_BAD_LUN:
		np->msgout[0] = M_RESET;
		goto out;
	/*
	 *  The device reselected for an untagged nexus and we
	 *  haven't any.
	 */
	case SIR_RESEL_BAD_I_T_L:
		np->msgout[0] = M_ABORT;
		goto out;
	/*
	 *  The device reselected for a tagged nexus that we do not have.
	 */
	case SIR_RESEL_BAD_I_T_L_Q:
		np->msgout[0] = M_ABORT_TAG;
		goto out;
	/*
	 *  The SCRIPTS let us know that the device has grabbed
	 *  our message and will abort the job.
	 */
	case SIR_RESEL_ABORTED:
		np->lastmsg = np->msgout[0];
		np->msgout[0] = M_NOOP;
		scmd_printk(KERN_WARNING, cp->cmd,
			"message %x sent on bad reselection\n", np->lastmsg);
		goto out;
	/*
	 *  The SCRIPTS let us know that a message has been
	 *  successfully sent to the device.
	 */
	case SIR_MSG_OUT_DONE:
		np->lastmsg = np->msgout[0];
		np->msgout[0] = M_NOOP;
		/* Should we really care of that */
		if (np->lastmsg == M_PARITY || np->lastmsg == M_ID_ERROR) {
			if (cp) {
				cp->xerr_status &= ~XE_PARITY_ERR;
				if (!cp->xerr_status)
					OUTOFFB(np, HF_PRT, HF_EXT_ERR);
			}
		}
		goto out;
	/*
	 *  The device didn't send a GOOD SCSI status.
	 *  We may have some work to do prior to allow
	 *  the SCRIPTS processor to continue.
	 */
	case SIR_BAD_SCSI_STATUS:
		if (!cp)
			goto out;
		sym_sir_bad_scsi_status(np, num, cp);
		return;
	/*
	 *  We are asked by the SCRIPTS to prepare a
	 *  REJECT message.
	 */
	case SIR_REJECT_TO_SEND:
		sym_print_msg(cp, "M_REJECT to send for ", np->msgin);
		np->msgout[0] = M_REJECT;
		goto out;
	/*
	 *  We have been ODD at the end of a DATA IN
	 *  transfer and the device didn't send a
	 *  IGNORE WIDE RESIDUE message.
	 *  It is a data overrun condition.
	 */
	case SIR_SWIDE_OVERRUN:
		if (cp) {
			OUTONB(np, HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_SWIDE_OVRUN;
		}
		goto out;
	/*
	 *  We have been ODD at the end of a DATA OUT
	 *  transfer.
	 *  It is a data underrun condition.
	 */
	case SIR_SODL_UNDERRUN:
		if (cp) {
			OUTONB(np, HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_SODL_UNRUN;
		}
		goto out;
	/*
	 *  The device wants us to transfer more data than
	 *  expected or in the wrong direction.
	 *  The number of extra bytes is in scratcha.
	 *  It is a data overrun condition.
	 */
	case SIR_DATA_OVERRUN:
		if (cp) {
			OUTONB(np, HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_EXTRA_DATA;
			cp->extra_bytes += INL(np, nc_scratcha);
		}
		goto out;
	/*
	 *  The device switched to an illegal phase (4/5).
	 */
	case SIR_BAD_PHASE:
		if (cp) {
			OUTONB(np, HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_BAD_PHASE;
		}
		goto out;
	/*
	 *  We received a message.
	 */
	case SIR_MSG_RECEIVED:
		if (!cp)
			goto out_stuck;
		switch (np->msgin [0]) {
		/*
		 *  We received an extended message.
		 *  We handle MODIFY DATA POINTER, SDTR, WDTR
		 *  and reject all other extended messages.
		 */
		case M_EXTENDED:
			switch (np->msgin [2]) {
			case M_X_MODIFY_DP:
				if (DEBUG_FLAGS & DEBUG_POINTER)
					sym_print_msg(cp, "extended msg ",
						      np->msgin);
				tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) +
				      (np->msgin[5]<<8)  + (np->msgin[6]);
				sym_modify_dp(np, tp, cp, tmp);
				return;
			case M_X_SYNC_REQ:
				sym_sync_nego(np, tp, cp);
				return;
			case M_X_PPR_REQ:
				sym_ppr_nego(np, tp, cp);
				return;
			case M_X_WIDE_REQ:
				sym_wide_nego(np, tp, cp);
				return;
			default:
				goto out_reject;
			}
			break;
		/*
		 *  We received a 1/2 byte message not handled from SCRIPTS.
		 *  We are only expecting MESSAGE REJECT and IGNORE WIDE
		 *  RESIDUE messages that haven't been anticipated by
		 *  SCRIPTS on SWIDE full condition. Unanticipated IGNORE
		 *  WIDE RESIDUE messages are aliased as MODIFY DP (-1).
		 */
		case M_IGN_RESIDUE:
			if (DEBUG_FLAGS & DEBUG_POINTER)
				sym_print_msg(cp, "1 or 2 byte ", np->msgin);
			if (cp->host_flags & HF_SENSE)
				OUTL_DSP(np, SCRIPTA_BA(np, clrack));
			else
				sym_modify_dp(np, tp, cp, -1);
			return;
		case M_REJECT:
			if (INB(np, HS_PRT) == HS_NEGOTIATE)
				sym_nego_rejected(np, tp, cp);
			else {
				sym_print_addr(cp->cmd,
					"M_REJECT received (%x:%x).\n",
					scr_to_cpu(np->lastmsg), np->msgout[0]);
			}
			goto out_clrack;
			break;
		default:
			goto out_reject;
		}
		break;
	/*
	 *  We received an unknown message.
	 *  Ignore all MSG IN phases and reject it.
	 */
	case SIR_MSG_WEIRD:
		sym_print_msg(cp, "WEIRD message received", np->msgin);
		OUTL_DSP(np, SCRIPTB_BA(np, msg_weird));
		return;
	/*
	 *  Negotiation failed.
	 *  Target does not send us the reply.
	 *  Remove the HS_NEGOTIATE status.
	 */
	case SIR_NEGO_FAILED:
		OUTB(np, HS_PRT, HS_BUSY);
		/* fall through */
	/*
	 *  Negotiation failed.
	 *  Target does not want answer message.
	 */
	case SIR_NEGO_PROTO:
		sym_nego_default(np, tp, cp);
		goto out;
	}

out:
	OUTONB_STD();
	return;
out_reject:
	OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
	return;
out_clrack:
	OUTL_DSP(np, SCRIPTA_BA(np, clrack));
	return;
out_stuck:
	return;
}

/*
 *  Acquire a control block.
 *
 *  Picks a free CCB (allocating a fresh one if the free queue is
 *  empty), reserves a tag for tagged commands, and wires the LUN's
 *  reselection data for the chip.  Returns NULL when no CCB or no
 *  nexus slot is available.
 */
struct sym_ccb *sym_get_ccb (struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order)
{
	u_char tn = cmd->device->id;
	u_char ln = cmd->device->lun;
	struct sym_tcb *tp = &np->target[tn];
	struct sym_lcb *lp = sym_lp(tp, ln);
	u_short tag = NO_TAG;
	SYM_QUEHEAD *qp;
	struct sym_ccb *cp = NULL;

	/*
	 *  Look for a free CCB
	 */
	if (sym_que_empty(&np->free_ccbq))
		sym_alloc_ccb(np);
	qp = sym_remque_head(&np->free_ccbq);
	if (!qp)
		goto out;
	cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);

	{
		/*
		 *  If we have been asked for a tagged command.
		 */
		if (tag_order) {
			/*
			 *  Debugging purpose.
			 */
#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
			if (lp->busy_itl != 0)
				goto out_free;
#endif
			/*
			 *  Allocate resources for tags if not yet.
			 */
			if (!lp->cb_tags) {
				sym_alloc_lcb_tags(np, tn, ln);
				if (!lp->cb_tags)
					goto out_free;
			}
			/*
			 *  Get a tag for this SCSI IO and set up
			 *  the CCB bus address for reselection,
			 *  and count it for this LUN.
			 *  Toggle reselect path to tagged.
			 */
			if (lp->busy_itlq < SYM_CONF_MAX_TASK) {
				tag = lp->cb_tags[lp->ia_tag];
				if (++lp->ia_tag == SYM_CONF_MAX_TASK)
					lp->ia_tag = 0;
				++lp->busy_itlq;
#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
				lp->itlq_tbl[tag] = cpu_to_scr(cp->ccb_ba);
				lp->head.resel_sa =
					cpu_to_scr(SCRIPTA_BA(np, resel_tag));
#endif
#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
				cp->tags_si = lp->tags_si;
				++lp->tags_sum[cp->tags_si];
				++lp->tags_since;
#endif
			}
			else
				goto out_free;
		}
		/*
		 *  This command will not be tagged.
		 *  If we already have either a tagged or untagged
		 *  one, refuse to overlap this untagged one.
		 */
		else {
			/*
			 *  Debugging purpose.
			 */
#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
			if (lp->busy_itl != 0 || lp->busy_itlq != 0)
				goto out_free;
#endif
			/*
			 *  Count this nexus for this LUN.
			 *  Set up the CCB bus address for reselection.
			 *  Toggle reselect path to untagged.
			 */
			++lp->busy_itl;
#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
			if (lp->busy_itl == 1) {
				lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba);
				lp->head.resel_sa =
				      cpu_to_scr(SCRIPTA_BA(np, resel_no_tag));
			}
			else
				goto out_free;
#endif
		}
	}
	/*
	 *  Put the CCB into the busy queue.
	 */
	sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	if (lp) {
		sym_remque(&cp->link2_ccbq);
		sym_insque_tail(&cp->link2_ccbq, &lp->waiting_ccbq);
	}
#endif
	cp->to_abort = 0;
	cp->odd_byte_adjustment = 0;
	cp->tag	   = tag;
	cp->order  = tag_order;
	cp->target = tn;
	cp->lun    = ln;

	if (DEBUG_FLAGS & DEBUG_TAGS) {
		sym_print_addr(cmd, "ccb @%p using tag %d.\n", cp, tag);
	}

out:
	return cp;
out_free:
	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
	return NULL;
}

/*
 *  Release one control block.
 *
 *  Returns the tag (if any) to the LUN's circular tag buffer,
 *  invalidates the chip's reselection data for this nexus, and puts
 *  the CCB back on the free queue.
 */
void sym_free_ccb (struct sym_hcb *np, struct sym_ccb *cp)
{
	struct sym_tcb *tp = &np->target[cp->target];
	struct sym_lcb *lp = sym_lp(tp, cp->lun);

	if (DEBUG_FLAGS & DEBUG_TAGS) {
		sym_print_addr(cp->cmd, "ccb @%p freeing tag %d.\n",
				cp, cp->tag);
	}

	/*
	 *  If LCB available,
	 */
	if (lp) {
		/*
		 *  If tagged, release the tag, set the reselect path
		 */
		if (cp->tag != NO_TAG) {
#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
			--lp->tags_sum[cp->tags_si];
#endif
			/*
			 *  Free the tag value.
			 */
			lp->cb_tags[lp->if_tag] = cp->tag;
			if (++lp->if_tag == SYM_CONF_MAX_TASK)
				lp->if_tag = 0;
			/*
			 *  Make the reselect path invalid,
			 *  and uncount this CCB.
			 */
			lp->itlq_tbl[cp->tag] = cpu_to_scr(np->bad_itlq_ba);
			--lp->busy_itlq;
		} else {	/*  Untagged */
			/*
			 *  Make the reselect path invalid,
			 *  and uncount this CCB.
			 */
			lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);
			--lp->busy_itl;
		}
		/*
		 *  If no JOB active, make the LUN reselect path invalid.
		 */
		if (lp->busy_itlq == 0 && lp->busy_itl == 0)
			lp->head.resel_sa =
				cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun));
	}

	/*
	 *  We do not queue more than 1 ccb per target
	 *  with negotiation at any time. If this ccb was
	 *  used for negotiation, clear this info in the tcb.
	 */
	if (cp == tp->nego_cp)
		tp->nego_cp = NULL;

#ifdef SYM_CONF_IARB_SUPPORT
	/*
	 *  If we just complete the last queued CCB,
	 *  clear this info that is no longer relevant.
	 */
	if (cp == np->last_cp)
		np->last_cp = 0;
#endif

	/*
	 *  Make this CCB available.
	 */
	cp->cmd = NULL;
	cp->host_status = HS_IDLE;
	sym_remque(&cp->link_ccbq);
	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);

#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	if (lp) {
		sym_remque(&cp->link2_ccbq);
		sym_insque_tail(&cp->link2_ccbq, &np->dummy_ccbq);
		if (cp->started) {
			if (cp->tag != NO_TAG)
				--lp->started_tags;
			else
				--lp->started_no_tag;
		}
	}
	cp->started = 0;
#endif
}

/*
 *  Allocate a CCB from memory and initialize its fixed part.
 */
static struct sym_ccb *sym_alloc_ccb(struct sym_hcb *np)
{
	struct sym_ccb *cp = NULL;
	int hcode;

	/*
	 *  Prevent from allocating more CCBs than we can
	 *  queue to the controller.
	 */
	if (np->actccbs >= SYM_CONF_MAX_START)
		return NULL;

	/*
	 *  Allocate memory for this CCB.
	 */
	cp = sym_calloc_dma(sizeof(struct sym_ccb), "CCB");
	if (!cp)
		goto out_free;

	/*
	 *  Count it.
	 */
	np->actccbs++;

	/*
	 *  Compute the bus address of this ccb.
	 */
	cp->ccb_ba = vtobus(cp);

	/*
	 *  Insert this ccb into the hashed list.
	 */
	hcode = CCB_HASH_CODE(cp->ccb_ba);
	cp->link_ccbh = np->ccbh[hcode];
	np->ccbh[hcode] = cp;

	/*
	 *  Initialize the start and restart actions.
	 */
	cp->phys.head.go.start   = cpu_to_scr(SCRIPTA_BA(np, idle));
	cp->phys.head.go.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l));

	/*
	 *  Initialize some other fields.
	 */
	cp->phys.smsg_ext.addr = cpu_to_scr(HCB_BA(np, msgin[2]));

	/*
	 *  Chain into free ccb queue.
	 */
	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);

	/*
	 *  Chain into optional lists.
	 */
#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	sym_insque_head(&cp->link2_ccbq, &np->dummy_ccbq);
#endif
	return cp;
out_free:
	/*
	 *  NOTE(review): the only jump here happens when cp is NULL,
	 *  so this guard is currently dead code — confirm before
	 *  relying on it for other failure paths.
	 */
	if (cp)
		sym_mfree_dma(cp, sizeof(*cp), "CCB");
	return NULL;
}

/*
 *  Look up a CCB from a DSA value.
 */
static struct sym_ccb *sym_ccb_from_dsa(struct sym_hcb *np, u32 dsa)
{
	int hcode;
	struct sym_ccb *cp;

	hcode = CCB_HASH_CODE(dsa);
	cp = np->ccbh[hcode];
	while (cp) {
		if (cp->ccb_ba == dsa)
			break;
		cp = cp->link_ccbh;
	}

	return cp;
}

/*
 *  Target control block initialisation.
 *  Nothing important to do at the moment.
 */
static void sym_init_tcb (struct sym_hcb *np, u_char tn)
{
#if 0	/*  Hmmm... this checking looks paranoid. */
	/*
	 *  Check some alignments required by the chip.
	 */
	assert (((offsetof(struct sym_reg, nc_sxfer) ^
		offsetof(struct sym_tcb, head.sval)) &3) == 0);
	assert (((offsetof(struct sym_reg, nc_scntl3) ^
		offsetof(struct sym_tcb, head.wval)) &3) == 0);
#endif
}

/*
 *  Lun control block allocation and initialization.
 *
 *  Allocates the per-LUN DMA-visible control block and, for
 *  LUNs > 0, the target's LUN bus-address table and pointer map
 *  (allocated lazily on first use).  Returns the new LCB, or NULL
 *  on allocation failure (any tables already allocated are kept
 *  in the TCB for later attempts).
 */
struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln)
{
	struct sym_tcb *tp = &np->target[tn];
	struct sym_lcb *lp = NULL;

	/*
	 *  Initialize the target control block if not yet.
	 */
	sym_init_tcb (np, tn);

	/*
	 *  Allocate the LCB bus address array.
	 *  Compute the bus address of this table.
	 */
	if (ln && !tp->luntbl) {
		int i;

		tp->luntbl = sym_calloc_dma(256, "LUNTBL");
		if (!tp->luntbl)
			goto fail;
		/* Point every entry at the bad-lun handler by default. */
		for (i = 0 ; i < 64 ; i++)
			tp->luntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa));
		tp->head.luntbl_sa = cpu_to_scr(vtobus(tp->luntbl));
	}

	/*
	 *  Allocate the table of pointers for LUN(s) > 0, if needed.
	 */
	if (ln && !tp->lunmp) {
		tp->lunmp = kcalloc(SYM_CONF_MAX_LUN, sizeof(struct sym_lcb *),
				GFP_ATOMIC);
		if (!tp->lunmp)
			goto fail;
	}

	/*
	 *  Allocate the lcb.
	 *  Make it available to the chip.
	 */
	lp = sym_calloc_dma(sizeof(struct sym_lcb), "LCB");
	if (!lp)
		goto fail;
	if (ln) {
		tp->lunmp[ln] = lp;
		tp->luntbl[ln] = cpu_to_scr(vtobus(lp));
	}
	else {
		tp->lun0p = lp;
		tp->head.lun0_sa = cpu_to_scr(vtobus(lp));
	}
	tp->nlcb++;

	/*
	 *  Let the itl task point to error handling.
	 */
	lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);

	/*
	 *  Set the reselect pattern to our default. :)
	 */
	lp->head.resel_sa = cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun));

	/*
	 *  Set user capabilities.
	 */
	lp->user_flags = tp->usrflags & (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);

#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	/*
	 *  Initialize device queueing.
	 */
	sym_que_init(&lp->waiting_ccbq);
	sym_que_init(&lp->started_ccbq);
	lp->started_max   = SYM_CONF_MAX_TASK;
	lp->started_limit = SYM_CONF_MAX_TASK;
#endif

fail:
	return lp;
}

/*
 *  Allocate LCB resources for tagged command queuing.
 *
 *  Allocates the task table and the tag allocation circular buffer
 *  as a pair — on failure of the second, the first is released so
 *  the LCB ends up with both or neither.
 */
static void sym_alloc_lcb_tags (struct sym_hcb *np, u_char tn, u_char ln)
{
	struct sym_tcb *tp = &np->target[tn];
	struct sym_lcb *lp = sym_lp(tp, ln);
	int i;

	/*
	 *  Allocate the task table and the tag allocation
	 *  circular buffer. We want both or none.
	 */
	lp->itlq_tbl = sym_calloc_dma(SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
	if (!lp->itlq_tbl)
		goto fail;
	lp->cb_tags = kcalloc(SYM_CONF_MAX_TASK, 1, GFP_ATOMIC);
	if (!lp->cb_tags) {
		sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
		lp->itlq_tbl = NULL;
		goto fail;
	}

	/*
	 *  Initialize the task table with invalid entries.
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
		lp->itlq_tbl[i] = cpu_to_scr(np->notask_ba);

	/*
	 *  Fill up the tag buffer with tag numbers.
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
		lp->cb_tags[i] = i;

	/*
	 *  Make the task table available to SCRIPTS,
	 *  And accept tagged commands now.
	 */
	lp->head.itlq_tbl_sa = cpu_to_scr(vtobus(lp->itlq_tbl));

	return;
fail:
	return;
}

/*
 *  Lun control block deallocation. Returns the number of valid remaining LCBs
 *  for the target.
 */
int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln)
{
	struct sym_tcb *tp = &np->target[tn];
	struct sym_lcb *lp = sym_lp(tp, ln);

	tp->nlcb--;

	if (ln) {
		if (!tp->nlcb) {
			/* Last LCB gone: release the whole LUN table. */
			kfree(tp->lunmp);
			sym_mfree_dma(tp->luntbl, 256, "LUNTBL");
			tp->lunmp = NULL;
			tp->luntbl = NULL;
			tp->head.luntbl_sa = cpu_to_scr(vtobus(np->badluntbl));
		} else {
			/* Point this LUN's entry back at the bad-lun handler. */
			tp->luntbl[ln] = cpu_to_scr(vtobus(&np->badlun_sa));
			tp->lunmp[ln] = NULL;
		}
	} else {
		tp->lun0p = NULL;
		tp->head.lun0_sa = cpu_to_scr(vtobus(&np->badlun_sa));
	}

	if (lp->itlq_tbl) {
		sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
		kfree(lp->cb_tags);
	}

	sym_mfree_dma(lp, sizeof(*lp), "LCB");

	return tp->nlcb;
}

/*
 *  Queue a SCSI IO to the controller.
 */
int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
{
	struct scsi_device *sdev = cmd->device;
	struct sym_tcb *tp;
	struct sym_lcb *lp;
	u_char	*msgptr;
	u_int   msglen;
	int can_disconnect;

	/*
	 *  Keep track of the IO in our CCB.
	 */
	cp->cmd = cmd;

	/*
	 *  Retrieve the target descriptor.
	 */
	tp = &np->target[cp->target];

	/*
	 *  Retrieve the lun descriptor.
	 */
	lp = sym_lp(tp, sdev->lun);

	/*
	 *  Build the IDENTIFY message first.  Disconnection is allowed
	 *  for tagged commands or when the LUN has it enabled.
	 */
	can_disconnect = (cp->tag != NO_TAG) ||
		(lp && (lp->curr_flags & SYM_DISC_ENABLED));

	msgptr = cp->scsi_smsg;
	msglen = 0;
	msgptr[msglen++] = IDENTIFY(can_disconnect, sdev->lun);

	/*
	 *  Build the tag message if present.
	 */
	if (cp->tag != NO_TAG) {
		u_char order = cp->order;

		switch(order) {
		case M_ORDERED_TAG:
			break;
		case M_HEAD_TAG:
			break;
		default:
			order = M_SIMPLE_TAG;
		}
#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
		/*
		 *  Avoid too much reordering of SCSI commands.
		 *  The algorithm tries to prevent completion of any
		 *  tagged command from being delayed against more
		 *  than 3 times the max number of queued commands.
		 */
		if (lp && lp->tags_since > 3*SYM_CONF_MAX_TAG) {
			lp->tags_si = !(lp->tags_si);
			if (lp->tags_sum[lp->tags_si]) {
				order = M_ORDERED_TAG;
				if ((DEBUG_FLAGS & DEBUG_TAGS)||sym_verbose>1) {
					sym_print_addr(cmd,
						"ordered tag forced.\n");
				}
			}
			lp->tags_since = 0;
		}
#endif
		msgptr[msglen++] = order;

		/*
		 *  For less than 128 tags, actual tags are numbered
		 *  1,3,5,..2*MAXTAGS+1,since we may have to deal
		 *  with devices that have problems with #TAG 0 or too
		 *  great #TAG numbers. For more tags (up to 256),
		 *  we use directly our tag number.
		 */
#if SYM_CONF_MAX_TASK > (512/4)
		msgptr[msglen++] = cp->tag;
#else
		msgptr[msglen++] = (cp->tag << 1) + 1;
#endif
	}

	/*
	 *  Build a negotiation message if needed.
	 *  (nego_status is filled by sym_prepare_nego())
	 *
	 *  Always negotiate on INQUIRY and REQUEST SENSE.
	 */
	cp->nego_status = 0;
	if ((tp->tgoal.check_nego ||
	     cmd->cmnd[0] == INQUIRY || cmd->cmnd[0] == REQUEST_SENSE) &&
	    !tp->nego_cp && lp) {
		msglen += sym_prepare_nego(np, cp, msgptr + msglen);
	}

	/*
	 *  Startqueue
	 */
	cp->phys.head.go.start   = cpu_to_scr(SCRIPTA_BA(np, select));
	cp->phys.head.go.restart = cpu_to_scr(SCRIPTA_BA(np, resel_dsa));

	/*
	 *  select
	 */
	cp->phys.select.sel_id		= cp->target;
	cp->phys.select.sel_scntl3	= tp->head.wval;
	cp->phys.select.sel_sxfer	= tp->head.sval;
	cp->phys.select.sel_scntl4	= tp->head.uval;

	/*
	 *  message
	 */
	cp->phys.smsg.addr	= CCB_BA(cp, scsi_smsg);
	cp->phys.smsg.size	= cpu_to_scr(msglen);

	/*
	 *  status
	 */
	cp->host_xflags		= 0;
	cp->host_status		= cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
	cp->ssss_status		= S_ILLEGAL;
	cp->xerr_status		= 0;
	cp->host_flags		= 0;
	cp->extra_bytes		= 0;

	/*
	 *  extreme data pointer.
	 *  shall be positive, so -1 is lower than lowest.:)
	 */
	cp->ext_sg  = -1;
	cp->ext_ofs = 0;

	/*
	 *  Build the CDB and DATA descriptor block
	 *  and start the IO.
	 */
	return sym_setup_data_and_start(np, cmd, cp);
}

/*
 *  Reset a SCSI target (all LUNs of this target).
 *
 *  Marks the target for reset and signals the SCRIPTS processor
 *  (SIGP|SEM) to synchronize with the C code.  Returns -1 for an
 *  invalid target (ourselves or out of range), 0 otherwise.
 */
int sym_reset_scsi_target(struct sym_hcb *np, int target)
{
	struct sym_tcb *tp;

	if (target == np->myaddr || (u_int)target >= SYM_CONF_MAX_TARGET)
		return -1;

	tp = &np->target[target];
	tp->to_reset = 1;

	np->istat_sem = SEM;
	OUTB(np, nc_istat, SIGP|SEM);

	return 0;
}

/*
 *  Abort a SCSI IO.
 */
static int sym_abort_ccb(struct sym_hcb *np, struct sym_ccb *cp, int timed_out)
{
	/*
	 *  Check that the IO is active.
	 */
	if (!cp || !cp->host_status || cp->host_status == HS_WAIT)
		return -1;

	/*
	 *  If a previous abort didn't succeed in time,
	 *  perform a BUS reset.
	 */
	if (cp->to_abort) {
		sym_reset_scsi_bus(np, 1);
		return 0;
	}

	/*
	 *  Mark the CCB for abort and allow time for.
	 */
	cp->to_abort = timed_out ? 2 : 1;

	/*
	 *  Tell the SCRIPTS processor to stop and synchronize with us.
	 */
	np->istat_sem = SEM;
	OUTB(np, nc_istat, SIGP|SEM);
	return 0;
}

/*
 *  Abort the SCSI IO attached to 'cmd', if it is still queued.
 */
int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, int timed_out)
{
	struct sym_ccb *cp;
	SYM_QUEHEAD *qp;

	/*
	 *  Look up our CCB control block.
	 */
	cp = NULL;
	FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
		struct sym_ccb *cp2 = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		if (cp2->cmd == cmd) {
			cp = cp2;
			break;
		}
	}

	return sym_abort_ccb(np, cp, timed_out);
}

/*
 *  Complete execution of a SCSI command with extended
 *  error, SCSI status error, or having been auto-sensed.
 *
 *  The SCRIPTS processor is not running there, so we
 *  can safely access IO registers and remove JOBs from
 *  the START queue.
 *  SCRATCHA is assumed to have been loaded with STARTPOS
 *  before the SCRIPTS called the C code.
 */
void sym_complete_error(struct sym_hcb *np, struct sym_ccb *cp)
{
	struct scsi_device *sdev;
	struct scsi_cmnd *cmd;
	struct sym_tcb *tp;
	struct sym_lcb *lp;
	int resid;
	int i;

	/*
	 *  Paranoid check. :)
	 */
	if (!cp || !cp->cmd)
		return;

	cmd = cp->cmd;
	sdev = cmd->device;
	if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_RESULT)) {
		dev_info(&sdev->sdev_gendev, "CCB=%p STAT=%x/%x/%x\n", cp,
			cp->host_status, cp->ssss_status, cp->host_flags);
	}

	/*
	 *  Get target and lun pointers.
	 */
	tp = &np->target[cp->target];
	lp = sym_lp(tp, sdev->lun);

	/*
	 *  Check for extended errors.
	 */
	if (cp->xerr_status) {
		if (sym_verbose)
			sym_print_xerr(cmd, cp->xerr_status);
		if (cp->host_status == HS_COMPLETE)
			cp->host_status = HS_COMP_ERR;
	}

	/*
	 *  Calculate the residual.
	 */
	resid = sym_compute_residual(np, cp);

	if (!SYM_SETUP_RESIDUAL_SUPPORT) {/* If user does not want residuals */
		resid  = 0;		 /* throw them away. :)		    */
		cp->sv_resid = 0;
	}
#ifdef DEBUG_2_0_X
	if (resid)
		printf("XXXX RESID= %d - 0x%x\n", resid, resid);
#endif

	/*
	 *  Dequeue all queued CCBs for that device
	 *  not yet started by SCRIPTS.
	 */
	i = (INL(np, nc_scratcha) - np->squeue_ba) / 4;
	i = sym_dequeue_from_squeue(np, i, cp->target, sdev->lun, -1);

	/*
	 *  Restart the SCRIPTS processor.
	 */
	OUTL_DSP(np, SCRIPTA_BA(np, start));

#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	if (cp->host_status == HS_COMPLETE &&
	    cp->ssss_status == S_QUEUE_FULL) {
		if (!lp || lp->started_tags - i < 2)
			goto weirdness;
		/*
		 *  Decrease queue depth as needed.
		 */
		lp->started_max = lp->started_tags - i - 1;
		lp->num_sgood = 0;

		if (sym_verbose >= 2) {
			sym_print_addr(cmd, " queue depth is now %d\n",
					lp->started_max);
		}

		/*
		 *  Repair the CCB.
		 */
		cp->host_status = HS_BUSY;
		cp->ssss_status = S_ILLEGAL;

		/*
		 *  Let's requeue it to device.
		 */
		sym_set_cam_status(cmd, DID_SOFT_ERROR);
		goto finish;
	}
weirdness:
#endif
	/*
	 *  Build result in CAM ccb.
	 */
	sym_set_cam_result_error(np, cp, resid);

#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
finish:
#endif
	/*
	 *  Add this one to the COMP queue.
	 */
	sym_remque(&cp->link_ccbq);
	sym_insque_head(&cp->link_ccbq, &np->comp_ccbq);

	/*
	 *  Complete all those commands with either error
	 *  or requeue condition.
	 */
	sym_flush_comp_queue(np, 0);

#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	/*
	 *  Do not start more than 1 command after an error.
	 */
	sym_start_next_ccbs(np, lp, 1);
#endif
}

/*
 *  Complete execution of a successful SCSI command.
 *
 *  Only successful commands go to the DONE queue,
 *  since we need to have the SCRIPTS processor
 *  stopped on any error condition.
 *  The SCRIPTS processor is running while we are
 *  completing successful commands.
 */
void sym_complete_ok (struct sym_hcb *np, struct sym_ccb *cp)
{
	struct sym_tcb *tp;
	struct sym_lcb *lp;
	struct scsi_cmnd *cmd;
	int resid;

	/*
	 *  Paranoid check. :)
	 */
	if (!cp || !cp->cmd)
		return;
	assert (cp->host_status == HS_COMPLETE);

	/*
	 *  Get user command.
	 */
	cmd = cp->cmd;

	/*
	 *  Get target and lun pointers.
	 */
	tp = &np->target[cp->target];
	lp = sym_lp(tp, cp->lun);

	/*
	 *  If all data have been transferred, given than no
	 *  extended error did occur, there is no residual.
	 */
	resid = 0;
	if (cp->phys.head.lastp != cp->goalp)
		resid = sym_compute_residual(np, cp);

	/*
	 *  Wrong transfer residuals may be worse than just always
	 *  returning zero. User can disable this feature in
	 *  sym53c8xx.h. Residual support is enabled by default.
	 */
	if (!SYM_SETUP_RESIDUAL_SUPPORT)
		resid  = 0;
#ifdef DEBUG_2_0_X
	if (resid)
		printf("XXXX RESID= %d - 0x%x\n", resid, resid);
#endif

	/*
	 *  Build result in CAM ccb.
	 */
	sym_set_cam_result_ok(cp, cmd, resid);

#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	/*
	 *  If max number of started ccbs had been reduced,
	 *  increase it if 200 good status received.
	 */
	if (lp && lp->started_max < lp->started_limit) {
		++lp->num_sgood;
		if (lp->num_sgood >= 200) {
			lp->num_sgood = 0;
			++lp->started_max;
			if (sym_verbose >= 2) {
				sym_print_addr(cmd, " queue depth is now %d\n",
					       lp->started_max);
			}
		}
	}
#endif

	/*
	 *  Free our CCB.
	 */
	sym_free_ccb (np, cp);

#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	/*
	 *  Requeue a couple of awaiting scsi commands.
	 */
	if (!sym_que_empty(&lp->waiting_ccbq))
		sym_start_next_ccbs(np, lp, 2);
#endif

	/*
	 *  Complete the command.
	 */
	sym_xpt_done(np, cmd);
}

/*
 *  Soft-attach the controller.
 */
int sym_hcb_attach(struct Scsi_Host *shost, struct sym_fw *fw, struct sym_nvram *nvram)
{
	struct sym_hcb *np = sym_get_hcb(shost);
	int i;

	/*
	 *  Get some info about the firmware.
	 */
	np->scripta_sz	 = fw->a_size;
	np->scriptb_sz	 = fw->b_size;
	np->scriptz_sz	 = fw->z_size;
	np->fw_setup	 = fw->setup;
	np->fw_patch	 = fw->patch;
	np->fw_name	 = fw->name;

	/*
	 *  Save setting of some IO registers, so we will
	 *  be able to probe specific implementations.
	 */
	sym_save_initial_setting (np);

	/*
	 *  Reset the chip now, since it has been reported
	 *  that SCSI clock calibration may not work properly
	 *  if the chip is currently active.
	 */
	sym_chip_reset(np);

	/*
	 *  Prepare controller and devices settings, according
	 *  to chip features, user set-up and driver set-up.
	 */
	sym_prepare_setting(shost, np, nvram);

	/*
	 *  Check the PCI clock frequency.
	 *  Must be performed after prepare_setting since it destroys
	 *  STEST1 that is used to probe for the clock doubler.
	 */
	i = sym_getpciclock(np);
	if (i > 37000 && !(np->features & FE_66MHZ))
		printf("%s: PCI BUS clock seems too high: %u KHz.\n",
			sym_name(np), i);

	/*
	 *  Allocate the start queue.
	 */
	np->squeue = sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"SQUEUE");
	if (!np->squeue)
		goto attach_failed;
	np->squeue_ba = vtobus(np->squeue);

	/*
	 *  Allocate the done queue.
	 */
	np->dqueue = sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"DQUEUE");
	if (!np->dqueue)
		goto attach_failed;
	np->dqueue_ba = vtobus(np->dqueue);

	/*
	 *  Allocate the target bus address array.
	 */
	np->targtbl = sym_calloc_dma(256, "TARGTBL");
	if (!np->targtbl)
		goto attach_failed;
	np->targtbl_ba = vtobus(np->targtbl);

	/*
	 *  Allocate SCRIPTS areas.
	 */
	np->scripta0 = sym_calloc_dma(np->scripta_sz, "SCRIPTA0");
	np->scriptb0 = sym_calloc_dma(np->scriptb_sz, "SCRIPTB0");
	np->scriptz0 = sym_calloc_dma(np->scriptz_sz, "SCRIPTZ0");
	if (!np->scripta0 || !np->scriptb0 || !np->scriptz0)
		goto attach_failed;

	/*
	 *  Allocate the array of lists of CCBs hashed by DSA.
	 */
	np->ccbh = kcalloc(CCB_HASH_SIZE, sizeof(struct sym_ccb **), GFP_KERNEL);
	if (!np->ccbh)
		goto attach_failed;

	/*
	 *  Initialize the CCB free and busy queues.
	 */
	sym_que_init(&np->free_ccbq);
	sym_que_init(&np->busy_ccbq);
	sym_que_init(&np->comp_ccbq);

	/*
	 *  Initialization for optional handling
	 *  of device queueing.
	 */
#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	sym_que_init(&np->dummy_ccbq);
#endif
	/*
	 *  Allocate some CCB. We need at least ONE.
	 */
	if (!sym_alloc_ccb(np))
		goto attach_failed;

	/*
	 *  Calculate BUS addresses where we are going
	 *  to load the SCRIPTS.
	 */
	np->scripta_ba	= vtobus(np->scripta0);
	np->scriptb_ba	= vtobus(np->scriptb0);
	np->scriptz_ba	= vtobus(np->scriptz0);

	if (np->ram_ba) {
		np->scripta_ba = np->ram_ba;
		if (np->features & FE_RAM8K) {
			np->scriptb_ba = np->scripta_ba + 4096;
#if 0	/* May get useful for 64 BIT PCI addressing */
			np->scr_ram_seg = cpu_to_scr(np->scripta_ba >> 32);
#endif
		}
	}

	/*
	 *  Copy scripts to controller instance.
	 */
	memcpy(np->scripta0, fw->a_base, np->scripta_sz);
	memcpy(np->scriptb0, fw->b_base, np->scriptb_sz);
	memcpy(np->scriptz0, fw->z_base, np->scriptz_sz);

	/*
	 *  Setup variable parts in scripts and compute
	 *  scripts bus addresses used from the C code.
	 */
	np->fw_setup(np, fw);

	/*
	 *  Bind SCRIPTS with physical addresses usable by the
	 *  SCRIPTS processor (as seen from the BUS = BUS addresses).
	 */
	sym_fw_bind_script(np, (u32 *) np->scripta0, np->scripta_sz);
	sym_fw_bind_script(np, (u32 *) np->scriptb0, np->scriptb_sz);
	sym_fw_bind_script(np, (u32 *) np->scriptz0, np->scriptz_sz);

#ifdef SYM_CONF_IARB_SUPPORT
	/*
	 *    If user wants IARB to be set when we win arbitration
	 *    and have other jobs, compute the max number of consecutive
	 *    settings of IARB hints before we leave devices a chance to
	 *    arbitrate for reselection.
	 */
#ifdef	SYM_SETUP_IARB_MAX
	np->iarb_max = SYM_SETUP_IARB_MAX;
#else
	np->iarb_max = 4;
#endif
#endif

	/*
	 *  Prepare the idle and invalid task actions.
	 */
	np->idletask.start	= cpu_to_scr(SCRIPTA_BA(np, idle));
	np->idletask.restart	= cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l));
	np->idletask_ba		= vtobus(&np->idletask);

	np->notask.start	= cpu_to_scr(SCRIPTA_BA(np, idle));
	np->notask.restart	= cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l));
	np->notask_ba		= vtobus(&np->notask);

	np->bad_itl.start	= cpu_to_scr(SCRIPTA_BA(np, idle));
	np->bad_itl.restart	= cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l));
	np->bad_itl_ba		= vtobus(&np->bad_itl);

	np->bad_itlq.start	= cpu_to_scr(SCRIPTA_BA(np, idle));
	np->bad_itlq.restart	= cpu_to_scr(SCRIPTB_BA(np,bad_i_t_l_q));
	np->bad_itlq_ba		= vtobus(&np->bad_itlq);

	/*
	 *  Allocate and prepare the lun JUMP table that is used
	 *  for a target prior the probing of devices (bad lun table).
	 *  A private table will be allocated for the target on the
	 *  first INQUIRY response received.
*/ np->badluntbl = sym_calloc_dma(256, "BADLUNTBL"); if (!np->badluntbl) goto attach_failed; np->badlun_sa = cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun)); for (i = 0 ; i < 64 ; i++) /* 64 luns/target, no less */ np->badluntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa)); /* * Prepare the bus address array that contains the bus * address of each target control block. * For now, assume all logical units are wrong. :) */ for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { np->targtbl[i] = cpu_to_scr(vtobus(&np->target[i])); np->target[i].head.luntbl_sa = cpu_to_scr(vtobus(np->badluntbl)); np->target[i].head.lun0_sa = cpu_to_scr(vtobus(&np->badlun_sa)); } /* * Now check the cache handling of the pci chipset. */ if (sym_snooptest (np)) { printf("%s: CACHE INCORRECTLY CONFIGURED.\n", sym_name(np)); goto attach_failed; } /* * Sigh! we are done. */ return 0; attach_failed: return -ENXIO; } /* * Free everything that has been allocated for this device. */ void sym_hcb_free(struct sym_hcb *np) { SYM_QUEHEAD *qp; struct sym_ccb *cp; struct sym_tcb *tp; int target; if (np->scriptz0) sym_mfree_dma(np->scriptz0, np->scriptz_sz, "SCRIPTZ0"); if (np->scriptb0) sym_mfree_dma(np->scriptb0, np->scriptb_sz, "SCRIPTB0"); if (np->scripta0) sym_mfree_dma(np->scripta0, np->scripta_sz, "SCRIPTA0"); if (np->squeue) sym_mfree_dma(np->squeue, sizeof(u32)*(MAX_QUEUE*2), "SQUEUE"); if (np->dqueue) sym_mfree_dma(np->dqueue, sizeof(u32)*(MAX_QUEUE*2), "DQUEUE"); if (np->actccbs) { while ((qp = sym_remque_head(&np->free_ccbq)) != NULL) { cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_mfree_dma(cp, sizeof(*cp), "CCB"); } } kfree(np->ccbh); if (np->badluntbl) sym_mfree_dma(np->badluntbl, 256,"BADLUNTBL"); for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) { tp = &np->target[target]; if (tp->luntbl) sym_mfree_dma(tp->luntbl, 256, "LUNTBL"); #if SYM_CONF_MAX_LUN > 1 kfree(tp->lunmp); #endif } if (np->targtbl) sym_mfree_dma(np->targtbl, 256, "TARGTBL"); }
gpl-2.0
Lloir/elitekernel_oxp_kk
drivers/staging/vt6656/michael.c
8620
4387
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * * File: michael.cpp * * Purpose: The implementation of LIST data structure. * * Author: Kyle Hsu * * Date: Sep 4, 2002 * * Functions: * s_dwGetUINT32 - Convert from BYTE[] to DWORD in a portable way * s_vPutUINT32 - Convert from DWORD to BYTE[] in a portable way * s_vClear - Reset the state to the empty message. * s_vSetKey - Set the key. * MIC_vInit - Set the key. * s_vAppendByte - Append the byte to our word-sized buffer. * MIC_vAppend - call s_vAppendByte. * MIC_vGetMIC - Append the minimum padding and call s_vAppendByte. * * Revision History: * */ #include "tmacro.h" #include "michael.h" /*--------------------- Static Definitions -------------------------*/ /*--------------------- Static Variables --------------------------*/ /*--------------------- Static Functions --------------------------*/ /* * static DWORD s_dwGetUINT32(BYTE * p); Get DWORD from * 4 bytes LSByte first * static void s_vPutUINT32(BYTE* p, DWORD val); Put DWORD into * 4 bytes LSByte first */ static void s_vClear(void); /* Clear the internal message, * resets the object to the * state just after construction. 
*/ static void s_vSetKey(DWORD dwK0, DWORD dwK1); static void s_vAppendByte(BYTE b); /* Add a single byte to the internal * message */ /*--------------------- Export Variables --------------------------*/ static DWORD L, R; /* Current state */ static DWORD K0, K1; /* Key */ static DWORD M; /* Message accumulator (single word) */ static unsigned int nBytesInM; /* # bytes in M */ /*--------------------- Export Functions --------------------------*/ /* static DWORD s_dwGetUINT32 (BYTE * p) // Convert from BYTE[] to DWORD in a portable way { DWORD res = 0; unsigned int i; for (i = 0; i < 4; i++) res |= (*p++) << (8*i); return res; } static void s_vPutUINT32(BYTE *p, DWORD val) // Convert from DWORD to BYTE[] in a portable way { unsigned int i; for (i = 0; i < 4; i++) { *p++ = (BYTE) (val & 0xff); val >>= 8; } } */ static void s_vClear(void) { /* Reset the state to the empty message. */ L = K0; R = K1; nBytesInM = 0; M = 0; } static void s_vSetKey(DWORD dwK0, DWORD dwK1) { /* Set the key */ K0 = dwK0; K1 = dwK1; /* and reset the message */ s_vClear(); } static void s_vAppendByte(BYTE b) { /* Append the byte to our word-sized buffer */ M |= b << (8*nBytesInM); nBytesInM++; /* Process the word if it is full. */ if (nBytesInM >= 4) { L ^= M; R ^= ROL32(L, 17); L += R; R ^= ((L & 0xff00ff00) >> 8) | ((L & 0x00ff00ff) << 8); L += R; R ^= ROL32(L, 3); L += R; R ^= ROR32(L, 2); L += R; /* Clear the buffer */ M = 0; nBytesInM = 0; } } void MIC_vInit(DWORD dwK0, DWORD dwK1) { /* Set the key */ s_vSetKey(dwK0, dwK1); } void MIC_vUnInit(void) { /* Wipe the key material */ K0 = 0; K1 = 0; /* And the other fields as well. */ /* Note that this sets (L,R) to (K0,K1) which is just fine. 
*/ s_vClear(); } void MIC_vAppend(PBYTE src, unsigned int nBytes) { /* This is simple */ while (nBytes > 0) { s_vAppendByte(*src++); nBytes--; } } void MIC_vGetMIC(PDWORD pdwL, PDWORD pdwR) { /* Append the minimum padding */ s_vAppendByte(0x5a); s_vAppendByte(0); s_vAppendByte(0); s_vAppendByte(0); s_vAppendByte(0); /* and then zeroes until the length is a multiple of 4 */ while (nBytesInM != 0) s_vAppendByte(0); /* The s_vAppendByte function has already computed the result. */ *pdwL = L; *pdwR = R; /* Reset to the empty message. */ s_vClear(); }
gpl-2.0
MassStash/htc_m8_kernel_sense_4.4.4
drivers/media/video/cx23885/netup-init.c
9900
2828
/* * netup-init.c * * NetUP Dual DVB-S2 CI driver * * Copyright (C) 2009 NetUP Inc. * Copyright (C) 2009 Igor M. Liplianin <liplianin@netup.ru> * Copyright (C) 2009 Abylay Ospan <aospan@netup.ru> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "cx23885.h" static void i2c_av_write(struct i2c_adapter *i2c, u16 reg, u8 val) { int ret; u8 buf[3]; struct i2c_msg msg = { .addr = 0x88 >> 1, .flags = 0, .buf = buf, .len = 3 }; buf[0] = reg >> 8; buf[1] = reg & 0xff; buf[2] = val; ret = i2c_transfer(i2c, &msg, 1); if (ret != 1) printk(KERN_ERR "%s: i2c write error!\n", __func__); } static void i2c_av_write4(struct i2c_adapter *i2c, u16 reg, u32 val) { int ret; u8 buf[6]; struct i2c_msg msg = { .addr = 0x88 >> 1, .flags = 0, .buf = buf, .len = 6 }; buf[0] = reg >> 8; buf[1] = reg & 0xff; buf[2] = val & 0xff; buf[3] = (val >> 8) & 0xff; buf[4] = (val >> 16) & 0xff; buf[5] = val >> 24; ret = i2c_transfer(i2c, &msg, 1); if (ret != 1) printk(KERN_ERR "%s: i2c write error!\n", __func__); } static u8 i2c_av_read(struct i2c_adapter *i2c, u16 reg) { int ret; u8 buf[2]; struct i2c_msg msg = { .addr = 0x88 >> 1, .flags = 0, .buf = buf, .len = 2 }; buf[0] = reg >> 8; buf[1] = reg & 0xff; ret = i2c_transfer(i2c, &msg, 1); if (ret != 1) printk(KERN_ERR "%s: i2c write error!\n", __func__); msg.flags = I2C_M_RD; msg.len = 1; ret = i2c_transfer(i2c, 
&msg, 1); if (ret != 1) printk(KERN_ERR "%s: i2c read error!\n", __func__); return buf[0]; } static void i2c_av_and_or(struct i2c_adapter *i2c, u16 reg, unsigned and_mask, u8 or_value) { i2c_av_write(i2c, reg, (i2c_av_read(i2c, reg) & and_mask) | or_value); } /* set 27MHz on AUX_CLK */ void netup_initialize(struct cx23885_dev *dev) { struct cx23885_i2c *i2c_bus = &dev->i2c_bus[2]; struct i2c_adapter *i2c = &i2c_bus->i2c_adap; /* Stop microcontroller */ i2c_av_and_or(i2c, 0x803, ~0x10, 0x00); /* Aux PLL frac for 27 MHz */ i2c_av_write4(i2c, 0x114, 0xea0eb3); /* Aux PLL int for 27 MHz */ i2c_av_write4(i2c, 0x110, 0x090319); /* start microcontroller */ i2c_av_and_or(i2c, 0x803, ~0x10, 0x10); }
gpl-2.0
captivo/linux-captivo
fs/nfs/objlayout/pnfs_osd_xdr_cli.c
10924
11268
/* * Object-Based pNFS Layout XDR layer * * Copyright (C) 2007 Panasas Inc. [year of first publication] * All rights reserved. * * Benny Halevy <bhalevy@panasas.com> * Boaz Harrosh <bharrosh@panasas.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * See the file COPYING included with this distribution for more details. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the Panasas company nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <linux/pnfs_osd_xdr.h> #define NFSDBG_FACILITY NFSDBG_PNFS_LD /* * The following implementation is based on RFC5664 */ /* * struct pnfs_osd_objid { * struct nfs4_deviceid oid_device_id; * u64 oid_partition_id; * u64 oid_object_id; * }; // xdr size 32 bytes */ static __be32 * _osd_xdr_decode_objid(__be32 *p, struct pnfs_osd_objid *objid) { p = xdr_decode_opaque_fixed(p, objid->oid_device_id.data, sizeof(objid->oid_device_id.data)); p = xdr_decode_hyper(p, &objid->oid_partition_id); p = xdr_decode_hyper(p, &objid->oid_object_id); return p; } /* * struct pnfs_osd_opaque_cred { * u32 cred_len; * void *cred; * }; // xdr size [variable] * The return pointers are from the xdr buffer */ static int _osd_xdr_decode_opaque_cred(struct pnfs_osd_opaque_cred *opaque_cred, struct xdr_stream *xdr) { __be32 *p = xdr_inline_decode(xdr, 1); if (!p) return -EINVAL; opaque_cred->cred_len = be32_to_cpu(*p++); p = xdr_inline_decode(xdr, opaque_cred->cred_len); if (!p) return -EINVAL; opaque_cred->cred = p; return 0; } /* * struct pnfs_osd_object_cred { * struct pnfs_osd_objid oc_object_id; * u32 oc_osd_version; * u32 oc_cap_key_sec; * struct pnfs_osd_opaque_cred oc_cap_key * struct pnfs_osd_opaque_cred oc_cap; * }; // xdr size 32 + 4 + 4 + [variable] + [variable] */ static int _osd_xdr_decode_object_cred(struct pnfs_osd_object_cred *comp, struct xdr_stream *xdr) { __be32 *p = xdr_inline_decode(xdr, 32 + 4 + 4); int ret; if (!p) return -EIO; p = _osd_xdr_decode_objid(p, &comp->oc_object_id); comp->oc_osd_version = be32_to_cpup(p++); comp->oc_cap_key_sec = be32_to_cpup(p); ret = _osd_xdr_decode_opaque_cred(&comp->oc_cap_key, xdr); if (unlikely(ret)) return ret; ret = _osd_xdr_decode_opaque_cred(&comp->oc_cap, xdr); return ret; } /* * struct pnfs_osd_data_map { * u32 odm_num_comps; * u64 odm_stripe_unit; * u32 odm_group_width; * u32 odm_group_depth; * u32 odm_mirror_cnt; * u32 odm_raid_algorithm; * }; // xdr size 4 + 8 + 4 + 4 + 4 + 4 */ static inline int 
_osd_data_map_xdr_sz(void) { return 4 + 8 + 4 + 4 + 4 + 4; } static __be32 * _osd_xdr_decode_data_map(__be32 *p, struct pnfs_osd_data_map *data_map) { data_map->odm_num_comps = be32_to_cpup(p++); p = xdr_decode_hyper(p, &data_map->odm_stripe_unit); data_map->odm_group_width = be32_to_cpup(p++); data_map->odm_group_depth = be32_to_cpup(p++); data_map->odm_mirror_cnt = be32_to_cpup(p++); data_map->odm_raid_algorithm = be32_to_cpup(p++); dprintk("%s: odm_num_comps=%u odm_stripe_unit=%llu odm_group_width=%u " "odm_group_depth=%u odm_mirror_cnt=%u odm_raid_algorithm=%u\n", __func__, data_map->odm_num_comps, (unsigned long long)data_map->odm_stripe_unit, data_map->odm_group_width, data_map->odm_group_depth, data_map->odm_mirror_cnt, data_map->odm_raid_algorithm); return p; } int pnfs_osd_xdr_decode_layout_map(struct pnfs_osd_layout *layout, struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr) { __be32 *p; memset(iter, 0, sizeof(*iter)); p = xdr_inline_decode(xdr, _osd_data_map_xdr_sz() + 4 + 4); if (unlikely(!p)) return -EINVAL; p = _osd_xdr_decode_data_map(p, &layout->olo_map); layout->olo_comps_index = be32_to_cpup(p++); layout->olo_num_comps = be32_to_cpup(p++); dprintk("%s: olo_comps_index=%d olo_num_comps=%d\n", __func__, layout->olo_comps_index, layout->olo_num_comps); iter->total_comps = layout->olo_num_comps; return 0; } bool pnfs_osd_xdr_decode_layout_comp(struct pnfs_osd_object_cred *comp, struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr, int *err) { BUG_ON(iter->decoded_comps > iter->total_comps); if (iter->decoded_comps == iter->total_comps) return false; *err = _osd_xdr_decode_object_cred(comp, xdr); if (unlikely(*err)) { dprintk("%s: _osd_xdr_decode_object_cred=>%d decoded_comps=%d " "total_comps=%d\n", __func__, *err, iter->decoded_comps, iter->total_comps); return false; /* stop the loop */ } dprintk("%s: dev(%llx:%llx) par=0x%llx obj=0x%llx " "key_len=%u cap_len=%u\n", __func__, 
_DEVID_LO(&comp->oc_object_id.oid_device_id), _DEVID_HI(&comp->oc_object_id.oid_device_id), comp->oc_object_id.oid_partition_id, comp->oc_object_id.oid_object_id, comp->oc_cap_key.cred_len, comp->oc_cap.cred_len); iter->decoded_comps++; return true; } /* * Get Device Information Decoding * * Note: since Device Information is currently done synchronously, all * variable strings fields are left inside the rpc buffer and are only * pointed to by the pnfs_osd_deviceaddr members. So the read buffer * should not be freed while the returned information is in use. */ /* *struct nfs4_string { * unsigned int len; * char *data; *}; // size [variable] * NOTE: Returned string points to inside the XDR buffer */ static __be32 * __read_u8_opaque(__be32 *p, struct nfs4_string *str) { str->len = be32_to_cpup(p++); str->data = (char *)p; p += XDR_QUADLEN(str->len); return p; } /* * struct pnfs_osd_targetid { * u32 oti_type; * struct nfs4_string oti_scsi_device_id; * };// size 4 + [variable] */ static __be32 * __read_targetid(__be32 *p, struct pnfs_osd_targetid* targetid) { u32 oti_type; oti_type = be32_to_cpup(p++); targetid->oti_type = oti_type; switch (oti_type) { case OBJ_TARGET_SCSI_NAME: case OBJ_TARGET_SCSI_DEVICE_ID: p = __read_u8_opaque(p, &targetid->oti_scsi_device_id); } return p; } /* * struct pnfs_osd_net_addr { * struct nfs4_string r_netid; * struct nfs4_string r_addr; * }; */ static __be32 * __read_net_addr(__be32 *p, struct pnfs_osd_net_addr* netaddr) { p = __read_u8_opaque(p, &netaddr->r_netid); p = __read_u8_opaque(p, &netaddr->r_addr); return p; } /* * struct pnfs_osd_targetaddr { * u32 ota_available; * struct pnfs_osd_net_addr ota_netaddr; * }; */ static __be32 * __read_targetaddr(__be32 *p, struct pnfs_osd_targetaddr *targetaddr) { u32 ota_available; ota_available = be32_to_cpup(p++); targetaddr->ota_available = ota_available; if (ota_available) p = __read_net_addr(p, &targetaddr->ota_netaddr); return p; } /* * struct pnfs_osd_deviceaddr { * struct 
pnfs_osd_targetid oda_targetid; * struct pnfs_osd_targetaddr oda_targetaddr; * u8 oda_lun[8]; * struct nfs4_string oda_systemid; * struct pnfs_osd_object_cred oda_root_obj_cred; * struct nfs4_string oda_osdname; * }; */ /* We need this version for the pnfs_osd_xdr_decode_deviceaddr which does * not have an xdr_stream */ static __be32 * __read_opaque_cred(__be32 *p, struct pnfs_osd_opaque_cred *opaque_cred) { opaque_cred->cred_len = be32_to_cpu(*p++); opaque_cred->cred = p; return p + XDR_QUADLEN(opaque_cred->cred_len); } static __be32 * __read_object_cred(__be32 *p, struct pnfs_osd_object_cred *comp) { p = _osd_xdr_decode_objid(p, &comp->oc_object_id); comp->oc_osd_version = be32_to_cpup(p++); comp->oc_cap_key_sec = be32_to_cpup(p++); p = __read_opaque_cred(p, &comp->oc_cap_key); p = __read_opaque_cred(p, &comp->oc_cap); return p; } void pnfs_osd_xdr_decode_deviceaddr( struct pnfs_osd_deviceaddr *deviceaddr, __be32 *p) { p = __read_targetid(p, &deviceaddr->oda_targetid); p = __read_targetaddr(p, &deviceaddr->oda_targetaddr); p = xdr_decode_opaque_fixed(p, deviceaddr->oda_lun, sizeof(deviceaddr->oda_lun)); p = __read_u8_opaque(p, &deviceaddr->oda_systemid); p = __read_object_cred(p, &deviceaddr->oda_root_obj_cred); p = __read_u8_opaque(p, &deviceaddr->oda_osdname); /* libosd likes this terminated in dbg. 
It's last, so no problems */ deviceaddr->oda_osdname.data[deviceaddr->oda_osdname.len] = 0; } /* * struct pnfs_osd_layoutupdate { * u32 dsu_valid; * s64 dsu_delta; * u32 olu_ioerr_flag; * }; xdr size 4 + 8 + 4 */ int pnfs_osd_xdr_encode_layoutupdate(struct xdr_stream *xdr, struct pnfs_osd_layoutupdate *lou) { __be32 *p = xdr_reserve_space(xdr, 4 + 8 + 4); if (!p) return -E2BIG; *p++ = cpu_to_be32(lou->dsu_valid); if (lou->dsu_valid) p = xdr_encode_hyper(p, lou->dsu_delta); *p++ = cpu_to_be32(lou->olu_ioerr_flag); return 0; } /* * struct pnfs_osd_objid { * struct nfs4_deviceid oid_device_id; * u64 oid_partition_id; * u64 oid_object_id; * }; // xdr size 32 bytes */ static inline __be32 * pnfs_osd_xdr_encode_objid(__be32 *p, struct pnfs_osd_objid *object_id) { p = xdr_encode_opaque_fixed(p, &object_id->oid_device_id.data, sizeof(object_id->oid_device_id.data)); p = xdr_encode_hyper(p, object_id->oid_partition_id); p = xdr_encode_hyper(p, object_id->oid_object_id); return p; } /* * struct pnfs_osd_ioerr { * struct pnfs_osd_objid oer_component; * u64 oer_comp_offset; * u64 oer_comp_length; * u32 oer_iswrite; * u32 oer_errno; * }; // xdr size 32 + 24 bytes */ void pnfs_osd_xdr_encode_ioerr(__be32 *p, struct pnfs_osd_ioerr *ioerr) { p = pnfs_osd_xdr_encode_objid(p, &ioerr->oer_component); p = xdr_encode_hyper(p, ioerr->oer_comp_offset); p = xdr_encode_hyper(p, ioerr->oer_comp_length); *p++ = cpu_to_be32(ioerr->oer_iswrite); *p = cpu_to_be32(ioerr->oer_errno); } __be32 *pnfs_osd_xdr_ioerr_reserve_space(struct xdr_stream *xdr) { __be32 *p; p = xdr_reserve_space(xdr, 32 + 24); if (unlikely(!p)) dprintk("%s: out of xdr space\n", __func__); return p; }
gpl-2.0
mayli/wrapfs-latest
fs/nfs/objlayout/pnfs_osd_xdr_cli.c
10924
11268
/* * Object-Based pNFS Layout XDR layer * * Copyright (C) 2007 Panasas Inc. [year of first publication] * All rights reserved. * * Benny Halevy <bhalevy@panasas.com> * Boaz Harrosh <bharrosh@panasas.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * See the file COPYING included with this distribution for more details. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the Panasas company nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <linux/pnfs_osd_xdr.h> #define NFSDBG_FACILITY NFSDBG_PNFS_LD /* * The following implementation is based on RFC5664 */ /* * struct pnfs_osd_objid { * struct nfs4_deviceid oid_device_id; * u64 oid_partition_id; * u64 oid_object_id; * }; // xdr size 32 bytes */ static __be32 * _osd_xdr_decode_objid(__be32 *p, struct pnfs_osd_objid *objid) { p = xdr_decode_opaque_fixed(p, objid->oid_device_id.data, sizeof(objid->oid_device_id.data)); p = xdr_decode_hyper(p, &objid->oid_partition_id); p = xdr_decode_hyper(p, &objid->oid_object_id); return p; } /* * struct pnfs_osd_opaque_cred { * u32 cred_len; * void *cred; * }; // xdr size [variable] * The return pointers are from the xdr buffer */ static int _osd_xdr_decode_opaque_cred(struct pnfs_osd_opaque_cred *opaque_cred, struct xdr_stream *xdr) { __be32 *p = xdr_inline_decode(xdr, 1); if (!p) return -EINVAL; opaque_cred->cred_len = be32_to_cpu(*p++); p = xdr_inline_decode(xdr, opaque_cred->cred_len); if (!p) return -EINVAL; opaque_cred->cred = p; return 0; } /* * struct pnfs_osd_object_cred { * struct pnfs_osd_objid oc_object_id; * u32 oc_osd_version; * u32 oc_cap_key_sec; * struct pnfs_osd_opaque_cred oc_cap_key * struct pnfs_osd_opaque_cred oc_cap; * }; // xdr size 32 + 4 + 4 + [variable] + [variable] */ static int _osd_xdr_decode_object_cred(struct pnfs_osd_object_cred *comp, struct xdr_stream *xdr) { __be32 *p = xdr_inline_decode(xdr, 32 + 4 + 4); int ret; if (!p) return -EIO; p = _osd_xdr_decode_objid(p, &comp->oc_object_id); comp->oc_osd_version = be32_to_cpup(p++); comp->oc_cap_key_sec = be32_to_cpup(p); ret = _osd_xdr_decode_opaque_cred(&comp->oc_cap_key, xdr); if (unlikely(ret)) return ret; ret = _osd_xdr_decode_opaque_cred(&comp->oc_cap, xdr); return ret; } /* * struct pnfs_osd_data_map { * u32 odm_num_comps; * u64 odm_stripe_unit; * u32 odm_group_width; * u32 odm_group_depth; * u32 odm_mirror_cnt; * u32 odm_raid_algorithm; * }; // xdr size 4 + 8 + 4 + 4 + 4 + 4 */ static inline int 
_osd_data_map_xdr_sz(void) { return 4 + 8 + 4 + 4 + 4 + 4; } static __be32 * _osd_xdr_decode_data_map(__be32 *p, struct pnfs_osd_data_map *data_map) { data_map->odm_num_comps = be32_to_cpup(p++); p = xdr_decode_hyper(p, &data_map->odm_stripe_unit); data_map->odm_group_width = be32_to_cpup(p++); data_map->odm_group_depth = be32_to_cpup(p++); data_map->odm_mirror_cnt = be32_to_cpup(p++); data_map->odm_raid_algorithm = be32_to_cpup(p++); dprintk("%s: odm_num_comps=%u odm_stripe_unit=%llu odm_group_width=%u " "odm_group_depth=%u odm_mirror_cnt=%u odm_raid_algorithm=%u\n", __func__, data_map->odm_num_comps, (unsigned long long)data_map->odm_stripe_unit, data_map->odm_group_width, data_map->odm_group_depth, data_map->odm_mirror_cnt, data_map->odm_raid_algorithm); return p; } int pnfs_osd_xdr_decode_layout_map(struct pnfs_osd_layout *layout, struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr) { __be32 *p; memset(iter, 0, sizeof(*iter)); p = xdr_inline_decode(xdr, _osd_data_map_xdr_sz() + 4 + 4); if (unlikely(!p)) return -EINVAL; p = _osd_xdr_decode_data_map(p, &layout->olo_map); layout->olo_comps_index = be32_to_cpup(p++); layout->olo_num_comps = be32_to_cpup(p++); dprintk("%s: olo_comps_index=%d olo_num_comps=%d\n", __func__, layout->olo_comps_index, layout->olo_num_comps); iter->total_comps = layout->olo_num_comps; return 0; } bool pnfs_osd_xdr_decode_layout_comp(struct pnfs_osd_object_cred *comp, struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr, int *err) { BUG_ON(iter->decoded_comps > iter->total_comps); if (iter->decoded_comps == iter->total_comps) return false; *err = _osd_xdr_decode_object_cred(comp, xdr); if (unlikely(*err)) { dprintk("%s: _osd_xdr_decode_object_cred=>%d decoded_comps=%d " "total_comps=%d\n", __func__, *err, iter->decoded_comps, iter->total_comps); return false; /* stop the loop */ } dprintk("%s: dev(%llx:%llx) par=0x%llx obj=0x%llx " "key_len=%u cap_len=%u\n", __func__, 
_DEVID_LO(&comp->oc_object_id.oid_device_id), _DEVID_HI(&comp->oc_object_id.oid_device_id), comp->oc_object_id.oid_partition_id, comp->oc_object_id.oid_object_id, comp->oc_cap_key.cred_len, comp->oc_cap.cred_len); iter->decoded_comps++; return true; } /* * Get Device Information Decoding * * Note: since Device Information is currently done synchronously, all * variable strings fields are left inside the rpc buffer and are only * pointed to by the pnfs_osd_deviceaddr members. So the read buffer * should not be freed while the returned information is in use. */ /* *struct nfs4_string { * unsigned int len; * char *data; *}; // size [variable] * NOTE: Returned string points to inside the XDR buffer */ static __be32 * __read_u8_opaque(__be32 *p, struct nfs4_string *str) { str->len = be32_to_cpup(p++); str->data = (char *)p; p += XDR_QUADLEN(str->len); return p; } /* * struct pnfs_osd_targetid { * u32 oti_type; * struct nfs4_string oti_scsi_device_id; * };// size 4 + [variable] */ static __be32 * __read_targetid(__be32 *p, struct pnfs_osd_targetid* targetid) { u32 oti_type; oti_type = be32_to_cpup(p++); targetid->oti_type = oti_type; switch (oti_type) { case OBJ_TARGET_SCSI_NAME: case OBJ_TARGET_SCSI_DEVICE_ID: p = __read_u8_opaque(p, &targetid->oti_scsi_device_id); } return p; } /* * struct pnfs_osd_net_addr { * struct nfs4_string r_netid; * struct nfs4_string r_addr; * }; */ static __be32 * __read_net_addr(__be32 *p, struct pnfs_osd_net_addr* netaddr) { p = __read_u8_opaque(p, &netaddr->r_netid); p = __read_u8_opaque(p, &netaddr->r_addr); return p; } /* * struct pnfs_osd_targetaddr { * u32 ota_available; * struct pnfs_osd_net_addr ota_netaddr; * }; */ static __be32 * __read_targetaddr(__be32 *p, struct pnfs_osd_targetaddr *targetaddr) { u32 ota_available; ota_available = be32_to_cpup(p++); targetaddr->ota_available = ota_available; if (ota_available) p = __read_net_addr(p, &targetaddr->ota_netaddr); return p; } /* * struct pnfs_osd_deviceaddr { * struct 
pnfs_osd_targetid oda_targetid; * struct pnfs_osd_targetaddr oda_targetaddr; * u8 oda_lun[8]; * struct nfs4_string oda_systemid; * struct pnfs_osd_object_cred oda_root_obj_cred; * struct nfs4_string oda_osdname; * }; */ /* We need this version for the pnfs_osd_xdr_decode_deviceaddr which does * not have an xdr_stream */ static __be32 * __read_opaque_cred(__be32 *p, struct pnfs_osd_opaque_cred *opaque_cred) { opaque_cred->cred_len = be32_to_cpu(*p++); opaque_cred->cred = p; return p + XDR_QUADLEN(opaque_cred->cred_len); } static __be32 * __read_object_cred(__be32 *p, struct pnfs_osd_object_cred *comp) { p = _osd_xdr_decode_objid(p, &comp->oc_object_id); comp->oc_osd_version = be32_to_cpup(p++); comp->oc_cap_key_sec = be32_to_cpup(p++); p = __read_opaque_cred(p, &comp->oc_cap_key); p = __read_opaque_cred(p, &comp->oc_cap); return p; } void pnfs_osd_xdr_decode_deviceaddr( struct pnfs_osd_deviceaddr *deviceaddr, __be32 *p) { p = __read_targetid(p, &deviceaddr->oda_targetid); p = __read_targetaddr(p, &deviceaddr->oda_targetaddr); p = xdr_decode_opaque_fixed(p, deviceaddr->oda_lun, sizeof(deviceaddr->oda_lun)); p = __read_u8_opaque(p, &deviceaddr->oda_systemid); p = __read_object_cred(p, &deviceaddr->oda_root_obj_cred); p = __read_u8_opaque(p, &deviceaddr->oda_osdname); /* libosd likes this terminated in dbg. 
It's last, so no problems */ deviceaddr->oda_osdname.data[deviceaddr->oda_osdname.len] = 0; } /* * struct pnfs_osd_layoutupdate { * u32 dsu_valid; * s64 dsu_delta; * u32 olu_ioerr_flag; * }; xdr size 4 + 8 + 4 */ int pnfs_osd_xdr_encode_layoutupdate(struct xdr_stream *xdr, struct pnfs_osd_layoutupdate *lou) { __be32 *p = xdr_reserve_space(xdr, 4 + 8 + 4); if (!p) return -E2BIG; *p++ = cpu_to_be32(lou->dsu_valid); if (lou->dsu_valid) p = xdr_encode_hyper(p, lou->dsu_delta); *p++ = cpu_to_be32(lou->olu_ioerr_flag); return 0; } /* * struct pnfs_osd_objid { * struct nfs4_deviceid oid_device_id; * u64 oid_partition_id; * u64 oid_object_id; * }; // xdr size 32 bytes */ static inline __be32 * pnfs_osd_xdr_encode_objid(__be32 *p, struct pnfs_osd_objid *object_id) { p = xdr_encode_opaque_fixed(p, &object_id->oid_device_id.data, sizeof(object_id->oid_device_id.data)); p = xdr_encode_hyper(p, object_id->oid_partition_id); p = xdr_encode_hyper(p, object_id->oid_object_id); return p; } /* * struct pnfs_osd_ioerr { * struct pnfs_osd_objid oer_component; * u64 oer_comp_offset; * u64 oer_comp_length; * u32 oer_iswrite; * u32 oer_errno; * }; // xdr size 32 + 24 bytes */ void pnfs_osd_xdr_encode_ioerr(__be32 *p, struct pnfs_osd_ioerr *ioerr) { p = pnfs_osd_xdr_encode_objid(p, &ioerr->oer_component); p = xdr_encode_hyper(p, ioerr->oer_comp_offset); p = xdr_encode_hyper(p, ioerr->oer_comp_length); *p++ = cpu_to_be32(ioerr->oer_iswrite); *p = cpu_to_be32(ioerr->oer_errno); } __be32 *pnfs_osd_xdr_ioerr_reserve_space(struct xdr_stream *xdr) { __be32 *p; p = xdr_reserve_space(xdr, 32 + 24); if (unlikely(!p)) dprintk("%s: out of xdr space\n", __func__); return p; }
gpl-2.0
JamesAng/lx-sk
drivers/media/video/pvrusb2/pvrusb2-ctrl.c
11692
14185
/* * * * Copyright (C) 2005 Mike Isely <isely@pobox.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include "pvrusb2-ctrl.h" #include "pvrusb2-hdw-internal.h" #include <linux/errno.h> #include <linux/string.h> #include <linux/mutex.h> static int pvr2_ctrl_range_check(struct pvr2_ctrl *cptr,int val) { if (cptr->info->check_value) { if (!cptr->info->check_value(cptr,val)) return -ERANGE; } else if (cptr->info->type == pvr2_ctl_enum) { if (val < 0) return -ERANGE; if (val >= cptr->info->def.type_enum.count) return -ERANGE; } else { int lim; lim = cptr->info->def.type_int.min_value; if (cptr->info->get_min_value) { cptr->info->get_min_value(cptr,&lim); } if (val < lim) return -ERANGE; lim = cptr->info->def.type_int.max_value; if (cptr->info->get_max_value) { cptr->info->get_max_value(cptr,&lim); } if (val > lim) return -ERANGE; } return 0; } /* Set the given control. */ int pvr2_ctrl_set_value(struct pvr2_ctrl *cptr,int val) { return pvr2_ctrl_set_mask_value(cptr,~0,val); } /* Set/clear specific bits of the given control. 
*/ int pvr2_ctrl_set_mask_value(struct pvr2_ctrl *cptr,int mask,int val) { int ret = 0; if (!cptr) return -EINVAL; LOCK_TAKE(cptr->hdw->big_lock); do { if (cptr->info->set_value) { if (cptr->info->type == pvr2_ctl_bitmask) { mask &= cptr->info->def.type_bitmask.valid_bits; } else if ((cptr->info->type == pvr2_ctl_int)|| (cptr->info->type == pvr2_ctl_enum)) { ret = pvr2_ctrl_range_check(cptr,val); if (ret < 0) break; } else if (cptr->info->type != pvr2_ctl_bool) { break; } ret = cptr->info->set_value(cptr,mask,val); } else { ret = -EPERM; } } while(0); LOCK_GIVE(cptr->hdw->big_lock); return ret; } /* Get the current value of the given control. */ int pvr2_ctrl_get_value(struct pvr2_ctrl *cptr,int *valptr) { int ret = 0; if (!cptr) return -EINVAL; LOCK_TAKE(cptr->hdw->big_lock); do { ret = cptr->info->get_value(cptr,valptr); } while(0); LOCK_GIVE(cptr->hdw->big_lock); return ret; } /* Retrieve control's type */ enum pvr2_ctl_type pvr2_ctrl_get_type(struct pvr2_ctrl *cptr) { if (!cptr) return pvr2_ctl_int; return cptr->info->type; } /* Retrieve control's maximum value (int type) */ int pvr2_ctrl_get_max(struct pvr2_ctrl *cptr) { int ret = 0; if (!cptr) return 0; LOCK_TAKE(cptr->hdw->big_lock); do { if (cptr->info->get_max_value) { cptr->info->get_max_value(cptr,&ret); } else if (cptr->info->type == pvr2_ctl_int) { ret = cptr->info->def.type_int.max_value; } } while(0); LOCK_GIVE(cptr->hdw->big_lock); return ret; } /* Retrieve control's minimum value (int type) */ int pvr2_ctrl_get_min(struct pvr2_ctrl *cptr) { int ret = 0; if (!cptr) return 0; LOCK_TAKE(cptr->hdw->big_lock); do { if (cptr->info->get_min_value) { cptr->info->get_min_value(cptr,&ret); } else if (cptr->info->type == pvr2_ctl_int) { ret = cptr->info->def.type_int.min_value; } } while(0); LOCK_GIVE(cptr->hdw->big_lock); return ret; } /* Retrieve control's default value (any type) */ int pvr2_ctrl_get_def(struct pvr2_ctrl *cptr, int *valptr) { int ret = 0; if (!cptr) return -EINVAL; 
LOCK_TAKE(cptr->hdw->big_lock); do { if (cptr->info->get_def_value) { ret = cptr->info->get_def_value(cptr, valptr); } else { *valptr = cptr->info->default_value; } } while(0); LOCK_GIVE(cptr->hdw->big_lock); return ret; } /* Retrieve control's enumeration count (enum only) */ int pvr2_ctrl_get_cnt(struct pvr2_ctrl *cptr) { int ret = 0; if (!cptr) return 0; LOCK_TAKE(cptr->hdw->big_lock); do { if (cptr->info->type == pvr2_ctl_enum) { ret = cptr->info->def.type_enum.count; } } while(0); LOCK_GIVE(cptr->hdw->big_lock); return ret; } /* Retrieve control's valid mask bits (bit mask only) */ int pvr2_ctrl_get_mask(struct pvr2_ctrl *cptr) { int ret = 0; if (!cptr) return 0; LOCK_TAKE(cptr->hdw->big_lock); do { if (cptr->info->type == pvr2_ctl_bitmask) { ret = cptr->info->def.type_bitmask.valid_bits; } } while(0); LOCK_GIVE(cptr->hdw->big_lock); return ret; } /* Retrieve the control's name */ const char *pvr2_ctrl_get_name(struct pvr2_ctrl *cptr) { if (!cptr) return NULL; return cptr->info->name; } /* Retrieve the control's desc */ const char *pvr2_ctrl_get_desc(struct pvr2_ctrl *cptr) { if (!cptr) return NULL; return cptr->info->desc; } /* Retrieve a control enumeration or bit mask value */ int pvr2_ctrl_get_valname(struct pvr2_ctrl *cptr,int val, char *bptr,unsigned int bmax, unsigned int *blen) { int ret = -EINVAL; if (!cptr) return 0; *blen = 0; LOCK_TAKE(cptr->hdw->big_lock); do { if (cptr->info->type == pvr2_ctl_enum) { const char * const *names; names = cptr->info->def.type_enum.value_names; if (pvr2_ctrl_range_check(cptr,val) == 0) { if (names[val]) { *blen = scnprintf( bptr,bmax,"%s", names[val]); } else { *blen = 0; } ret = 0; } } else if (cptr->info->type == pvr2_ctl_bitmask) { const char **names; unsigned int idx; int msk; names = cptr->info->def.type_bitmask.bit_names; val &= cptr->info->def.type_bitmask.valid_bits; for (idx = 0, msk = 1; val; idx++, msk <<= 1) { if (val & msk) { *blen = scnprintf(bptr,bmax,"%s", names[idx]); ret = 0; break; } } } } while(0); 
LOCK_GIVE(cptr->hdw->big_lock); return ret; } /* Return V4L ID for this control or zero if none */ int pvr2_ctrl_get_v4lid(struct pvr2_ctrl *cptr) { if (!cptr) return 0; return cptr->info->v4l_id; } unsigned int pvr2_ctrl_get_v4lflags(struct pvr2_ctrl *cptr) { unsigned int flags = 0; if (cptr->info->get_v4lflags) { flags = cptr->info->get_v4lflags(cptr); } if (cptr->info->set_value) { flags &= ~V4L2_CTRL_FLAG_READ_ONLY; } else { flags |= V4L2_CTRL_FLAG_READ_ONLY; } return flags; } /* Return true if control is writable */ int pvr2_ctrl_is_writable(struct pvr2_ctrl *cptr) { if (!cptr) return 0; return cptr->info->set_value != NULL; } /* Return true if control has custom symbolic representation */ int pvr2_ctrl_has_custom_symbols(struct pvr2_ctrl *cptr) { if (!cptr) return 0; if (!cptr->info->val_to_sym) return 0; if (!cptr->info->sym_to_val) return 0; return !0; } /* Convert a given mask/val to a custom symbolic value */ int pvr2_ctrl_custom_value_to_sym(struct pvr2_ctrl *cptr, int mask,int val, char *buf,unsigned int maxlen, unsigned int *len) { if (!cptr) return -EINVAL; if (!cptr->info->val_to_sym) return -EINVAL; return cptr->info->val_to_sym(cptr,mask,val,buf,maxlen,len); } /* Convert a symbolic value to a mask/value pair */ int pvr2_ctrl_custom_sym_to_value(struct pvr2_ctrl *cptr, const char *buf,unsigned int len, int *maskptr,int *valptr) { if (!cptr) return -EINVAL; if (!cptr->info->sym_to_val) return -EINVAL; return cptr->info->sym_to_val(cptr,buf,len,maskptr,valptr); } static unsigned int gen_bitmask_string(int msk,int val,int msk_only, const char **names, char *ptr,unsigned int len) { unsigned int idx; long sm,um; int spcFl; unsigned int uc,cnt; const char *idStr; spcFl = 0; uc = 0; um = 0; for (idx = 0, sm = 1; msk; idx++, sm <<= 1) { if (sm & msk) { msk &= ~sm; idStr = names[idx]; if (idStr) { cnt = scnprintf(ptr,len,"%s%s%s", (spcFl ? " " : ""), (msk_only ? "" : ((val & sm) ? 
"+" : "-")), idStr); ptr += cnt; len -= cnt; uc += cnt; spcFl = !0; } else { um |= sm; } } } if (um) { if (msk_only) { cnt = scnprintf(ptr,len,"%s0x%lx", (spcFl ? " " : ""), um); ptr += cnt; len -= cnt; uc += cnt; spcFl = !0; } else if (um & val) { cnt = scnprintf(ptr,len,"%s+0x%lx", (spcFl ? " " : ""), um & val); ptr += cnt; len -= cnt; uc += cnt; spcFl = !0; } else if (um & ~val) { cnt = scnprintf(ptr,len,"%s+0x%lx", (spcFl ? " " : ""), um & ~val); ptr += cnt; len -= cnt; uc += cnt; spcFl = !0; } } return uc; } static const char *boolNames[] = { "false", "true", "no", "yes", }; static int parse_token(const char *ptr,unsigned int len, int *valptr, const char * const *names, unsigned int namecnt) { char buf[33]; unsigned int slen; unsigned int idx; int negfl; char *p2; *valptr = 0; if (!names) namecnt = 0; for (idx = 0; idx < namecnt; idx++) { if (!names[idx]) continue; slen = strlen(names[idx]); if (slen != len) continue; if (memcmp(names[idx],ptr,slen)) continue; *valptr = idx; return 0; } negfl = 0; if ((*ptr == '-') || (*ptr == '+')) { negfl = (*ptr == '-'); ptr++; len--; } if (len >= sizeof(buf)) return -EINVAL; memcpy(buf,ptr,len); buf[len] = 0; *valptr = simple_strtol(buf,&p2,0); if (negfl) *valptr = -(*valptr); if (*p2) return -EINVAL; return 1; } static int parse_mtoken(const char *ptr,unsigned int len, int *valptr, const char **names,int valid_bits) { char buf[33]; unsigned int slen; unsigned int idx; char *p2; int msk; *valptr = 0; for (idx = 0, msk = 1; valid_bits; idx++, msk <<= 1) { if (!(msk & valid_bits)) continue; valid_bits &= ~msk; if (!names[idx]) continue; slen = strlen(names[idx]); if (slen != len) continue; if (memcmp(names[idx],ptr,slen)) continue; *valptr = msk; return 0; } if (len >= sizeof(buf)) return -EINVAL; memcpy(buf,ptr,len); buf[len] = 0; *valptr = simple_strtol(buf,&p2,0); if (*p2) return -EINVAL; return 0; } static int parse_tlist(const char *ptr,unsigned int len, int *maskptr,int *valptr, const char **names,int valid_bits) { 
unsigned int cnt; int mask,val,kv,mode,ret; mask = 0; val = 0; ret = 0; while (len) { cnt = 0; while ((cnt < len) && ((ptr[cnt] <= 32) || (ptr[cnt] >= 127))) cnt++; ptr += cnt; len -= cnt; mode = 0; if ((*ptr == '-') || (*ptr == '+')) { mode = (*ptr == '-') ? -1 : 1; ptr++; len--; } cnt = 0; while (cnt < len) { if (ptr[cnt] <= 32) break; if (ptr[cnt] >= 127) break; cnt++; } if (!cnt) break; if (parse_mtoken(ptr,cnt,&kv,names,valid_bits)) { ret = -EINVAL; break; } ptr += cnt; len -= cnt; switch (mode) { case 0: mask = valid_bits; val |= kv; break; case -1: mask |= kv; val &= ~kv; break; case 1: mask |= kv; val |= kv; break; default: break; } } *maskptr = mask; *valptr = val; return ret; } /* Convert a symbolic value to a mask/value pair */ int pvr2_ctrl_sym_to_value(struct pvr2_ctrl *cptr, const char *ptr,unsigned int len, int *maskptr,int *valptr) { int ret = -EINVAL; unsigned int cnt; *maskptr = 0; *valptr = 0; cnt = 0; while ((cnt < len) && ((ptr[cnt] <= 32) || (ptr[cnt] >= 127))) cnt++; len -= cnt; ptr += cnt; cnt = 0; while ((cnt < len) && ((ptr[len-(cnt+1)] <= 32) || (ptr[len-(cnt+1)] >= 127))) cnt++; len -= cnt; if (!len) return -EINVAL; LOCK_TAKE(cptr->hdw->big_lock); do { if (cptr->info->type == pvr2_ctl_int) { ret = parse_token(ptr,len,valptr,NULL,0); if (ret >= 0) { ret = pvr2_ctrl_range_check(cptr,*valptr); } *maskptr = ~0; } else if (cptr->info->type == pvr2_ctl_bool) { ret = parse_token(ptr,len,valptr,boolNames, ARRAY_SIZE(boolNames)); if (ret == 1) { *valptr = *valptr ? !0 : 0; } else if (ret == 0) { *valptr = (*valptr & 1) ? 
!0 : 0; } *maskptr = 1; } else if (cptr->info->type == pvr2_ctl_enum) { ret = parse_token( ptr,len,valptr, cptr->info->def.type_enum.value_names, cptr->info->def.type_enum.count); if (ret >= 0) { ret = pvr2_ctrl_range_check(cptr,*valptr); } *maskptr = ~0; } else if (cptr->info->type == pvr2_ctl_bitmask) { ret = parse_tlist( ptr,len,maskptr,valptr, cptr->info->def.type_bitmask.bit_names, cptr->info->def.type_bitmask.valid_bits); } } while(0); LOCK_GIVE(cptr->hdw->big_lock); return ret; } /* Convert a given mask/val to a symbolic value */ int pvr2_ctrl_value_to_sym_internal(struct pvr2_ctrl *cptr, int mask,int val, char *buf,unsigned int maxlen, unsigned int *len) { int ret = -EINVAL; *len = 0; if (cptr->info->type == pvr2_ctl_int) { *len = scnprintf(buf,maxlen,"%d",val); ret = 0; } else if (cptr->info->type == pvr2_ctl_bool) { *len = scnprintf(buf,maxlen,"%s",val ? "true" : "false"); ret = 0; } else if (cptr->info->type == pvr2_ctl_enum) { const char * const *names; names = cptr->info->def.type_enum.value_names; if ((val >= 0) && (val < cptr->info->def.type_enum.count)) { if (names[val]) { *len = scnprintf( buf,maxlen,"%s", names[val]); } else { *len = 0; } ret = 0; } } else if (cptr->info->type == pvr2_ctl_bitmask) { *len = gen_bitmask_string( val & mask & cptr->info->def.type_bitmask.valid_bits, ~0,!0, cptr->info->def.type_bitmask.bit_names, buf,maxlen); } return ret; } /* Convert a given mask/val to a symbolic value */ int pvr2_ctrl_value_to_sym(struct pvr2_ctrl *cptr, int mask,int val, char *buf,unsigned int maxlen, unsigned int *len) { int ret; LOCK_TAKE(cptr->hdw->big_lock); do { ret = pvr2_ctrl_value_to_sym_internal(cptr,mask,val, buf,maxlen,len); } while(0); LOCK_GIVE(cptr->hdw->big_lock); return ret; } /* Stuff for Emacs to see, in order to encourage consistent editing style: *** Local Variables: *** *** mode: c *** *** fill-column: 75 *** *** tab-width: 8 *** *** c-basic-offset: 8 *** *** End: *** */
gpl-2.0
mifl/android_kernel_pantech_ef44s
arch/sh/mm/cache-sh3.c
12972
2611
/*
 * arch/sh/mm/cache-sh3.c
 *
 * Copyright (C) 1999, 2000  Niibe Yutaka
 * Copyright (C) 2002 Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * Write back the dirty D-caches, but not invalidate them.
 *
 * Is this really worth it, or should we just alias this routine
 * to __flush_purge_region too?
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
static void sh3__flush_wback_region(void *start, int size)
{
	unsigned long v, j;
	unsigned long begin, end;
	unsigned long flags;

	/* Round the region out to whole cache lines. */
	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);

	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		unsigned long addrstart = CACHE_OC_ADDRESS_ARRAY;
		/*
		 * Probe each way of the operand cache for a tag matching
		 * this line's physical address; if found, clear the dirty
		 * (updated) bit without touching the valid bit, so the line
		 * is written back but stays cached.
		 */
		for (j = 0; j < current_cpu_data.dcache.ways; j++) {
			unsigned long data, addr, p;

			p = __pa(v);
			addr = addrstart | (v & current_cpu_data.dcache.entry_mask);
			/*
			 * The read-modify-write of the cache address array
			 * entry must not be interrupted, or another context
			 * could move the line underneath us.
			 */
			local_irq_save(flags);
			data = __raw_readl(addr);

			if ((data & CACHE_PHYSADDR_MASK) ==
			    (p & CACHE_PHYSADDR_MASK)) {
				data &= ~SH_CACHE_UPDATED;
				__raw_writel(data, addr);
				local_irq_restore(flags);
				/* A line lives in at most one way; done. */
				break;
			}
			local_irq_restore(flags);
			/* Advance to the same entry in the next way. */
			addrstart += current_cpu_data.dcache.way_incr;
		}
	}
}

/*
 * Write back the dirty D-caches and invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
static void sh3__flush_purge_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	/* Round the region out to whole cache lines. */
	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);

	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		unsigned long data, addr;

		/*
		 * Associative write to the address array: hardware matches
		 * the tag itself and, on a hit, writes the line back and
		 * clears U and V in one operation.
		 */
		data = (v & 0xfffffc00); /* _Virtual_ address, ~U, ~V */
		addr = CACHE_OC_ADDRESS_ARRAY |
			(v & current_cpu_data.dcache.entry_mask) |
			SH_CACHE_ASSOC;
		__raw_writel(data, addr);
	}
}

/* Install the SH-3 flavors of the generic cache-flush entry points. */
void __init sh3_cache_init(void)
{
	__flush_wback_region = sh3__flush_wback_region;
	__flush_purge_region = sh3__flush_purge_region;

	/*
	 * No write back please
	 *
	 * Except I don't think there's any way to avoid the writeback.
	 * So we just alias it to sh3__flush_purge_region(). dwmw2.
	 */
	__flush_invalidate_region = sh3__flush_purge_region;
}
gpl-2.0
Evervolv/android_kernel_htc_msm8660
arch/x86/boot/tty.c
12972
2430
/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright (C) 1991, 1992 Linus Torvalds
 *   Copyright 2007 rPath, Inc. - All Rights Reserved
 *   Copyright 2009 Intel Corporation; author H. Peter Anvin
 *
 *   This file is part of the Linux kernel, and is made available under
 *   the terms of the GNU General Public License version 2.
 *
 * ----------------------------------------------------------------------- */

/*
 * Very simple screen and serial I/O
 */

#include "boot.h"

int early_serial_base;

#define XMTRDY          0x20

#define TXR             0       /*  Transmit register (WRITE) */
#define LSR             5       /*  Line Status               */

/*
 * These functions are in .inittext so they can be used to signal
 * error during initialization.
 */

/* Busy-wait (bounded) for the UART transmitter, then emit one byte. */
static void __attribute__((section(".inittext"))) serial_putchar(int ch)
{
	/* Give up after 0xffff polls so a dead UART cannot hang the boot. */
	unsigned timeout = 0xffff;

	while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
		cpu_relax();

	outb(ch, early_serial_base + TXR);
}

/* Write one character to the screen via BIOS INT 10h teletype output. */
static void __attribute__((section(".inittext"))) bios_putchar(int ch)
{
	struct biosregs ireg;

	initregs(&ireg);
	ireg.bx = 0x0007;	/* page 0, attribute 7 (light gray) */
	ireg.cx = 0x0001;	/* one character */
	ireg.ah = 0x0e;		/* teletype output */
	ireg.al = ch;
	intcall(0x10, &ireg, NULL);
}

/*
 * Emit a character to the screen and, when early serial console is
 * configured, to the serial port as well.
 */
void __attribute__((section(".inittext"))) putchar(int ch)
{
	if (ch == '\n')
		putchar('\r');	/* \n -> \r\n */

	bios_putchar(ch);

	if (early_serial_base != 0)
		serial_putchar(ch);
}

/* Emit a NUL-terminated string through putchar(). */
void __attribute__((section(".inittext"))) puts(const char *str)
{
	while (*str)
		putchar(*str++);
}

/*
 * Read the CMOS clock through the BIOS, and return the
 * seconds in BCD.
 */
static u8 gettime(void)
{
	struct biosregs ireg, oreg;

	initregs(&ireg);
	ireg.ah = 0x02;		/* INT 1Ah AH=02h: read RTC time */
	intcall(0x1a, &ireg, &oreg);

	return oreg.dh;		/* seconds field, BCD encoded */
}

/*
 * Read from the keyboard
 */
int getchar(void)
{
	struct biosregs ireg, oreg;

	initregs(&ireg);
	/* ireg.ah = 0x00; */	/* INT 16h AH=00h: blocking key read */
	intcall(0x16, &ireg, &oreg);

	return oreg.al;		/* ASCII code; scan code in AH is discarded */
}

/* Non-blocking check for a pending keystroke (INT 16h AH=01h). */
static int kbd_pending(void)
{
	struct biosregs ireg, oreg;

	initregs(&ireg);
	ireg.ah = 0x01;
	intcall(0x16, &ireg, &oreg);

	/* ZF set means the keyboard buffer is empty. */
	return !(oreg.eflags & X86_EFLAGS_ZF);
}

/* Drain any queued keystrokes so a later prompt starts clean. */
void kbd_flush(void)
{
	for (;;) {
		if (!kbd_pending())
			break;
		getchar();
	}
}

/*
 * Wait for a keystroke with a roughly 30-second timeout; returns the
 * character, or 0 on timeout.  The countdown ticks whenever the BIOS
 * seconds value changes, so the first "second" may be short.
 * NOTE(review): seconds are BCD and wrap at the minute; inequality
 * comparison still detects each tick, so the wrap is presumably benign.
 */
int getchar_timeout(void)
{
	int cnt = 30;
	int t0, t1;

	t0 = gettime();

	while (cnt) {
		if (kbd_pending())
			return getchar();

		t1 = gettime();
		if (t0 != t1) {
			cnt--;
			t0 = t1;
		}
	}

	return 0;		/* Timeout! */
}
gpl-2.0
bcm216xx/android_kernel_rhea
net/ipv4/tcp.c
173
91078
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Implementation of the Transmission Control Protocol(TCP). * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Mark Evans, <evansmp@uhura.aston.ac.uk> * Corey Minyard <wf-rch!minyard@relay.EU.net> * Florian La Roche, <flla@stud.uni-sb.de> * Charles Hedrick, <hedrick@klinzhai.rutgers.edu> * Linus Torvalds, <torvalds@cs.helsinki.fi> * Alan Cox, <gw4pts@gw4pts.ampr.org> * Matthew Dillon, <dillon@apollo.west.oic.com> * Arnt Gulbrandsen, <agulbra@nvg.unit.no> * Jorge Cwik, <jorge@laser.satlink.net> * * Fixes: * Alan Cox : Numerous verify_area() calls * Alan Cox : Set the ACK bit on a reset * Alan Cox : Stopped it crashing if it closed while * sk->inuse=1 and was trying to connect * (tcp_err()). * Alan Cox : All icmp error handling was broken * pointers passed where wrong and the * socket was looked up backwards. Nobody * tested any icmp error code obviously. * Alan Cox : tcp_err() now handled properly. It * wakes people on errors. poll * behaves and the icmp error race * has gone by moving it into sock.c * Alan Cox : tcp_send_reset() fixed to work for * everything not just packets for * unknown sockets. * Alan Cox : tcp option processing. * Alan Cox : Reset tweaked (still not 100%) [Had * syn rule wrong] * Herp Rosmanith : More reset fixes * Alan Cox : No longer acks invalid rst frames. * Acking any kind of RST is right out. * Alan Cox : Sets an ignore me flag on an rst * receive otherwise odd bits of prattle * escape still * Alan Cox : Fixed another acking RST frame bug. * Should stop LAN workplace lockups. 
* Alan Cox : Some tidyups using the new skb list * facilities * Alan Cox : sk->keepopen now seems to work * Alan Cox : Pulls options out correctly on accepts * Alan Cox : Fixed assorted sk->rqueue->next errors * Alan Cox : PSH doesn't end a TCP read. Switched a * bit to skb ops. * Alan Cox : Tidied tcp_data to avoid a potential * nasty. * Alan Cox : Added some better commenting, as the * tcp is hard to follow * Alan Cox : Removed incorrect check for 20 * psh * Michael O'Reilly : ack < copied bug fix. * Johannes Stille : Misc tcp fixes (not all in yet). * Alan Cox : FIN with no memory -> CRASH * Alan Cox : Added socket option proto entries. * Also added awareness of them to accept. * Alan Cox : Added TCP options (SOL_TCP) * Alan Cox : Switched wakeup calls to callbacks, * so the kernel can layer network * sockets. * Alan Cox : Use ip_tos/ip_ttl settings. * Alan Cox : Handle FIN (more) properly (we hope). * Alan Cox : RST frames sent on unsynchronised * state ack error. * Alan Cox : Put in missing check for SYN bit. * Alan Cox : Added tcp_select_window() aka NET2E * window non shrink trick. * Alan Cox : Added a couple of small NET2E timer * fixes * Charles Hedrick : TCP fixes * Toomas Tamm : TCP window fixes * Alan Cox : Small URG fix to rlogin ^C ack fight * Charles Hedrick : Rewrote most of it to actually work * Linus : Rewrote tcp_read() and URG handling * completely * Gerhard Koerting: Fixed some missing timer handling * Matthew Dillon : Reworked TCP machine states as per RFC * Gerhard Koerting: PC/TCP workarounds * Adam Caldwell : Assorted timer/timing errors * Matthew Dillon : Fixed another RST bug * Alan Cox : Move to kernel side addressing changes. * Alan Cox : Beginning work on TCP fastpathing * (not yet usable) * Arnt Gulbrandsen: Turbocharged tcp_check() routine. 
* Alan Cox : TCP fast path debugging * Alan Cox : Window clamping * Michael Riepe : Bug in tcp_check() * Matt Dillon : More TCP improvements and RST bug fixes * Matt Dillon : Yet more small nasties remove from the * TCP code (Be very nice to this man if * tcp finally works 100%) 8) * Alan Cox : BSD accept semantics. * Alan Cox : Reset on closedown bug. * Peter De Schrijver : ENOTCONN check missing in tcp_sendto(). * Michael Pall : Handle poll() after URG properly in * all cases. * Michael Pall : Undo the last fix in tcp_read_urg() * (multi URG PUSH broke rlogin). * Michael Pall : Fix the multi URG PUSH problem in * tcp_readable(), poll() after URG * works now. * Michael Pall : recv(...,MSG_OOB) never blocks in the * BSD api. * Alan Cox : Changed the semantics of sk->socket to * fix a race and a signal problem with * accept() and async I/O. * Alan Cox : Relaxed the rules on tcp_sendto(). * Yury Shevchuk : Really fixed accept() blocking problem. * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for * clients/servers which listen in on * fixed ports. * Alan Cox : Cleaned the above up and shrank it to * a sensible code size. * Alan Cox : Self connect lockup fix. * Alan Cox : No connect to multicast. * Ross Biro : Close unaccepted children on master * socket close. * Alan Cox : Reset tracing code. * Alan Cox : Spurious resets on shutdown. * Alan Cox : Giant 15 minute/60 second timer error * Alan Cox : Small whoops in polling before an * accept. * Alan Cox : Kept the state trace facility since * it's handy for debugging. * Alan Cox : More reset handler fixes. * Alan Cox : Started rewriting the code based on * the RFC's for other useful protocol * references see: Comer, KA9Q NOS, and * for a reference on the difference * between specifications and how BSD * works see the 4.4lite source. * A.N.Kuznetsov : Don't time wait on completion of tidy * close. * Linus Torvalds : Fin/Shutdown & copied_seq changes. 
* Linus Torvalds : Fixed BSD port reuse to work first syn * Alan Cox : Reimplemented timers as per the RFC * and using multiple timers for sanity. * Alan Cox : Small bug fixes, and a lot of new * comments. * Alan Cox : Fixed dual reader crash by locking * the buffers (much like datagram.c) * Alan Cox : Fixed stuck sockets in probe. A probe * now gets fed up of retrying without * (even a no space) answer. * Alan Cox : Extracted closing code better * Alan Cox : Fixed the closing state machine to * resemble the RFC. * Alan Cox : More 'per spec' fixes. * Jorge Cwik : Even faster checksumming. * Alan Cox : tcp_data() doesn't ack illegal PSH * only frames. At least one pc tcp stack * generates them. * Alan Cox : Cache last socket. * Alan Cox : Per route irtt. * Matt Day : poll()->select() match BSD precisely on error * Alan Cox : New buffers * Marc Tamsky : Various sk->prot->retransmits and * sk->retransmits misupdating fixed. * Fixed tcp_write_timeout: stuck close, * and TCP syn retries gets used now. * Mark Yarvis : In tcp_read_wakeup(), don't send an * ack if state is TCP_CLOSED. * Alan Cox : Look up device on a retransmit - routes may * change. Doesn't yet cope with MSS shrink right * but it's a start! * Marc Tamsky : Closing in closing fixes. * Mike Shaver : RFC1122 verifications. * Alan Cox : rcv_saddr errors. * Alan Cox : Block double connect(). * Alan Cox : Small hooks for enSKIP. * Alexey Kuznetsov: Path MTU discovery. * Alan Cox : Support soft errors. * Alan Cox : Fix MTU discovery pathological case * when the remote claims no mtu! * Marc Tamsky : TCP_CLOSE fix. * Colin (G3TNE) : Send a reset on syn ack replies in * window but wrong (fixes NT lpd problems) * Pedro Roque : Better TCP window handling, delayed ack. * Joerg Reuter : No modification of locked buffers in * tcp_do_retransmit() * Eric Schenk : Changed receiver side silly window * avoidance algorithm to BSD style * algorithm. 
This doubles throughput * against machines running Solaris, * and seems to result in general * improvement. * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD * Willy Konynenberg : Transparent proxying support. * Mike McLagan : Routing by source * Keith Owens : Do proper merging with partial SKB's in * tcp_do_sendmsg to avoid burstiness. * Eric Schenk : Fix fast close down bug with * shutdown() followed by close(). * Andi Kleen : Make poll agree with SIGIO * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and * lingertime == 0 (RFC 793 ABORT Call) * Hirokazu Takahashi : Use copy_from_user() instead of * csum_and_copy_from_user() if possible. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or(at your option) any later version. * * Description of States: * * TCP_SYN_SENT sent a connection request, waiting for ack * * TCP_SYN_RECV received a connection request, sent ack, * waiting for final ack in three-way handshake. * * TCP_ESTABLISHED connection established * * TCP_FIN_WAIT1 our side has shutdown, waiting to complete * transmission of remaining buffered data * * TCP_FIN_WAIT2 all buffered data sent, waiting for remote * to shutdown * * TCP_CLOSING both sides have shutdown but we still have * data we have to finish sending * * TCP_TIME_WAIT timeout to catch resent junk before entering * closed, can only be entered from FIN_WAIT2 * or CLOSING. Required because the other end * may not have gotten our last ACK causing it * to retransmit the data packet (which we ignore) * * TCP_CLOSE_WAIT remote side has shutdown and is waiting for * us to finish writing our data and to shutdown * (we have to close() to move on to LAST_ACK) * * TCP_LAST_ACK out side has shutdown after remote has * shutdown. 
There may still be data in our * buffer that we have to finish sending * * TCP_CLOSE socket is finished */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/poll.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/skbuff.h> #include <linux/scatterlist.h> #include <linux/splice.h> #include <linux/net.h> #include <linux/socket.h> #include <linux/random.h> #include <linux/bootmem.h> #include <linux/highmem.h> #include <linux/swap.h> #include <linux/cache.h> #include <linux/err.h> #include <linux/crypto.h> #include <linux/time.h> #include <linux/slab.h> #include <linux/uid_stat.h> #include <net/icmp.h> #include <net/tcp.h> #include <net/xfrm.h> #include <net/ip.h> #include <net/ip6_route.h> #include <net/ipv6.h> #include <net/transp_v6.h> #include <net/netdma.h> #include <net/sock.h> #include <asm/uaccess.h> #include <asm/ioctls.h> int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT; struct percpu_counter tcp_orphan_count; EXPORT_SYMBOL_GPL(tcp_orphan_count); long sysctl_tcp_mem[3] __read_mostly; int sysctl_tcp_wmem[3] __read_mostly; int sysctl_tcp_rmem[3] __read_mostly; EXPORT_SYMBOL(sysctl_tcp_mem); EXPORT_SYMBOL(sysctl_tcp_rmem); EXPORT_SYMBOL(sysctl_tcp_wmem); atomic_long_t tcp_memory_allocated; /* Current allocated memory. */ EXPORT_SYMBOL(tcp_memory_allocated); /* * Current number of TCP sockets. */ struct percpu_counter tcp_sockets_allocated; EXPORT_SYMBOL(tcp_sockets_allocated); /* * TCP splice context */ struct tcp_splice_state { struct pipe_inode_info *pipe; size_t len; unsigned int flags; }; /* * Pressure flag: try to collapse. * Technical note: it is used by multiple contexts non atomically. * All the __sk_mem_schedule() is of this nature: accounting * is strict, actions are advisory and have some latency. 
 */
int tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL(tcp_memory_pressure);

/*
 * Latch the global memory-pressure flag, counting the 0->1 transition
 * exactly once in MIB stats.  Intentionally racy (see comment above).
 */
void tcp_enter_memory_pressure(struct sock *sk)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
		tcp_memory_pressure = 1;
	}
}
EXPORT_SYMBOL(tcp_enter_memory_pressure);

/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
	u8 res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			/* exponential backoff, capped at rto_max */
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}

/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}

/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	sock_poll_wait(file, sk_sleep(sk), wait);
	if (sk->sk_state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making impossible poll() on write()
	 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why POLLHUP is incompatible with POLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		int target = sock_rcvlowat(sk, 0, INT_MAX);

		/* Urgent byte not read inline sits at copied_seq: require
		 * one extra byte before reporting the socket readable.
		 */
		if (tp->urg_seq == tp->copied_seq &&
		    !sock_flag(sk, SOCK_URGINLINE) &&
		    tp->urg_data)
			target++;

		/* Potential race condition. If read of tp below will
		 * escape above sk->sk_state, we can be illegally awaken
		 * in SYN_* states. */
		if (tp->rcv_nxt - tp->copied_seq >= target)
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		} else
			mask |= POLLOUT | POLLWRNORM;

		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	}
	/* This barrier is coupled with smp_wmb() in tcp_reset() */
	smp_rmb();
	if (sk->sk_err)
		mask |= POLLERR;

	return mask;
}
EXPORT_SYMBOL(tcp_poll);

/*
 * Socket-level ioctls: SIOCINQ (bytes readable), SIOCATMARK (at urgent
 * mark), SIOCOUTQ (unacked bytes), SIOCOUTQNSD (not-yet-sent bytes).
 * The answer is copied to userspace via put_user().
 */
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else if (sock_flag(sk, SOCK_URGINLINE) ||
			 !tp->urg_data ||
			 before(tp->urg_seq, tp->copied_seq) ||
			 !before(tp->urg_seq, tp->rcv_nxt)) {
			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN was received */
			if (answ && sock_flag(sk, SOCK_DONE))
				answ--;
		} else
			answ = tp->urg_seq - tp->copied_seq;
		release_sock(sk);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	case SIOCOUTQNSD:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_nxt;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}
EXPORT_SYMBOL(tcp_ioctl);

/* Mark skb with PSH and remember where the last push happened. */
static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
	tp->pushed_seq = tp->write_seq;
}

/* True when more than half the max window has queued since the last push. */
static inline int forced_push(struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

/* Initialize a fresh skb's TCP control block and queue it on the write
 * queue (continues past this point; memory charging follows).
 */
static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	skb->csum = 0;
	tcb->seq = tcb->end_seq = tp->write_seq;
	tcb->flags = TCPHDR_ACK;
	tcb->sacked = 0;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;
}

/* Record the urgent pointer when sending out-of-band data. */
static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
	if (flags & MSG_OOB)
		tp->snd_up = tp->write_seq;
}

/* Push pending frames out, honouring MSG_MORE (cork) and Nagle mode. */
static inline void tcp_push(struct sock *sk, int flags, int mss_now,
			    int nonagle)
{
	if (tcp_send_head(sk)) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (!(flags & MSG_MORE) || forced_push(tp))
			tcp_mark_push(tp, tcp_write_queue_tail(sk));

		tcp_mark_urg(tp, flags);
		__tcp_push_pending_frames(sk, mss_now,
					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
	}
}

/* read_descriptor actor for splice: move skb payload into the pipe and
 * account the bytes consumed against rd_desc->count.
 */
static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;
	int ret;

	ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
			      tss->flags);
	if (ret > 0)
		rd_desc->count -= ret;
	return ret;
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
		.arg.data = tss,
		.count	  = tss->len,
	};

	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

/**
 *  tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:	socket to splice from
 * @ppos:	position (not valid)
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	long timeo;
	ssize_t spliced;
	int ret;

	sock_rps_record_flow(sk);
	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	ret = spliced = 0;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
		if (ret < 0)
			break;
		else if (!ret) {
			/* No data spliced this round: decide whether to
			 * return what we have, report an error, or wait.
			 */
			if (spliced)
				break;
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when user tries to read
				 * from never connected socket.
				 */
				if (!sock_flag(sk, SOCK_DONE))
					ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			sk_wait_data(sk, &timeo);
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		if (!timeo)
			break;
		/* Drop and retake the lock to let the backlog drain. */
		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current))
			break;
	}

	release_sock(sk);

	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL(tcp_splice_read);

/* Allocate a forced-clone skb for transmit, honouring write-memory
 * accounting; on failure enter memory pressure and shrink sndbuf.
 */
struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
{
	struct sk_buff *skb;

	/* The TCP header must be at least 32-bit aligned. */
	size = ALIGN(size, 4);

	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
	if (skb) {
		if (sk_wmem_schedule(sk, skb->truesize)) {
			/*
			 * Make sure that we have exactly size bytes
			 * available to the caller, no more, no less.
			 */
			skb_reserve(skb, skb_tailroom(skb) - size);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

/* Compute how many bytes a single queued skb should aim to carry:
 * mss_now without GSO, otherwise a multiple of mss cached in
 * xmit_size_goal_segs to avoid divides on the fast path.
 */
static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
				       int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 xmit_size_goal, old_size_goal;

	xmit_size_goal = mss_now;

	if (large_allowed && sk_can_gso(sk)) {
		xmit_size_goal = ((sk->sk_gso_max_size - 1) -
				  inet_csk(sk)->icsk_af_ops->net_header_len -
				  inet_csk(sk)->icsk_ext_hdr_len -
				  tp->tcp_header_len);

		xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);

		/* We try hard to avoid divides here */
		old_size_goal = tp->xmit_size_goal_segs * mss_now;

		if (likely(old_size_goal <= xmit_size_goal &&
			   old_size_goal + mss_now > xmit_size_goal)) {
			xmit_size_goal = old_size_goal;
		} else {
			tp->xmit_size_goal_segs =
				min_t(u16, xmit_size_goal / mss_now,
				      sk->sk_gso_max_segs);
			xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
		}
	}

	return max(xmit_size_goal, mss_now);
}

/* Return current mss and fill *size_goal (OOB data is never coalesced). */
static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
	int mss_now;

	mss_now = tcp_current_mss(sk);
	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));

	return mss_now;
}

/* Zero-copy send of page fragments (sendpage path); body continues. */
static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
				size_t psize, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish.
 */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_send_mss(sk, &size_goal, flags);
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto out_err;

	while (psize > 0) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		struct page *page = pages[poffset / PAGE_SIZE];
		int copy, i, can_coalesce;
		int offset = poffset % PAGE_SIZE;
		int size = min_t(size_t, psize, PAGE_SIZE - offset);

		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
			if (!skb)
				goto wait_for_memory;

			skb_entail(sk, skb);
			copy = size_goal;
		}

		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, page, offset);
		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
			tcp_mark_push(tp, skb);
			goto new_segment;
		}
		if (!sk_wmem_schedule(sk, copy))
			goto wait_for_memory;

		if (can_coalesce) {
			/* Extend the last fragment in place. */
			skb_shinfo(skb)->frags[i - 1].size += copy;
		} else {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		}

		skb->len += copy;
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		sk_mem_charge(sk, copy);
		skb->ip_summed = CHECKSUM_PARTIAL;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		skb_shinfo(skb)->gso_segs = 0;

		if (!copied)
			TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;

		copied += copy;
		poffset += copy;
		if (!(psize -= copy))
			goto out;

		if (skb->len < size_goal || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		/* Flush what we have, then block until memory is available;
		 * mss may have changed while we slept, so recompute.
		 */
		tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
			goto do_error;

		mss_now = tcp_send_mss(sk, &size_goal, flags);
	}

out:
	if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
		tcp_push(sk, flags, mss_now, tp->nonagle);
	return copied;

do_error:
	if (copied)
		goto out;
out_err:
	return sk_stream_error(sk, flags, err);
}

/* sendpage entry point: fall back to sock_no_sendpage() (copying) when
 * the route can't do SG or checksum offload.
 */
int tcp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	ssize_t res;

	if (!(sk->sk_route_caps & NETIF_F_SG) ||
	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
		return sock_no_sendpage(sk->sk_socket, page, offset, size,
					flags);

	lock_sock(sk);
	res = do_tcp_sendpages(sk, &page, offset, size, flags);
	release_sock(sk);
	return res;
}
EXPORT_SYMBOL(tcp_sendpage);

/* Per-socket partially-filled page cache used by the sendmsg copy path. */
#define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
#define TCP_OFF(sk)	(sk->sk_sndmsg_off)

/* Pick the linear-area size for a new skb (0 = all in fragments when
 * GSO is usable; otherwise aim at a page-aligned break).
 */
static inline int select_size(struct sock *sk, int sg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int tmp = tp->mss_cache;

	if (sg) {
		if (sk_can_gso(sk))
			tmp = 0;
		else {
			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

			if (tmp >= pgbreak &&
			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
				tmp = pgbreak;
		}
	}

	return tmp;
}

/* Copy user iovec data into the write queue and transmit.  Returns bytes
 * queued or a negative errno; blocks per the socket's send timeout.
 */
int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t size)
{
	struct iovec *iov;
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int iovlen, flags;
	int mss_now, size_goal;
	int sg, err, copied;
	long timeo;

	lock_sock(sk);

	flags = msg->msg_flags;
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	/* This should be in poll */
	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_send_mss(sk, &size_goal, flags);

	/* Ok commence sending.
	 */
	iovlen = msg->msg_iovlen;
	iov = msg->msg_iov;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto out_err;

	sg = sk->sk_route_caps & NETIF_F_SG;

	while (--iovlen >= 0) {
		size_t seglen = iov->iov_len;
		unsigned char __user *from = iov->iov_base;

		iov++;

		while (seglen > 0) {
			int copy = 0;
			int max = size_goal;

			skb = tcp_write_queue_tail(sk);
			if (tcp_send_head(sk)) {
				if (skb->ip_summed == CHECKSUM_NONE)
					max = mss_now;
				copy = max - skb->len;
			}

			if (copy <= 0) {
new_segment:
				/* Allocate new segment. If the interface is SG,
				 * allocate skb fitting to single page.
				 */
				if (!sk_stream_memory_free(sk))
					goto wait_for_sndbuf;

				skb = sk_stream_alloc_skb(sk,
							  select_size(sk, sg),
							  sk->sk_allocation);
				if (!skb)
					goto wait_for_memory;

				/*
				 * Check whether we can use HW checksum.
				 */
				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
					skb->ip_summed = CHECKSUM_PARTIAL;

				skb_entail(sk, skb);
				copy = size_goal;
				max = size_goal;
			}

			/* Try to append data to the end of skb. */
			if (copy > seglen)
				copy = seglen;

			/* Where to copy to? */
			if (skb_tailroom(skb) > 0) {
				/* We have some space in skb head. Superb! */
				if (copy > skb_tailroom(skb))
					copy = skb_tailroom(skb);
				err = skb_add_data_nocache(sk, skb, from, copy);
				if (err)
					goto do_fault;
			} else {
				int merge = 0;
				int i = skb_shinfo(skb)->nr_frags;
				struct page *page = TCP_PAGE(sk);
				int off = TCP_OFF(sk);

				if (skb_can_coalesce(skb, i, page, off) &&
				    off != PAGE_SIZE) {
					/* We can extend the last page
					 * fragment. */
					merge = 1;
				} else if (i == MAX_SKB_FRAGS || !sg) {
					/* Need to add new fragment and cannot
					 * do this because interface is non-SG,
					 * or because all the page slots are
					 * busy. */
					tcp_mark_push(tp, skb);
					goto new_segment;
				} else if (page) {
					if (off == PAGE_SIZE) {
						put_page(page);
						TCP_PAGE(sk) = page = NULL;
						off = 0;
					}
				} else
					off = 0;

				if (copy > PAGE_SIZE - off)
					copy = PAGE_SIZE - off;

				if (!sk_wmem_schedule(sk, copy))
					goto wait_for_memory;

				if (!page) {
					/* Allocate new cache page.
					 */
					if (!(page = sk_stream_alloc_page(sk)))
						goto wait_for_memory;
				}

				/* Time to copy data. We are close to
				 * the end! */
				err = skb_copy_to_page_nocache(sk, from, skb,
							       page, off, copy);
				if (err) {
					/* If this page was new, give it to the
					 * socket so it does not get leaked.
					 */
					if (!TCP_PAGE(sk)) {
						TCP_PAGE(sk) = page;
						TCP_OFF(sk) = 0;
					}
					goto do_error;
				}

				/* Update the skb. */
				if (merge) {
					skb_shinfo(skb)->frags[i - 1].size +=
									copy;
				} else {
					skb_fill_page_desc(skb, i, page, off, copy);
					if (TCP_PAGE(sk)) {
						get_page(page);
					} else if (off + copy < PAGE_SIZE) {
						get_page(page);
						TCP_PAGE(sk) = page;
					}
				}

				TCP_OFF(sk) = off + copy;
			}

			if (!copied)
				TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;

			tp->write_seq += copy;
			TCP_SKB_CB(skb)->end_seq += copy;
			skb_shinfo(skb)->gso_segs = 0;

			from += copy;
			copied += copy;
			if ((seglen -= copy) == 0 && iovlen == 0)
				goto out;

			if (skb->len < max || (flags & MSG_OOB))
				continue;

			if (forced_push(tp)) {
				tcp_mark_push(tp, skb);
				__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
			} else if (skb == tcp_send_head(sk))
				tcp_push_one(sk, mss_now);
			continue;

wait_for_sndbuf:
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
			if (copied)
				tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
				goto do_error;

			mss_now = tcp_send_mss(sk, &size_goal, flags);
		}
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	release_sock(sk);

	if (copied > 0)
		uid_stat_tcp_snd(current_uid(), copied);
	return copied;

do_fault:
	if (!skb->len) {
		tcp_unlink_write_queue(skb, sk);
		/* It is the one place in all of TCP, except connection
		 * reset, where we can be unlinking the send_head.
		 */
		tcp_check_send_head(sk, skb);
		sk_wmem_free_skb(sk, skb);
	}

do_error:
	if (copied)
		goto out;
out_err:
	err = sk_stream_error(sk, flags, err);
	release_sock(sk);
	return err;
}
EXPORT_SYMBOL(tcp_sendmsg);

/*
 *	Handle reading urgent data.
 BSD has very simple semantics for
 *	this, no blocking and very strange errors 8)
 */

static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* No URG data to read. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_toiovec(msg->msg_iov, &c, 1);
			len = 1;
		} else
			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour.  BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
	return -EAGAIN;
}

/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary.  COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far, it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int time_to_ack = 0;

#if TCP_DEBUG
	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

	WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
	     "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
	     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
#endif

	if (inet_csk_ack_scheduled(sk)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		   /* Delayed ACKs frequently hit locked sockets during bulk
		    * receive. */
		if (icsk->icsk_ack.blocked ||
		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
		    /*
		     * If this read emptied read buffer, we send ACK, if
		     * connection is not bidirectional, user drained
		     * receive buffer and there was a small segment
		     * in queue.
		     */
		    (copied > 0 &&
		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
		       !icsk->icsk_ack.pingpong)) &&
		      !atomic_read(&sk->sk_rmem_alloc)))
			time_to_ack = 1;
	}

	/* We send an ACK if we can now advertise a non-zero window
	 * which has been raised "significantly".
	 *
	 * Even if window raised up to infinity, do not send window open ACK
	 * in states, where we will not receive more. It is useless.
	 */
	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		__u32 rcv_window_now = tcp_receive_window(tp);

		/* Optimize, __tcp_select_window() is not cheap. */
		if (2*rcv_window_now <= tp->window_clamp) {
			__u32 new_window = __tcp_select_window(sk);

			/* Send ACK now, if this read freed lots of space
			 * in our buffer. Certainly, new_window is new window.
			 * We can advertise it now, if it is not less than current one.
			 * "Lots" means "at least twice" here.
			 */
			if (new_window && new_window >= 2 * rcv_window_now)
				time_to_ack = 1;
		}
	}
	if (time_to_ack)
		tcp_send_ack(sk);
}

/* Feed every skb sitting on the prequeue through the normal receive
 * path (sk_backlog_rcv) with BHs disabled.
 */
static void tcp_prequeue_process(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);

	NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);

	/* RX process wants to run with disabled BHs, though it is not
	 * necessary */
	local_bh_disable();
	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
		sk_backlog_rcv(sk, skb);
	local_bh_enable();

	/* Clear memory counter.
	 */
	tp->ucopy.memory = 0;
}

#ifdef CONFIG_NET_DMA
/* Reap skbs whose async DMA copies have completed; if @wait, loop until
 * everything issued so far has drained.
 */
static void tcp_service_net_dma(struct sock *sk, bool wait)
{
	dma_cookie_t done, used;
	dma_cookie_t last_issued;
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->ucopy.dma_chan)
		return;

	last_issued = tp->ucopy.dma_cookie;
	dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);

	do {
		if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
					      last_issued, &done,
					      &used) == DMA_SUCCESS) {
			/* Safe to free early-copied skbs now */
			__skb_queue_purge(&sk->sk_async_wait_queue);
			break;
		} else {
			struct sk_buff *skb;
			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
			       (dma_async_is_complete(skb->dma_cookie, done,
						      used) == DMA_SUCCESS)) {
				__skb_dequeue(&sk->sk_async_wait_queue);
				kfree_skb(skb);
			}
		}
	} while (wait);
}
#endif

/* Find the skb on the receive queue containing sequence @seq; store the
 * byte offset within it in *off (SYN consumes one sequence number).
 */
static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
	struct sk_buff *skb;
	u32 offset;

	skb_queue_walk(&sk->sk_receive_queue, skb) {
		offset = seq - TCP_SKB_CB(skb)->seq;
		if (tcp_hdr(skb)->syn)
			offset--;
		if (offset < skb->len || tcp_hdr(skb)->fin) {
			*off = offset;
			return skb;
		}
	}
	return NULL;
}

/*
 * This routine provides an alternative to tcp_recvmsg() for routines
 * that would like to handle copying from skbuffs directly in 'sendfile'
 * fashion.
 * Note:
 *	- It is assumed that the socket was locked by the caller.
 *	- The routine does not block.
 *	- At present, there is no support for reading OOB data
 *	  or for 'peeking' the socket using this routine
 *	  (although both would be easy to implement).
 */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 seq = tp->copied_seq;
	u32 offset;
	int copied = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -ENOTCONN;
	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
		if (offset < skb->len) {
			int used;
			size_t len;

			len = skb->len - offset;
			/* Stop reading if we hit a patch of urgent data */
			if (tp->urg_data) {
				u32 urg_offset = tp->urg_seq - seq;
				if (urg_offset < len)
					len = urg_offset;
				if (!len)
					break;
			}
			used = recv_actor(desc, skb, offset, len);
			if (used < 0) {
				if (!copied)
					copied = used;
				break;
			} else if (used <= len) {
				seq += used;
				copied += used;
				offset += used;
			}
			/*
			 * If recv_actor drops the lock (e.g. TCP splice
			 * receive) the skb pointer might be invalid when
			 * getting here: tcp_collapse might have deleted it
			 * while aggregating skbs from the socket queue.
			 */
			skb = tcp_recv_skb(sk, seq-1, &offset);
			if (!skb || (offset+1 != skb->len))
				break;
		}
		if (tcp_hdr(skb)->fin) {
			sk_eat_skb(sk, skb, 0);
			++seq;
			break;
		}
		sk_eat_skb(sk, skb, 0);
		if (!desc->count)
			break;
		tp->copied_seq = seq;
	}
	tp->copied_seq = seq;

	tcp_rcv_space_adjust(sk);

	/* Clean up data we have read: This will do ACK frames. */
	if (copied > 0) {
		tcp_cleanup_rbuf(sk, copied);
		uid_stat_tcp_rcv(current_uid(), copied);
	}
	return copied;
}
EXPORT_SYMBOL(tcp_read_sock);

/*
 *	This routine copies from a sock struct into the user buffer.
 *
 *	Technical note: in 2.3 we work on _locked_ socket, so that
 *	tricks with *seq access order and skb->users are not required.
 *	Probably, code can be easily improved even more.
 */
int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int nonblock, int flags, int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int copied = 0;
	u32 peek_seq;
	u32 *seq;		/* points at copied_seq, or peek_seq for MSG_PEEK */
	unsigned long used;
	int err;
	int target;		/* Read at least this many bytes */
	long timeo;
	struct task_struct *user_recv = NULL;
	int copied_early = 0;
	struct sk_buff *skb;
	u32 urg_hole = 0;

	lock_sock(sk);

	err = -ENOTCONN;
	if (sk->sk_state == TCP_LISTEN)
		goto out;

	timeo = sock_rcvtimeo(sk, nonblock);

	/* Urgent data needs to be handled specially. */
	if (flags & MSG_OOB)
		goto recv_urg;

	seq = &tp->copied_seq;
	if (flags & MSG_PEEK) {
		peek_seq = tp->copied_seq;
		seq = &peek_seq;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

#ifdef CONFIG_NET_DMA
	/* Decide up front whether to pin the user iovec for async DMA copy. */
	tp->ucopy.dma_chan = NULL;
	preempt_disable();
	skb = skb_peek_tail(&sk->sk_receive_queue);
	{
		int available = 0;

		if (skb)
			available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
		if ((available < target) &&
		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
		    !sysctl_tcp_low_latency &&
		    dma_find_channel(DMA_MEMCPY)) {
			preempt_enable_no_resched();
			tp->ucopy.pinned_list =
					dma_pin_iovec_pages(msg->msg_iov, len);
		} else {
			preempt_enable_no_resched();
		}
	}
#endif

	do {
		u32 offset;

		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
		if (tp->urg_data && tp->urg_seq == *seq) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
				break;
			}
		}

		/* Next get a buffer. */

		skb_queue_walk(&sk->sk_receive_queue, skb) {
			/* Now that we have two receive queues this
			 * shouldn't happen.
			 */
			if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
				 "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
				 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
				 flags))
				break;

			offset = *seq - TCP_SKB_CB(skb)->seq;
			if (tcp_hdr(skb)->syn)
				offset--;
			if (offset < skb->len)
				goto found_ok_skb;
			if (tcp_hdr(skb)->fin)
				goto found_fin_ok;
			WARN(!(flags & MSG_PEEK),
			     "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
			     *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
		}

		/* Well, if we have backlog, try to process it now yet. */

		if (copied >= target && !sk->sk_backlog.tail)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current))
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;

			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				if (!sock_flag(sk, SOCK_DONE)) {
					/* This occurs when user tries to read
					 * from never connected socket.
					 */
					copied = -ENOTCONN;
					break;
				}
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		tcp_cleanup_rbuf(sk, copied);

		if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
			/* Install new reader */
			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
				user_recv = current;
				tp->ucopy.task = user_recv;
				tp->ucopy.iov = msg->msg_iov;
			}

			tp->ucopy.len = len;

			WARN_ON(tp->copied_seq != tp->rcv_nxt &&
				!(flags & (MSG_PEEK | MSG_TRUNC)));

			/* Ugly... If prequeue is not empty, we have to
			 * process it before releasing socket, otherwise
			 * order will be broken at second iteration.
			 * More elegant solution is required!!!
			 *
			 * Look: we have the following (pseudo)queues:
			 *
			 * 1. packets in flight
			 * 2. backlog
			 * 3. prequeue
			 * 4. receive_queue
			 *
			 * Each queue can be processed only if the next ones
			 * are empty. At this point we have empty receive_queue.
			 * But prequeue _can_ be not empty after 2nd iteration,
			 * when we jumped to start of loop because backlog
			 * processing added something to receive_queue.
			 * We cannot release_sock(), because backlog contains
			 * packets arrived _after_ prequeued ones.
			 *
			 * Shortly, algorithm is clear --- to process all
			 * the queues in order. We could make it more directly,
			 * requeueing packets from backlog to prequeue, if
			 * is not empty. It is more elegant, but eats cycles,
			 * unfortunately.
			 */
			if (!skb_queue_empty(&tp->ucopy.prequeue))
				goto do_prequeue;

			/* __ Set realtime policy in scheduler __ */
		}

#ifdef CONFIG_NET_DMA
		if (tp->ucopy.dma_chan) {
			if (tp->rcv_wnd == 0 &&
			    !skb_queue_empty(&sk->sk_async_wait_queue)) {
				tcp_service_net_dma(sk, true);
				tcp_cleanup_rbuf(sk, copied);
			} else
				dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
		}
#endif
		if (copied >= target) {
			/* Do not sleep, just process backlog. */
			release_sock(sk);
			lock_sock(sk);
		} else
			sk_wait_data(sk, &timeo);

#ifdef CONFIG_NET_DMA
		tcp_service_net_dma(sk, false);  /* Don't block */
		tp->ucopy.wakeup = 0;
#endif

		if (user_recv) {
			int chunk;

			/* __ Restore normal policy in scheduler __ */

			if ((chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
				len -= chunk;
				copied += chunk;
			}

			if (tp->rcv_nxt == tp->copied_seq &&
			    !skb_queue_empty(&tp->ucopy.prequeue)) {
do_prequeue:
				tcp_prequeue_process(sk);

				if ((chunk = len - tp->ucopy.len) != 0) {
					NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
					len -= chunk;
					copied += chunk;
				}
			}
		}
		if ((flags & MSG_PEEK) &&
		    (peek_seq - copied - urg_hole != tp->copied_seq)) {
			if (net_ratelimit())
				printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
				       current->comm, task_pid_nr(current));
			peek_seq = tp->copied_seq;
		}
		continue;

	found_ok_skb:
		/* Ok so how much can we use? */
		used = skb->len - offset;
		if (len < used)
			used = len;

		/* Do we have urgent data here? */
		if (tp->urg_data) {
			u32 urg_offset = tp->urg_seq - *seq;
			if (urg_offset < used) {
				if (!urg_offset) {
					/* Skip over the urgent byte when it is
					 * delivered out-of-band (not inline).
					 */
					if (!sock_flag(sk, SOCK_URGINLINE)) {
						++*seq;
						urg_hole++;
						offset++;
						used--;
						if (!used)
							goto skip_copy;
					}
				} else
					used = urg_offset;
			}
		}

		if (!(flags & MSG_TRUNC)) {
#ifdef CONFIG_NET_DMA
			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
				tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);

			if (tp->ucopy.dma_chan) {
				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
					tp->ucopy.dma_chan, skb, offset,
					msg->msg_iov, used,
					tp->ucopy.pinned_list);

				if (tp->ucopy.dma_cookie < 0) {

					printk(KERN_ALERT "dma_cookie < 0\n");

					/* Exception. Bailout! */
					if (!copied)
						copied = -EFAULT;
					break;
				}

				dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);

				if ((offset + used) == skb->len)
					copied_early = 1;

			} else
#endif
			{
				err = skb_copy_datagram_iovec(skb, offset,
						msg->msg_iov, used);
				if (err) {
					/* Exception. Bailout! */
					if (!copied)
						copied = -EFAULT;
					break;
				}
			}
		}

		*seq += used;
		copied += used;
		len -= used;

		tcp_rcv_space_adjust(sk);

skip_copy:
		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
			tp->urg_data = 0;
			tcp_fast_path_check(sk);
		}
		if (used + offset < skb->len)
			continue;

		if (tcp_hdr(skb)->fin)
			goto found_fin_ok;
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
			copied_early = 0;
		}
		continue;

	found_fin_ok:
		/* Process the FIN. */
		++*seq;
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
			copied_early = 0;
		}
		break;
	} while (len > 0);

	if (user_recv) {
		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
			int chunk;

			tp->ucopy.len = copied > 0 ?
					len : 0;
			tcp_prequeue_process(sk);

			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(sock_net(sk),
						   LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE,
						   chunk);
				len -= chunk;
				copied += chunk;
			}
		}

		tp->ucopy.task = NULL;
		tp->ucopy.len = 0;
	}

#ifdef CONFIG_NET_DMA
	tcp_service_net_dma(sk, true);	/* Wait for queue to drain */
	tp->ucopy.dma_chan = NULL;

	if (tp->ucopy.pinned_list) {
		dma_unpin_iovec_pages(tp->ucopy.pinned_list);
		tp->ucopy.pinned_list = NULL;
	}
#endif

	/* According to UNIX98, msg_name/msg_namelen are ignored
	 * on connected socket. I was just happy when found this 8) --ANK
	 */

	/* Clean up data we have read: This will do ACK frames. */
	tcp_cleanup_rbuf(sk, copied);

	release_sock(sk);

	if (copied > 0)
		uid_stat_tcp_rcv(current_uid(), copied);
	return copied;

out:
	release_sock(sk);
	return err;

recv_urg:
	err = tcp_recv_urg(sk, msg, len, flags);
	if (err > 0)
		uid_stat_tcp_rcv(current_uid(), err);
	goto out;
}
EXPORT_SYMBOL(tcp_recvmsg);

/* Transition the socket to @state, keeping CURRESTAB/ESTABRESETS MIB
 * counters in sync and unhashing the socket on the way to TCP_CLOSE.
 */
void tcp_set_state(struct sock *sk, int state)
{
	int oldstate = sk->sk_state;

	switch (state) {
	case TCP_ESTABLISHED:
		if (oldstate != TCP_ESTABLISHED)
			TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
		break;

	case TCP_CLOSE:
		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
			TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(sk);
		/* fall through */
	default:
		if (oldstate == TCP_ESTABLISHED)
			TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk->sk_state = state;

#ifdef STATE_TRACE
	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
#endif
}
EXPORT_SYMBOL_GPL(tcp_set_state);

/*
 *	State processing on a close. This implements the state shift for
 *	sending our FIN frame. Note that we only send a FIN for some
 *	states. A shutdown() may have already sent the FIN, or we may be
 *	closed.
 */
static const unsigned char new_state[16] = {
  /* current state:        new state:      action:	*/
  /* (Invalid)		*/ TCP_CLOSE,
  /* TCP_ESTABLISHED	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_SYN_SENT	*/ TCP_CLOSE,
  /* TCP_SYN_RECV	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_FIN_WAIT1	*/ TCP_FIN_WAIT1,
  /* TCP_FIN_WAIT2	*/ TCP_FIN_WAIT2,
  /* TCP_TIME_WAIT	*/ TCP_CLOSE,
  /* TCP_CLOSE		*/ TCP_CLOSE,
  /* TCP_CLOSE_WAIT	*/ TCP_LAST_ACK | TCP_ACTION_FIN,
  /* TCP_LAST_ACK	*/ TCP_LAST_ACK,
  /* TCP_LISTEN		*/ TCP_CLOSE,
  /* TCP_CLOSING	*/ TCP_CLOSING,
};

/* Apply the close-time state transition; non-zero return means the
 * caller must also send a FIN.
 */
static int tcp_close_state(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];
	int ns = next & TCP_STATE_MASK;

	tcp_set_state(sk, ns);

	return next & TCP_ACTION_FIN;
}

/*
 *	Shutdown the sending side of a connection. Much like close except
 *	that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD).
 */
void tcp_shutdown(struct sock *sk, int how)
{
	/*	We need to grab some memory, and put together a FIN,
	 *	and then put it into the queue to be sent.
	 *		Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
	 */
	if (!(how & SEND_SHUTDOWN))
		return;

	/* If we've already sent a FIN, or it's a closed state, skip this. */
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
		/* Clear out any half completed packets.  FIN if needed. */
		if (tcp_close_state(sk))
			tcp_send_fin(sk);
	}
}
EXPORT_SYMBOL(tcp_shutdown);

/* Close the socket: flush unread data (sending RST per RFC 2525 2.17 if
 * any was discarded), otherwise FIN per the state machine; continues
 * past this point with orphan handling.
 */
void tcp_close(struct sock *sk, long timeout)
{
	struct sk_buff *skb;
	int data_was_unread = 0;
	int state;

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == TCP_LISTEN) {
		tcp_set_state(sk, TCP_CLOSE);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	/*  We need to flush the recv. buffs.  We do this only on the
	 *  descriptor close, not protocol-sourced closes, because the
	 *  reader process may not have drained the data yet!
*/ while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq - tcp_hdr(skb)->fin; data_was_unread += len; __kfree_skb(skb); } sk_mem_reclaim(sk); /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */ if (sk->sk_state == TCP_CLOSE) goto adjudge_to_death; /* As outlined in RFC 2525, section 2.17, we send a RST here because * data was lost. To witness the awful effects of the old behavior of * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk * GET in an FTP client, suspend the process, wait for the client to * advertise a zero window, then kill -9 the FTP client, wheee... * Note: timeout is always zero in such a case. */ if (data_was_unread) { /* Unread data was tossed, zap the connection. */ NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); tcp_set_state(sk, TCP_CLOSE); tcp_send_active_reset(sk, sk->sk_allocation); } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { /* Check zero linger _after_ checking for unread data. */ sk->sk_prot->disconnect(sk, 0); NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA); } else if (tcp_close_state(sk)) { /* We FIN if the application ate all the data before * zapping the connection. */ /* RED-PEN. Formally speaking, we have broken TCP state * machine. State transitions: * * TCP_ESTABLISHED -> TCP_FIN_WAIT1 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible) * TCP_CLOSE_WAIT -> TCP_LAST_ACK * * are legal only when FIN has been sent (i.e. in window), * rather than queued out of window. Purists blame. * * F.e. "RFC state" is ESTABLISHED, * if Linux state is FIN-WAIT-1, but FIN is still not sent. * * The visible declinations are that sometimes * we enter time-wait state, when it is not required really * (harmless), do not send active resets, when they are * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when * they look as CLOSING or LAST_ACK for Linux) * Probably, I missed some more holelets. 
* --ANK */ tcp_send_fin(sk); } sk_stream_wait_close(sk, timeout); adjudge_to_death: state = sk->sk_state; sock_hold(sk); sock_orphan(sk); /* It is the last release_sock in its life. It will remove backlog. */ release_sock(sk); /* Now socket is owned by kernel and we acquire BH lock to finish close. No need to check for user refs. */ local_bh_disable(); bh_lock_sock(sk); WARN_ON(sock_owned_by_user(sk)); percpu_counter_inc(sk->sk_prot->orphan_count); /* Have we already been destroyed by a softirq or backlog? */ if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) goto out; /* This is a (useful) BSD violating of the RFC. There is a * problem with TCP as specified in that the other end could * keep a socket open forever with no application left this end. * We use a 3 minute timeout (about the same as BSD) then kill * our end. If they send after that then tough - BUT: long enough * that we won't make the old 4*rto = almost no time - whoops * reset mistake. * * Nope, it was not mistake. It is really desired behaviour * f.e. on http servers, when such sockets are useless, but * consume significant resources. Let's do it with special * linger2 option. 
--ANK */ if (sk->sk_state == TCP_FIN_WAIT2) { struct tcp_sock *tp = tcp_sk(sk); if (tp->linger2 < 0) { tcp_set_state(sk, TCP_CLOSE); tcp_send_active_reset(sk, GFP_ATOMIC); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONLINGER); } else { const int tmo = tcp_fin_time(sk); if (tmo > TCP_TIMEWAIT_LEN) { inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); } else { tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); goto out; } } } if (sk->sk_state != TCP_CLOSE) { sk_mem_reclaim(sk); if (tcp_too_many_orphans(sk, 0)) { if (net_ratelimit()) printk(KERN_INFO "TCP: too many of orphaned " "sockets\n"); tcp_set_state(sk, TCP_CLOSE); tcp_send_active_reset(sk, GFP_ATOMIC); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY); } } if (sk->sk_state == TCP_CLOSE) inet_csk_destroy_sock(sk); /* Otherwise, socket is reprieved until protocol close. */ out: bh_unlock_sock(sk); local_bh_enable(); sock_put(sk); } EXPORT_SYMBOL(tcp_close); /* These states need RST on ABORT according to RFC793 */ static inline int tcp_need_reset(int state) { return (1 << state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | TCPF_SYN_RECV); } int tcp_disconnect(struct sock *sk, int flags) { struct inet_sock *inet = inet_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); int err = 0; int old_state = sk->sk_state; if (old_state != TCP_CLOSE) tcp_set_state(sk, TCP_CLOSE); /* ABORT function of RFC793 */ if (old_state == TCP_LISTEN) { inet_csk_listen_stop(sk); } else if (tcp_need_reset(old_state) || (tp->snd_nxt != tp->write_seq && (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) { /* The last check adjusts for discrepancy of Linux wrt. 
RFC * states */ tcp_send_active_reset(sk, gfp_any()); sk->sk_err = ECONNRESET; } else if (old_state == TCP_SYN_SENT) sk->sk_err = ECONNRESET; tcp_clear_xmit_timers(sk); __skb_queue_purge(&sk->sk_receive_queue); tcp_write_queue_purge(sk); __skb_queue_purge(&tp->out_of_order_queue); #ifdef CONFIG_NET_DMA __skb_queue_purge(&sk->sk_async_wait_queue); #endif inet->inet_dport = 0; if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) inet_reset_saddr(sk); sk->sk_shutdown = 0; sock_reset_flag(sk, SOCK_DONE); tp->srtt = 0; if ((tp->write_seq += tp->max_window + 2) == 0) tp->write_seq = 1; icsk->icsk_backoff = 0; tp->snd_cwnd = 2; icsk->icsk_probes_out = 0; tp->packets_out = 0; tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; tp->snd_cwnd_cnt = 0; tp->bytes_acked = 0; tp->window_clamp = 0; tcp_set_ca_state(sk, TCP_CA_Open); tcp_clear_retrans(tp); inet_csk_delack_init(sk); tcp_init_send_head(sk); memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); __sk_dst_reset(sk); WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); sk->sk_error_report(sk); return err; } EXPORT_SYMBOL(tcp_disconnect); /* * Socket option code for TCP. 
 */
/* Handle SOL_TCP setsockopt. String/struct options (TCP_CONGESTION,
 * TCP_COOKIE_TRANSACTIONS) are dispatched first; all remaining options
 * take a single int read from userspace.
 */
static int do_tcp_setsockopt(struct sock *sk, int level,
		int optname, char __user *optval, unsigned int optlen)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int val;
	int err = 0;

	/* These are data/string values, all the others are ints */
	switch (optname) {
	case TCP_CONGESTION: {
		char name[TCP_CA_NAME_MAX];

		if (optlen < 1)
			return -EINVAL;

		val = strncpy_from_user(name, optval,
					min_t(long, TCP_CA_NAME_MAX-1, optlen));
		if (val < 0)
			return -EFAULT;
		name[val] = 0;

		lock_sock(sk);
		err = tcp_set_congestion_control(sk, name);
		release_sock(sk);
		return err;
	}
	case TCP_COOKIE_TRANSACTIONS: {
		struct tcp_cookie_transactions ctd;
		struct tcp_cookie_values *cvp = NULL;

		if (sizeof(ctd) > optlen)
			return -EINVAL;
		if (copy_from_user(&ctd, optval, sizeof(ctd)))
			return -EFAULT;

		/* Validate user-supplied sizes/flags before touching state. */
		if (ctd.tcpct_used > sizeof(ctd.tcpct_value) ||
		    ctd.tcpct_s_data_desired > TCP_MSS_DESIRED)
			return -EINVAL;

		if (ctd.tcpct_cookie_desired == 0) {
			/* default to global value */
		} else if ((0x1 & ctd.tcpct_cookie_desired) ||
			   ctd.tcpct_cookie_desired > TCP_COOKIE_MAX ||
			   ctd.tcpct_cookie_desired < TCP_COOKIE_MIN) {
			return -EINVAL;
		}

		if (TCP_COOKIE_OUT_NEVER & ctd.tcpct_flags) {
			/* Supercedes all other values */
			lock_sock(sk);
			if (tp->cookie_values != NULL) {
				kref_put(&tp->cookie_values->kref,
					 tcp_cookie_values_release);
				tp->cookie_values = NULL;
			}
			tp->rx_opt.cookie_in_always = 0; /* false */
			tp->rx_opt.cookie_out_never = 1; /* true */
			release_sock(sk);
			return err;
		}

		/* Allocate ancillary memory before locking.
		 */
		if (ctd.tcpct_used > 0 ||
		    (tp->cookie_values == NULL &&
		     (sysctl_tcp_cookie_size > 0 ||
		      ctd.tcpct_cookie_desired > 0 ||
		      ctd.tcpct_s_data_desired > 0))) {
			cvp = kzalloc(sizeof(*cvp) + ctd.tcpct_used,
				      GFP_KERNEL);
			if (cvp == NULL)
				return -ENOMEM;

			kref_init(&cvp->kref);
		}
		lock_sock(sk);
		tp->rx_opt.cookie_in_always =
			(TCP_COOKIE_IN_ALWAYS & ctd.tcpct_flags);
		tp->rx_opt.cookie_out_never = 0; /* false */

		if (tp->cookie_values != NULL) {
			if (cvp != NULL) {
				/* Changed values are recorded by a changed
				 * pointer, ensuring the cookie will differ,
				 * without separately hashing each value later.
				 */
				kref_put(&tp->cookie_values->kref,
					 tcp_cookie_values_release);
			} else {
				/* No new allocation: keep the existing one. */
				cvp = tp->cookie_values;
			}
		}

		if (cvp != NULL) {
			cvp->cookie_desired = ctd.tcpct_cookie_desired;

			if (ctd.tcpct_used > 0) {
				memcpy(cvp->s_data_payload, ctd.tcpct_value,
				       ctd.tcpct_used);
				cvp->s_data_desired = ctd.tcpct_used;
				cvp->s_data_constant = 1; /* true */
			} else {
				/* No constant payload data. */
				cvp->s_data_desired = ctd.tcpct_s_data_desired;
				cvp->s_data_constant = 0; /* false */
			}

			tp->cookie_values = cvp;
		}
		release_sock(sk);
		return err;
	}
	default:
		/* fallthru */
		break;
	}

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case TCP_MAXSEG:
		/* Values greater than interface MTU won't take effect. However
		 * at the point when this call is done we typically don't yet
		 * know which interface is going to be used */
		if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
			err = -EINVAL;
			break;
		}
		tp->rx_opt.user_mss = val;
		break;

	case TCP_NODELAY:
		if (val) {
			/* TCP_NODELAY is weaker than TCP_CORK, so that
			 * this option on corked socket is remembered, but
			 * it is not activated until cork is cleared.
			 *
			 * However, when TCP_NODELAY is set we make
			 * an explicit push, which overrides even TCP_CORK
			 * for currently queued segments.
			 */
			tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk);
		} else {
			tp->nonagle &= ~TCP_NAGLE_OFF;
		}
		break;

	case TCP_THIN_LINEAR_TIMEOUTS:
		if (val < 0 || val > 1)
			err = -EINVAL;
		else
			tp->thin_lto = val;
		break;

	case TCP_THIN_DUPACK:
		if (val < 0 || val > 1)
			err = -EINVAL;
		else
			tp->thin_dupack = val;
		break;

	case TCP_CORK:
		/* When set indicates to always queue non-full frames.
		 * Later the user clears this option and we transmit
		 * any pending partial frames in the queue.  This is
		 * meant to be used alongside sendfile() to get properly
		 * filled frames when the user (for example) must write
		 * out headers with a write() call first and then use
		 * sendfile to send out the data parts.
		 *
		 * TCP_CORK can be set together with TCP_NODELAY and it is
		 * stronger than TCP_NODELAY.
		 */
		if (val) {
			tp->nonagle |= TCP_NAGLE_CORK;
		} else {
			tp->nonagle &= ~TCP_NAGLE_CORK;
			if (tp->nonagle&TCP_NAGLE_OFF)
				tp->nonagle |= TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk);
		}
		break;

	case TCP_KEEPIDLE:
		if (val < 1 || val > MAX_TCP_KEEPIDLE)
			err = -EINVAL;
		else {
			tp->keepalive_time = val * HZ;
			if (sock_flag(sk, SOCK_KEEPOPEN) &&
			    !((1 << sk->sk_state) &
			      (TCPF_CLOSE | TCPF_LISTEN))) {
				/* Re-arm the keepalive timer against the
				 * new idle value, crediting time already
				 * elapsed since the last data.
				 */
				u32 elapsed = keepalive_time_elapsed(tp);
				if (tp->keepalive_time > elapsed)
					elapsed = tp->keepalive_time - elapsed;
				else
					elapsed = 0;
				inet_csk_reset_keepalive_timer(sk, elapsed);
			}
		}
		break;
	case TCP_KEEPINTVL:
		if (val < 1 || val > MAX_TCP_KEEPINTVL)
			err = -EINVAL;
		else
			tp->keepalive_intvl = val * HZ;
		break;
	case TCP_KEEPCNT:
		if (val < 1 || val > MAX_TCP_KEEPCNT)
			err = -EINVAL;
		else
			tp->keepalive_probes = val;
		break;
	case TCP_SYNCNT:
		if (val < 1 || val > MAX_TCP_SYNCNT)
			err = -EINVAL;
		else
			icsk->icsk_syn_retries = val;
		break;

	case TCP_LINGER2:
		if (val < 0)
			tp->linger2 = -1;
		else if (val > sysctl_tcp_fin_timeout / HZ)
			tp->linger2 = 0;
		else
			tp->linger2 = val * HZ;
		break;

	case TCP_DEFER_ACCEPT:
		/* Translate value in seconds to number of retransmits */
		icsk->icsk_accept_queue.rskq_defer_accept =
			secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
					TCP_RTO_MAX / HZ);
		break;

	case TCP_WINDOW_CLAMP:
		if (!val) {
			/* Clamp may only be cleared on a closed socket. */
			if (sk->sk_state != TCP_CLOSE) {
				err = -EINVAL;
				break;
			}
			tp->window_clamp = 0;
		} else
			tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
						SOCK_MIN_RCVBUF / 2 : val;
		break;

	case TCP_QUICKACK:
		if (!val) {
			icsk->icsk_ack.pingpong = 1;
		} else {
			icsk->icsk_ack.pingpong = 0;
			if ((1 << sk->sk_state) &
			    (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
			    inet_csk_ack_scheduled(sk)) {
				icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
				tcp_cleanup_rbuf(sk, 1);
				if (!(val & 1))
					icsk->icsk_ack.pingpong = 1;
			}
		}
		break;

#ifdef CONFIG_TCP_MD5SIG
	case TCP_MD5SIG:
		/* Read the IP->Key mappings from userspace */
		err = tp->af_specific->md5_parse(sk, optval, optlen);
		break;
#endif
	case TCP_USER_TIMEOUT:
		/* Cap the max timeout in ms TCP will retry/retrans
		 * before giving up and aborting (ETIMEDOUT) a connection.
		 */
		if (val < 0)
			err = -EINVAL;
		else
			icsk->icsk_user_timeout = msecs_to_jiffies(val);
		break;
	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}

/* setsockopt entry point: forward non-SOL_TCP levels to the AF layer. */
int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   unsigned int optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		return icsk->icsk_af_ops->setsockopt(sk, level, optname,
						     optval, optlen);
	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(tcp_setsockopt);

#ifdef CONFIG_COMPAT
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	if (level != SOL_TCP)
		return inet_csk_compat_setsockopt(sk, level, optname,
						  optval, optlen);
	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_tcp_setsockopt);
#endif

/* Return information about state of tcp endpoint in API format.
 */
/* Fill *info (struct tcp_info, the TCP_INFO getsockopt payload) from the
 * socket's current state.  Time values are converted from jiffies to
 * usecs/msecs; srtt/mdev/rcv_rtt are stored left-shifted internally,
 * hence the >>3 / >>2 corrections below.
 */
void tcp_get_info(struct sock *sk, struct tcp_info *info)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now = tcp_time_stamp;

	memset(info, 0, sizeof(*info));

	info->tcpi_state = sk->sk_state;
	info->tcpi_ca_state = icsk->icsk_ca_state;
	info->tcpi_retransmits = icsk->icsk_retransmits;
	info->tcpi_probes = icsk->icsk_probes_out;
	info->tcpi_backoff = icsk->icsk_backoff;

	if (tp->rx_opt.tstamp_ok)
		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
	if (tcp_is_sack(tp))
		info->tcpi_options |= TCPI_OPT_SACK;
	if (tp->rx_opt.wscale_ok) {
		info->tcpi_options |= TCPI_OPT_WSCALE;
		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
	}

	if (tp->ecn_flags&TCP_ECN_OK)
		info->tcpi_options |= TCPI_OPT_ECN;

	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
	info->tcpi_snd_mss = tp->mss_cache;
	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;

	/* For listeners the unacked/sacked slots report backlog instead. */
	if (sk->sk_state == TCP_LISTEN) {
		info->tcpi_unacked = sk->sk_ack_backlog;
		info->tcpi_sacked = sk->sk_max_ack_backlog;
	} else {
		info->tcpi_unacked = tp->packets_out;
		info->tcpi_sacked = tp->sacked_out;
	}
	info->tcpi_lost = tp->lost_out;
	info->tcpi_retrans = tp->retrans_out;
	info->tcpi_fackets = tp->fackets_out;

	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);

	info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
	info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
	info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
	info->tcpi_snd_cwnd = tp->snd_cwnd;
	info->tcpi_advmss = tp->advmss;
	info->tcpi_reordering = tp->reordering;

	info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
	info->tcpi_rcv_space = tp->rcvq_space.space;

	info->tcpi_total_retrans = tp->total_retrans;
}
EXPORT_SYMBOL_GPL(tcp_get_info);

/* Handle SOL_TCP getsockopt.  Most options return an int (copied at the
 * bottom); TCP_INFO, TCP_CONGESTION and TCP_COOKIE_TRANSACTIONS copy
 * their own payload and return early.
 */
static int do_tcp_getsockopt(struct sock *sk, int level,
		int optname, char __user *optval, int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case TCP_MAXSEG:
		val = tp->mss_cache;
		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
			val = tp->rx_opt.user_mss;
		break;
	case TCP_NODELAY:
		val = !!(tp->nonagle&TCP_NAGLE_OFF);
		break;
	case TCP_CORK:
		val = !!(tp->nonagle&TCP_NAGLE_CORK);
		break;
	case TCP_KEEPIDLE:
		val = keepalive_time_when(tp) / HZ;
		break;
	case TCP_KEEPINTVL:
		val = keepalive_intvl_when(tp) / HZ;
		break;
	case TCP_KEEPCNT:
		val = keepalive_probes(tp);
		break;
	case TCP_SYNCNT:
		val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
		break;
	case TCP_LINGER2:
		val = tp->linger2;
		if (val >= 0)
			val = (val ? : sysctl_tcp_fin_timeout) / HZ;
		break;
	case TCP_DEFER_ACCEPT:
		val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
				      TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
		break;
	case TCP_WINDOW_CLAMP:
		val = tp->window_clamp;
		break;
	case TCP_INFO: {
		struct tcp_info info;

		if (get_user(len, optlen))
			return -EFAULT;

		tcp_get_info(sk, &info);

		len = min_t(unsigned int, len, sizeof(info));
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &info, len))
			return -EFAULT;
		return 0;
	}
	case TCP_QUICKACK:
		val = !icsk->icsk_ack.pingpong;
		break;

	case TCP_CONGESTION:
		if (get_user(len, optlen))
			return -EFAULT;
		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
			return -EFAULT;
		return 0;

	case TCP_COOKIE_TRANSACTIONS: {
		struct tcp_cookie_transactions ctd;
		struct tcp_cookie_values *cvp = tp->cookie_values;

		if (get_user(len, optlen))
			return -EFAULT;
		if (len < sizeof(ctd))
			return -EINVAL;

		memset(&ctd, 0, sizeof(ctd));
		ctd.tcpct_flags = (tp->rx_opt.cookie_in_always ?
				   TCP_COOKIE_IN_ALWAYS : 0)
				| (tp->rx_opt.cookie_out_never ?
				   TCP_COOKIE_OUT_NEVER : 0);

		if (cvp != NULL) {
			ctd.tcpct_flags |= (cvp->s_data_in ?
					    TCP_S_DATA_IN : 0)
					 | (cvp->s_data_out ?
					    TCP_S_DATA_OUT : 0);

			ctd.tcpct_cookie_desired = cvp->cookie_desired;
			ctd.tcpct_s_data_desired = cvp->s_data_desired;

			memcpy(&ctd.tcpct_value[0], &cvp->cookie_pair[0],
			       cvp->cookie_pair_size);
			ctd.tcpct_used = cvp->cookie_pair_size;
		}

		if (put_user(sizeof(ctd), optlen))
			return -EFAULT;
		if (copy_to_user(optval, &ctd, sizeof(ctd)))
			return -EFAULT;
		return 0;
	}
	case TCP_THIN_LINEAR_TIMEOUTS:
		val = tp->thin_lto;
		break;
	case TCP_THIN_DUPACK:
		val = tp->thin_dupack;
		break;
	case TCP_USER_TIMEOUT:
		val = jiffies_to_msecs(icsk->icsk_user_timeout);
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}

/* getsockopt entry point: forward non-SOL_TCP levels to the AF layer. */
int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		return icsk->icsk_af_ops->getsockopt(sk, level, optname,
						     optval, optlen);
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(tcp_getsockopt);

#ifdef CONFIG_COMPAT
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level != SOL_TCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_tcp_getsockopt);
#endif

/* Software segmentation of a TSO skb into MSS-sized segments,
 * patching sequence numbers, flags and checksums per segment.
 */
struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct tcphdr *th;
	unsigned thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;

	if (!pskb_may_pull(skb, sizeof(*th)))
		goto out;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	/* ~skb->len (low 16 bits) feeds the incremental checksum fixups. */
	oldlen = (u16)~skb->len;
	__skb_pull(skb,
		   thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		if (unlikely(type &
			     ~(SKB_GSO_TCPV4 |
			       SKB_GSO_DODGY |
			       SKB_GSO_TCP_ECN |
			       SKB_GSO_TCPV6 |
			       0) ||
			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Incremental checksum delta for all full-MSS segments. */
	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	do {
		/* FIN/PSH belong only on the final segment. */
		th->fin = th->psh = 0;

		th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				       (__force u32)delta));
		if (skb->ip_summed != CHECKSUM_PARTIAL)
			th->check =
			     csum_fold(csum_partial(skb_transport_header(skb),
						    thlen, skb->csum));

		seq += mss;
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	} while (skb->next);

	/* Last segment may be shorter than mss: recompute its delta. */
	delta = htonl(oldlen + (skb->tail - skb->transport_header) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		th->check = csum_fold(csum_partial(skb_transport_header(skb),
						   thlen, skb->csum));

out:
	return segs;
}
EXPORT_SYMBOL(tcp_tso_segment);

/* GRO receive for TCP: try to merge skb into an existing flow on *head.
 * Sets NAPI_GRO_CB(skb)->flush when the packet must not be held back.
 */
struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	for (; (p = *head); head = &p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		/* Compare source+dest ports as one 32-bit word. */
		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}

	goto out_check_final;

found:
	/* Any header difference (other than FIN/PSH/CWR), option change,
	 * out-of-order seq or over-size segment forces a flush.
	 */
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	mss = skb_shinfo(p)->gso_size;

	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);

	if (flush || skb_gro_receive(head, skb)) {
		mss = 1;
		goto out_check_final;
	}

	p = *head;
	th2 = tcp_hdr(p);
	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = head;

out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
EXPORT_SYMBOL(tcp_gro_receive);

/* Finish a merged GRO skb: mark it for checksum offload and restore
 * GSO metadata so the stack can resegment if needed.
 */
int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

#ifdef CONFIG_TCP_MD5SIG
/* Global refcounted per-cpu pool of md5 crypto contexts, shared by all
 * TCP-MD5 users; guarded by tcp_md5sig_pool_lock.
 */
static unsigned long tcp_md5sig_users;
static struct tcp_md5sig_pool * __percpu *tcp_md5sig_pool;
static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);

/* Free every per-cpu entry (and its crypto tfm) of the pool. */
static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);

		if (p) {
			if (p->md5_desc.tfm)
				crypto_free_hash(p->md5_desc.tfm);
			kfree(p);
		}
	}
	free_percpu(pool);
}

/* Drop one pool reference; the last user tears the pool down. */
void tcp_free_md5sig_pool(void)
{
	struct tcp_md5sig_pool * __percpu *pool = NULL;

	spin_lock_bh(&tcp_md5sig_pool_lock);
	if (--tcp_md5sig_users == 0) {
		pool = tcp_md5sig_pool;
		tcp_md5sig_pool = NULL;
	}
	spin_unlock_bh(&tcp_md5sig_pool_lock);
	if (pool)
		__tcp_free_md5sig_pool(pool);
}
EXPORT_SYMBOL(tcp_free_md5sig_pool);

/* Allocate a fresh per-cpu pool with one md5 crypto tfm per possible cpu.
 * May sleep (called without the pool spinlock held).
 */
static struct tcp_md5sig_pool * __percpu *
__tcp_alloc_md5sig_pool(struct sock *sk)
{
	int cpu;
	struct tcp_md5sig_pool * __percpu *pool;

	pool = alloc_percpu(struct tcp_md5sig_pool *);
	if (!pool)
		return NULL;

	for_each_possible_cpu(cpu) {
		struct tcp_md5sig_pool *p;
		struct crypto_hash *hash;

		p = kzalloc(sizeof(*p), sk->sk_allocation);
		if (!p)
			goto out_free;
		*per_cpu_ptr(pool, cpu) = p;

		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
		if (!hash || IS_ERR(hash))
			goto out_free;

		p->md5_desc.tfm = hash;
	}
	return pool;
out_free:
	__tcp_free_md5sig_pool(pool);
	return NULL;
}

/* Take a reference on the global pool, allocating it on first use.
 * Concurrent first users spin (cpu_relax) until the allocator publishes
 * the pool or gives up.
 */
struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
{
	struct tcp_md5sig_pool * __percpu *pool;
	int alloc = 0;

retry:
	spin_lock_bh(&tcp_md5sig_pool_lock);
	pool = tcp_md5sig_pool;
	if (tcp_md5sig_users++ == 0) {
		alloc = 1;
		spin_unlock_bh(&tcp_md5sig_pool_lock);
	} else if (!pool) {
		/* Another task is allocating; back off and retry. */
		tcp_md5sig_users--;
		spin_unlock_bh(&tcp_md5sig_pool_lock);
		cpu_relax();
		goto retry;
	} else
		spin_unlock_bh(&tcp_md5sig_pool_lock);

	if (alloc) {
		/* we cannot hold spinlock here because this may sleep. */
		struct tcp_md5sig_pool * __percpu *p;

		p = __tcp_alloc_md5sig_pool(sk);
		spin_lock_bh(&tcp_md5sig_pool_lock);
		if (!p) {
			tcp_md5sig_users--;
			spin_unlock_bh(&tcp_md5sig_pool_lock);
			return NULL;
		}
		pool = tcp_md5sig_pool;
		if (pool) {
			/* oops, it has already been assigned. */
			spin_unlock_bh(&tcp_md5sig_pool_lock);
			__tcp_free_md5sig_pool(p);
		} else {
			tcp_md5sig_pool = pool = p;
			spin_unlock_bh(&tcp_md5sig_pool_lock);
		}
	}
	return pool;
}
EXPORT_SYMBOL(tcp_alloc_md5sig_pool);

/**
 * tcp_get_md5sig_pool - get md5sig_pool for this user
 *
 * We use percpu structure, so if we succeed, we exit with preemption
 * and BH disabled, to make sure another thread or softirq handling
 * wont try to get same context.
 */
struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
{
	struct tcp_md5sig_pool * __percpu *p;

	local_bh_disable();

	spin_lock(&tcp_md5sig_pool_lock);
	p = tcp_md5sig_pool;
	if (p)
		tcp_md5sig_users++;
	spin_unlock(&tcp_md5sig_pool_lock);

	if (p)
		return *this_cpu_ptr(p);

	local_bh_enable();
	return NULL;
}
EXPORT_SYMBOL(tcp_get_md5sig_pool);

/* Counterpart of tcp_get_md5sig_pool: re-enable BHs and drop the ref. */
void tcp_put_md5sig_pool(void)
{
	local_bh_enable();
	tcp_free_md5sig_pool();
}
EXPORT_SYMBOL(tcp_put_md5sig_pool);

/* Feed the fixed-size TCP header (checksum field zeroed) into the hash. */
int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
			struct tcphdr *th)
{
	struct scatterlist sg;
	int err;
	__sum16 old_checksum = th->check;

	th->check = 0;
	/* options aren't included in the hash */
	sg_init_one(&sg, th, sizeof(struct tcphdr));
	err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(struct tcphdr));
	th->check = old_checksum;
	return err;
}
EXPORT_SYMBOL(tcp_md5_hash_header);

/* Feed the skb payload (linear part past header_len, page frags, and
 * frag_list skbs) into the md5 hash.  Returns 1 on crypto failure.
 */
int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
			  struct sk_buff *skb, unsigned header_len)
{
	struct scatterlist sg;
	const struct tcphdr *tp = tcp_hdr(skb);
	struct hash_desc *desc = &hp->md5_desc;
	unsigned i;
	const unsigned head_data_len = skb_headlen(skb) > header_len ?
				       skb_headlen(skb) - header_len : 0;
	const struct skb_shared_info *shi = skb_shinfo(skb);
	struct sk_buff *frag_iter;

	sg_init_table(&sg, 1);

	sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
	if (crypto_hash_update(desc, &sg, head_data_len))
		return 1;

	for (i = 0; i < shi->nr_frags; ++i) {
		const struct skb_frag_struct *f = &shi->frags[i];
		unsigned int offset = f->page_offset;
		struct page *page = f->page + (offset >> PAGE_SHIFT);

		sg_set_page(&sg, page, f->size, offset_in_page(offset));
		if (crypto_hash_update(desc, &sg, f->size))
			return 1;
	}

	/* Recurse over any chained skbs in the frag list. */
	skb_walk_frags(skb, frag_iter)
		if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
			return 1;

	return 0;
}
EXPORT_SYMBOL(tcp_md5_hash_skb_data);

/* Feed the configured md5 key bytes into the hash. */
int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, struct tcp_md5sig_key *key)
{
	struct scatterlist sg;

	sg_init_one(&sg, key->key, key->keylen);
	return crypto_hash_update(&hp->md5_desc, &sg, key->keylen);
}
EXPORT_SYMBOL(tcp_md5_hash_key);

#endif

/**
 * Each Responder maintains up to two secret values concurrently for
 * efficient secret rollover.  Each secret value has 4 states:
 *
 * Generating.  (tcp_secret_generating != tcp_secret_primary)
 *    Generates new Responder-Cookies, but not yet used for primary
 *    verification.  This is a short-term state, typically lasting only
 *    one round trip time (RTT).
 *
 * Primary.  (tcp_secret_generating == tcp_secret_primary)
 *    Used both for generation and primary verification.
 *
 * Retiring.  (tcp_secret_retiring != tcp_secret_secondary)
 *    Used for verification, until the first failure that can be
 *    verified by the newer Generating secret.  At that time, this
 *    cookie's state is changed to Secondary, and the Generating
 *    cookie's state is changed to Primary.  This is a short-term state,
 *    typically lasting only one round trip time (RTT).
 *
 * Secondary.  (tcp_secret_retiring == tcp_secret_secondary)
 *    Used for secondary verification, after primary verification
 *    failures.  This state lasts no more than twice the Maximum Segment
 *    Lifetime (2MSL).  Then, the secret is discarded.
 */
struct tcp_cookie_secret {
	/* The secret is divided into two parts.  The digest part is the
	 * equivalent of previously hashing a secret and saving the state,
	 * and serves as an initialization vector (IV).  The message part
	 * serves as the trailing secret.
	 */
	u32				secrets[COOKIE_WORKSPACE_WORDS];
	unsigned long			expires;
};

#define TCP_SECRET_1MSL (HZ * TCP_PAWS_MSL)
#define TCP_SECRET_2MSL (HZ * TCP_PAWS_MSL * 2)
#define TCP_SECRET_LIFE (HZ * 600)

static struct tcp_cookie_secret tcp_secret_one;
static struct tcp_cookie_secret tcp_secret_two;

/* Essentially a circular list, without dynamic allocation. */
static struct tcp_cookie_secret *tcp_secret_generating;
static struct tcp_cookie_secret *tcp_secret_primary;
static struct tcp_cookie_secret *tcp_secret_retiring;
static struct tcp_cookie_secret *tcp_secret_secondary;

static DEFINE_SPINLOCK(tcp_secret_locker);

/* Select a pseudo-random word in the cookie workspace. */
static inline u32 tcp_cookie_work(const u32 *ws, const int n)
{
	return ws[COOKIE_DIGEST_WORDS + ((COOKIE_MESSAGE_WORDS-1) & ws[n])];
}

/* Fill bakery[COOKIE_WORKSPACE_WORDS] with generator, updating as needed.
 * Called in softirq context.
 * Returns: 0 for success.
 */
int tcp_cookie_generator(u32 *bakery)
{
	unsigned long jiffy = jiffies;

	if (unlikely(time_after_eq(jiffy, tcp_secret_generating->expires))) {
		spin_lock_bh(&tcp_secret_locker);
		if (!time_after_eq(jiffy, tcp_secret_generating->expires)) {
			/* refreshed by another */
			memcpy(bakery,
			       &tcp_secret_generating->secrets[0],
			       COOKIE_WORKSPACE_WORDS);
		} else {
			/* still needs refreshing */
			get_random_bytes(bakery, COOKIE_WORKSPACE_WORDS);

			/* The first time, paranoia assumes that the
			 * randomization function isn't as strong.  But,
			 * this secret initialization is delayed until
			 * the last possible moment (packet arrival).
			 * Although that time is observable, it is
			 * unpredictably variable.  Mash in the most
			 * volatile clock bits available, and expire the
			 * secret extra quickly.
*/ if (unlikely(tcp_secret_primary->expires == tcp_secret_secondary->expires)) { struct timespec tv; getnstimeofday(&tv); bakery[COOKIE_DIGEST_WORDS+0] ^= (u32)tv.tv_nsec; tcp_secret_secondary->expires = jiffy + TCP_SECRET_1MSL + (0x0f & tcp_cookie_work(bakery, 0)); } else { tcp_secret_secondary->expires = jiffy + TCP_SECRET_LIFE + (0xff & tcp_cookie_work(bakery, 1)); tcp_secret_primary->expires = jiffy + TCP_SECRET_2MSL + (0x1f & tcp_cookie_work(bakery, 2)); } memcpy(&tcp_secret_secondary->secrets[0], bakery, COOKIE_WORKSPACE_WORDS); rcu_assign_pointer(tcp_secret_generating, tcp_secret_secondary); rcu_assign_pointer(tcp_secret_retiring, tcp_secret_primary); /* * Neither call_rcu() nor synchronize_rcu() needed. * Retiring data is not freed. It is replaced after * further (locked) pointer updates, and a quiet time * (minimum 1MSL, maximum LIFE - 2MSL). */ } spin_unlock_bh(&tcp_secret_locker); } else { rcu_read_lock_bh(); memcpy(bakery, &rcu_dereference(tcp_secret_generating)->secrets[0], COOKIE_WORKSPACE_WORDS); rcu_read_unlock_bh(); } return 0; } EXPORT_SYMBOL(tcp_cookie_generator); void tcp_done(struct sock *sk) { if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS); tcp_set_state(sk, TCP_CLOSE); tcp_clear_xmit_timers(sk); sk->sk_shutdown = SHUTDOWN_MASK; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_state_change(sk); else inet_csk_destroy_sock(sk); } EXPORT_SYMBOL_GPL(tcp_done); extern struct tcp_congestion_ops tcp_reno; static __initdata unsigned long thash_entries; static int __init set_thash_entries(char *str) { if (!str) return 0; thash_entries = simple_strtoul(str, &str, 0); return 1; } __setup("thash_entries=", set_thash_entries); void __init tcp_init(void) { struct sk_buff *skb = NULL; unsigned long limit; int i, max_rshare, max_wshare, cnt; unsigned long jiffy = jiffies; BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb)); percpu_counter_init(&tcp_sockets_allocated, 0); 
percpu_counter_init(&tcp_orphan_count, 0); tcp_hashinfo.bind_bucket_cachep = kmem_cache_create("tcp_bind_bucket", sizeof(struct inet_bind_bucket), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); /* Size and allocate the main established and bind bucket * hash tables. * * The methodology is similar to that of the buffer cache. */ tcp_hashinfo.ehash = alloc_large_system_hash("TCP established", sizeof(struct inet_ehash_bucket), thash_entries, (totalram_pages >= 128 * 1024) ? 13 : 15, 0, NULL, &tcp_hashinfo.ehash_mask, thash_entries ? 0 : 512 * 1024); for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) { INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i); INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i); } if (inet_ehash_locks_alloc(&tcp_hashinfo)) panic("TCP: failed to alloc ehash_locks"); tcp_hashinfo.bhash = alloc_large_system_hash("TCP bind", sizeof(struct inet_bind_hashbucket), tcp_hashinfo.ehash_mask + 1, (totalram_pages >= 128 * 1024) ? 13 : 15, 0, &tcp_hashinfo.bhash_size, NULL, 64 * 1024); tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size; for (i = 0; i < tcp_hashinfo.bhash_size; i++) { spin_lock_init(&tcp_hashinfo.bhash[i].lock); INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); } cnt = tcp_hashinfo.ehash_mask + 1; tcp_death_row.sysctl_max_tw_buckets = cnt / 2; sysctl_tcp_max_orphans = cnt / 2; sysctl_max_syn_backlog = max(128, cnt / 256); limit = nr_free_buffer_pages() / 8; limit = max(limit, 128UL); sysctl_tcp_mem[0] = limit / 4 * 3; sysctl_tcp_mem[1] = limit; sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2; /* Set per-socket limits to no more than 1/128 the pressure threshold */ limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7); max_wshare = min(4UL*1024*1024, limit); max_rshare = min(6UL*1024*1024, limit); sysctl_tcp_wmem[0] = SK_MEM_QUANTUM; sysctl_tcp_wmem[1] = 16*1024; sysctl_tcp_wmem[2] = max(64*1024, max_wshare); sysctl_tcp_rmem[0] = SK_MEM_QUANTUM; sysctl_tcp_rmem[1] = 87380; sysctl_tcp_rmem[2] = max(87380, max_rshare); printk(KERN_INFO 
"TCP: Hash tables configured " "(established %u bind %u)\n", tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); tcp_register_congestion_control(&tcp_reno); memset(&tcp_secret_one.secrets[0], 0, sizeof(tcp_secret_one.secrets)); memset(&tcp_secret_two.secrets[0], 0, sizeof(tcp_secret_two.secrets)); tcp_secret_one.expires = jiffy; /* past due */ tcp_secret_two.expires = jiffy; /* past due */ tcp_secret_generating = &tcp_secret_one; tcp_secret_primary = &tcp_secret_one; tcp_secret_retiring = &tcp_secret_two; tcp_secret_secondary = &tcp_secret_two; } static int tcp_is_local(struct net *net, __be32 addr) { struct rtable *rt; struct flowi4 fl4 = { .daddr = addr }; rt = ip_route_output_key(net, &fl4); if (IS_ERR_OR_NULL(rt)) return 0; return rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK); } #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) static int tcp_is_local6(struct net *net, struct in6_addr *addr) { struct rt6_info *rt6 = rt6_lookup(net, addr, addr, 0, 0); return rt6 && rt6->rt6i_dev && (rt6->rt6i_dev->flags & IFF_LOOPBACK); } #endif /* * tcp_nuke_addr - destroy all sockets on the given local address * if local address is the unspecified address (0.0.0.0 or ::), destroy all * sockets with local addresses that are not configured. 
*/ int tcp_nuke_addr(struct net *net, struct sockaddr *addr) { int family = addr->sa_family; unsigned int bucket; struct in_addr *in; #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) struct in6_addr *in6; #endif if (family == AF_INET) { in = &((struct sockaddr_in *)addr)->sin_addr; #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) } else if (family == AF_INET6) { in6 = &((struct sockaddr_in6 *)addr)->sin6_addr; #endif } else { return -EAFNOSUPPORT; } for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) { struct hlist_nulls_node *node; struct sock *sk; spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket); restart: spin_lock_bh(lock); sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) { struct inet_sock *inet = inet_sk(sk); if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT) continue; if (sock_flag(sk, SOCK_DEAD)) continue; if (family == AF_INET) { __be32 s4 = inet->inet_rcv_saddr; if (s4 == LOOPBACK4_IPV6) continue; if (in->s_addr != s4 && !(in->s_addr == INADDR_ANY && !tcp_is_local(net, s4))) continue; } #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) if (family == AF_INET6) { struct in6_addr *s6; if (!inet->pinet6) continue; s6 = &inet->pinet6->rcv_saddr; if (ipv6_addr_type(s6) == IPV6_ADDR_MAPPED) continue; if (!ipv6_addr_equal(in6, s6) && !(ipv6_addr_equal(in6, &in6addr_any) && !tcp_is_local6(net, s6))) continue; } #endif sock_hold(sk); spin_unlock_bh(lock); local_bh_disable(); bh_lock_sock(sk); sk->sk_err = ETIMEDOUT; sk->sk_error_report(sk); tcp_done(sk); bh_unlock_sock(sk); local_bh_enable(); sock_put(sk); goto restart; } spin_unlock_bh(lock); } return 0; }
gpl-2.0
rajamalw/motorola_kernel_condor
drivers/slimbus/slimbus.c
429
86732
/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/completion.h> #include <linux/idr.h> #include <linux/pm_runtime.h> #include <linux/slimbus/slimbus.h> #define SLIM_PORT_HDL(la, f, p) ((la)<<24 | (f) << 16 | (p)) #define SLIM_HDL_TO_LA(hdl) ((u32)((hdl) & 0xFF000000) >> 24) #define SLIM_HDL_TO_FLOW(hdl) (((u32)(hdl) & 0xFF0000) >> 16) #define SLIM_HDL_TO_PORT(hdl) ((u32)(hdl) & 0xFF) #define SLIM_HDL_TO_CHIDX(hdl) ((u16)(hdl) & 0xFF) #define SLIM_GRP_TO_NCHAN(hdl) ((u16)(hdl >> 8) & 0xFF) #define SLIM_SLAVE_PORT(p, la) (((la)<<16) | (p)) #define SLIM_MGR_PORT(p) ((0xFF << 16) | (p)) #define SLIM_LA_MANAGER 0xFF #define SLIM_START_GRP (1 << 8) #define SLIM_END_GRP (1 << 9) #define SLIM_MAX_INTR_COEFF_3 (SLIM_SL_PER_SUPERFRAME/3) #define SLIM_MAX_INTR_COEFF_1 SLIM_SL_PER_SUPERFRAME static DEFINE_MUTEX(slim_lock); static DEFINE_IDR(ctrl_idr); static struct device_type slim_dev_type; static struct device_type slim_ctrl_type; static const struct slim_device_id *slim_match(const struct slim_device_id *id, const struct slim_device *slim_dev) { while (id->name[0]) { if (strncmp(slim_dev->name, id->name, SLIMBUS_NAME_SIZE) == 0) return id; id++; } return NULL; } static int slim_device_match(struct device *dev, struct device_driver *driver) { struct slim_device *slim_dev; struct slim_driver *drv = to_slim_driver(driver); if (dev->type == &slim_dev_type) slim_dev = to_slim_device(dev); else return 0; 
if (drv->id_table) return slim_match(drv->id_table, slim_dev) != NULL; if (driver->name) return strncmp(slim_dev->name, driver->name, SLIMBUS_NAME_SIZE) == 0; return 0; } #ifdef CONFIG_PM_SLEEP static int slim_legacy_suspend(struct device *dev, pm_message_t mesg) { struct slim_device *slim_dev = NULL; struct slim_driver *driver; if (dev->type == &slim_dev_type) slim_dev = to_slim_device(dev); if (!slim_dev || !dev->driver) return 0; driver = to_slim_driver(dev->driver); if (!driver->suspend) return 0; return driver->suspend(slim_dev, mesg); } static int slim_legacy_resume(struct device *dev) { struct slim_device *slim_dev = NULL; struct slim_driver *driver; if (dev->type == &slim_dev_type) slim_dev = to_slim_device(dev); if (!slim_dev || !dev->driver) return 0; driver = to_slim_driver(dev->driver); if (!driver->resume) return 0; return driver->resume(slim_dev); } static int slim_pm_suspend(struct device *dev) { const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; if (pm) return pm_generic_suspend(dev); else return slim_legacy_suspend(dev, PMSG_SUSPEND); } static int slim_pm_resume(struct device *dev) { const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; if (pm) return pm_generic_resume(dev); else return slim_legacy_resume(dev); } #else #define slim_pm_suspend NULL #define slim_pm_resume NULL #endif static const struct dev_pm_ops slimbus_pm = { .suspend = slim_pm_suspend, .resume = slim_pm_resume, SET_RUNTIME_PM_OPS( pm_generic_suspend, pm_generic_resume, pm_generic_runtime_idle ) }; struct bus_type slimbus_type = { .name = "slimbus", .match = slim_device_match, .pm = &slimbus_pm, }; EXPORT_SYMBOL_GPL(slimbus_type); struct device slimbus_dev = { .init_name = "slimbus", }; static void __exit slimbus_exit(void) { device_unregister(&slimbus_dev); bus_unregister(&slimbus_type); } static int __init slimbus_init(void) { int retval; retval = bus_register(&slimbus_type); if (!retval) retval = device_register(&slimbus_dev); if (retval) bus_unregister(&slimbus_type); return retval; } postcore_initcall(slimbus_init); module_exit(slimbus_exit); static int slim_drv_probe(struct device *dev) { const struct slim_driver *sdrv = to_slim_driver(dev->driver); struct slim_device *sbdev = to_slim_device(dev); struct slim_controller *ctrl = sbdev->ctrl; if (sdrv->probe) { int ret; ret = sdrv->probe(sbdev); if (ret) return ret; if (sdrv->device_up) queue_work(ctrl->wq, &sbdev->wd); return 0; } return -ENODEV; } static int slim_drv_remove(struct device *dev) { const struct slim_driver *sdrv = to_slim_driver(dev->driver); struct slim_device *sbdev = to_slim_device(dev); sbdev->notified = false; if (sdrv->remove) return sdrv->remove(to_slim_device(dev)); return -ENODEV; } static void slim_drv_shutdown(struct device *dev) { const struct slim_driver *sdrv = to_slim_driver(dev->driver); if (sdrv->shutdown) sdrv->shutdown(to_slim_device(dev)); } /* * slim_driver_register: Client driver registration with slimbus * @drv:Client driver to be associated with client-device. * This API will register the client driver with the slimbus * It is called from the driver's module-init function. 
*/ int slim_driver_register(struct slim_driver *drv) { drv->driver.bus = &slimbus_type; if (drv->probe) drv->driver.probe = slim_drv_probe; if (drv->remove) drv->driver.remove = slim_drv_remove; if (drv->shutdown) drv->driver.shutdown = slim_drv_shutdown; return driver_register(&drv->driver); } EXPORT_SYMBOL_GPL(slim_driver_register); /* * slim_driver_unregister: Undo effects of slim_driver_register * @drv: Client driver to be unregistered */ void slim_driver_unregister(struct slim_driver *drv) { if (drv) driver_unregister(&drv->driver); } EXPORT_SYMBOL_GPL(slim_driver_unregister); #define slim_ctrl_attr_gr NULL static void slim_ctrl_release(struct device *dev) { struct slim_controller *ctrl = to_slim_controller(dev); complete(&ctrl->dev_released); } static struct device_type slim_ctrl_type = { .groups = slim_ctrl_attr_gr, .release = slim_ctrl_release, }; static struct slim_controller *slim_ctrl_get(struct slim_controller *ctrl) { if (!ctrl || !get_device(&ctrl->dev)) return NULL; return ctrl; } static void slim_ctrl_put(struct slim_controller *ctrl) { if (ctrl) put_device(&ctrl->dev); } #define slim_device_attr_gr NULL #define slim_device_uevent NULL static void slim_dev_release(struct device *dev) { struct slim_device *sbdev = to_slim_device(dev); slim_ctrl_put(sbdev->ctrl); } static struct device_type slim_dev_type = { .groups = slim_device_attr_gr, .uevent = slim_device_uevent, .release = slim_dev_release, }; static void slim_report(struct work_struct *work) { struct slim_driver *sbdrv; struct slim_device *sbdev = container_of(work, struct slim_device, wd); if (!sbdev->dev.driver) return; /* check if device-up or down needs to be called */ if ((!sbdev->reported && !sbdev->notified) || (sbdev->reported && sbdev->notified)) return; sbdrv = to_slim_driver(sbdev->dev.driver); /* * address no longer valid, means device reported absent, whereas * address valid, means device reported present */ if (sbdev->notified && !sbdev->reported) { sbdev->notified = false; if 
(sbdrv->device_down) sbdrv->device_down(sbdev); } else if (!sbdev->notified && sbdev->reported) { sbdev->notified = true; if (sbdrv->device_up) sbdrv->device_up(sbdev); } } /* * slim_add_device: Add a new device without register board info. * @ctrl: Controller to which this device is to be added to. * Called when device doesn't have an explicit client-driver to be probed, or * the client-driver is a module installed dynamically. */ int slim_add_device(struct slim_controller *ctrl, struct slim_device *sbdev) { sbdev->dev.bus = &slimbus_type; sbdev->dev.parent = ctrl->dev.parent; sbdev->dev.type = &slim_dev_type; sbdev->dev.driver = NULL; sbdev->ctrl = ctrl; slim_ctrl_get(ctrl); dev_set_name(&sbdev->dev, "%s", sbdev->name); mutex_init(&sbdev->sldev_reconf); INIT_LIST_HEAD(&sbdev->mark_define); INIT_LIST_HEAD(&sbdev->mark_suspend); INIT_LIST_HEAD(&sbdev->mark_removal); INIT_WORK(&sbdev->wd, slim_report); mutex_lock(&ctrl->m_ctrl); list_add_tail(&sbdev->dev_list, &ctrl->devs); mutex_unlock(&ctrl->m_ctrl); /* probe slave on this controller */ return device_register(&sbdev->dev); } EXPORT_SYMBOL_GPL(slim_add_device); struct sbi_boardinfo { struct list_head list; struct slim_boardinfo board_info; }; static LIST_HEAD(board_list); static LIST_HEAD(slim_ctrl_list); static DEFINE_MUTEX(board_lock); /* If controller is not present, only add to boards list */ static void slim_match_ctrl_to_boardinfo(struct slim_controller *ctrl, struct slim_boardinfo *bi) { int ret; if (ctrl->nr != bi->bus_num) return; ret = slim_add_device(ctrl, bi->slim_slave); if (ret != 0) dev_err(ctrl->dev.parent, "can't create new device for %s\n", bi->slim_slave->name); } /* * slim_register_board_info: Board-initialization routine. * @info: List of all devices on all controllers present on the board. * @n: number of entries. * API enumerates respective devices on corresponding controller. * Called from board-init function. 
*/ int slim_register_board_info(struct slim_boardinfo const *info, unsigned n) { struct sbi_boardinfo *bi; int i; bi = kzalloc(n * sizeof(*bi), GFP_KERNEL); if (!bi) return -ENOMEM; for (i = 0; i < n; i++, bi++, info++) { struct slim_controller *ctrl; memcpy(&bi->board_info, info, sizeof(*info)); mutex_lock(&board_lock); list_add_tail(&bi->list, &board_list); list_for_each_entry(ctrl, &slim_ctrl_list, list) slim_match_ctrl_to_boardinfo(ctrl, &bi->board_info); mutex_unlock(&board_lock); } return 0; } EXPORT_SYMBOL_GPL(slim_register_board_info); /* * slim_ctrl_add_boarddevs: Add devices registered by board-info * @ctrl: Controller to which these devices are to be added to. * This API is called by controller when it is up and running. * If devices on a controller were registered before controller, * this will make sure that they get probed when controller is up. */ void slim_ctrl_add_boarddevs(struct slim_controller *ctrl) { struct sbi_boardinfo *bi; mutex_lock(&board_lock); list_add_tail(&ctrl->list, &slim_ctrl_list); list_for_each_entry(bi, &board_list, list) slim_match_ctrl_to_boardinfo(ctrl, &bi->board_info); mutex_unlock(&board_lock); } EXPORT_SYMBOL_GPL(slim_ctrl_add_boarddevs); /* * slim_busnum_to_ctrl: Map bus number to controller * @busnum: Bus number * Returns controller representing this bus number */ struct slim_controller *slim_busnum_to_ctrl(u32 bus_num) { struct slim_controller *ctrl; mutex_lock(&board_lock); list_for_each_entry(ctrl, &slim_ctrl_list, list) if (bus_num == ctrl->nr) { mutex_unlock(&board_lock); return ctrl; } mutex_unlock(&board_lock); return NULL; } EXPORT_SYMBOL_GPL(slim_busnum_to_ctrl); static int slim_register_controller(struct slim_controller *ctrl) { int ret = 0; /* Can't register until after driver model init */ if (WARN_ON(!slimbus_type.p)) { ret = -EAGAIN; goto out_list; } dev_set_name(&ctrl->dev, "sb-%d", ctrl->nr); ctrl->dev.bus = &slimbus_type; ctrl->dev.type = &slim_ctrl_type; ctrl->num_dev = 0; if (!ctrl->min_cg) 
ctrl->min_cg = SLIM_MIN_CLK_GEAR; if (!ctrl->max_cg) ctrl->max_cg = SLIM_MAX_CLK_GEAR; mutex_init(&ctrl->m_ctrl); mutex_init(&ctrl->sched.m_reconf); ret = device_register(&ctrl->dev); if (ret) goto out_list; dev_dbg(&ctrl->dev, "Bus [%s] registered:dev:%x\n", ctrl->name, (u32)&ctrl->dev); if (ctrl->nports) { ctrl->ports = kzalloc(ctrl->nports * sizeof(struct slim_port), GFP_KERNEL); if (!ctrl->ports) { ret = -ENOMEM; goto err_port_failed; } } if (ctrl->nchans) { ctrl->chans = kzalloc(ctrl->nchans * sizeof(struct slim_ich), GFP_KERNEL); if (!ctrl->chans) { ret = -ENOMEM; goto err_chan_failed; } ctrl->sched.chc1 = kzalloc(ctrl->nchans * sizeof(struct slim_ich *), GFP_KERNEL); if (!ctrl->sched.chc1) { kfree(ctrl->chans); ret = -ENOMEM; goto err_chan_failed; } ctrl->sched.chc3 = kzalloc(ctrl->nchans * sizeof(struct slim_ich *), GFP_KERNEL); if (!ctrl->sched.chc3) { kfree(ctrl->sched.chc1); kfree(ctrl->chans); ret = -ENOMEM; goto err_chan_failed; } } #ifdef DEBUG ctrl->sched.slots = kzalloc(SLIM_SL_PER_SUPERFRAME, GFP_KERNEL); #endif init_completion(&ctrl->pause_comp); INIT_LIST_HEAD(&ctrl->devs); ctrl->wq = create_singlethread_workqueue(dev_name(&ctrl->dev)); if (!ctrl->wq) goto err_workq_failed; return 0; err_workq_failed: kfree(ctrl->sched.chc3); kfree(ctrl->sched.chc1); kfree(ctrl->chans); err_chan_failed: kfree(ctrl->ports); err_port_failed: device_unregister(&ctrl->dev); out_list: mutex_lock(&slim_lock); idr_remove(&ctrl_idr, ctrl->nr); mutex_unlock(&slim_lock); return ret; } /* slim_remove_device: Remove the effect of slim_add_device() */ void slim_remove_device(struct slim_device *sbdev) { device_unregister(&sbdev->dev); } EXPORT_SYMBOL_GPL(slim_remove_device); static void slim_ctrl_remove_device(struct slim_controller *ctrl, struct slim_boardinfo *bi) { if (ctrl->nr == bi->bus_num) slim_remove_device(bi->slim_slave); } /* * slim_del_controller: Controller tear-down. * Controller added with the above API is teared down using this API. 
*/ int slim_del_controller(struct slim_controller *ctrl) { struct slim_controller *found; struct sbi_boardinfo *bi; /* First make sure that this bus was added */ mutex_lock(&slim_lock); found = idr_find(&ctrl_idr, ctrl->nr); mutex_unlock(&slim_lock); if (found != ctrl) return -EINVAL; /* Remove all clients */ mutex_lock(&board_lock); list_for_each_entry(bi, &board_list, list) slim_ctrl_remove_device(ctrl, &bi->board_info); mutex_unlock(&board_lock); init_completion(&ctrl->dev_released); device_unregister(&ctrl->dev); wait_for_completion(&ctrl->dev_released); list_del(&ctrl->list); destroy_workqueue(ctrl->wq); /* free bus id */ mutex_lock(&slim_lock); idr_remove(&ctrl_idr, ctrl->nr); mutex_unlock(&slim_lock); kfree(ctrl->sched.chc1); kfree(ctrl->sched.chc3); #ifdef DEBUG kfree(ctrl->sched.slots); #endif kfree(ctrl->chans); kfree(ctrl->ports); return 0; } EXPORT_SYMBOL_GPL(slim_del_controller); /* * slim_add_numbered_controller: Controller bring-up. * @ctrl: Controller to be registered. * A controller is registered with the framework using this API. ctrl->nr is the * desired number with which slimbus framework registers the controller. * Function will return -EBUSY if the number is in use. 
*/ int slim_add_numbered_controller(struct slim_controller *ctrl) { int id; int status; if (ctrl->nr & ~MAX_ID_MASK) return -EINVAL; retry: if (idr_pre_get(&ctrl_idr, GFP_KERNEL) == 0) return -ENOMEM; mutex_lock(&slim_lock); status = idr_get_new_above(&ctrl_idr, ctrl, ctrl->nr, &id); if (status == 0 && id != ctrl->nr) { status = -EAGAIN; idr_remove(&ctrl_idr, id); } mutex_unlock(&slim_lock); if (status == -EAGAIN) goto retry; if (status == 0) status = slim_register_controller(ctrl); return status; } EXPORT_SYMBOL_GPL(slim_add_numbered_controller); /* * slim_report_absent: Controller calls this function when a device * reports absent, OR when the device cannot be communicated with * @sbdev: Device that cannot be reached, or sent report absent */ void slim_report_absent(struct slim_device *sbdev) { struct slim_controller *ctrl; int i; if (!sbdev) return; ctrl = sbdev->ctrl; if (!ctrl) return; /* invalidate logical addresses */ mutex_lock(&ctrl->m_ctrl); for (i = 0; i < ctrl->num_dev; i++) { if (sbdev->laddr == ctrl->addrt[i].laddr) ctrl->addrt[i].valid = false; } mutex_unlock(&ctrl->m_ctrl); sbdev->reported = false; queue_work(ctrl->wq, &sbdev->wd); } EXPORT_SYMBOL(slim_report_absent); /* * slim_framer_booted: This function is called by controller after the active * framer has booted (using Bus Reset sequence, or after it has shutdown and has * come back up). Components, devices on the bus may be in undefined state, * and this function triggers their drivers to do the needful * to bring them back in Reset state so that they can acquire sync, report * present and be operational again. 
*/ void slim_framer_booted(struct slim_controller *ctrl) { struct slim_device *sbdev; struct list_head *pos, *next; if (!ctrl) return; mutex_lock(&ctrl->m_ctrl); list_for_each_safe(pos, next, &ctrl->devs) { struct slim_driver *sbdrv; sbdev = list_entry(pos, struct slim_device, dev_list); mutex_unlock(&ctrl->m_ctrl); if (sbdev && sbdev->dev.driver) { sbdrv = to_slim_driver(sbdev->dev.driver); if (sbdrv->reset_device) sbdrv->reset_device(sbdev); } mutex_lock(&ctrl->m_ctrl); } mutex_unlock(&ctrl->m_ctrl); } EXPORT_SYMBOL(slim_framer_booted); /* * slim_msg_response: Deliver Message response received from a device to the * framework. * @ctrl: Controller handle * @reply: Reply received from the device * @len: Length of the reply * @tid: Transaction ID received with which framework can associate reply. * Called by controller to inform framework about the response received. * This helps in making the API asynchronous, and controller-driver doesn't need * to manage 1 more table other than the one managed by framework mapping TID * with buffers */ void slim_msg_response(struct slim_controller *ctrl, u8 *reply, u8 tid, u8 len) { int i; struct slim_msg_txn *txn; mutex_lock(&ctrl->m_ctrl); txn = ctrl->txnt[tid]; if (txn == NULL || txn->rbuf == NULL) { if (txn == NULL) dev_err(&ctrl->dev, "Got response to invalid TID:%d, len:%d", tid, len); else dev_err(&ctrl->dev, "Invalid client buffer passed\n"); mutex_unlock(&ctrl->m_ctrl); return; } for (i = 0; i < len; i++) txn->rbuf[i] = reply[i]; if (txn->comp) complete(txn->comp); ctrl->txnt[tid] = NULL; mutex_unlock(&ctrl->m_ctrl); kfree(txn); } EXPORT_SYMBOL_GPL(slim_msg_response); static int slim_processtxn(struct slim_controller *ctrl, u8 dt, u16 mc, u16 ec, u8 mt, u8 *rbuf, const u8 *wbuf, u8 len, u8 mlen, struct completion *comp, u8 la, u8 *tid) { u8 i = 0; int ret = 0; struct slim_msg_txn *txn = kmalloc(sizeof(struct slim_msg_txn), GFP_KERNEL); if (!txn) return -ENOMEM; if (tid) { mutex_lock(&ctrl->m_ctrl); for (i = 0; i < 
ctrl->last_tid; i++) { if (ctrl->txnt[i] == NULL) break; } if (i >= ctrl->last_tid) { if (ctrl->last_tid == 255) { mutex_unlock(&ctrl->m_ctrl); kfree(txn); return -ENOMEM; } ctrl->txnt = krealloc(ctrl->txnt, (i + 1) * sizeof(struct slim_msg_txn *), GFP_KERNEL); if (!ctrl->txnt) { mutex_unlock(&ctrl->m_ctrl); kfree(txn); return -ENOMEM; } ctrl->last_tid++; } ctrl->txnt[i] = txn; mutex_unlock(&ctrl->m_ctrl); txn->tid = i; *tid = i; } txn->mc = mc; txn->mt = mt; txn->dt = dt; txn->ec = ec; txn->la = la; txn->rbuf = rbuf; txn->wbuf = wbuf; txn->rl = mlen; txn->len = len; txn->comp = comp; ret = ctrl->xfer_msg(ctrl, txn); if (!tid) kfree(txn); return ret; } static int ctrl_getlogical_addr(struct slim_controller *ctrl, const u8 *eaddr, u8 e_len, u8 *entry) { u8 i; for (i = 0; i < ctrl->num_dev; i++) { if (ctrl->addrt[i].valid && memcmp(ctrl->addrt[i].eaddr, eaddr, e_len) == 0) { *entry = i; return 0; } } return -ENXIO; } /* * slim_assign_laddr: Assign logical address to a device enumerated. * @ctrl: Controller with which device is enumerated. * @e_addr: 6-byte elemental address of the device. * @e_len: buffer length for e_addr * @laddr: Return logical address (if valid flag is false) * @valid: true if laddr holds a valid address that controller wants to * set for this enumeration address. Otherwise framework sets index into * address table as logical address. * Called by controller in response to REPORT_PRESENT. Framework will assign * a logical address to this enumeration address. * Function returns -EXFULL to indicate that all logical addresses are already * taken. 
*/ int slim_assign_laddr(struct slim_controller *ctrl, const u8 *e_addr, u8 e_len, u8 *laddr, bool valid) { int ret; u8 i = 0; bool exists = false; struct slim_device *sbdev; struct list_head *pos, *next; mutex_lock(&ctrl->m_ctrl); /* already assigned */ if (ctrl_getlogical_addr(ctrl, e_addr, e_len, &i) == 0) { *laddr = ctrl->addrt[i].laddr; exists = true; } else { if (ctrl->num_dev >= 254) { ret = -EXFULL; goto ret_assigned_laddr; } for (i = 0; i < ctrl->num_dev; i++) { if (ctrl->addrt[i].valid == false) break; } if (i == ctrl->num_dev) { ctrl->addrt = krealloc(ctrl->addrt, (ctrl->num_dev + 1) * sizeof(struct slim_addrt), GFP_KERNEL); if (!ctrl->addrt) { ret = -ENOMEM; goto ret_assigned_laddr; } ctrl->num_dev++; } memcpy(ctrl->addrt[i].eaddr, e_addr, e_len); ctrl->addrt[i].valid = true; /* Preferred address is index into table */ if (!valid) *laddr = i; } ret = ctrl->set_laddr(ctrl, (const u8 *)&ctrl->addrt[i].eaddr, 6, *laddr); if (ret) { ctrl->addrt[i].valid = false; goto ret_assigned_laddr; } ctrl->addrt[i].laddr = *laddr; dev_dbg(&ctrl->dev, "setting slimbus l-addr:%x\n", *laddr); ret_assigned_laddr: mutex_unlock(&ctrl->m_ctrl); if (exists || ret) return ret; pr_info("slimbus:%d laddr:0x%x, EAPC:0x%x:0x%x", ctrl->nr, *laddr, e_addr[1], e_addr[2]); mutex_lock(&ctrl->m_ctrl); list_for_each_safe(pos, next, &ctrl->devs) { sbdev = list_entry(pos, struct slim_device, dev_list); if (memcmp(sbdev->e_addr, e_addr, 6) == 0) { struct slim_driver *sbdrv; sbdev->laddr = *laddr; sbdev->reported = true; if (sbdev->dev.driver) { sbdrv = to_slim_driver(sbdev->dev.driver); if (sbdrv->device_up) queue_work(ctrl->wq, &sbdev->wd); } break; } } mutex_unlock(&ctrl->m_ctrl); return 0; } EXPORT_SYMBOL_GPL(slim_assign_laddr); /* * slim_get_logical_addr: Return the logical address of a slimbus device. * @sb: client handle requesting the adddress. * @e_addr: Elemental address of the device. 
* @e_len: Length of e_addr * @laddr: output buffer to store the address * context: can sleep * -EINVAL is returned in case of invalid parameters, and -ENXIO is returned if * the device with this elemental address is not found. */ int slim_get_logical_addr(struct slim_device *sb, const u8 *e_addr, u8 e_len, u8 *laddr) { int ret = 0; u8 entry; struct slim_controller *ctrl = sb->ctrl; if (!ctrl || !laddr || !e_addr || e_len != 6) return -EINVAL; mutex_lock(&ctrl->m_ctrl); ret = ctrl_getlogical_addr(ctrl, e_addr, e_len, &entry); if (!ret) *laddr = ctrl->addrt[entry].laddr; mutex_unlock(&ctrl->m_ctrl); if (ret == -ENXIO && ctrl->get_laddr) { ret = ctrl->get_laddr(ctrl, e_addr, e_len, laddr); if (!ret) ret = slim_assign_laddr(ctrl, e_addr, e_len, laddr, true); } return ret; } EXPORT_SYMBOL_GPL(slim_get_logical_addr); static int slim_ele_access_sanity(struct slim_ele_access *msg, int oper, u8 *rbuf, const u8 *wbuf, u8 len) { if (!msg || msg->num_bytes > 16 || msg->start_offset + len > 0xC00) return -EINVAL; switch (oper) { case SLIM_MSG_MC_REQUEST_VALUE: case SLIM_MSG_MC_REQUEST_INFORMATION: if (rbuf == NULL) return -EINVAL; return 0; case SLIM_MSG_MC_CHANGE_VALUE: case SLIM_MSG_MC_CLEAR_INFORMATION: if (wbuf == NULL) return -EINVAL; return 0; case SLIM_MSG_MC_REQUEST_CHANGE_VALUE: case SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION: if (rbuf == NULL || wbuf == NULL) return -EINVAL; return 0; default: return -EINVAL; } } static u16 slim_slicecodefromsize(u32 req) { u8 codetosize[8] = {1, 2, 3, 4, 6, 8, 12, 16}; if (req >= 8) return 0; else return codetosize[req]; } static u16 slim_slicesize(u32 code) { u8 sizetocode[16] = {0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7}; if (code == 0) code = 1; if (code > 16) code = 16; return sizetocode[code - 1]; } /* Message APIs Unicast message APIs used by slimbus slave drivers */ /* * Message API access routines. * @sb: client handle requesting elemental message reads, writes. 
* @msg: Input structure for start-offset, number of bytes to read. * @rbuf: data buffer to be filled with values read. * @len: data buffer size * @wbuf: data buffer containing value/information to be written * context: can sleep * Returns: * -EINVAL: Invalid parameters * -ETIMEDOUT: If controller could not complete the request. This may happen if * the bus lines are not clocked, controller is not powered-on, slave with * given address is not enumerated/responding. */ int slim_request_val_element(struct slim_device *sb, struct slim_ele_access *msg, u8 *buf, u8 len) { struct slim_controller *ctrl = sb->ctrl; if (!ctrl) return -EINVAL; return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_REQUEST_VALUE, buf, NULL, len); } EXPORT_SYMBOL_GPL(slim_request_val_element); int slim_request_inf_element(struct slim_device *sb, struct slim_ele_access *msg, u8 *buf, u8 len) { struct slim_controller *ctrl = sb->ctrl; if (!ctrl) return -EINVAL; return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_REQUEST_INFORMATION, buf, NULL, len); } EXPORT_SYMBOL_GPL(slim_request_inf_element); int slim_change_val_element(struct slim_device *sb, struct slim_ele_access *msg, const u8 *buf, u8 len) { struct slim_controller *ctrl = sb->ctrl; if (!ctrl) return -EINVAL; return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_CHANGE_VALUE, NULL, buf, len); } EXPORT_SYMBOL_GPL(slim_change_val_element); int slim_clear_inf_element(struct slim_device *sb, struct slim_ele_access *msg, u8 *buf, u8 len) { struct slim_controller *ctrl = sb->ctrl; if (!ctrl) return -EINVAL; return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_CLEAR_INFORMATION, NULL, buf, len); } EXPORT_SYMBOL_GPL(slim_clear_inf_element); int slim_request_change_val_element(struct slim_device *sb, struct slim_ele_access *msg, u8 *rbuf, const u8 *wbuf, u8 len) { struct slim_controller *ctrl = sb->ctrl; if (!ctrl) return -EINVAL; return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_REQUEST_CHANGE_VALUE, rbuf, wbuf, len); } EXPORT_SYMBOL_GPL(slim_request_change_val_element); 
/* Combined read-then-clear of an information element in one transaction */
int slim_request_clear_inf_element(struct slim_device *sb,
					struct slim_ele_access *msg, u8 *rbuf,
					const u8 *wbuf, u8 len)
{
	struct slim_controller *ctrl = sb->ctrl;
	if (!ctrl)
		return -EINVAL;
	return slim_xfer_msg(ctrl, sb, msg,
					SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION,
					rbuf, wbuf, len);
}
EXPORT_SYMBOL_GPL(slim_request_clear_inf_element);

/*
 * Broadcast message API:
 * call this API directly with sbdev = NULL.
 * For broadcast reads, make sure that buffers are big-enough to incorporate
 * replies from all logical addresses.
 * All controllers may not support broadcast
 */
/*
 * Core element-access transfer. Builds the element code (EC) from the
 * message's start offset and slice size, then hands the transaction to the
 * controller via slim_processtxn(). Reads (rbuf != NULL) are tracked by a
 * transaction id (tid); a sync read (no msg->comp) blocks on an on-stack
 * completion with a 1s (HZ) timeout and, on timeout or submit error,
 * invalidates and frees the pending txn under ctrl->m_ctrl.
 */
int slim_xfer_msg(struct slim_controller *ctrl, struct slim_device *sbdev,
			struct slim_ele_access *msg, u16 mc, u8 *rbuf,
			const u8 *wbuf, u8 len)
{
	DECLARE_COMPLETION_ONSTACK(complete);
	int ret;
	u16 sl, cur;
	u16 ec;
	u8 tid, mlen = 6;	/* base message length; payload added below */

	ret = slim_ele_access_sanity(msg, mc, rbuf, wbuf, len);
	if (ret)
		goto xfer_err;

	sl = slim_slicesize(len);
	dev_dbg(&ctrl->dev, "SB xfer msg:os:%x, len:%d, MC:%x, sl:%x\n",
				msg->start_offset, len, mc, sl);
	/* NOTE(review): 'cur' is computed but never used below — candidate
	 * for removal; kept as-is in this documentation-only pass. */
	cur = slim_slicecodefromsize(sl);
	/* EC: slice size, access-type bit, and 12-bit element offset */
	ec = ((sl | (1 << 3)) | ((msg->start_offset & 0xFFF) << 4));

	if (wbuf)
		mlen += len;
	if (rbuf) {
		mlen++;
		if (!msg->comp)
			/* sync read: wait on our on-stack completion */
			ret = slim_processtxn(ctrl, SLIM_MSG_DEST_LOGICALADDR,
				mc, ec, SLIM_MSG_MT_CORE, rbuf, wbuf, len,
				mlen, &complete, sbdev->laddr, &tid);
		else
			/* async read: caller's completion is signalled */
			ret = slim_processtxn(ctrl, SLIM_MSG_DEST_LOGICALADDR,
				mc, ec, SLIM_MSG_MT_CORE, rbuf, wbuf, len,
				mlen, msg->comp, sbdev->laddr, &tid);
		/* sync read */
		if (!ret && !msg->comp) {
			ret = wait_for_completion_timeout(&complete, HZ);
			if (!ret) {
				struct slim_msg_txn *txn;
				dev_err(&ctrl->dev, "slimbus Read timed out");
				mutex_lock(&ctrl->m_ctrl);
				txn = ctrl->txnt[tid];
				/* Invalidate the transaction */
				ctrl->txnt[tid] = NULL;
				mutex_unlock(&ctrl->m_ctrl);
				kfree(txn);
				ret = -ETIMEDOUT;
			} else
				ret = 0;
		} else if (ret < 0 && !msg->comp) {
			/* submit failed: reap the txn so it cannot leak */
			struct slim_msg_txn *txn;
			dev_err(&ctrl->dev, "slimbus Read error");
			mutex_lock(&ctrl->m_ctrl);
			txn = ctrl->txnt[tid];
			/* Invalidate the transaction */
			ctrl->txnt[tid] = NULL;
			mutex_unlock(&ctrl->m_ctrl);
			kfree(txn);
		}
	} else
		/* pure write: no tid, no completion tracking */
		ret = slim_processtxn(ctrl, SLIM_MSG_DEST_LOGICALADDR, mc, ec,
				SLIM_MSG_MT_CORE, rbuf, wbuf, len, mlen,
				NULL, sbdev->laddr, NULL);
xfer_err:
	return ret;
}
EXPORT_SYMBOL_GPL(slim_xfer_msg);

/*
 * slim_alloc_mgrports: Allocate port on manager side.
 * @sb: device/client handle.
 * @req: Port request type.
 * @nports: Number of ports requested
 * @rh: output buffer to store the port handles
 * @hsz: size of buffer storing handles
 * context: can sleep
 * This port will be typically used by SW. e.g. client driver wants to receive
 * some data from audio codec HW using a data channel.
 * Port allocated using this API will be used to receive the data.
 * If half-duplex ports are requested, two adjacent ports are allocated for
 * 1 half-duplex port. So the handle-buffer size should be twice the number
 * of half-duplex ports to be allocated.
 * -EDQUOT is returned if all ports are in use.
 */
int slim_alloc_mgrports(struct slim_device *sb, enum slim_port_req req,
				int nports, u32 *rh, int hsz)
{
	int i, j;
	int ret = -EINVAL;
	int nphysp = nports;	/* physical ports needed (2x for half-duplex) */
	struct slim_controller *ctrl = sb->ctrl;

	if (!rh || !ctrl)
		return -EINVAL;
	if (req == SLIM_REQ_HALF_DUP)
		nphysp *= 2;
	if (hsz/sizeof(u32) < nphysp)
		return -EINVAL;
	mutex_lock(&ctrl->m_ctrl);

	/* find first free port index satisfying alignment/contiguity */
	for (i = 0; i < ctrl->nports; i++) {
		bool multiok = true;
		if (ctrl->ports[i].state != SLIM_P_FREE)
			continue;
		/* Start half duplex channel at even port */
		if (req == SLIM_REQ_HALF_DUP && (i % 2))
			continue;
		/* Allocate ports contiguously for multi-ch */
		if (ctrl->nports < (i + nphysp)) {
			i = ctrl->nports;
			break;
		}
		if (req == SLIM_REQ_MULTI_CH) {
			multiok = true;
			for (j = i; j < i + nphysp; j++) {
				if (ctrl->ports[j].state != SLIM_P_FREE) {
					multiok = false;
					break;
				}
			}
			if (!multiok)
				continue;
		}
		break;
	}
	if (i >= ctrl->nports) {
		ret = -EDQUOT;
		goto alloc_err;
	}
	ret = 0;
	/* claim the run [i, i + nphysp); roll back on alloc_port failure */
	for (j = i; j < i + nphysp; j++) {
		ctrl->ports[j].state = SLIM_P_UNCFG;
		ctrl->ports[j].req = req;
		/* half-duplex pairs: odd port is the sink side */
		if (req == SLIM_REQ_HALF_DUP && (j % 2))
			ctrl->ports[j].flow = SLIM_SINK;
		else
			ctrl->ports[j].flow = SLIM_SRC;
		if (ctrl->alloc_port)
			ret = ctrl->alloc_port(ctrl, j);
		if (ret) {
			for (; j >= i; j--)
				ctrl->ports[j].state = SLIM_P_FREE;
			goto alloc_err;
		}
		*rh++ = SLIM_PORT_HDL(SLIM_LA_MANAGER, 0, j);
	}
alloc_err:
	mutex_unlock(&ctrl->m_ctrl);
	return ret;
}
EXPORT_SYMBOL_GPL(slim_alloc_mgrports);

/* Deallocate the port(s) allocated using the API above */
int slim_dealloc_mgrports(struct slim_device *sb, u32 *hdl, int nports)
{
	int i;
	struct slim_controller *ctrl = sb->ctrl;

	if (!ctrl || !hdl)
		return -EINVAL;

	mutex_lock(&ctrl->m_ctrl);

	for (i = 0; i < nports; i++) {
		u8 pn;
		pn = SLIM_HDL_TO_PORT(hdl[i]);

		if (pn >= ctrl->nports || ctrl->ports[pn].state == SLIM_P_CFG) {
			/* bad handle or still-connected port: undo the ports
			 * already freed in this call and bail out */
			int j, ret;
			if (pn >= ctrl->nports) {
				dev_err(&ctrl->dev, "invalid port number");
				ret = -EINVAL;
			} else {
				dev_err(&ctrl->dev,
					"Can't dealloc connected port:%d", i);
				ret = -EISCONN;
			}
			for (j = i - 1; j >= 0; j--) {
				pn = SLIM_HDL_TO_PORT(hdl[j]);
				ctrl->ports[pn].state = SLIM_P_UNCFG;
			}
			mutex_unlock(&ctrl->m_ctrl);
			return ret;
		}
		if (ctrl->dealloc_port)
			ctrl->dealloc_port(ctrl, pn);
		ctrl->ports[pn].state = SLIM_P_FREE;
	}
	mutex_unlock(&ctrl->m_ctrl);
	return 0;
}
EXPORT_SYMBOL_GPL(slim_dealloc_mgrports);

/*
 * slim_get_slaveport: Get slave port handle
 * @la: slave device logical address.
 * @idx: port index at slave
 * @rh: return handle
 * @flw: Flow type (source or destination)
 * This API only returns a slave port's representation as expected by slimbus
 * driver. This port is not managed by the slimbus driver. Caller is expected
 * to have visibility of this port since it's a device-port.
*/ int slim_get_slaveport(u8 la, int idx, u32 *rh, enum slim_port_flow flw) { if (rh == NULL) return -EINVAL; *rh = SLIM_PORT_HDL(la, flw, idx); return 0; } EXPORT_SYMBOL_GPL(slim_get_slaveport); static int connect_port_ch(struct slim_controller *ctrl, u8 ch, u32 ph, enum slim_port_flow flow) { int ret; u16 mc; u8 buf[2]; u32 la = SLIM_HDL_TO_LA(ph); u8 pn = (u8)SLIM_HDL_TO_PORT(ph); if (flow == SLIM_SRC) mc = SLIM_MSG_MC_CONNECT_SOURCE; else mc = SLIM_MSG_MC_CONNECT_SINK; buf[0] = pn; buf[1] = ctrl->chans[ch].chan; if (la == SLIM_LA_MANAGER) ctrl->ports[pn].flow = flow; ret = slim_processtxn(ctrl, SLIM_MSG_DEST_LOGICALADDR, mc, 0, SLIM_MSG_MT_CORE, NULL, buf, 2, 6, NULL, la, NULL); if (!ret && la == SLIM_LA_MANAGER) ctrl->ports[pn].state = SLIM_P_CFG; return ret; } static int disconnect_port_ch(struct slim_controller *ctrl, u32 ph) { int ret; u16 mc; u32 la = SLIM_HDL_TO_LA(ph); u8 pn = (u8)SLIM_HDL_TO_PORT(ph); mc = SLIM_MSG_MC_DISCONNECT_PORT; ret = slim_processtxn(ctrl, SLIM_MSG_DEST_LOGICALADDR, mc, 0, SLIM_MSG_MT_CORE, NULL, &pn, 1, 5, NULL, la, NULL); if (ret) return ret; if (la == SLIM_LA_MANAGER) ctrl->ports[pn].state = SLIM_P_UNCFG; return 0; } /* * slim_connect_src: Connect source port to channel. * @sb: client handle * @srch: source handle to be connected to this channel * @chanh: Channel with which the ports need to be associated with. * Per slimbus specification, a channel may have 1 source port. * Channel specified in chanh needs to be allocated first. * Returns -EALREADY if source is already configured for this channel. 
* Returns -ENOTCONN if channel is not allocated * Returns -EINVAL if invalid direction is specified for non-manager port, * or if the manager side port number is out of bounds, or in incorrect state */ int slim_connect_src(struct slim_device *sb, u32 srch, u16 chanh) { struct slim_controller *ctrl = sb->ctrl; int ret; u8 chan = SLIM_HDL_TO_CHIDX(chanh); struct slim_ich *slc = &ctrl->chans[chan]; enum slim_port_flow flow = SLIM_HDL_TO_FLOW(srch); u8 la = SLIM_HDL_TO_LA(srch); /* manager ports don't have direction when they are allocated */ if (la != SLIM_LA_MANAGER && flow != SLIM_SRC) return -EINVAL; mutex_lock(&ctrl->sched.m_reconf); if (la == SLIM_LA_MANAGER) { u8 pn = SLIM_HDL_TO_PORT(srch); if (pn >= ctrl->nports || ctrl->ports[pn].state != SLIM_P_UNCFG) { ret = -EINVAL; goto connect_src_err; } } if (slc->state == SLIM_CH_FREE) { ret = -ENOTCONN; goto connect_src_err; } /* * Once channel is removed, its ports can be considered disconnected * So its ports can be reassigned. Source port is zeroed * when channel is deallocated. */ if (slc->srch) { ret = -EALREADY; goto connect_src_err; } ret = connect_port_ch(ctrl, chan, srch, SLIM_SRC); if (!ret) slc->srch = srch; connect_src_err: mutex_unlock(&ctrl->sched.m_reconf); return ret; } EXPORT_SYMBOL_GPL(slim_connect_src); /* * slim_connect_sink: Connect sink port(s) to channel. * @sb: client handle * @sinkh: sink handle(s) to be connected to this channel * @nsink: number of sinks * @chanh: Channel with which the ports need to be associated with. * Per slimbus specification, a channel may have multiple sink-ports. * Channel specified in chanh needs to be allocated first. * Returns -EALREADY if sink is already configured for this channel. 
* Returns -ENOTCONN if channel is not allocated * Returns -EINVAL if invalid parameters are passed, or invalid direction is * specified for non-manager port, or if the manager side port number is out of * bounds, or in incorrect state */ int slim_connect_sink(struct slim_device *sb, u32 *sinkh, int nsink, u16 chanh) { struct slim_controller *ctrl = sb->ctrl; int j; int ret = 0; u8 chan = SLIM_HDL_TO_CHIDX(chanh); struct slim_ich *slc = &ctrl->chans[chan]; if (!sinkh || !nsink) return -EINVAL; mutex_lock(&ctrl->sched.m_reconf); /* * Once channel is removed, its ports can be considered disconnected * So its ports can be reassigned. Sink ports are freed when channel * is deallocated. */ if (slc->state == SLIM_CH_FREE) { ret = -ENOTCONN; goto connect_sink_err; } for (j = 0; j < nsink; j++) { enum slim_port_flow flow = SLIM_HDL_TO_FLOW(sinkh[j]); u8 la = SLIM_HDL_TO_LA(sinkh[j]); u8 pn = SLIM_HDL_TO_PORT(sinkh[j]); if (la != SLIM_LA_MANAGER && flow != SLIM_SINK) ret = -EINVAL; else if (la == SLIM_LA_MANAGER && (pn >= ctrl->nports || ctrl->ports[pn].state != SLIM_P_UNCFG)) ret = -EINVAL; else ret = connect_port_ch(ctrl, chan, sinkh[j], SLIM_SINK); if (ret) { for (j = j - 1; j >= 0; j--) disconnect_port_ch(ctrl, sinkh[j]); goto connect_sink_err; } } slc->sinkh = krealloc(slc->sinkh, (sizeof(u32) * (slc->nsink + nsink)), GFP_KERNEL); if (!slc->sinkh) { ret = -ENOMEM; for (j = 0; j < nsink; j++) disconnect_port_ch(ctrl, sinkh[j]); goto connect_sink_err; } memcpy(slc->sinkh + slc->nsink, sinkh, (sizeof(u32) * nsink)); slc->nsink += nsink; connect_sink_err: mutex_unlock(&ctrl->sched.m_reconf); return ret; } EXPORT_SYMBOL_GPL(slim_connect_sink); /* * slim_disconnect_ports: Disconnect port(s) from channel * @sb: client handle * @ph: ports to be disconnected * @nph: number of ports. * Disconnects ports from a channel. 
*/ int slim_disconnect_ports(struct slim_device *sb, u32 *ph, int nph) { struct slim_controller *ctrl = sb->ctrl; int i; mutex_lock(&ctrl->sched.m_reconf); for (i = 0; i < nph; i++) disconnect_port_ch(ctrl, ph[i]); mutex_unlock(&ctrl->sched.m_reconf); return 0; } EXPORT_SYMBOL_GPL(slim_disconnect_ports); /* * slim_port_xfer: Schedule buffer to be transferred/received using port-handle. * @sb: client handle * @ph: port-handle * @iobuf: buffer to be transferred or populated * @len: buffer size. * @comp: completion signal to indicate transfer done or error. * context: can sleep * Returns number of bytes transferred/received if used synchronously. * Will return 0 if used asynchronously. * Client will call slim_port_get_xfer_status to get error and/or number of * bytes transferred if used asynchronously. */ int slim_port_xfer(struct slim_device *sb, u32 ph, u8 *iobuf, u32 len, struct completion *comp) { struct slim_controller *ctrl = sb->ctrl; u8 pn = SLIM_HDL_TO_PORT(ph); dev_dbg(&ctrl->dev, "port xfer: num:%d", pn); return ctrl->port_xfer(ctrl, pn, iobuf, len, comp); } EXPORT_SYMBOL_GPL(slim_port_xfer); /* * slim_port_get_xfer_status: Poll for port transfers, or get transfer status * after completion is done. * @sb: client handle * @ph: port-handle * @done_buf: return pointer (iobuf from slim_port_xfer) which is processed. * @done_len: Number of bytes transferred. * This can be called when port_xfer complition is signalled. * The API will return port transfer error (underflow/overflow/disconnect) * and/or done_len will reflect number of bytes transferred. Note that * done_len may be valid even if port error (overflow/underflow) has happened. * e.g. If the transfer was scheduled with a few bytes to be transferred and * client has not supplied more data to be transferred, done_len will indicate * number of bytes transferred with underflow error. To avoid frequent underflow * errors, multiple transfers can be queued (e.g. 
ping-pong buffers) so that
 * channel has data to be transferred even if client is not ready to transfer
 * data all the time. done_buf will indicate address of the last buffer
 * processed from the multiple transfers.
 */
enum slim_port_err slim_port_get_xfer_status(struct slim_device *sb, u32 ph,
			u8 **done_buf, u32 *done_len)
{
	struct slim_controller *ctrl = sb->ctrl;
	u8 pn = SLIM_HDL_TO_PORT(ph);
	u32 la = SLIM_HDL_TO_LA(ph);
	enum slim_port_err err;
	dev_dbg(&ctrl->dev, "get status port num:%d", pn);
	/*
	 * Framework only has insight into ports managed by ported device
	 * used by the manager and not slave
	 */
	if (la != SLIM_LA_MANAGER) {
		if (done_buf)
			*done_buf = NULL;
		if (done_len)
			*done_len = 0;
		return SLIM_P_NOT_OWNED;
	}
	err = ctrl->port_xfer_status(ctrl, pn, done_buf, done_len);
	/* controller says in-progress: report the port's recorded error */
	if (err == SLIM_P_INPROGRESS)
		err = ctrl->ports[pn].err;
	return err;
}
EXPORT_SYMBOL_GPL(slim_port_get_xfer_status);

/*
 * Insert a channel into the scheduler's coeff-1 or coeff-3 ordered list and
 * account its slot usage. Sorted by (rootexp ascending, seglen descending).
 */
static void slim_add_ch(struct slim_controller *ctrl, struct slim_ich *slc)
{
	struct slim_ich **arr;
	int i, j;
	int *len;
	int sl = slc->seglen << slc->rootexp;
	/* Channel is already active and other end is transmitting data */
	if (slc->state >= SLIM_CH_ACTIVE)
		return;
	if (slc->coeff == SLIM_COEFF_1) {
		arr = ctrl->sched.chc1;
		len = &ctrl->sched.num_cc1;
	} else {
		arr = ctrl->sched.chc3;
		len = &ctrl->sched.num_cc3;
		sl *= 3;	/* coeff-3 channels consume 3x the slots */
	}
	*len += 1;
	/* Insert the channel based on rootexp and seglen */
	for (i = 0; i < *len - 1; i++) {
		/*
		 * Primary key: exp low to high.
		 * Secondary key: seglen: high to low
		 */
		if ((slc->rootexp > arr[i]->rootexp) ||
			((slc->rootexp == arr[i]->rootexp) &&
			(slc->seglen < arr[i]->seglen)))
			continue;
		else
			break;
	}
	/* shift the tail right and drop the new channel in at position i */
	for (j = *len - 1; j > i; j--)
		arr[j] = arr[j - 1];
	arr[i] = slc;
	if (!ctrl->allocbw)
		ctrl->sched.usedslots += sl;
	return;
}

/*
 * Remove a channel from its scheduler list, reset its scheduling state to
 * ALLOCATED, release manager-side port states, and free its sink list.
 */
static int slim_remove_ch(struct slim_controller *ctrl, struct slim_ich *slc)
{
	struct slim_ich **arr;
	int i;
	u32 la, ph;
	int *len;
	if (slc->coeff == SLIM_COEFF_1) {
		arr = ctrl->sched.chc1;
		len = &ctrl->sched.num_cc1;
	} else {
		arr = ctrl->sched.chc3;
		len = &ctrl->sched.num_cc3;
	}
	for (i = 0; i < *len; i++) {
		if (arr[i] == slc)
			break;
	}
	if (i >= *len)
		return -EXFULL;
	/* compact the array over the removed entry */
	for (; i < *len - 1; i++)
		arr[i] = arr[i + 1];
	*len -= 1;
	arr[*len] = NULL;
	slc->state = SLIM_CH_ALLOCATED;
	slc->newintr = 0;
	slc->newoff = 0;
	for (i = 0; i < slc->nsink; i++) {
		ph = slc->sinkh[i];
		la = SLIM_HDL_TO_LA(ph);
		/*
		 * For ports managed by manager's ported device, no need to send
		 * disconnect. It is client's responsibility to call disconnect
		 * on ports owned by the slave device
		 */
		if (la == SLIM_LA_MANAGER)
			ctrl->ports[SLIM_HDL_TO_PORT(ph)].state = SLIM_P_UNCFG;
	}

	ph = slc->srch;
	la = SLIM_HDL_TO_LA(ph);
	if (la == SLIM_LA_MANAGER)
		ctrl->ports[SLIM_HDL_TO_PORT(ph)].state = SLIM_P_UNCFG;

	kfree(slc->sinkh);
	slc->sinkh = NULL;
	slc->srch = 0;
	slc->nsink = 0;
	return 0;
}

/*
 * Compute the presence-rate code for a channel from its rate family and
 * rate multiplier. Returns 0 for push/pull protocols or when the rate does
 * not fit the coefficient/exponent encoding. Bit 7 marks an exact rate.
 */
static u32 slim_calc_prrate(struct slim_controller *ctrl, struct slim_ch *prop)
{
	u32 rate = 0, rate4k = 0, rate11k = 0;
	u32 exp = 0;
	u32 pr = 0;
	bool exact = true;
	bool done = false;
	enum slim_ch_rate ratefam;

	if (prop->prot >= SLIM_PUSH)
		return 0;
	if (prop->baser == SLIM_RATE_1HZ) {
		/* try to express a raw Hz rate in the 4kHz, then 11.025kHz
		 * family; fall back to 1Hz (inexact) if neither divides */
		rate = prop->ratem / 4000;
		rate4k = rate;
		if (rate * 4000 == prop->ratem)
			ratefam = SLIM_RATE_4000HZ;
		else {
			rate = prop->ratem / 11025;
			rate11k = rate;
			if (rate * 11025 == prop->ratem)
				ratefam = SLIM_RATE_11025HZ;
			else
				ratefam = SLIM_RATE_1HZ;
		}
	} else {
		ratefam = prop->baser;
		rate = prop->ratem;
	}
	if (ratefam == SLIM_RATE_1HZ) {
		/* inexact: round up to the nearest family multiple */
		exact = false;
		if ((rate4k + 1) * 4000 < (rate11k + 1) * 11025) {
			rate = rate4k + 1;
			ratefam = SLIM_RATE_4000HZ;
		} else {
			rate = rate11k + 1;
			ratefam = SLIM_RATE_11025HZ;
		}
	}
	/* convert rate to coeff-exp */
	while (!done) {
		while ((rate & 0x1) != 0x1) {
			rate >>= 1;
			exp++;
		}
		if (rate > 3) {
			/* roundup if not exact */
			rate++;
			exact = false;
		} else
			done = true;
	}
	if (ratefam == SLIM_RATE_4000HZ) {
		if (rate == 1)
			pr = 0x10;
		else {
			pr = 0;
			exp++;
		}
	} else {
		pr = 8;
		exp++;
	}
	if (exp <= 7) {
		pr |= exp;
		if (exact)
			pr |= 0x80;	/* exact-rate flag */
	} else
		pr = 0;
	return pr;
}

/*
 * Derive per-channel scheduling parameters (rootexp, seglen, coeff, prrate)
 * from the channel properties; validates protocol/rate combinations.
 */
static int slim_nextdefine_ch(struct slim_device *sb, u8 chan)
{
	struct slim_controller *ctrl = sb->ctrl;
	u32 chrate = 0;
	u32 exp = 0;
	u32 coeff = 0;
	bool exact = true;
	bool done = false;
	int ret = 0;
	struct slim_ich *slc = &ctrl->chans[chan];
	struct slim_ch *prop = &slc->prop;

	slc->prrate = slim_calc_prrate(ctrl, prop);
	dev_dbg(&ctrl->dev, "ch:%d, chan PR rate:%x\n", chan, slc->prrate);
	if (prop->baser == SLIM_RATE_4000HZ)
		chrate = 4000 * prop->ratem;
	else if (prop->baser == SLIM_RATE_11025HZ)
		chrate = 11025 * prop->ratem;
	else
		chrate = prop->ratem;
	/* max allowed sample freq = 768 seg/frame */
	if (chrate > 3600000)
		return -EDQUOT;
	if (prop->baser == SLIM_RATE_4000HZ &&
			ctrl->a_framer->superfreq == 4000)
		coeff = prop->ratem;
	else if (prop->baser == SLIM_RATE_11025HZ &&
			ctrl->a_framer->superfreq == 3675)
		coeff = 3 * prop->ratem;
	else {
		/* generic case: scale against the framer's root frequency;
		 * rounding up makes the schedule inexact */
		u32 tempr = 0;
		tempr = chrate * SLIM_CL_PER_SUPERFRAME_DIV8;
		coeff = tempr / ctrl->a_framer->rootfreq;
		if (coeff * ctrl->a_framer->rootfreq != tempr) {
			coeff++;
			exact = false;
		}
	}

	/* convert coeff to coeff-exponent */
	exp = 0;
	while (!done) {
		while ((coeff & 0x1) != 0x1) {
			coeff >>= 1;
			exp++;
		}
		if (coeff > 3) {
			coeff++;
			exact = false;
		} else
			done = true;
	}
	if (prop->prot == SLIM_HARD_ISO && !exact)
		return -EPROTONOSUPPORT;
	else if (prop->prot == SLIM_AUTO_ISO) {
		if (exact)
			prop->prot = SLIM_HARD_ISO;
		else {
			/* Push-Pull not supported for now */
			return -EPROTONOSUPPORT;
		}
	}
	slc->rootexp = exp;
	slc->seglen = prop->sampleszbits/SLIM_CL_PER_SL;
	/* non-isochronous protocols carry an extra (e.g. tag) slot */
	if (prop->prot != SLIM_HARD_ISO)
		slc->seglen++;
	if (prop->prot >= SLIM_EXT_SMPLX)
		slc->seglen++;

	/* convert coeff to enum */
	if (coeff == 1) {
		if (exp > 9)
			ret = -EIO;
		coeff = SLIM_COEFF_1;
	} else {
		if (exp > 8)
			ret = -EIO;
		coeff = SLIM_COEFF_3;
	}
	slc->coeff = coeff;
	return ret;
}

/*
 * slim_alloc_ch: Allocate a slimbus channel and return its handle.
 * @sb: client handle.
 * @chanh: return channel handle
 * Slimbus channels are limited to 256 per specification.
 * -EXFULL is returned if all channels are in use.
 * Although slimbus specification supports 256 channels, a controller may not
 * support that many channels.
 */
int slim_alloc_ch(struct slim_device *sb, u16 *chanh)
{
	struct slim_controller *ctrl = sb->ctrl;
	u16 i;

	if (!ctrl)
		return -EINVAL;
	mutex_lock(&ctrl->sched.m_reconf);
	/* linear scan for the first free channel slot */
	for (i = 0; i < ctrl->nchans; i++) {
		if (ctrl->chans[i].state == SLIM_CH_FREE)
			break;
	}
	if (i >= ctrl->nchans) {
		mutex_unlock(&ctrl->sched.m_reconf);
		return -EXFULL;
	}
	*chanh = i;
	ctrl->chans[i].nextgrp = 0;
	ctrl->chans[i].state = SLIM_CH_ALLOCATED;
	/* bus channel number is offset by controller-reserved channels */
	ctrl->chans[i].chan = (u8)(ctrl->reserved + i);

	mutex_unlock(&ctrl->sched.m_reconf);
	return 0;
}
EXPORT_SYMBOL_GPL(slim_alloc_ch);

/*
 * slim_query_ch: Get reference-counted handle for a channel number. Every
 * channel is reference counted by upto one as producer and the others as
 * consumer)
 * @sb: client handle
 * @chan: slimbus channel number
 * @chanh: return channel handle
 * If request channel number is not in use, it is allocated, and reference
 * count is set to one. If the channel was already allocated, this API
 * will return handle to that channel and reference count is incremented.
 * -EXFULL is returned if all channels are in use
 */
int slim_query_ch(struct slim_device *sb, u8 ch, u16 *chanh)
{
	struct slim_controller *ctrl = sb->ctrl;
	u16 i, j;
	int ret = 0;
	if (!ctrl || !chanh)
		return -EINVAL;
	mutex_lock(&ctrl->sched.m_reconf);
	/* start with modulo number */
	i = ch % ctrl->nchans;

	/* first pass: open-addressing style search for an existing entry */
	for (j = 0; j < ctrl->nchans; j++) {
		if (ctrl->chans[i].chan == ch) {
			*chanh = i;
			ctrl->chans[i].ref++;
			if (ctrl->chans[i].state == SLIM_CH_FREE)
				ctrl->chans[i].state = SLIM_CH_ALLOCATED;
			goto query_out;
		}
		i = (i + 1) % ctrl->nchans;
	}

	/* Channel not in table yet */
	ret = -EXFULL;
	/* second pass: claim the first free slot for this channel number */
	for (j = 0; j < ctrl->nchans; j++) {
		if (ctrl->chans[i].state == SLIM_CH_FREE) {
			ctrl->chans[i].state = SLIM_CH_ALLOCATED;
			*chanh = i;
			ctrl->chans[i].ref++;
			ctrl->chans[i].chan = ch;
			ctrl->chans[i].nextgrp = 0;
			ret = 0;
			break;
		}
		i = (i + 1) % ctrl->nchans;
	}
query_out:
	mutex_unlock(&ctrl->sched.m_reconf);
	/* NOTE(review): this dev_dbg reads chans[i].ref after dropping the
	 * lock — harmless for a debug print, but racy by the letter. */
	dev_dbg(&ctrl->dev, "query ch:%d,hdl:%d,ref:%d,ret:%d",
				ch, i, ctrl->chans[i].ref, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(slim_query_ch);

/*
 * slim_dealloc_ch: Deallocate channel allocated using the API above
 * -EISCONN is returned if the channel is tried to be deallocated without
 * being removed first.
 * -ENOTCONN is returned if deallocation is tried on a channel that's not
 * allocated.
 */
int slim_dealloc_ch(struct slim_device *sb, u16 chanh)
{
	struct slim_controller *ctrl = sb->ctrl;
	u8 chan = SLIM_HDL_TO_CHIDX(chanh);
	struct slim_ich *slc = &ctrl->chans[chan];
	if (!ctrl)
		return -EINVAL;
	mutex_lock(&ctrl->sched.m_reconf);
	if (slc->state == SLIM_CH_FREE) {
		mutex_unlock(&ctrl->sched.m_reconf);
		return -ENOTCONN;
	}
	/* shared channel: just drop one reference */
	if (slc->ref > 1) {
		slc->ref--;
		mutex_unlock(&ctrl->sched.m_reconf);
		dev_dbg(&ctrl->dev, "remove chan:%d,hdl:%d,ref:%d",
					slc->chan, chanh, slc->ref);
		return 0;
	}
	/* an active/pending channel must be removed before dealloc */
	if (slc->state >= SLIM_CH_PENDING_ACTIVE) {
		dev_err(&ctrl->dev, "Channel:%d should be removed first", chan);
		mutex_unlock(&ctrl->sched.m_reconf);
		return -EISCONN;
	}
	slc->ref--;
	slc->state = SLIM_CH_FREE;
	mutex_unlock(&ctrl->sched.m_reconf);
	dev_dbg(&ctrl->dev, "remove chan:%d,hdl:%d,ref:%d",
				slc->chan, chanh, slc->ref);
	return 0;
}
EXPORT_SYMBOL_GPL(slim_dealloc_ch);

/*
 * slim_get_ch_state: Channel state.
 * This API returns the channel's state (active, suspended, inactive etc)
 */
enum slim_ch_state slim_get_ch_state(struct slim_device *sb, u16 chanh)
{
	u8 chan = SLIM_HDL_TO_CHIDX(chanh);
	struct slim_ich *slc = &sb->ctrl->chans[chan];
	return slc->state;
}
EXPORT_SYMBOL_GPL(slim_get_ch_state);

/*
 * slim_define_ch: Define a channel.This API defines channel parameters for a
 * given channel.
 * @sb: client handle.
 * @prop: slim_ch structure with channel parameters desired to be used.
 * @chanh: list of channels to be defined.
 * @nchan: number of channels in a group (1 if grp is false)
 * @grp: Are the channels grouped
 * @grph: return group handle if grouping of channels is desired.
 * Channels can be grouped if multiple channels use same parameters
 * (e.g. 5.1 audio has 6 channels with same parameters. They will all be grouped
 * and given 1 handle for simplicity and avoid repeatedly calling the API)
 * -EISCONN is returned if channel is already used with different parameters.
 * -ENXIO is returned if the channel is not yet allocated.
 */
int slim_define_ch(struct slim_device *sb, struct slim_ch *prop, u16 *chanh,
			u8 nchan, bool grp, u16 *grph)
{
	struct slim_controller *ctrl = sb->ctrl;
	int i, ret = 0;

	if (!ctrl || !chanh || !prop || !nchan)
		return -EINVAL;
	mutex_lock(&ctrl->sched.m_reconf);
	for (i = 0; i < nchan; i++) {
		u8 chan = SLIM_HDL_TO_CHIDX(chanh[i]);
		struct slim_ich *slc = &ctrl->chans[chan];
		dev_dbg(&ctrl->dev, "define_ch: ch:%d, state:%d", chan,
				(int)ctrl->chans[chan].state);
		if (slc->state < SLIM_CH_ALLOCATED) {
			ret = -ENXIO;
			goto err_define_ch;
		}
		/* already defined and shared: new parameters must match */
		if (slc->state >= SLIM_CH_DEFINED && slc->ref >= 2) {
			if (prop->ratem != slc->prop.ratem ||
			prop->sampleszbits != slc->prop.sampleszbits ||
			prop->baser != slc->prop.baser) {
				ret = -EISCONN;
				goto err_define_ch;
			}
		} else if (slc->state > SLIM_CH_DEFINED) {
			ret = -EISCONN;
			goto err_define_ch;
		} else {
			ctrl->chans[chan].prop = *prop;
			ret = slim_nextdefine_ch(sb, chan);
			if (ret)
				goto err_define_ch;
		}
		/* chain group members; mark the first and last entries */
		if (i < (nchan - 1))
			ctrl->chans[chan].nextgrp = chanh[i + 1];
		if (i == 0)
			ctrl->chans[chan].nextgrp |= SLIM_START_GRP;
		if (i == (nchan - 1))
			ctrl->chans[chan].nextgrp |= SLIM_END_GRP;
	}

	/* group handle encodes member count and first channel index */
	if (grp)
		*grph = ((nchan << 8) | SLIM_HDL_TO_CHIDX(chanh[0]));
	for (i = 0; i < nchan; i++) {
		u8 chan = SLIM_HDL_TO_CHIDX(chanh[i]);
		struct slim_ich *slc = &ctrl->chans[chan];
		if (slc->state == SLIM_CH_ALLOCATED)
			slc->state = SLIM_CH_DEFINED;
	}
err_define_ch:
	dev_dbg(&ctrl->dev, "define_ch: ch:%d, ret:%d", *chanh, ret);
	mutex_unlock(&ctrl->sched.m_reconf);
	return ret;
}
EXPORT_SYMBOL_GPL(slim_define_ch);

/*
 * Translate the chosen control width/subframe length into the subframe
 * coding value sent in the reconfiguration sequence. ctrlw and subfrml are
 * clamped in-place to supported values; msgsl returns the message slots
 * available per superframe with that coding.
 */
static u32 getsubfrmcoding(u32 *ctrlw, u32 *subfrml, u32 *msgsl)
{
	u32 code = 0;
	/* whole subframe is control space: use the default 8/8 coding */
	if (*ctrlw == *subfrml) {
		*ctrlw = 8;
		*subfrml = 8;
		*msgsl = SLIM_SL_PER_SUPERFRAME - SLIM_FRM_SLOTS_PER_SUPERFRAME
				- SLIM_GDE_SLOTS_PER_SUPERFRAME;
		return 0;
	}
	if (*subfrml == 6) {
		code = 0;
		*msgsl = 256;
	} else if (*subfrml == 8) {
		code = 1;
		*msgsl = 192;
	} else if (*subfrml == 24) {
		code = 2;
		*msgsl = 64;
	} else {
		/* 32 */
		code = 3;
		*msgsl = 48;
	}

	if (*ctrlw < 8) {
		if (*ctrlw >= 6) {
			*ctrlw = 6;
			code |= 0x14;
		} else {
			if (*ctrlw == 5)
				*ctrlw = 4;
			code |= (*ctrlw << 2);
		}
	} else {
		code -= 2;
		if (*ctrlw >= 24) {
			*ctrlw = 24;
			code |= 0x1e;
		} else if (*ctrlw >= 16) {
			*ctrlw = 16;
			code |= 0x1c;
		} else if (*ctrlw >= 12) {
			*ctrlw = 12;
			code |= 0x1a;
		} else {
			*ctrlw = 8;
			code |= 0x18;
		}
	}

	*msgsl = (*msgsl * *ctrlw) - SLIM_FRM_SLOTS_PER_SUPERFRAME -
				SLIM_GDE_SLOTS_PER_SUPERFRAME;
	return code;
}

/*
 * Shift the pending segment offset of every live channel in @ach by @shft,
 * wrapping offsets back inside each channel's interval.
 */
static void shiftsegoffsets(struct slim_controller *ctrl, struct slim_ich **ach,
				int sz, u32 shft)
{
	int i;
	u32 oldoff;	/* NOTE(review): assigned but unused below */
	for (i = 0; i < sz; i++) {
		struct slim_ich *slc;
		if (ach[i] == NULL)
			continue;
		slc = ach[i];
		if (slc->state == SLIM_CH_PENDING_REMOVAL)
			continue;
		oldoff = slc->newoff;
		slc->newoff += shft;
		/* seg. offset must be <= interval */
		if (slc->newoff >= slc->newintr)
			slc->newoff -= slc->newintr;
	}
}

/*
 * Place every pending channel into the superframe for the proposed clock
 * gear: compute each channel's segment offset (newoff) and interval
 * (newintr), then pick the control width (*ctrlw) and subframe length
 * (*subfrml) that maximize leftover messaging bandwidth.
 * Channels are scheduled per rate family: the 4k family (coeff 1) and the
 * 12k family (coeff 3); when both exist the 12k family drives the schedule.
 * opensl* arrays track remaining open slots per half-interval bucket.
 * Returns -EXFULL when the gear cannot fit all channels.
 */
static int slim_sched_chans(struct slim_device *sb, u32 clkgear,
			u32 *ctrlw, u32 *subfrml)
{
	int coeff1, coeff3;
	enum slim_ch_coeff bias;
	struct slim_controller *ctrl = sb->ctrl;
	int last1 = ctrl->sched.num_cc1 - 1;
	int last3 = ctrl->sched.num_cc3 - 1;

	/*
	 * Find first channels with coeff 1 & 3 as starting points for
	 * scheduling
	 */
	for (coeff3 = 0; coeff3 < ctrl->sched.num_cc3; coeff3++) {
		struct slim_ich *slc = ctrl->sched.chc3[coeff3];
		if (slc->state == SLIM_CH_PENDING_REMOVAL)
			continue;
		else
			break;
	}
	for (coeff1 = 0; coeff1 < ctrl->sched.num_cc1; coeff1++) {
		struct slim_ich *slc = ctrl->sched.chc1[coeff1];
		if (slc->state == SLIM_CH_PENDING_REMOVAL)
			continue;
		else
			break;
	}
	/* nothing to schedule: default coding */
	if (coeff3 == ctrl->sched.num_cc3 && coeff1 == ctrl->sched.num_cc1) {
		*ctrlw = 8;
		*subfrml = 8;
		return 0;
	} else if (coeff3 == ctrl->sched.num_cc3)
		bias = SLIM_COEFF_1;
	else
		bias = SLIM_COEFF_3;

	/*
	 * Find last chan in coeff1, 3 list, we will use to know when we
	 * have done scheduling all coeff1 channels
	 */
	while (last1 >= 0) {
		if (ctrl->sched.chc1[last1] != NULL &&
			(ctrl->sched.chc1[last1])->state !=
			SLIM_CH_PENDING_REMOVAL)
			break;
		last1--;
	}
	while (last3 >= 0) {
		if (ctrl->sched.chc3[last3] != NULL &&
			(ctrl->sched.chc3[last3])->state !=
			SLIM_CH_PENDING_REMOVAL)
			break;
		last3--;
	}

	if (bias == SLIM_COEFF_1) {
		/* only 4k-family channels present */
		struct slim_ich *slc1 = ctrl->sched.chc1[coeff1];
		u32 expshft = SLIM_MAX_CLK_GEAR - clkgear;
		int curexp, finalexp;
		u32 curintr, curmaxsl;
		int opensl1[2];
		int maxctrlw1;

		finalexp = (ctrl->sched.chc1[last1])->rootexp;
		curexp = (int)expshft - 1;
		curintr = (SLIM_MAX_INTR_COEFF_1 * 2) >> (curexp + 1);
		curmaxsl = curintr >> 1;
		opensl1[0] = opensl1[1] = curmaxsl;

		/* halve the interval each pass until all channels placed */
		while ((coeff1 < ctrl->sched.num_cc1) || (curintr > 24)) {
			curintr >>= 1;
			curmaxsl >>= 1;

			/* update 4K family open slot records */
			if (opensl1[1] < opensl1[0])
				opensl1[1] -= curmaxsl;
			else
				opensl1[1] = opensl1[0] - curmaxsl;
			opensl1[0] = curmaxsl;
			if (opensl1[1] < 0) {
				opensl1[0] += opensl1[1];
				opensl1[1] = 0;
			}
			if (opensl1[0] <= 0) {
				dev_dbg(&ctrl->dev, "reconfig failed:%d\n",
						__LINE__);
				return -EXFULL;
			}
			curexp++;
			/* schedule 4k family channels */
			while ((coeff1 < ctrl->sched.num_cc1) && (curexp ==
					(int)(slc1->rootexp + expshft))) {
				if (slc1->state == SLIM_CH_PENDING_REMOVAL) {
					coeff1++;
					slc1 = ctrl->sched.chc1[coeff1];
					continue;
				}
				if (opensl1[1] >= opensl1[0] ||
					(finalexp == (int)slc1->rootexp &&
					 curintr <= 24 &&
					 opensl1[0] == curmaxsl)) {
					/* place in the second bucket */
					opensl1[1] -= slc1->seglen;
					slc1->newoff = curmaxsl + opensl1[1];
					if (opensl1[1] < 0 &&
						opensl1[0] == curmaxsl) {
						opensl1[0] += opensl1[1];
						opensl1[1] = 0;
						if (opensl1[0] < 0) {
							dev_dbg(&ctrl->dev,
							"reconfig failed:%d\n",
							__LINE__);
							return -EXFULL;
						}
					}
				} else {
					/* place in the first bucket */
					if (slc1->seglen > opensl1[0]) {
						dev_dbg(&ctrl->dev,
							"reconfig failed:%d\n",
							__LINE__);
						return -EXFULL;
					}
					slc1->newoff = opensl1[0] -
							slc1->seglen;
					opensl1[0] = slc1->newoff;
				}
				slc1->newintr = curintr;
				coeff1++;
				slc1 = ctrl->sched.chc1[coeff1];
			}
		}
		/* Leave some slots for messaging space */
		if (opensl1[1] <= 0 && opensl1[0] <= 0)
			return -EXFULL;
		/* swap buckets if the second has more room */
		if (opensl1[1] > opensl1[0]) {
			int temp = opensl1[0];
			opensl1[0] = opensl1[1];
			opensl1[1] = temp;
			shiftsegoffsets(ctrl, ctrl->sched.chc1,
					ctrl->sched.num_cc1, curmaxsl);
		}
		/* choose subframe mode to maximize bw */
		maxctrlw1 = opensl1[0];
		if (opensl1[0] == curmaxsl)
			maxctrlw1 += opensl1[1];
		if (curintr >= 24) {
			*subfrml = 24;
			*ctrlw = maxctrlw1;
		} else if (curintr == 12) {
			if (maxctrlw1 > opensl1[1] * 4) {
				*subfrml = 24;
				*ctrlw = maxctrlw1;
			} else {
				*subfrml = 6;
				*ctrlw = opensl1[1];
			}
		} else {
			*subfrml = 6;
			*ctrlw = maxctrlw1;
		}
	} else {
		/* 12k-family channels present (possibly 4k ones too) */
		struct slim_ich *slc1 = NULL;
		struct slim_ich *slc3 = ctrl->sched.chc3[coeff3];
		u32 expshft = SLIM_MAX_CLK_GEAR - clkgear;
		int curexp, finalexp, exp1;
		u32 curintr, curmaxsl;
		int opensl3[2];
		int opensl1[6];
		bool opensl1valid = false;
		int maxctrlw1, maxctrlw3, i;

		finalexp = (ctrl->sched.chc3[last3])->rootexp;
		if (last1 >= 0) {
			slc1 = ctrl->sched.chc1[coeff1];
			exp1 = (ctrl->sched.chc1[last1])->rootexp;
			if (exp1 > finalexp)
				finalexp = exp1;
		}
		curexp = (int)expshft - 1;
		curintr = (SLIM_MAX_INTR_COEFF_3 * 2) >> (curexp + 1);
		curmaxsl = curintr >> 1;
		opensl3[0] = opensl3[1] = curmaxsl;

		while (coeff1 < ctrl->sched.num_cc1 ||
			coeff3 < ctrl->sched.num_cc3 || curintr > 32) {
			curintr >>= 1;
			curmaxsl >>= 1;

			/* update 12k family open slot records */
			if (opensl3[1] < opensl3[0])
				opensl3[1] -= curmaxsl;
			else
				opensl3[1] = opensl3[0] - curmaxsl;
			opensl3[0] = curmaxsl;
			if (opensl3[1] < 0) {
				opensl3[0] += opensl3[1];
				opensl3[1] = 0;
			}
			if (opensl3[0] <= 0) {
				dev_dbg(&ctrl->dev, "reconfig failed:%d\n",
						__LINE__);
				return -EXFULL;
			}
			curexp++;

			/* schedule 12k family channels */
			while (coeff3 < ctrl->sched.num_cc3 && curexp ==
					(int)slc3->rootexp + expshft) {
				if (slc3->state == SLIM_CH_PENDING_REMOVAL) {
					coeff3++;
					slc3 = ctrl->sched.chc3[coeff3];
					continue;
				}
				opensl1valid = false;
				if (opensl3[1] >= opensl3[0] ||
					(finalexp == (int)slc3->rootexp &&
					 curintr <= 32 &&
					 opensl3[0] == curmaxsl &&
					 last1 < 0)) {
					opensl3[1] -= slc3->seglen;
					slc3->newoff = curmaxsl + opensl3[1];
					if (opensl3[1] < 0 &&
						opensl3[0] == curmaxsl) {
						opensl3[0] += opensl3[1];
						opensl3[1] = 0;
					}
					if (opensl3[0] < 0) {
						dev_dbg(&ctrl->dev,
							"reconfig failed:%d\n",
							__LINE__);
						return -EXFULL;
					}
				} else {
					if (slc3->seglen > opensl3[0]) {
						dev_dbg(&ctrl->dev,
							"reconfig failed:%d\n",
							__LINE__);
						return -EXFULL;
					}
					slc3->newoff = opensl3[0] -
							slc3->seglen;
					opensl3[0] = slc3->newoff;
				}
				slc3->newintr = curintr;
				coeff3++;
				slc3 = ctrl->sched.chc3[coeff3];
			}
			/* update 4k openslot records */
			if (opensl1valid == false) {
				/* mirror the 12k buckets into all three
				 * 4k bucket pairs */
				for (i = 0; i < 3; i++) {
					opensl1[i * 2] = opensl3[0];
					opensl1[(i * 2) + 1] = opensl3[1];
				}
			} else {
				int opensl1p[6];
				memcpy(opensl1p, opensl1, sizeof(opensl1));
				for (i = 0; i < 3; i++) {
					if (opensl1p[i] < opensl1p[i + 3])
						opensl1[(i * 2) + 1] =
							opensl1p[i];
					else
						opensl1[(i * 2) + 1] =
							opensl1p[i + 3];
				}
				for (i = 0; i < 3; i++) {
					opensl1[(i * 2) + 1] -= curmaxsl;
					opensl1[i * 2] = curmaxsl;
					if (opensl1[(i * 2) + 1] < 0) {
						opensl1[i * 2] +=
							opensl1[(i * 2) + 1];
						opensl1[(i * 2) + 1] = 0;
					}
					if (opensl1[i * 2] < 0) {
						dev_dbg(&ctrl->dev,
							"reconfig failed:%d\n",
							__LINE__);
						return -EXFULL;
					}
				}
			}
			/* schedule 4k family channels */
			while (coeff1 < ctrl->sched.num_cc1 && curexp ==
					(int)slc1->rootexp + expshft) {
				/* searchorder effective when opensl valid */
				static const int srcho[] = { 5, 2, 4, 1, 3, 0 };
				int maxopensl = 0;
				int maxi = 0;
				if (slc1->state == SLIM_CH_PENDING_REMOVAL) {
					coeff1++;
					slc1 = ctrl->sched.chc1[coeff1];
					continue;
				}
				opensl1valid = true;
				/* pick the bucket with the most open slots */
				for (i = 0; i < 6; i++) {
					if (opensl1[srcho[i]] > maxopensl) {
						maxopensl = opensl1[srcho[i]];
						maxi = srcho[i];
					}
				}
				opensl1[maxi] -= slc1->seglen;
				slc1->newoff = (curmaxsl * maxi) +
						opensl1[maxi];
				if (opensl1[maxi] < 0) {
					/* borrow from the paired even bucket
					 * if it is still untouched */
					if (((maxi & 1) == 1) &&
						(opensl1[maxi - 1] ==
						 curmaxsl)) {
						opensl1[maxi - 1] +=
							opensl1[maxi];
						if (opensl3[0] >
							opensl1[maxi - 1])
							opensl3[0] =
							opensl1[maxi - 1];
						opensl3[1] = 0;
						opensl1[maxi] = 0;
						if (opensl1[maxi - 1] < 0) {
							dev_dbg(&ctrl->dev,
							"reconfig failed:%d\n",
							__LINE__);
							return -EXFULL;
						}
					} else {
						dev_dbg(&ctrl->dev,
							"reconfig failed:%d\n",
							__LINE__);
						return -EXFULL;
					}
				} else {
					if (opensl3[maxi & 1] > opensl1[maxi])
						opensl3[maxi & 1] =
							opensl1[maxi];
				}
				slc1->newintr = curintr * 3;
				coeff1++;
				slc1 = ctrl->sched.chc1[coeff1];
			}
		}
		/* Leave some slots for messaging space */
		if (opensl3[1] <= 0 && opensl3[0] <= 0)
			return -EXFULL;
		/* swap 1st and 2nd bucket if 2nd bucket has more open slots */
		if (opensl3[1] > opensl3[0]) {
			int temp = opensl3[0];
			opensl3[0] = opensl3[1];
			opensl3[1] = temp;
			temp = opensl1[5];
			opensl1[5] = opensl1[4];
			opensl1[4] = opensl1[3];
			opensl1[3] = opensl1[2];
			opensl1[2] = opensl1[1];
			opensl1[1] = opensl1[0];
			opensl1[0] = temp;
			shiftsegoffsets(ctrl, ctrl->sched.chc1,
					ctrl->sched.num_cc1, curmaxsl);
			shiftsegoffsets(ctrl, ctrl->sched.chc3,
					ctrl->sched.num_cc3, curmaxsl);
		}
		/* subframe mode to maximize BW */
		maxctrlw3 = opensl3[0];
		maxctrlw1 = opensl1[0];
		if (opensl3[0] == curmaxsl)
			maxctrlw3 += opensl3[1];
		for (i = 0; i < 5 && opensl1[i] == curmaxsl; i++)
			maxctrlw1 += opensl1[i + 1];
		if (curintr >= 32) {
			*subfrml = 32;
			*ctrlw = maxctrlw3;
		} else if (curintr == 16) {
			if (maxctrlw3 > (opensl3[1] * 4)) {
				*subfrml = 32;
				*ctrlw = maxctrlw3;
			} else {
				*subfrml = 8;
				*ctrlw = opensl3[1];
			}
		} else {
			if ((maxctrlw1 * 8) >= (maxctrlw3 * 24)) {
				*subfrml = 24;
				*ctrlw = maxctrlw1;
			} else {
				*subfrml = 8;
				*ctrlw = maxctrlw3;
			}
		}
	}
	return 0;
}

#ifdef DEBUG
/*
 * Debug-only sanity check: replay the schedule into a per-slot occupancy
 * map and fail with -EXFULL on any overlap (33 marks control space).
 */
static int slim_verifychansched(struct slim_controller *ctrl, u32 ctrlw,
					u32 subfrml, u32 clkgear)
{
	int sl, i;
	int cc1 = 0;
	int cc3 = 0;
	struct slim_ich *slc = NULL;
	if (!ctrl->sched.slots)
		return 0;
	memset(ctrl->sched.slots, 0, SLIM_SL_PER_SUPERFRAME);
	dev_dbg(&ctrl->dev, "Clock gear is:%d\n", clkgear);
	/* mark control space in every subframe */
	for (sl = 0; sl < SLIM_SL_PER_SUPERFRAME; sl += subfrml) {
		for (i = 0; i < ctrlw; i++)
			ctrl->sched.slots[sl + i] = 33;
	}
	while (cc1 < ctrl->sched.num_cc1) {
		slc = ctrl->sched.chc1[cc1];
		if (slc == NULL) {
			dev_err(&ctrl->dev, "SLC1 null in verify: chan%d\n",
				cc1);
			return -EIO;
		}
		dev_dbg(&ctrl->dev, "chan:%d, offset:%d, intr:%d, seglen:%d\n",
				(slc - ctrl->chans), slc->newoff,
				slc->newintr, slc->seglen);
		if (slc->state != SLIM_CH_PENDING_REMOVAL) {
			for (sl = slc->newoff;
				sl < SLIM_SL_PER_SUPERFRAME;
				sl += slc->newintr) {
				for (i = 0; i < slc->seglen; i++) {
					if (ctrl->sched.slots[sl + i])
						return -EXFULL;
					ctrl->sched.slots[sl + i] = cc1 + 1;
				}
			}
		}
		cc1++;
	}
	while (cc3 < ctrl->sched.num_cc3) {
		slc = ctrl->sched.chc3[cc3];
		if (slc == NULL) {
			dev_err(&ctrl->dev, "SLC3 null in verify: chan%d\n",
				cc3);
			return -EIO;
		}
		dev_dbg(&ctrl->dev, "chan:%d, offset:%d, intr:%d, seglen:%d\n",
				(slc - ctrl->chans), slc->newoff,
				slc->newintr, slc->seglen);
		if (slc->state != SLIM_CH_PENDING_REMOVAL) {
			for (sl = slc->newoff;
				sl < SLIM_SL_PER_SUPERFRAME;
				sl += slc->newintr) {
				for (i = 0; i < slc->seglen; i++) {
					if (ctrl->sched.slots[sl + i])
						return -EXFULL;
					ctrl->sched.slots[sl + i] = cc3 + 1;
				}
			}
		}
		cc3++;
	}
	return 0;
}
#else
/* non-DEBUG builds: verification is compiled out */
static int slim_verifychansched(struct slim_controller *ctrl, u32 ctrlw,
					u32 subfrml, u32 clkgear)
{
	return 0;
}
#endif

/*
 * Bubble-sort the members of a channel group by their new segment offsets
 * so group members appear in superframe order; walks the nextgrp chain
 * until the SLIM_END_GRP marker.
 */
static void slim_sort_chan_grp(struct slim_controller *ctrl,
				struct slim_ich *slc)
{
	u8 last = (u8)-1;
	u8 second = 0;

	for (; last > 0; last--) {
		struct slim_ich *slc1 = slc;
		struct slim_ich *slc2;
		u8 next = SLIM_HDL_TO_CHIDX(slc1->nextgrp);
		slc2 = &ctrl->chans[next];
		for (second = 1; second <= last && slc2 &&
			(slc2->state == SLIM_CH_ACTIVE ||
			 slc2->state == SLIM_CH_PENDING_ACTIVE); second++) {
			/* swap offsets so the chain is ordered by offset */
			if (slc1->newoff > slc2->newoff) {
				u32 temp = slc2->newoff;
				slc2->newoff = slc1->newoff;
				slc1->newoff = temp;
			}
			if (slc2->nextgrp & SLIM_END_GRP) {
				last = second;
				break;
			}
			slc1 = slc2;
			next = SLIM_HDL_TO_CHIDX(slc1->nextgrp);
			slc2 = &ctrl->chans[next];
		}
		if (slc2 == NULL)
			last = second - 1;
	}
}

/* (continues beyond this chunk) */
static int slim_allocbw(struct slim_device *sb, int *subfrmc, int *clkgear)
{
	u32 msgsl = 0;
	u32 ctrlw = 0;
	u32 subfrml = 0;
	int ret = -EIO;
	struct slim_controller *ctrl = sb->ctrl;
	u32 usedsl = ctrl->sched.usedslots + ctrl->sched.pending_msgsl;
	u32 availsl = SLIM_SL_PER_SUPERFRAME - SLIM_FRM_SLOTS_PER_SUPERFRAME -
			SLIM_GDE_SLOTS_PER_SUPERFRAME;
	*clkgear = SLIM_MAX_CLK_GEAR;
dev_dbg(&ctrl->dev, "used sl:%u, availlable sl:%u\n", usedsl, availsl); dev_dbg(&ctrl->dev, "pending:chan sl:%u, :msg sl:%u, clkgear:%u\n", ctrl->sched.usedslots, ctrl->sched.pending_msgsl, *clkgear); /* * If number of slots are 0, that means channels are inactive. * It is very likely that the manager will call clock pause very soon. * By making sure that bus is in MAX_GEAR, clk pause sequence will take * minimum amount of time. */ if (ctrl->sched.usedslots != 0) { while ((usedsl * 2 <= availsl) && (*clkgear > ctrl->min_cg)) { *clkgear -= 1; usedsl *= 2; } } /* * Try scheduling data channels at current clock gear, if all channels * can be scheduled, or reserved BW can't be satisfied, increase clock * gear and try again */ for (; *clkgear <= ctrl->max_cg; (*clkgear)++) { ret = slim_sched_chans(sb, *clkgear, &ctrlw, &subfrml); if (ret == 0) { *subfrmc = getsubfrmcoding(&ctrlw, &subfrml, &msgsl); if ((msgsl >> (ctrl->max_cg - *clkgear) < ctrl->sched.pending_msgsl) && (*clkgear < ctrl->max_cg)) continue; else break; } } if (ret == 0) { int i; /* Sort channel-groups */ for (i = 0; i < ctrl->sched.num_cc1; i++) { struct slim_ich *slc = ctrl->sched.chc1[i]; if (slc->state == SLIM_CH_PENDING_REMOVAL) continue; if ((slc->nextgrp & SLIM_START_GRP) && !(slc->nextgrp & SLIM_END_GRP)) { slim_sort_chan_grp(ctrl, slc); } } for (i = 0; i < ctrl->sched.num_cc3; i++) { struct slim_ich *slc = ctrl->sched.chc3[i]; if (slc->state == SLIM_CH_PENDING_REMOVAL) continue; if ((slc->nextgrp & SLIM_START_GRP) && !(slc->nextgrp & SLIM_END_GRP)) { slim_sort_chan_grp(ctrl, slc); } } ret = slim_verifychansched(ctrl, ctrlw, subfrml, *clkgear); } return ret; } static void slim_change_existing_chans(struct slim_controller *ctrl, int coeff) { struct slim_ich **arr; int len, i; if (coeff == SLIM_COEFF_1) { arr = ctrl->sched.chc1; len = ctrl->sched.num_cc1; } else { arr = ctrl->sched.chc3; len = ctrl->sched.num_cc3; } for (i = 0; i < len; i++) { struct slim_ich *slc = arr[i]; if (slc->state == 
SLIM_CH_ACTIVE || slc->state == SLIM_CH_SUSPENDED) slc->offset = slc->newoff; slc->interval = slc->newintr; } } static void slim_chan_changes(struct slim_device *sb, bool revert) { struct slim_controller *ctrl = sb->ctrl; while (!list_empty(&sb->mark_define)) { struct slim_ich *slc; struct slim_pending_ch *pch = list_entry(sb->mark_define.next, struct slim_pending_ch, pending); slc = &ctrl->chans[pch->chan]; if (revert) { if (slc->state == SLIM_CH_PENDING_ACTIVE) { u32 sl = slc->seglen << slc->rootexp; if (slc->coeff == SLIM_COEFF_3) sl *= 3; if (!ctrl->allocbw) ctrl->sched.usedslots -= sl; slim_remove_ch(ctrl, slc); slc->state = SLIM_CH_DEFINED; } } else { slc->state = SLIM_CH_ACTIVE; slc->def++; } list_del_init(&pch->pending); kfree(pch); } while (!list_empty(&sb->mark_removal)) { struct slim_pending_ch *pch = list_entry(sb->mark_removal.next, struct slim_pending_ch, pending); struct slim_ich *slc = &ctrl->chans[pch->chan]; u32 sl = slc->seglen << slc->rootexp; if (revert || slc->def > 0) { if (slc->coeff == SLIM_COEFF_3) sl *= 3; if (!ctrl->allocbw) ctrl->sched.usedslots += sl; if (revert) slc->def++; slc->state = SLIM_CH_ACTIVE; } else slim_remove_ch(ctrl, slc); list_del_init(&pch->pending); kfree(pch); } while (!list_empty(&sb->mark_suspend)) { struct slim_pending_ch *pch = list_entry(sb->mark_suspend.next, struct slim_pending_ch, pending); struct slim_ich *slc = &ctrl->chans[pch->chan]; if (revert) slc->state = SLIM_CH_ACTIVE; list_del_init(&pch->pending); kfree(pch); } /* Change already active channel if reconfig succeeded */ if (!revert) { slim_change_existing_chans(ctrl, SLIM_COEFF_1); slim_change_existing_chans(ctrl, SLIM_COEFF_3); } } /* * slim_reconfigure_now: Request reconfiguration now. * @sb: client handle * This API does what commit flag in other scheduling APIs do. * -EXFULL is returned if there is no space in TDM to reserve the * bandwidth. -EBUSY is returned if reconfiguration request is already in * progress. 
*/ int slim_reconfigure_now(struct slim_device *sb) { u8 i; u8 wbuf[4]; u32 clkgear, subframe; u32 curexp; int ret; struct slim_controller *ctrl = sb->ctrl; u32 expshft; u32 segdist; struct slim_pending_ch *pch; mutex_lock(&ctrl->sched.m_reconf); /* * If there are no pending changes from this client, avoid sending * the reconfiguration sequence */ if (sb->pending_msgsl == sb->cur_msgsl && list_empty(&sb->mark_define) && list_empty(&sb->mark_suspend)) { struct list_head *pos, *next; list_for_each_safe(pos, next, &sb->mark_removal) { struct slim_ich *slc; pch = list_entry(pos, struct slim_pending_ch, pending); slc = &ctrl->chans[pch->chan]; if (slc->def > 0) slc->def--; /* Disconnect source port to free it up */ if (SLIM_HDL_TO_LA(slc->srch) == sb->laddr) slc->srch = 0; /* * If controller overrides BW allocation, * delete this in remove channel itself */ if (slc->def != 0 && !ctrl->allocbw) { list_del(&pch->pending); kfree(pch); } } if (list_empty(&sb->mark_removal)) { mutex_unlock(&ctrl->sched.m_reconf); pr_info("SLIM_CL: skip reconfig sequence"); return 0; } } ctrl->sched.pending_msgsl += sb->pending_msgsl - sb->cur_msgsl; list_for_each_entry(pch, &sb->mark_define, pending) { struct slim_ich *slc = &ctrl->chans[pch->chan]; slim_add_ch(ctrl, slc); if (slc->state < SLIM_CH_ACTIVE) slc->state = SLIM_CH_PENDING_ACTIVE; } list_for_each_entry(pch, &sb->mark_removal, pending) { struct slim_ich *slc = &ctrl->chans[pch->chan]; u32 sl = slc->seglen << slc->rootexp; if (slc->coeff == SLIM_COEFF_3) sl *= 3; if (!ctrl->allocbw) ctrl->sched.usedslots -= sl; slc->state = SLIM_CH_PENDING_REMOVAL; } list_for_each_entry(pch, &sb->mark_suspend, pending) { struct slim_ich *slc = &ctrl->chans[pch->chan]; slc->state = SLIM_CH_SUSPENDED; } /* * Controller can override default channel scheduling algorithm. * (e.g. 
if controller needs to use fixed channel scheduling based * on number of channels) */ if (ctrl->allocbw) ret = ctrl->allocbw(sb, &subframe, &clkgear); else ret = slim_allocbw(sb, &subframe, &clkgear); if (!ret) { ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST, SLIM_MSG_MC_BEGIN_RECONFIGURATION, 0, SLIM_MSG_MT_CORE, NULL, NULL, 0, 3, NULL, 0, NULL); dev_dbg(&ctrl->dev, "sending begin_reconfig:ret:%d\n", ret); } if (!ret && subframe != ctrl->sched.subfrmcode) { wbuf[0] = (u8)(subframe & 0xFF); ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST, SLIM_MSG_MC_NEXT_SUBFRAME_MODE, 0, SLIM_MSG_MT_CORE, NULL, (u8 *)&subframe, 1, 4, NULL, 0, NULL); dev_dbg(&ctrl->dev, "sending subframe:%d,ret:%d\n", (int)wbuf[0], ret); } if (!ret && clkgear != ctrl->clkgear) { wbuf[0] = (u8)(clkgear & 0xFF); ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST, SLIM_MSG_MC_NEXT_CLOCK_GEAR, 0, SLIM_MSG_MT_CORE, NULL, wbuf, 1, 4, NULL, 0, NULL); dev_dbg(&ctrl->dev, "sending clkgear:%d,ret:%d\n", (int)wbuf[0], ret); } if (ret) goto revert_reconfig; expshft = SLIM_MAX_CLK_GEAR - clkgear; /* activate/remove channel */ list_for_each_entry(pch, &sb->mark_define, pending) { struct slim_ich *slc = &ctrl->chans[pch->chan]; /* Define content */ wbuf[0] = slc->chan; wbuf[1] = slc->prrate; wbuf[2] = slc->prop.dataf | (slc->prop.auxf << 4); wbuf[3] = slc->prop.sampleszbits / SLIM_CL_PER_SL; dev_dbg(&ctrl->dev, "define content, activate:%x, %x, %x, %x\n", wbuf[0], wbuf[1], wbuf[2], wbuf[3]); /* Right now, channel link bit is not supported */ ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST, SLIM_MSG_MC_NEXT_DEFINE_CONTENT, 0, SLIM_MSG_MT_CORE, NULL, (u8 *)&wbuf, 4, 7, NULL, 0, NULL); if (ret) goto revert_reconfig; ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST, SLIM_MSG_MC_NEXT_ACTIVATE_CHANNEL, 0, SLIM_MSG_MT_CORE, NULL, (u8 *)&wbuf, 1, 4, NULL, 0, NULL); if (ret) goto revert_reconfig; } list_for_each_entry(pch, &sb->mark_removal, pending) { struct slim_ich *slc = &ctrl->chans[pch->chan]; 
dev_dbg(&ctrl->dev, "remove chan:%x\n", pch->chan); wbuf[0] = slc->chan; ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST, SLIM_MSG_MC_NEXT_REMOVE_CHANNEL, 0, SLIM_MSG_MT_CORE, NULL, wbuf, 1, 4, NULL, 0, NULL); if (ret) goto revert_reconfig; } list_for_each_entry(pch, &sb->mark_suspend, pending) { struct slim_ich *slc = &ctrl->chans[pch->chan]; dev_dbg(&ctrl->dev, "suspend chan:%x\n", pch->chan); wbuf[0] = slc->chan; ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST, SLIM_MSG_MC_NEXT_DEACTIVATE_CHANNEL, 0, SLIM_MSG_MT_CORE, NULL, wbuf, 1, 4, NULL, 0, NULL); if (ret) goto revert_reconfig; } /* Define CC1 channel */ for (i = 0; i < ctrl->sched.num_cc1; i++) { struct slim_ich *slc = ctrl->sched.chc1[i]; if (slc->state == SLIM_CH_PENDING_REMOVAL) continue; curexp = slc->rootexp + expshft; segdist = (slc->newoff << curexp) & 0x1FF; expshft = SLIM_MAX_CLK_GEAR - clkgear; dev_dbg(&ctrl->dev, "new-intr:%d, old-intr:%d, dist:%d\n", slc->newintr, slc->interval, segdist); dev_dbg(&ctrl->dev, "new-off:%d, old-off:%d\n", slc->newoff, slc->offset); if (slc->state < SLIM_CH_ACTIVE || slc->def < slc->ref || slc->newintr != slc->interval || slc->newoff != slc->offset) { segdist |= 0x200; segdist >>= curexp; segdist |= (slc->newoff << (curexp + 1)) & 0xC00; wbuf[0] = slc->chan; wbuf[1] = (u8)(segdist & 0xFF); wbuf[2] = (u8)((segdist & 0xF00) >> 8) | (slc->prop.prot << 4); wbuf[3] = slc->seglen; ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST, SLIM_MSG_MC_NEXT_DEFINE_CHANNEL, 0, SLIM_MSG_MT_CORE, NULL, (u8 *)wbuf, 4, 7, NULL, 0, NULL); if (ret) goto revert_reconfig; } } /* Define CC3 channels */ for (i = 0; i < ctrl->sched.num_cc3; i++) { struct slim_ich *slc = ctrl->sched.chc3[i]; if (slc->state == SLIM_CH_PENDING_REMOVAL) continue; curexp = slc->rootexp + expshft; segdist = (slc->newoff << curexp) & 0x1FF; expshft = SLIM_MAX_CLK_GEAR - clkgear; dev_dbg(&ctrl->dev, "new-intr:%d, old-intr:%d, dist:%d\n", slc->newintr, slc->interval, segdist); dev_dbg(&ctrl->dev, "new-off:%d, 
old-off:%d\n", slc->newoff, slc->offset); if (slc->state < SLIM_CH_ACTIVE || slc->def < slc->ref || slc->newintr != slc->interval || slc->newoff != slc->offset) { segdist |= 0x200; segdist >>= curexp; segdist |= 0xC00; wbuf[0] = slc->chan; wbuf[1] = (u8)(segdist & 0xFF); wbuf[2] = (u8)((segdist & 0xF00) >> 8) | (slc->prop.prot << 4); wbuf[3] = (u8)(slc->seglen); ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST, SLIM_MSG_MC_NEXT_DEFINE_CHANNEL, 0, SLIM_MSG_MT_CORE, NULL, (u8 *)wbuf, 4, 7, NULL, 0, NULL); if (ret) goto revert_reconfig; } } ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST, SLIM_MSG_MC_RECONFIGURE_NOW, 0, SLIM_MSG_MT_CORE, NULL, NULL, 0, 3, NULL, 0, NULL); dev_dbg(&ctrl->dev, "reconfig now:ret:%d\n", ret); if (!ret) { ctrl->sched.subfrmcode = subframe; ctrl->clkgear = clkgear; ctrl->sched.msgsl = ctrl->sched.pending_msgsl; sb->cur_msgsl = sb->pending_msgsl; slim_chan_changes(sb, false); mutex_unlock(&ctrl->sched.m_reconf); return 0; } revert_reconfig: /* Revert channel changes */ slim_chan_changes(sb, true); mutex_unlock(&ctrl->sched.m_reconf); return ret; } EXPORT_SYMBOL_GPL(slim_reconfigure_now); static int add_pending_ch(struct list_head *listh, u8 chan) { struct slim_pending_ch *pch; pch = kmalloc(sizeof(struct slim_pending_ch), GFP_KERNEL); if (!pch) return -ENOMEM; pch->chan = chan; list_add_tail(&pch->pending, listh); return 0; } /* * slim_control_ch: Channel control API. * @sb: client handle * @chanh: group or channel handle to be controlled * @chctrl: Control command (activate/suspend/remove) * @commit: flag to indicate whether the control should take effect right-away. * This API activates, removes or suspends a channel (or group of channels) * chanh indicates the channel or group handle (returned by the define_ch API). * Reconfiguration may be time-consuming since it can change all other active * channel allocations on the bus, change in clock gear used by the slimbus, * and change in the control space width used for messaging. 
* commit makes sure that multiple channels can be activated/deactivated before * reconfiguration is started. * -EXFULL is returned if there is no space in TDM to reserve the bandwidth. * -EISCONN/-ENOTCONN is returned if the channel is already connected or not * yet defined. * -EINVAL is returned if individual control of a grouped-channel is attempted. */ int slim_control_ch(struct slim_device *sb, u16 chanh, enum slim_ch_control chctrl, bool commit) { struct slim_controller *ctrl = sb->ctrl; int ret = 0; /* Get rid of the group flag in MSB if any */ u8 chan = SLIM_HDL_TO_CHIDX(chanh); u8 nchan = 0; struct slim_ich *slc = &ctrl->chans[chan]; if (!(slc->nextgrp & SLIM_START_GRP)) return -EINVAL; mutex_lock(&sb->sldev_reconf); do { struct slim_pending_ch *pch; u8 add_mark_removal = true; slc = &ctrl->chans[chan]; dev_dbg(&ctrl->dev, "chan:%d,ctrl:%d,def:%d", chan, chctrl, slc->def); if (slc->state < SLIM_CH_DEFINED) { ret = -ENOTCONN; break; } if (chctrl == SLIM_CH_SUSPEND) { ret = add_pending_ch(&sb->mark_suspend, chan); if (ret) break; } else if (chctrl == SLIM_CH_ACTIVATE) { if (slc->state > SLIM_CH_ACTIVE) { ret = -EISCONN; break; } ret = add_pending_ch(&sb->mark_define, chan); if (ret) break; } else { if (slc->state < SLIM_CH_ACTIVE) { ret = -ENOTCONN; break; } /* If channel removal request comes when pending * in the mark_define, remove it from the define * list instead of adding it to removal list */ if (!list_empty(&sb->mark_define)) { struct list_head *pos, *next; list_for_each_safe(pos, next, &sb->mark_define) { pch = list_entry(pos, struct slim_pending_ch, pending); if (pch->chan == chan) { list_del(&pch->pending); kfree(pch); add_mark_removal = false; break; } } } if (add_mark_removal == true) { ret = add_pending_ch(&sb->mark_removal, chan); if (ret) break; } } nchan++; if (nchan < SLIM_GRP_TO_NCHAN(chanh)) chan = SLIM_HDL_TO_CHIDX(slc->nextgrp); } while (nchan < SLIM_GRP_TO_NCHAN(chanh)); if (!ret && commit == true) ret = slim_reconfigure_now(sb); 
mutex_unlock(&sb->sldev_reconf); return ret; } EXPORT_SYMBOL_GPL(slim_control_ch); /* * slim_reservemsg_bw: Request to reserve bandwidth for messages. * @sb: client handle * @bw_bps: message bandwidth in bits per second to be requested * @commit: indicates whether the reconfiguration needs to be acted upon. * This API call can be grouped with slim_control_ch API call with only one of * the APIs specifying the commit flag to avoid reconfiguration being called too * frequently. -EXFULL is returned if there is no space in TDM to reserve the * bandwidth. -EBUSY is returned if reconfiguration is requested, but a request * is already in progress. */ int slim_reservemsg_bw(struct slim_device *sb, u32 bw_bps, bool commit) { struct slim_controller *ctrl = sb->ctrl; int ret = 0; int sl; mutex_lock(&sb->sldev_reconf); if ((bw_bps >> 3) >= ctrl->a_framer->rootfreq) sl = SLIM_SL_PER_SUPERFRAME; else { sl = (bw_bps * (SLIM_CL_PER_SUPERFRAME_DIV8/SLIM_CL_PER_SL/2) + (ctrl->a_framer->rootfreq/2 - 1)) / (ctrl->a_framer->rootfreq/2); } dev_dbg(&ctrl->dev, "request:bw:%d, slots:%d, current:%d\n", bw_bps, sl, sb->cur_msgsl); sb->pending_msgsl = sl; if (commit == true) ret = slim_reconfigure_now(sb); mutex_unlock(&sb->sldev_reconf); return ret; } EXPORT_SYMBOL_GPL(slim_reservemsg_bw); /* * slim_ctrl_clk_pause: Called by slimbus controller to request clock to be * paused or woken up out of clock pause * or woken up from clock pause * @ctrl: controller requesting bus to be paused or woken up * @wakeup: Wakeup this controller from clock pause. * @restart: Restart time value per spec used for clock pause. This value * isn't used when controller is to be woken up. * This API executes clock pause reconfiguration sequence if wakeup is false. * If wakeup is true, controller's wakeup is called * Slimbus clock is idle and can be disabled by the controller later. 
*/ int slim_ctrl_clk_pause(struct slim_controller *ctrl, bool wakeup, u8 restart) { int ret = 0; int i; if (wakeup == false && restart > SLIM_CLK_UNSPECIFIED) return -EINVAL; mutex_lock(&ctrl->m_ctrl); if (wakeup) { if (ctrl->clk_state == SLIM_CLK_ACTIVE) { mutex_unlock(&ctrl->m_ctrl); return 0; } wait_for_completion(&ctrl->pause_comp); /* * Slimbus framework will call controller wakeup * Controller should make sure that it sets active framer * out of clock pause by doing appropriate setting */ if (ctrl->clk_state == SLIM_CLK_PAUSED && ctrl->wakeup) ret = ctrl->wakeup(ctrl); /* * If wakeup fails, make sure that next attempt can succeed. * Since we already consumed pause_comp, complete it so * that next wakeup isn't blocked forever */ if (!ret) ctrl->clk_state = SLIM_CLK_ACTIVE; else complete(&ctrl->pause_comp); mutex_unlock(&ctrl->m_ctrl); return ret; } else { switch (ctrl->clk_state) { case SLIM_CLK_ENTERING_PAUSE: case SLIM_CLK_PAUSE_FAILED: /* * If controller is already trying to enter clock pause, * let it finish. * In case of error, retry * In both cases, previous clock pause has signalled * completion. */ wait_for_completion(&ctrl->pause_comp); /* retry upon failure */ if (ctrl->clk_state == SLIM_CLK_PAUSE_FAILED) { ctrl->clk_state = SLIM_CLK_ACTIVE; break; } else { mutex_unlock(&ctrl->m_ctrl); /* * Signal completion so that wakeup can wait on * it. 
*/ complete(&ctrl->pause_comp); return 0; } break; case SLIM_CLK_PAUSED: /* already paused */ mutex_unlock(&ctrl->m_ctrl); return 0; case SLIM_CLK_ACTIVE: default: break; } } /* Pending response for a message */ for (i = 0; i < ctrl->last_tid; i++) { if (ctrl->txnt[i]) { ret = -EBUSY; pr_info("slim_clk_pause: txn-rsp for %d pending", i); mutex_unlock(&ctrl->m_ctrl); return -EBUSY; } } ctrl->clk_state = SLIM_CLK_ENTERING_PAUSE; mutex_unlock(&ctrl->m_ctrl); mutex_lock(&ctrl->sched.m_reconf); /* Data channels active */ if (ctrl->sched.usedslots) { pr_info("slim_clk_pause: data channel active"); ret = -EBUSY; goto clk_pause_ret; } ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST, SLIM_MSG_CLK_PAUSE_SEQ_FLG | SLIM_MSG_MC_BEGIN_RECONFIGURATION, 0, SLIM_MSG_MT_CORE, NULL, NULL, 0, 3, NULL, 0, NULL); if (ret) goto clk_pause_ret; ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST, SLIM_MSG_CLK_PAUSE_SEQ_FLG | SLIM_MSG_MC_NEXT_PAUSE_CLOCK, 0, SLIM_MSG_MT_CORE, NULL, &restart, 1, 4, NULL, 0, NULL); if (ret) goto clk_pause_ret; ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST, SLIM_MSG_CLK_PAUSE_SEQ_FLG | SLIM_MSG_MC_RECONFIGURE_NOW, 0, SLIM_MSG_MT_CORE, NULL, NULL, 0, 3, NULL, 0, NULL); if (ret) goto clk_pause_ret; clk_pause_ret: if (ret) ctrl->clk_state = SLIM_CLK_PAUSE_FAILED; else ctrl->clk_state = SLIM_CLK_PAUSED; complete(&ctrl->pause_comp); mutex_unlock(&ctrl->sched.m_reconf); return ret; } EXPORT_SYMBOL_GPL(slim_ctrl_clk_pause); MODULE_LICENSE("GPL v2"); MODULE_VERSION("0.1"); MODULE_DESCRIPTION("Slimbus module"); MODULE_ALIAS("platform:slimbus");
gpl-2.0
mopplayer/OK6410-Kernel4.1.4-With-Ubuntu9.04
arch/arm/common/sa1111.c
429
37449
/* * linux/arch/arm/common/sa1111.c * * SA1111 support * * Original code by John Dorsey * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This file contains all generic SA1111 support. * * All initialization functions provided here are intended to be called * from machine specific code with proper arguments when required. */ #include <linux/module.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/dma-mapping.h> #include <linux/clk.h> #include <linux/io.h> #include <mach/hardware.h> #include <asm/mach/irq.h> #include <asm/mach-types.h> #include <asm/sizes.h> #include <asm/hardware/sa1111.h> /* SA1111 IRQs */ #define IRQ_GPAIN0 (0) #define IRQ_GPAIN1 (1) #define IRQ_GPAIN2 (2) #define IRQ_GPAIN3 (3) #define IRQ_GPBIN0 (4) #define IRQ_GPBIN1 (5) #define IRQ_GPBIN2 (6) #define IRQ_GPBIN3 (7) #define IRQ_GPBIN4 (8) #define IRQ_GPBIN5 (9) #define IRQ_GPCIN0 (10) #define IRQ_GPCIN1 (11) #define IRQ_GPCIN2 (12) #define IRQ_GPCIN3 (13) #define IRQ_GPCIN4 (14) #define IRQ_GPCIN5 (15) #define IRQ_GPCIN6 (16) #define IRQ_GPCIN7 (17) #define IRQ_MSTXINT (18) #define IRQ_MSRXINT (19) #define IRQ_MSSTOPERRINT (20) #define IRQ_TPTXINT (21) #define IRQ_TPRXINT (22) #define IRQ_TPSTOPERRINT (23) #define SSPXMTINT (24) #define SSPRCVINT (25) #define SSPROR (26) #define AUDXMTDMADONEA (32) #define AUDRCVDMADONEA (33) #define AUDXMTDMADONEB (34) #define AUDRCVDMADONEB (35) #define AUDTFSR (36) #define AUDRFSR (37) #define AUDTUR (38) #define AUDROR (39) #define AUDDTS (40) #define AUDRDD (41) #define AUDSTO (42) #define IRQ_USBPWR (43) #define IRQ_HCIM (44) #define IRQ_HCIBUFFACC (45) #define IRQ_HCIRMTWKP (46) #define IRQ_NHCIMFCIR (47) #define 
IRQ_USB_PORT_RESUME (48) #define IRQ_S0_READY_NINT (49) #define IRQ_S1_READY_NINT (50) #define IRQ_S0_CD_VALID (51) #define IRQ_S1_CD_VALID (52) #define IRQ_S0_BVD1_STSCHG (53) #define IRQ_S1_BVD1_STSCHG (54) #define SA1111_IRQ_NR (55) extern void sa1110_mb_enable(void); extern void sa1110_mb_disable(void); /* * We keep the following data for the overall SA1111. Note that the * struct device and struct resource are "fake"; they should be supplied * by the bus above us. However, in the interests of getting all SA1111 * drivers converted over to the device model, we provide this as an * anchor point for all the other drivers. */ struct sa1111 { struct device *dev; struct clk *clk; unsigned long phys; int irq; int irq_base; /* base for cascaded on-chip IRQs */ spinlock_t lock; void __iomem *base; struct sa1111_platform_data *pdata; #ifdef CONFIG_PM void *saved_state; #endif }; /* * We _really_ need to eliminate this. Its only users * are the PWM and DMA checking code. */ static struct sa1111 *g_sa1111; struct sa1111_dev_info { unsigned long offset; unsigned long skpcr_mask; bool dma; unsigned int devid; unsigned int irq[6]; }; static struct sa1111_dev_info sa1111_devices[] = { { .offset = SA1111_USB, .skpcr_mask = SKPCR_UCLKEN, .dma = true, .devid = SA1111_DEVID_USB, .irq = { IRQ_USBPWR, IRQ_HCIM, IRQ_HCIBUFFACC, IRQ_HCIRMTWKP, IRQ_NHCIMFCIR, IRQ_USB_PORT_RESUME }, }, { .offset = 0x0600, .skpcr_mask = SKPCR_I2SCLKEN | SKPCR_L3CLKEN, .dma = true, .devid = SA1111_DEVID_SAC, .irq = { AUDXMTDMADONEA, AUDXMTDMADONEB, AUDRCVDMADONEA, AUDRCVDMADONEB }, }, { .offset = 0x0800, .skpcr_mask = SKPCR_SCLKEN, .devid = SA1111_DEVID_SSP, }, { .offset = SA1111_KBD, .skpcr_mask = SKPCR_PTCLKEN, .devid = SA1111_DEVID_PS2_KBD, .irq = { IRQ_TPRXINT, IRQ_TPTXINT }, }, { .offset = SA1111_MSE, .skpcr_mask = SKPCR_PMCLKEN, .devid = SA1111_DEVID_PS2_MSE, .irq = { IRQ_MSRXINT, IRQ_MSTXINT }, }, { .offset = 0x1800, .skpcr_mask = 0, .devid = SA1111_DEVID_PCMCIA, .irq = { IRQ_S0_READY_NINT, 
IRQ_S0_CD_VALID, IRQ_S0_BVD1_STSCHG, IRQ_S1_READY_NINT, IRQ_S1_CD_VALID, IRQ_S1_BVD1_STSCHG, }, }, }; /* * SA1111 interrupt support. Since clearing an IRQ while there are * active IRQs causes the interrupt output to pulse, the upper levels * will call us again if there are more interrupts to process. */ static void sa1111_irq_handler(unsigned int irq, struct irq_desc *desc) { unsigned int stat0, stat1, i; struct sa1111 *sachip = irq_get_handler_data(irq); void __iomem *mapbase = sachip->base + SA1111_INTC; stat0 = sa1111_readl(mapbase + SA1111_INTSTATCLR0); stat1 = sa1111_readl(mapbase + SA1111_INTSTATCLR1); sa1111_writel(stat0, mapbase + SA1111_INTSTATCLR0); desc->irq_data.chip->irq_ack(&desc->irq_data); sa1111_writel(stat1, mapbase + SA1111_INTSTATCLR1); if (stat0 == 0 && stat1 == 0) { do_bad_IRQ(irq, desc); return; } for (i = 0; stat0; i++, stat0 >>= 1) if (stat0 & 1) generic_handle_irq(i + sachip->irq_base); for (i = 32; stat1; i++, stat1 >>= 1) if (stat1 & 1) generic_handle_irq(i + sachip->irq_base); /* For level-based interrupts */ desc->irq_data.chip->irq_unmask(&desc->irq_data); } #define SA1111_IRQMASK_LO(x) (1 << (x - sachip->irq_base)) #define SA1111_IRQMASK_HI(x) (1 << (x - sachip->irq_base - 32)) static void sa1111_ack_irq(struct irq_data *d) { } static void sa1111_mask_lowirq(struct irq_data *d) { struct sa1111 *sachip = irq_data_get_irq_chip_data(d); void __iomem *mapbase = sachip->base + SA1111_INTC; unsigned long ie0; ie0 = sa1111_readl(mapbase + SA1111_INTEN0); ie0 &= ~SA1111_IRQMASK_LO(d->irq); writel(ie0, mapbase + SA1111_INTEN0); } static void sa1111_unmask_lowirq(struct irq_data *d) { struct sa1111 *sachip = irq_data_get_irq_chip_data(d); void __iomem *mapbase = sachip->base + SA1111_INTC; unsigned long ie0; ie0 = sa1111_readl(mapbase + SA1111_INTEN0); ie0 |= SA1111_IRQMASK_LO(d->irq); sa1111_writel(ie0, mapbase + SA1111_INTEN0); } /* * Attempt to re-trigger the interrupt. The SA1111 contains a register * (INTSET) which claims to do this. 
However, in practice no amount of * manipulation of INTEN and INTSET guarantees that the interrupt will * be triggered. In fact, its very difficult, if not impossible to get * INTSET to re-trigger the interrupt. */ static int sa1111_retrigger_lowirq(struct irq_data *d) { struct sa1111 *sachip = irq_data_get_irq_chip_data(d); void __iomem *mapbase = sachip->base + SA1111_INTC; unsigned int mask = SA1111_IRQMASK_LO(d->irq); unsigned long ip0; int i; ip0 = sa1111_readl(mapbase + SA1111_INTPOL0); for (i = 0; i < 8; i++) { sa1111_writel(ip0 ^ mask, mapbase + SA1111_INTPOL0); sa1111_writel(ip0, mapbase + SA1111_INTPOL0); if (sa1111_readl(mapbase + SA1111_INTSTATCLR0) & mask) break; } if (i == 8) pr_err("Danger Will Robinson: failed to re-trigger IRQ%d\n", d->irq); return i == 8 ? -1 : 0; } static int sa1111_type_lowirq(struct irq_data *d, unsigned int flags) { struct sa1111 *sachip = irq_data_get_irq_chip_data(d); void __iomem *mapbase = sachip->base + SA1111_INTC; unsigned int mask = SA1111_IRQMASK_LO(d->irq); unsigned long ip0; if (flags == IRQ_TYPE_PROBE) return 0; if ((!(flags & IRQ_TYPE_EDGE_RISING) ^ !(flags & IRQ_TYPE_EDGE_FALLING)) == 0) return -EINVAL; ip0 = sa1111_readl(mapbase + SA1111_INTPOL0); if (flags & IRQ_TYPE_EDGE_RISING) ip0 &= ~mask; else ip0 |= mask; sa1111_writel(ip0, mapbase + SA1111_INTPOL0); sa1111_writel(ip0, mapbase + SA1111_WAKEPOL0); return 0; } static int sa1111_wake_lowirq(struct irq_data *d, unsigned int on) { struct sa1111 *sachip = irq_data_get_irq_chip_data(d); void __iomem *mapbase = sachip->base + SA1111_INTC; unsigned int mask = SA1111_IRQMASK_LO(d->irq); unsigned long we0; we0 = sa1111_readl(mapbase + SA1111_WAKEEN0); if (on) we0 |= mask; else we0 &= ~mask; sa1111_writel(we0, mapbase + SA1111_WAKEEN0); return 0; } static struct irq_chip sa1111_low_chip = { .name = "SA1111-l", .irq_ack = sa1111_ack_irq, .irq_mask = sa1111_mask_lowirq, .irq_unmask = sa1111_unmask_lowirq, .irq_retrigger = sa1111_retrigger_lowirq, .irq_set_type = 
sa1111_type_lowirq, .irq_set_wake = sa1111_wake_lowirq, }; static void sa1111_mask_highirq(struct irq_data *d) { struct sa1111 *sachip = irq_data_get_irq_chip_data(d); void __iomem *mapbase = sachip->base + SA1111_INTC; unsigned long ie1; ie1 = sa1111_readl(mapbase + SA1111_INTEN1); ie1 &= ~SA1111_IRQMASK_HI(d->irq); sa1111_writel(ie1, mapbase + SA1111_INTEN1); } static void sa1111_unmask_highirq(struct irq_data *d) { struct sa1111 *sachip = irq_data_get_irq_chip_data(d); void __iomem *mapbase = sachip->base + SA1111_INTC; unsigned long ie1; ie1 = sa1111_readl(mapbase + SA1111_INTEN1); ie1 |= SA1111_IRQMASK_HI(d->irq); sa1111_writel(ie1, mapbase + SA1111_INTEN1); } /* * Attempt to re-trigger the interrupt. The SA1111 contains a register * (INTSET) which claims to do this. However, in practice no amount of * manipulation of INTEN and INTSET guarantees that the interrupt will * be triggered. In fact, its very difficult, if not impossible to get * INTSET to re-trigger the interrupt. */ static int sa1111_retrigger_highirq(struct irq_data *d) { struct sa1111 *sachip = irq_data_get_irq_chip_data(d); void __iomem *mapbase = sachip->base + SA1111_INTC; unsigned int mask = SA1111_IRQMASK_HI(d->irq); unsigned long ip1; int i; ip1 = sa1111_readl(mapbase + SA1111_INTPOL1); for (i = 0; i < 8; i++) { sa1111_writel(ip1 ^ mask, mapbase + SA1111_INTPOL1); sa1111_writel(ip1, mapbase + SA1111_INTPOL1); if (sa1111_readl(mapbase + SA1111_INTSTATCLR1) & mask) break; } if (i == 8) pr_err("Danger Will Robinson: failed to re-trigger IRQ%d\n", d->irq); return i == 8 ? 
-1 : 0; } static int sa1111_type_highirq(struct irq_data *d, unsigned int flags) { struct sa1111 *sachip = irq_data_get_irq_chip_data(d); void __iomem *mapbase = sachip->base + SA1111_INTC; unsigned int mask = SA1111_IRQMASK_HI(d->irq); unsigned long ip1; if (flags == IRQ_TYPE_PROBE) return 0; if ((!(flags & IRQ_TYPE_EDGE_RISING) ^ !(flags & IRQ_TYPE_EDGE_FALLING)) == 0) return -EINVAL; ip1 = sa1111_readl(mapbase + SA1111_INTPOL1); if (flags & IRQ_TYPE_EDGE_RISING) ip1 &= ~mask; else ip1 |= mask; sa1111_writel(ip1, mapbase + SA1111_INTPOL1); sa1111_writel(ip1, mapbase + SA1111_WAKEPOL1); return 0; } static int sa1111_wake_highirq(struct irq_data *d, unsigned int on) { struct sa1111 *sachip = irq_data_get_irq_chip_data(d); void __iomem *mapbase = sachip->base + SA1111_INTC; unsigned int mask = SA1111_IRQMASK_HI(d->irq); unsigned long we1; we1 = sa1111_readl(mapbase + SA1111_WAKEEN1); if (on) we1 |= mask; else we1 &= ~mask; sa1111_writel(we1, mapbase + SA1111_WAKEEN1); return 0; } static struct irq_chip sa1111_high_chip = { .name = "SA1111-h", .irq_ack = sa1111_ack_irq, .irq_mask = sa1111_mask_highirq, .irq_unmask = sa1111_unmask_highirq, .irq_retrigger = sa1111_retrigger_highirq, .irq_set_type = sa1111_type_highirq, .irq_set_wake = sa1111_wake_highirq, }; static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base) { void __iomem *irqbase = sachip->base + SA1111_INTC; unsigned i, irq; int ret; /* * We're guaranteed that this region hasn't been taken. */ request_mem_region(sachip->phys + SA1111_INTC, 512, "irq"); ret = irq_alloc_descs(-1, irq_base, SA1111_IRQ_NR, -1); if (ret <= 0) { dev_err(sachip->dev, "unable to allocate %u irqs: %d\n", SA1111_IRQ_NR, ret); if (ret == 0) ret = -EINVAL; return ret; } sachip->irq_base = ret; /* disable all IRQs */ sa1111_writel(0, irqbase + SA1111_INTEN0); sa1111_writel(0, irqbase + SA1111_INTEN1); sa1111_writel(0, irqbase + SA1111_WAKEEN0); sa1111_writel(0, irqbase + SA1111_WAKEEN1); /* * detect on rising edge. 
Note: Feb 2001 Errata for SA1111 * specifies that S0ReadyInt and S1ReadyInt should be '1'. */ sa1111_writel(0, irqbase + SA1111_INTPOL0); sa1111_writel(SA1111_IRQMASK_HI(IRQ_S0_READY_NINT) | SA1111_IRQMASK_HI(IRQ_S1_READY_NINT), irqbase + SA1111_INTPOL1); /* clear all IRQs */ sa1111_writel(~0, irqbase + SA1111_INTSTATCLR0); sa1111_writel(~0, irqbase + SA1111_INTSTATCLR1); for (i = IRQ_GPAIN0; i <= SSPROR; i++) { irq = sachip->irq_base + i; irq_set_chip_and_handler(irq, &sa1111_low_chip, handle_edge_irq); irq_set_chip_data(irq, sachip); set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); } for (i = AUDXMTDMADONEA; i <= IRQ_S1_BVD1_STSCHG; i++) { irq = sachip->irq_base + i; irq_set_chip_and_handler(irq, &sa1111_high_chip, handle_edge_irq); irq_set_chip_data(irq, sachip); set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); } /* * Register SA1111 interrupt */ irq_set_irq_type(sachip->irq, IRQ_TYPE_EDGE_RISING); irq_set_handler_data(sachip->irq, sachip); irq_set_chained_handler(sachip->irq, sa1111_irq_handler); dev_info(sachip->dev, "Providing IRQ%u-%u\n", sachip->irq_base, sachip->irq_base + SA1111_IRQ_NR - 1); return 0; } /* * Bring the SA1111 out of reset. This requires a set procedure: * 1. nRESET asserted (by hardware) * 2. CLK turned on from SA1110 * 3. nRESET deasserted * 4. VCO turned on, PLL_BYPASS turned off * 5. Wait lock time, then assert RCLKEn * 7. PCR set to allow clocking of individual functions * * Until we've done this, the only registers we can access are: * SBI_SKCR * SBI_SMCR * SBI_SKID */ static void sa1111_wake(struct sa1111 *sachip) { unsigned long flags, r; spin_lock_irqsave(&sachip->lock, flags); clk_enable(sachip->clk); /* * Turn VCO on, and disable PLL Bypass. */ r = sa1111_readl(sachip->base + SA1111_SKCR); r &= ~SKCR_VCO_OFF; sa1111_writel(r, sachip->base + SA1111_SKCR); r |= SKCR_PLL_BYPASS | SKCR_OE_EN; sa1111_writel(r, sachip->base + SA1111_SKCR); /* * Wait lock time. SA1111 manual _doesn't_ * specify a figure for this! We choose 100us. 
*/ udelay(100); /* * Enable RCLK. We also ensure that RDYEN is set. */ r |= SKCR_RCLKEN | SKCR_RDYEN; sa1111_writel(r, sachip->base + SA1111_SKCR); /* * Wait 14 RCLK cycles for the chip to finish coming out * of reset. (RCLK=24MHz). This is 590ns. */ udelay(1); /* * Ensure all clocks are initially off. */ sa1111_writel(0, sachip->base + SA1111_SKPCR); spin_unlock_irqrestore(&sachip->lock, flags); } #ifdef CONFIG_ARCH_SA1100 static u32 sa1111_dma_mask[] = { ~0, ~(1 << 20), ~(1 << 23), ~(1 << 24), ~(1 << 25), ~(1 << 20), ~(1 << 20), 0, }; /* * Configure the SA1111 shared memory controller. */ void sa1111_configure_smc(struct sa1111 *sachip, int sdram, unsigned int drac, unsigned int cas_latency) { unsigned int smcr = SMCR_DTIM | SMCR_MBGE | FInsrt(drac, SMCR_DRAC); if (cas_latency == 3) smcr |= SMCR_CLAT; sa1111_writel(smcr, sachip->base + SA1111_SMCR); /* * Now clear the bits in the DMA mask to work around the SA1111 * DMA erratum (Intel StrongARM SA-1111 Microprocessor Companion * Chip Specification Update, June 2000, Erratum #7). 
*/ if (sachip->dev->dma_mask) *sachip->dev->dma_mask &= sa1111_dma_mask[drac >> 2]; sachip->dev->coherent_dma_mask &= sa1111_dma_mask[drac >> 2]; } #endif static void sa1111_dev_release(struct device *_dev) { struct sa1111_dev *dev = SA1111_DEV(_dev); kfree(dev); } static int sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent, struct sa1111_dev_info *info) { struct sa1111_dev *dev; unsigned i; int ret; dev = kzalloc(sizeof(struct sa1111_dev), GFP_KERNEL); if (!dev) { ret = -ENOMEM; goto err_alloc; } device_initialize(&dev->dev); dev_set_name(&dev->dev, "%4.4lx", info->offset); dev->devid = info->devid; dev->dev.parent = sachip->dev; dev->dev.bus = &sa1111_bus_type; dev->dev.release = sa1111_dev_release; dev->res.start = sachip->phys + info->offset; dev->res.end = dev->res.start + 511; dev->res.name = dev_name(&dev->dev); dev->res.flags = IORESOURCE_MEM; dev->mapbase = sachip->base + info->offset; dev->skpcr_mask = info->skpcr_mask; for (i = 0; i < ARRAY_SIZE(info->irq); i++) dev->irq[i] = sachip->irq_base + info->irq[i]; /* * If the parent device has a DMA mask associated with it, and * this child supports DMA, propagate it down to the children. */ if (info->dma && sachip->dev->dma_mask) { dev->dma_mask = *sachip->dev->dma_mask; dev->dev.dma_mask = &dev->dma_mask; dev->dev.coherent_dma_mask = sachip->dev->coherent_dma_mask; } ret = request_resource(parent, &dev->res); if (ret) { dev_err(sachip->dev, "failed to allocate resource for %s\n", dev->res.name); goto err_resource; } ret = device_add(&dev->dev); if (ret) goto err_add; return 0; err_add: release_resource(&dev->res); err_resource: put_device(&dev->dev); err_alloc: return ret; } /** * sa1111_probe - probe for a single SA1111 chip. * @phys_addr: physical address of device. * * Probe for a SA1111 chip. This must be called * before any other SA1111-specific code. * * Returns: * %-ENODEV device not found. * %-EBUSY physical address already marked in-use. 
* %-EINVAL no platform data passed * %0 successful. */ static int __sa1111_probe(struct device *me, struct resource *mem, int irq) { struct sa1111_platform_data *pd = me->platform_data; struct sa1111 *sachip; unsigned long id; unsigned int has_devs; int i, ret = -ENODEV; if (!pd) return -EINVAL; sachip = kzalloc(sizeof(struct sa1111), GFP_KERNEL); if (!sachip) return -ENOMEM; sachip->clk = clk_get(me, "SA1111_CLK"); if (IS_ERR(sachip->clk)) { ret = PTR_ERR(sachip->clk); goto err_free; } ret = clk_prepare(sachip->clk); if (ret) goto err_clkput; spin_lock_init(&sachip->lock); sachip->dev = me; dev_set_drvdata(sachip->dev, sachip); sachip->pdata = pd; sachip->phys = mem->start; sachip->irq = irq; /* * Map the whole region. This also maps the * registers for our children. */ sachip->base = ioremap(mem->start, PAGE_SIZE * 2); if (!sachip->base) { ret = -ENOMEM; goto err_clk_unprep; } /* * Probe for the chip. Only touch the SBI registers. */ id = sa1111_readl(sachip->base + SA1111_SKID); if ((id & SKID_ID_MASK) != SKID_SA1111_ID) { printk(KERN_DEBUG "SA1111 not detected: ID = %08lx\n", id); ret = -ENODEV; goto err_unmap; } pr_info("SA1111 Microprocessor Companion Chip: silicon revision %lx, metal revision %lx\n", (id & SKID_SIREV_MASK) >> 4, id & SKID_MTREV_MASK); /* * We found it. Wake the chip up, and initialise. */ sa1111_wake(sachip); /* * The interrupt controller must be initialised before any * other device to ensure that the interrupts are available. */ if (sachip->irq != NO_IRQ) { ret = sa1111_setup_irq(sachip, pd->irq_base); if (ret) goto err_unmap; } #ifdef CONFIG_ARCH_SA1100 { unsigned int val; /* * The SDRAM configuration of the SA1110 and the SA1111 must * match. This is very important to ensure that SA1111 accesses * don't corrupt the SDRAM. Note that this ungates the SA1111's * MBGNT signal, so we must have called sa1110_mb_disable() * beforehand. 
*/ sa1111_configure_smc(sachip, 1, FExtr(MDCNFG, MDCNFG_SA1110_DRAC0), FExtr(MDCNFG, MDCNFG_SA1110_TDL0)); /* * We only need to turn on DCLK whenever we want to use the * DMA. It can otherwise be held firmly in the off position. * (currently, we always enable it.) */ val = sa1111_readl(sachip->base + SA1111_SKPCR); sa1111_writel(val | SKPCR_DCLKEN, sachip->base + SA1111_SKPCR); /* * Enable the SA1110 memory bus request and grant signals. */ sa1110_mb_enable(); } #endif g_sa1111 = sachip; has_devs = ~0; if (pd) has_devs &= ~pd->disable_devs; for (i = 0; i < ARRAY_SIZE(sa1111_devices); i++) if (sa1111_devices[i].devid & has_devs) sa1111_init_one_child(sachip, mem, &sa1111_devices[i]); return 0; err_unmap: iounmap(sachip->base); err_clk_unprep: clk_unprepare(sachip->clk); err_clkput: clk_put(sachip->clk); err_free: kfree(sachip); return ret; } static int sa1111_remove_one(struct device *dev, void *data) { struct sa1111_dev *sadev = SA1111_DEV(dev); device_del(&sadev->dev); release_resource(&sadev->res); put_device(&sadev->dev); return 0; } static void __sa1111_remove(struct sa1111 *sachip) { void __iomem *irqbase = sachip->base + SA1111_INTC; device_for_each_child(sachip->dev, NULL, sa1111_remove_one); /* disable all IRQs */ sa1111_writel(0, irqbase + SA1111_INTEN0); sa1111_writel(0, irqbase + SA1111_INTEN1); sa1111_writel(0, irqbase + SA1111_WAKEEN0); sa1111_writel(0, irqbase + SA1111_WAKEEN1); clk_disable(sachip->clk); clk_unprepare(sachip->clk); if (sachip->irq != NO_IRQ) { irq_set_chained_handler(sachip->irq, NULL); irq_set_handler_data(sachip->irq, NULL); irq_free_descs(sachip->irq_base, SA1111_IRQ_NR); release_mem_region(sachip->phys + SA1111_INTC, 512); } iounmap(sachip->base); clk_put(sachip->clk); kfree(sachip); } struct sa1111_save_data { unsigned int skcr; unsigned int skpcr; unsigned int skcdr; unsigned char skaud; unsigned char skpwm0; unsigned char skpwm1; /* * Interrupt controller */ unsigned int intpol0; unsigned int intpol1; unsigned int inten0; 
unsigned int inten1; unsigned int wakepol0; unsigned int wakepol1; unsigned int wakeen0; unsigned int wakeen1; }; #ifdef CONFIG_PM static int sa1111_suspend(struct platform_device *dev, pm_message_t state) { struct sa1111 *sachip = platform_get_drvdata(dev); struct sa1111_save_data *save; unsigned long flags; unsigned int val; void __iomem *base; save = kmalloc(sizeof(struct sa1111_save_data), GFP_KERNEL); if (!save) return -ENOMEM; sachip->saved_state = save; spin_lock_irqsave(&sachip->lock, flags); /* * Save state. */ base = sachip->base; save->skcr = sa1111_readl(base + SA1111_SKCR); save->skpcr = sa1111_readl(base + SA1111_SKPCR); save->skcdr = sa1111_readl(base + SA1111_SKCDR); save->skaud = sa1111_readl(base + SA1111_SKAUD); save->skpwm0 = sa1111_readl(base + SA1111_SKPWM0); save->skpwm1 = sa1111_readl(base + SA1111_SKPWM1); sa1111_writel(0, sachip->base + SA1111_SKPWM0); sa1111_writel(0, sachip->base + SA1111_SKPWM1); base = sachip->base + SA1111_INTC; save->intpol0 = sa1111_readl(base + SA1111_INTPOL0); save->intpol1 = sa1111_readl(base + SA1111_INTPOL1); save->inten0 = sa1111_readl(base + SA1111_INTEN0); save->inten1 = sa1111_readl(base + SA1111_INTEN1); save->wakepol0 = sa1111_readl(base + SA1111_WAKEPOL0); save->wakepol1 = sa1111_readl(base + SA1111_WAKEPOL1); save->wakeen0 = sa1111_readl(base + SA1111_WAKEEN0); save->wakeen1 = sa1111_readl(base + SA1111_WAKEEN1); /* * Disable. */ val = sa1111_readl(sachip->base + SA1111_SKCR); sa1111_writel(val | SKCR_SLEEP, sachip->base + SA1111_SKCR); clk_disable(sachip->clk); spin_unlock_irqrestore(&sachip->lock, flags); #ifdef CONFIG_ARCH_SA1100 sa1110_mb_disable(); #endif return 0; } /* * sa1111_resume - Restore the SA1111 device state. * @dev: device to restore * * Restore the general state of the SA1111; clock control and * interrupt controller. Other parts of the SA1111 must be * restored by their respective drivers, and must be called * via LDM after this function. 
*/ static int sa1111_resume(struct platform_device *dev) { struct sa1111 *sachip = platform_get_drvdata(dev); struct sa1111_save_data *save; unsigned long flags, id; void __iomem *base; save = sachip->saved_state; if (!save) return 0; /* * Ensure that the SA1111 is still here. * FIXME: shouldn't do this here. */ id = sa1111_readl(sachip->base + SA1111_SKID); if ((id & SKID_ID_MASK) != SKID_SA1111_ID) { __sa1111_remove(sachip); platform_set_drvdata(dev, NULL); kfree(save); return 0; } /* * First of all, wake up the chip. */ sa1111_wake(sachip); #ifdef CONFIG_ARCH_SA1100 /* Enable the memory bus request/grant signals */ sa1110_mb_enable(); #endif /* * Only lock for write ops. Also, sa1111_wake must be called with * released spinlock! */ spin_lock_irqsave(&sachip->lock, flags); sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN0); sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN1); base = sachip->base; sa1111_writel(save->skcr, base + SA1111_SKCR); sa1111_writel(save->skpcr, base + SA1111_SKPCR); sa1111_writel(save->skcdr, base + SA1111_SKCDR); sa1111_writel(save->skaud, base + SA1111_SKAUD); sa1111_writel(save->skpwm0, base + SA1111_SKPWM0); sa1111_writel(save->skpwm1, base + SA1111_SKPWM1); base = sachip->base + SA1111_INTC; sa1111_writel(save->intpol0, base + SA1111_INTPOL0); sa1111_writel(save->intpol1, base + SA1111_INTPOL1); sa1111_writel(save->inten0, base + SA1111_INTEN0); sa1111_writel(save->inten1, base + SA1111_INTEN1); sa1111_writel(save->wakepol0, base + SA1111_WAKEPOL0); sa1111_writel(save->wakepol1, base + SA1111_WAKEPOL1); sa1111_writel(save->wakeen0, base + SA1111_WAKEEN0); sa1111_writel(save->wakeen1, base + SA1111_WAKEEN1); spin_unlock_irqrestore(&sachip->lock, flags); sachip->saved_state = NULL; kfree(save); return 0; } #else #define sa1111_suspend NULL #define sa1111_resume NULL #endif static int sa1111_probe(struct platform_device *pdev) { struct resource *mem; int irq; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if 
(!mem) return -EINVAL; irq = platform_get_irq(pdev, 0); if (irq < 0) return -ENXIO; return __sa1111_probe(&pdev->dev, mem, irq); } static int sa1111_remove(struct platform_device *pdev) { struct sa1111 *sachip = platform_get_drvdata(pdev); if (sachip) { #ifdef CONFIG_PM kfree(sachip->saved_state); sachip->saved_state = NULL; #endif __sa1111_remove(sachip); platform_set_drvdata(pdev, NULL); } return 0; } /* * Not sure if this should be on the system bus or not yet. * We really want some way to register a system device at * the per-machine level, and then have this driver pick * up the registered devices. * * We also need to handle the SDRAM configuration for * PXA250/SA1110 machine classes. */ static struct platform_driver sa1111_device_driver = { .probe = sa1111_probe, .remove = sa1111_remove, .suspend = sa1111_suspend, .resume = sa1111_resume, .driver = { .name = "sa1111", }, }; /* * Get the parent device driver (us) structure * from a child function device */ static inline struct sa1111 *sa1111_chip_driver(struct sa1111_dev *sadev) { return (struct sa1111 *)dev_get_drvdata(sadev->dev.parent); } /* * The bits in the opdiv field are non-linear. */ static unsigned char opdiv_table[] = { 1, 4, 2, 8 }; static unsigned int __sa1111_pll_clock(struct sa1111 *sachip) { unsigned int skcdr, fbdiv, ipdiv, opdiv; skcdr = sa1111_readl(sachip->base + SA1111_SKCDR); fbdiv = (skcdr & 0x007f) + 2; ipdiv = ((skcdr & 0x0f80) >> 7) + 2; opdiv = opdiv_table[(skcdr & 0x3000) >> 12]; return 3686400 * fbdiv / (ipdiv * opdiv); } /** * sa1111_pll_clock - return the current PLL clock frequency. * @sadev: SA1111 function block * * BUG: we should look at SKCR. We also blindly believe that * the chip is being fed with the 3.6864MHz clock. * * Returns the PLL clock in Hz. 
*/ unsigned int sa1111_pll_clock(struct sa1111_dev *sadev) { struct sa1111 *sachip = sa1111_chip_driver(sadev); return __sa1111_pll_clock(sachip); } EXPORT_SYMBOL(sa1111_pll_clock); /** * sa1111_select_audio_mode - select I2S or AC link mode * @sadev: SA1111 function block * @mode: One of %SA1111_AUDIO_ACLINK or %SA1111_AUDIO_I2S * * Frob the SKCR to select AC Link mode or I2S mode for * the audio block. */ void sa1111_select_audio_mode(struct sa1111_dev *sadev, int mode) { struct sa1111 *sachip = sa1111_chip_driver(sadev); unsigned long flags; unsigned int val; spin_lock_irqsave(&sachip->lock, flags); val = sa1111_readl(sachip->base + SA1111_SKCR); if (mode == SA1111_AUDIO_I2S) { val &= ~SKCR_SELAC; } else { val |= SKCR_SELAC; } sa1111_writel(val, sachip->base + SA1111_SKCR); spin_unlock_irqrestore(&sachip->lock, flags); } EXPORT_SYMBOL(sa1111_select_audio_mode); /** * sa1111_set_audio_rate - set the audio sample rate * @sadev: SA1111 SAC function block * @rate: sample rate to select */ int sa1111_set_audio_rate(struct sa1111_dev *sadev, int rate) { struct sa1111 *sachip = sa1111_chip_driver(sadev); unsigned int div; if (sadev->devid != SA1111_DEVID_SAC) return -EINVAL; div = (__sa1111_pll_clock(sachip) / 256 + rate / 2) / rate; if (div == 0) div = 1; if (div > 128) div = 128; sa1111_writel(div - 1, sachip->base + SA1111_SKAUD); return 0; } EXPORT_SYMBOL(sa1111_set_audio_rate); /** * sa1111_get_audio_rate - get the audio sample rate * @sadev: SA1111 SAC function block device */ int sa1111_get_audio_rate(struct sa1111_dev *sadev) { struct sa1111 *sachip = sa1111_chip_driver(sadev); unsigned long div; if (sadev->devid != SA1111_DEVID_SAC) return -EINVAL; div = sa1111_readl(sachip->base + SA1111_SKAUD) + 1; return __sa1111_pll_clock(sachip) / (256 * div); } EXPORT_SYMBOL(sa1111_get_audio_rate); void sa1111_set_io_dir(struct sa1111_dev *sadev, unsigned int bits, unsigned int dir, unsigned int sleep_dir) { struct sa1111 *sachip = sa1111_chip_driver(sadev); unsigned 
long flags; unsigned int val; void __iomem *gpio = sachip->base + SA1111_GPIO; #define MODIFY_BITS(port, mask, dir) \ if (mask) { \ val = sa1111_readl(port); \ val &= ~(mask); \ val |= (dir) & (mask); \ sa1111_writel(val, port); \ } spin_lock_irqsave(&sachip->lock, flags); MODIFY_BITS(gpio + SA1111_GPIO_PADDR, bits & 15, dir); MODIFY_BITS(gpio + SA1111_GPIO_PBDDR, (bits >> 8) & 255, dir >> 8); MODIFY_BITS(gpio + SA1111_GPIO_PCDDR, (bits >> 16) & 255, dir >> 16); MODIFY_BITS(gpio + SA1111_GPIO_PASDR, bits & 15, sleep_dir); MODIFY_BITS(gpio + SA1111_GPIO_PBSDR, (bits >> 8) & 255, sleep_dir >> 8); MODIFY_BITS(gpio + SA1111_GPIO_PCSDR, (bits >> 16) & 255, sleep_dir >> 16); spin_unlock_irqrestore(&sachip->lock, flags); } EXPORT_SYMBOL(sa1111_set_io_dir); void sa1111_set_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v) { struct sa1111 *sachip = sa1111_chip_driver(sadev); unsigned long flags; unsigned int val; void __iomem *gpio = sachip->base + SA1111_GPIO; spin_lock_irqsave(&sachip->lock, flags); MODIFY_BITS(gpio + SA1111_GPIO_PADWR, bits & 15, v); MODIFY_BITS(gpio + SA1111_GPIO_PBDWR, (bits >> 8) & 255, v >> 8); MODIFY_BITS(gpio + SA1111_GPIO_PCDWR, (bits >> 16) & 255, v >> 16); spin_unlock_irqrestore(&sachip->lock, flags); } EXPORT_SYMBOL(sa1111_set_io); void sa1111_set_sleep_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v) { struct sa1111 *sachip = sa1111_chip_driver(sadev); unsigned long flags; unsigned int val; void __iomem *gpio = sachip->base + SA1111_GPIO; spin_lock_irqsave(&sachip->lock, flags); MODIFY_BITS(gpio + SA1111_GPIO_PASSR, bits & 15, v); MODIFY_BITS(gpio + SA1111_GPIO_PBSSR, (bits >> 8) & 255, v >> 8); MODIFY_BITS(gpio + SA1111_GPIO_PCSSR, (bits >> 16) & 255, v >> 16); spin_unlock_irqrestore(&sachip->lock, flags); } EXPORT_SYMBOL(sa1111_set_sleep_io); /* * Individual device operations. 
*/ /** * sa1111_enable_device - enable an on-chip SA1111 function block * @sadev: SA1111 function block device to enable */ int sa1111_enable_device(struct sa1111_dev *sadev) { struct sa1111 *sachip = sa1111_chip_driver(sadev); unsigned long flags; unsigned int val; int ret = 0; if (sachip->pdata && sachip->pdata->enable) ret = sachip->pdata->enable(sachip->pdata->data, sadev->devid); if (ret == 0) { spin_lock_irqsave(&sachip->lock, flags); val = sa1111_readl(sachip->base + SA1111_SKPCR); sa1111_writel(val | sadev->skpcr_mask, sachip->base + SA1111_SKPCR); spin_unlock_irqrestore(&sachip->lock, flags); } return ret; } EXPORT_SYMBOL(sa1111_enable_device); /** * sa1111_disable_device - disable an on-chip SA1111 function block * @sadev: SA1111 function block device to disable */ void sa1111_disable_device(struct sa1111_dev *sadev) { struct sa1111 *sachip = sa1111_chip_driver(sadev); unsigned long flags; unsigned int val; spin_lock_irqsave(&sachip->lock, flags); val = sa1111_readl(sachip->base + SA1111_SKPCR); sa1111_writel(val & ~sadev->skpcr_mask, sachip->base + SA1111_SKPCR); spin_unlock_irqrestore(&sachip->lock, flags); if (sachip->pdata && sachip->pdata->disable) sachip->pdata->disable(sachip->pdata->data, sadev->devid); } EXPORT_SYMBOL(sa1111_disable_device); /* * SA1111 "Register Access Bus." * * We model this as a regular bus type, and hang devices directly * off this. 
*/ static int sa1111_match(struct device *_dev, struct device_driver *_drv) { struct sa1111_dev *dev = SA1111_DEV(_dev); struct sa1111_driver *drv = SA1111_DRV(_drv); return dev->devid & drv->devid; } static int sa1111_bus_suspend(struct device *dev, pm_message_t state) { struct sa1111_dev *sadev = SA1111_DEV(dev); struct sa1111_driver *drv = SA1111_DRV(dev->driver); int ret = 0; if (drv && drv->suspend) ret = drv->suspend(sadev, state); return ret; } static int sa1111_bus_resume(struct device *dev) { struct sa1111_dev *sadev = SA1111_DEV(dev); struct sa1111_driver *drv = SA1111_DRV(dev->driver); int ret = 0; if (drv && drv->resume) ret = drv->resume(sadev); return ret; } static void sa1111_bus_shutdown(struct device *dev) { struct sa1111_driver *drv = SA1111_DRV(dev->driver); if (drv && drv->shutdown) drv->shutdown(SA1111_DEV(dev)); } static int sa1111_bus_probe(struct device *dev) { struct sa1111_dev *sadev = SA1111_DEV(dev); struct sa1111_driver *drv = SA1111_DRV(dev->driver); int ret = -ENODEV; if (drv->probe) ret = drv->probe(sadev); return ret; } static int sa1111_bus_remove(struct device *dev) { struct sa1111_dev *sadev = SA1111_DEV(dev); struct sa1111_driver *drv = SA1111_DRV(dev->driver); int ret = 0; if (drv->remove) ret = drv->remove(sadev); return ret; } struct bus_type sa1111_bus_type = { .name = "sa1111-rab", .match = sa1111_match, .probe = sa1111_bus_probe, .remove = sa1111_bus_remove, .suspend = sa1111_bus_suspend, .resume = sa1111_bus_resume, .shutdown = sa1111_bus_shutdown, }; EXPORT_SYMBOL(sa1111_bus_type); int sa1111_driver_register(struct sa1111_driver *driver) { driver->drv.bus = &sa1111_bus_type; return driver_register(&driver->drv); } EXPORT_SYMBOL(sa1111_driver_register); void sa1111_driver_unregister(struct sa1111_driver *driver) { driver_unregister(&driver->drv); } EXPORT_SYMBOL(sa1111_driver_unregister); #ifdef CONFIG_DMABOUNCE /* * According to the "Intel StrongARM SA-1111 Microprocessor Companion * Chip Specification Update" (June 
2000), erratum #7, there is a * significant bug in the SA1111 SDRAM shared memory controller. If * an access to a region of memory above 1MB relative to the bank base, * it is important that address bit 10 _NOT_ be asserted. Depending * on the configuration of the RAM, bit 10 may correspond to one * of several different (processor-relative) address bits. * * This routine only identifies whether or not a given DMA address * is susceptible to the bug. * * This should only get called for sa1111_device types due to the * way we configure our device dma_masks. */ static int sa1111_needs_bounce(struct device *dev, dma_addr_t addr, size_t size) { /* * Section 4.6 of the "Intel StrongARM SA-1111 Development Module * User's Guide" mentions that jumpers R51 and R52 control the * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or * SDRAM bank 1 on Neponset). The default configuration selects * Assabet, so any address in bank 1 is necessarily invalid. */ return (machine_is_assabet() || machine_is_pfs168()) && (addr >= 0xc8000000 || (addr + size) >= 0xc8000000); } static int sa1111_notifier_call(struct notifier_block *n, unsigned long action, void *data) { struct sa1111_dev *dev = SA1111_DEV(data); switch (action) { case BUS_NOTIFY_ADD_DEVICE: if (dev->dev.dma_mask && dev->dma_mask < 0xffffffffUL) { int ret = dmabounce_register_dev(&dev->dev, 1024, 4096, sa1111_needs_bounce); if (ret) dev_err(&dev->dev, "failed to register with dmabounce: %d\n", ret); } break; case BUS_NOTIFY_DEL_DEVICE: if (dev->dev.dma_mask && dev->dma_mask < 0xffffffffUL) dmabounce_unregister_dev(&dev->dev); break; } return NOTIFY_OK; } static struct notifier_block sa1111_bus_notifier = { .notifier_call = sa1111_notifier_call, }; #endif static int __init sa1111_init(void) { int ret = bus_register(&sa1111_bus_type); #ifdef CONFIG_DMABOUNCE if (ret == 0) bus_register_notifier(&sa1111_bus_type, &sa1111_bus_notifier); #endif if (ret == 0) platform_driver_register(&sa1111_device_driver); return ret; } 
static void __exit sa1111_exit(void) { platform_driver_unregister(&sa1111_device_driver); #ifdef CONFIG_DMABOUNCE bus_unregister_notifier(&sa1111_bus_type, &sa1111_bus_notifier); #endif bus_unregister(&sa1111_bus_type); } subsys_initcall(sa1111_init); module_exit(sa1111_exit); MODULE_DESCRIPTION("Intel Corporation SA1111 core driver"); MODULE_LICENSE("GPL");
gpl-2.0
aeroevan/vivo_w-kernel
drivers/media/video/vp27smpx.c
941
5408
/* * vp27smpx - driver version 0.0.1 * * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> * * Based on a tvaudio patch from Takahiro Adachi <tadachi@tadachi-net.com> * and Kazuhiko Kawakami <kazz-0@mail.goo.ne.jp> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/ioctl.h> #include <asm/uaccess.h> #include <linux/i2c.h> #include <linux/i2c-id.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> #include <media/v4l2-i2c-drv.h> MODULE_DESCRIPTION("vp27smpx driver"); MODULE_AUTHOR("Hans Verkuil"); MODULE_LICENSE("GPL"); /* ----------------------------------------------------------------------- */ struct vp27smpx_state { struct v4l2_subdev sd; int radio; u32 audmode; }; static inline struct vp27smpx_state *to_state(struct v4l2_subdev *sd) { return container_of(sd, struct vp27smpx_state, sd); } static void vp27smpx_set_audmode(struct v4l2_subdev *sd, u32 audmode) { struct vp27smpx_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); u8 data[3] = { 0x00, 0x00, 0x04 }; switch (audmode) { case V4L2_TUNER_MODE_MONO: case V4L2_TUNER_MODE_LANG1: break; case V4L2_TUNER_MODE_STEREO: case V4L2_TUNER_MODE_LANG1_LANG2: data[1] = 0x01; break; case V4L2_TUNER_MODE_LANG2: data[1] = 0x02; 
break; } if (i2c_master_send(client, data, sizeof(data)) != sizeof(data)) v4l2_err(sd, "I/O error setting audmode\n"); else state->audmode = audmode; } static int vp27smpx_s_radio(struct v4l2_subdev *sd) { struct vp27smpx_state *state = to_state(sd); state->radio = 1; return 0; } static int vp27smpx_s_std(struct v4l2_subdev *sd, v4l2_std_id norm) { struct vp27smpx_state *state = to_state(sd); state->radio = 0; return 0; } static int vp27smpx_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) { struct vp27smpx_state *state = to_state(sd); if (!state->radio) vp27smpx_set_audmode(sd, vt->audmode); return 0; } static int vp27smpx_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) { struct vp27smpx_state *state = to_state(sd); if (state->radio) return 0; vt->audmode = state->audmode; vt->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2; vt->rxsubchans = V4L2_TUNER_SUB_MONO; return 0; } static int vp27smpx_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct i2c_client *client = v4l2_get_subdevdata(sd); return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_VP27SMPX, 0); } static int vp27smpx_log_status(struct v4l2_subdev *sd) { struct vp27smpx_state *state = to_state(sd); v4l2_info(sd, "Audio Mode: %u%s\n", state->audmode, state->radio ? 
" (Radio)" : ""); return 0; } /* ----------------------------------------------------------------------- */ static const struct v4l2_subdev_core_ops vp27smpx_core_ops = { .log_status = vp27smpx_log_status, .g_chip_ident = vp27smpx_g_chip_ident, .s_std = vp27smpx_s_std, }; static const struct v4l2_subdev_tuner_ops vp27smpx_tuner_ops = { .s_radio = vp27smpx_s_radio, .s_tuner = vp27smpx_s_tuner, .g_tuner = vp27smpx_g_tuner, }; static const struct v4l2_subdev_ops vp27smpx_ops = { .core = &vp27smpx_core_ops, .tuner = &vp27smpx_tuner_ops, }; /* ----------------------------------------------------------------------- */ /* i2c implementation */ /* * Generic i2c probe * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1' */ static int vp27smpx_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct vp27smpx_state *state; struct v4l2_subdev *sd; /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); state = kzalloc(sizeof(struct vp27smpx_state), GFP_KERNEL); if (state == NULL) return -ENOMEM; sd = &state->sd; v4l2_i2c_subdev_init(sd, client, &vp27smpx_ops); state->audmode = V4L2_TUNER_MODE_STEREO; /* initialize vp27smpx */ vp27smpx_set_audmode(sd, state->audmode); return 0; } static int vp27smpx_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); kfree(to_state(sd)); return 0; } /* ----------------------------------------------------------------------- */ static const struct i2c_device_id vp27smpx_id[] = { { "vp27smpx", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, vp27smpx_id); static struct v4l2_i2c_driver_data v4l2_i2c_data = { .name = "vp27smpx", .probe = vp27smpx_probe, .remove = vp27smpx_remove, .id_table = vp27smpx_id, };
gpl-2.0
OptimusG-Dev-Team/caf-clean-kernel
drivers/hwmon/qpnp-adc-common.c
1197
18127
/* Copyright (c) 2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/kernel.h> #include <linux/of.h> #include <linux/err.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/types.h> #include <linux/hwmon.h> #include <linux/module.h> #include <linux/debugfs.h> #include <linux/spmi.h> #include <linux/of_irq.h> #include <linux/interrupt.h> #include <linux/completion.h> #include <linux/qpnp/qpnp-adc.h> #include <linux/platform_device.h> /* Min ADC code represets 0V */ #define QPNP_VADC_MIN_ADC_CODE 0x6000 /* Max ADC code represents full-scale range of 1.8V */ #define QPNP_VADC_MAX_ADC_CODE 0xA800 #define KELVINMIL_DEGMIL 273160 /* Units for temperature below (on x axis) is in 0.1DegC as required by the battery driver. Note the resolution used here to compute the table was done for DegC to milli-volts. In consideration to limit the size of the table for the given temperature range below, the result is linearly interpolated and provided to the battery driver in the units desired for their framework which is 0.1DegC. 
True resolution of 0.1DegC will result in the below table size to increase by 10 times */ static const struct qpnp_vadc_map_pt adcmap_btm_threshold[] = { {-300, 1642}, {-200, 1544}, {-100, 1414}, {0, 1260}, {10, 1244}, {20, 1228}, {30, 1212}, {40, 1195}, {50, 1179}, {60, 1162}, {70, 1146}, {80, 1129}, {90, 1113}, {100, 1097}, {110, 1080}, {120, 1064}, {130, 1048}, {140, 1032}, {150, 1016}, {160, 1000}, {170, 985}, {180, 969}, {190, 954}, {200, 939}, {210, 924}, {220, 909}, {230, 894}, {240, 880}, {250, 866}, {260, 852}, {270, 838}, {280, 824}, {290, 811}, {300, 798}, {310, 785}, {320, 773}, {330, 760}, {340, 748}, {350, 736}, {360, 725}, {370, 713}, {380, 702}, {390, 691}, {400, 681}, {410, 670}, {420, 660}, {430, 650}, {440, 640}, {450, 631}, {460, 622}, {470, 613}, {480, 604}, {490, 595}, {500, 587}, {510, 579}, {520, 571}, {530, 563}, {540, 556}, {550, 548}, {560, 541}, {570, 534}, {580, 527}, {590, 521}, {600, 514}, {610, 508}, {620, 502}, {630, 496}, {640, 490}, {650, 485}, {660, 281}, {670, 274}, {680, 267}, {690, 260}, {700, 254}, {710, 247}, {720, 241}, {730, 235}, {740, 229}, {750, 224}, {760, 218}, {770, 213}, {780, 208}, {790, 203} }; static const struct qpnp_vadc_map_pt adcmap_ntcg_104ef_104fb[] = { {696483, -40960}, {649148, -39936}, {605368, -38912}, {564809, -37888}, {527215, -36864}, {492322, -35840}, {460007, -34816}, {429982, -33792}, {402099, -32768}, {376192, -31744}, {352075, -30720}, {329714, -29696}, {308876, -28672}, {289480, -27648}, {271417, -26624}, {254574, -25600}, {238903, -24576}, {224276, -23552}, {210631, -22528}, {197896, -21504}, {186007, -20480}, {174899, -19456}, {164521, -18432}, {154818, -17408}, {145744, -16384}, {137265, -15360}, {129307, -14336}, {121866, -13312}, {114896, -12288}, {108365, -11264}, {102252, -10240}, {96499, -9216}, {91111, -8192}, {86055, -7168}, {81308, -6144}, {76857, -5120}, {72660, -4096}, {68722, -3072}, {65020, -2048}, {61538, -1024}, {58261, 0}, {55177, 1024}, {52274, 2048}, {49538, 3072}, {46962, 
4096}, {44531, 5120}, {42243, 6144}, {40083, 7168}, {38045, 8192}, {36122, 9216}, {34308, 10240}, {32592, 11264}, {30972, 12288}, {29442, 13312}, {27995, 14336}, {26624, 15360}, {25333, 16384}, {24109, 17408}, {22951, 18432}, {21854, 19456}, {20807, 20480}, {19831, 21504}, {18899, 22528}, {18016, 23552}, {17178, 24576}, {16384, 25600}, {15631, 26624}, {14916, 27648}, {14237, 28672}, {13593, 29696}, {12976, 30720}, {12400, 31744}, {11848, 32768}, {11324, 33792}, {10825, 34816}, {10354, 35840}, {9900, 36864}, {9471, 37888}, {9062, 38912}, {8674, 39936}, {8306, 40960}, {7951, 41984}, {7616, 43008}, {7296, 44032}, {6991, 45056}, {6701, 46080}, {6424, 47104}, {6160, 48128}, {5908, 49152}, {5667, 50176}, {5439, 51200}, {5219, 52224}, {5010, 53248}, {4810, 54272}, {4619, 55296}, {4440, 56320}, {4263, 57344}, {4097, 58368}, {3938, 59392}, {3785, 60416}, {3637, 61440}, {3501, 62464}, {3368, 63488}, {3240, 64512}, {3118, 65536}, {2998, 66560}, {2889, 67584}, {2782, 68608}, {2680, 69632}, {2581, 70656}, {2490, 71680}, {2397, 72704}, {2310, 73728}, {2227, 74752}, {2147, 75776}, {2064, 76800}, {1998, 77824}, {1927, 78848}, {1860, 79872}, {1795, 80896}, {1736, 81920}, {1673, 82944}, {1615, 83968}, {1560, 84992}, {1507, 86016}, {1456, 87040}, {1407, 88064}, {1360, 89088}, {1314, 90112}, {1271, 91136}, {1228, 92160}, {1189, 93184}, {1150, 94208}, {1112, 95232}, {1076, 96256}, {1042, 97280}, {1008, 98304}, {976, 99328}, {945, 100352}, {915, 101376}, {886, 102400}, {859, 103424}, {832, 104448}, {807, 105472}, {782, 106496}, {756, 107520}, {735, 108544}, {712, 109568}, {691, 110592}, {670, 111616}, {650, 112640}, {631, 113664}, {612, 114688}, {594, 115712}, {577, 116736}, {560, 117760}, {544, 118784}, {528, 119808}, {513, 120832}, {498, 121856}, {483, 122880}, {470, 123904}, {457, 124928}, {444, 125952}, {431, 126976}, {419, 128000} }; static int32_t qpnp_adc_map_linear(const struct qpnp_vadc_map_pt *pts, uint32_t tablesize, int32_t input, int64_t *output) { bool descending = 1; 
uint32_t i = 0; if ((pts == NULL) || (output == NULL)) return -EINVAL; /* Check if table is descending or ascending */ if (tablesize > 1) { if (pts[0].x < pts[1].x) descending = 0; } while (i < tablesize) { if ((descending == 1) && (pts[i].x < input)) { /* table entry is less than measured value and table is descending, stop */ break; } else if ((descending == 0) && (pts[i].x > input)) { /* table entry is greater than measured value and table is ascending, stop */ break; } else { i++; } } if (i == 0) *output = pts[0].y; else if (i == tablesize) *output = pts[tablesize-1].y; else { /* result is between search_index and search_index-1 */ /* interpolate linearly */ *output = (((int32_t) ((pts[i].y - pts[i-1].y)* (input - pts[i-1].x))/ (pts[i].x - pts[i-1].x))+ pts[i-1].y); } return 0; } static int32_t qpnp_adc_map_batt_therm(const struct qpnp_vadc_map_pt *pts, uint32_t tablesize, int32_t input, int64_t *output) { bool descending = 1; uint32_t i = 0; if ((pts == NULL) || (output == NULL)) return -EINVAL; /* Check if table is descending or ascending */ if (tablesize > 1) { if (pts[0].y < pts[1].y) descending = 0; } while (i < tablesize) { if ((descending == 1) && (pts[i].y < input)) { /* table entry is less than measured value and table is descending, stop */ break; } else if ((descending == 0) && (pts[i].y > input)) { /* table entry is greater than measured value and table is ascending, stop */ break; } else { i++; } } if (i == 0) { *output = pts[0].x; } else if (i == tablesize) { *output = pts[tablesize-1].x; } else { /* result is between search_index and search_index-1 */ /* interpolate linearly */ *output = (((int32_t) ((pts[i].x - pts[i-1].x)* (input - pts[i-1].y))/ (pts[i].y - pts[i-1].y))+ pts[i-1].x); } return 0; } static int64_t qpnp_adc_scale_ratiometric_calib(int32_t adc_code, const struct qpnp_adc_properties *adc_properties, const struct qpnp_vadc_chan_properties *chan_properties) { int64_t adc_voltage = 0; bool negative_offset = 0; if (!chan_properties || 
!chan_properties->offset_gain_numerator || !chan_properties->offset_gain_denominator || !adc_properties) return -EINVAL; adc_voltage = (adc_code - chan_properties->adc_graph[CALIB_RATIOMETRIC].adc_gnd) * adc_properties->adc_vdd_reference; if (adc_voltage < 0) { negative_offset = 1; adc_voltage = -adc_voltage; } do_div(adc_voltage, chan_properties->adc_graph[CALIB_RATIOMETRIC].dy); if (negative_offset) adc_voltage = -adc_voltage; return adc_voltage; } int32_t qpnp_adc_scale_pmic_therm(int32_t adc_code, const struct qpnp_adc_properties *adc_properties, const struct qpnp_vadc_chan_properties *chan_properties, struct qpnp_vadc_result *adc_chan_result) { int64_t pmic_voltage = 0; bool negative_offset = 0; if (!chan_properties || !chan_properties->offset_gain_numerator || !chan_properties->offset_gain_denominator || !adc_properties || !adc_chan_result) return -EINVAL; pmic_voltage = (adc_code - chan_properties->adc_graph[CALIB_ABSOLUTE].adc_gnd) * chan_properties->adc_graph[CALIB_ABSOLUTE].dx; if (pmic_voltage < 0) { negative_offset = 1; pmic_voltage = -pmic_voltage; } do_div(pmic_voltage, chan_properties->adc_graph[CALIB_ABSOLUTE].dy); if (negative_offset) pmic_voltage = -pmic_voltage; pmic_voltage += chan_properties->adc_graph[CALIB_ABSOLUTE].dx; if (pmic_voltage > 0) { /* 2mV/K */ adc_chan_result->measurement = pmic_voltage* chan_properties->offset_gain_denominator; do_div(adc_chan_result->measurement, chan_properties->offset_gain_numerator * 2); } else { adc_chan_result->measurement = 0; } /* Change to .001 deg C */ adc_chan_result->measurement -= KELVINMIL_DEGMIL; adc_chan_result->physical = (int32_t)adc_chan_result->measurement; return 0; } EXPORT_SYMBOL_GPL(qpnp_adc_scale_pmic_therm); /* Scales the ADC code to 0.001 degrees C using the map * table for the XO thermistor. 
*/ int32_t qpnp_adc_tdkntcg_therm(int32_t adc_code, const struct qpnp_adc_properties *adc_properties, const struct qpnp_vadc_chan_properties *chan_properties, struct qpnp_vadc_result *adc_chan_result) { int64_t xo_thm = 0; if (!chan_properties || !chan_properties->offset_gain_numerator || !chan_properties->offset_gain_denominator || !adc_properties || !adc_chan_result) return -EINVAL; xo_thm = qpnp_adc_scale_ratiometric_calib(adc_code, adc_properties, chan_properties); xo_thm <<= 4; qpnp_adc_map_linear(adcmap_ntcg_104ef_104fb, ARRAY_SIZE(adcmap_ntcg_104ef_104fb), xo_thm, &adc_chan_result->physical); return 0; } EXPORT_SYMBOL_GPL(qpnp_adc_tdkntcg_therm); int32_t qpnp_adc_scale_batt_therm(int32_t adc_code, const struct qpnp_adc_properties *adc_properties, const struct qpnp_vadc_chan_properties *chan_properties, struct qpnp_vadc_result *adc_chan_result) { int64_t bat_voltage = 0; bat_voltage = qpnp_adc_scale_ratiometric_calib(adc_code, adc_properties, chan_properties); return qpnp_adc_map_batt_therm( adcmap_btm_threshold, ARRAY_SIZE(adcmap_btm_threshold), bat_voltage, &adc_chan_result->physical); } EXPORT_SYMBOL_GPL(qpnp_adc_scale_batt_therm); int32_t qpnp_adc_scale_batt_id(int32_t adc_code, const struct qpnp_adc_properties *adc_properties, const struct qpnp_vadc_chan_properties *chan_properties, struct qpnp_vadc_result *adc_chan_result) { int64_t batt_id_voltage = 0; batt_id_voltage = qpnp_adc_scale_ratiometric_calib(adc_code, adc_properties, chan_properties); adc_chan_result->physical = batt_id_voltage; adc_chan_result->physical = adc_chan_result->measurement; return 0; } EXPORT_SYMBOL_GPL(qpnp_adc_scale_batt_id); int32_t qpnp_adc_scale_default(int32_t adc_code, const struct qpnp_adc_properties *adc_properties, const struct qpnp_vadc_chan_properties *chan_properties, struct qpnp_vadc_result *adc_chan_result) { bool negative_rawfromoffset = 0, negative_offset = 0; int64_t scale_voltage = 0; if (!chan_properties || !chan_properties->offset_gain_numerator || 
!chan_properties->offset_gain_denominator || !adc_properties || !adc_chan_result) return -EINVAL; scale_voltage = (adc_code - chan_properties->adc_graph[CALIB_ABSOLUTE].adc_gnd) * chan_properties->adc_graph[CALIB_ABSOLUTE].dx; if (scale_voltage < 0) { negative_offset = 1; scale_voltage = -scale_voltage; } do_div(scale_voltage, chan_properties->adc_graph[CALIB_ABSOLUTE].dy); if (negative_offset) scale_voltage = -scale_voltage; scale_voltage += chan_properties->adc_graph[CALIB_ABSOLUTE].dx; if (scale_voltage < 0) { if (adc_properties->bipolar) { scale_voltage = -scale_voltage; negative_rawfromoffset = 1; } else { scale_voltage = 0; } } adc_chan_result->measurement = scale_voltage * chan_properties->offset_gain_denominator; /* do_div only perform positive integer division! */ do_div(adc_chan_result->measurement, chan_properties->offset_gain_numerator); if (negative_rawfromoffset) adc_chan_result->measurement = -adc_chan_result->measurement; /* * Note: adc_chan_result->measurement is in the unit of * adc_properties.adc_reference. 
For generic channel processing, * channel measurement is a scale/ratio relative to the adc * reference input */ adc_chan_result->physical = adc_chan_result->measurement; return 0; } EXPORT_SYMBOL_GPL(qpnp_adc_scale_default); int32_t qpnp_vadc_check_result(int32_t *data) { if (*data < QPNP_VADC_MIN_ADC_CODE) *data = QPNP_VADC_MIN_ADC_CODE; else if (*data > QPNP_VADC_MAX_ADC_CODE) *data = QPNP_VADC_MAX_ADC_CODE; return 0; } EXPORT_SYMBOL_GPL(qpnp_vadc_check_result); int32_t qpnp_adc_get_devicetree_data(struct spmi_device *spmi, struct qpnp_adc_drv *adc_qpnp) { struct device_node *node = spmi->dev.of_node; struct resource *res; struct device_node *child; struct qpnp_vadc_amux *adc_channel_list; struct qpnp_adc_properties *adc_prop; struct qpnp_adc_amux_properties *amux_prop; int count_adc_channel_list = 0, decimation, rc = 0, i = 0; if (!node) return -EINVAL; for_each_child_of_node(node, child) count_adc_channel_list++; if (!count_adc_channel_list) { pr_err("No channel listing\n"); return -EINVAL; } adc_qpnp->spmi = spmi; adc_prop = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_adc_properties), GFP_KERNEL); if (!adc_prop) { dev_err(&spmi->dev, "Unable to allocate memory\n"); return -ENOMEM; } adc_channel_list = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_vadc_amux) * count_adc_channel_list, GFP_KERNEL); if (!adc_channel_list) { dev_err(&spmi->dev, "Unable to allocate memory\n"); return -ENOMEM; } amux_prop = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_adc_amux_properties) + sizeof(struct qpnp_vadc_chan_properties), GFP_KERNEL); if (!amux_prop) { dev_err(&spmi->dev, "Unable to allocate memory\n"); return -ENOMEM; } adc_qpnp->adc_channels = adc_channel_list; adc_qpnp->amux_prop = amux_prop; for_each_child_of_node(node, child) { int channel_num, scaling, post_scaling, hw_settle_time; int fast_avg_setup, calib_type, rc; const char *calibration_param, *channel_name; channel_name = of_get_property(child, "label", NULL) ? 
: child->name; if (!channel_name) { pr_err("Invalid channel name\n"); return -EINVAL; } rc = of_property_read_u32(child, "qcom,channel-num", &channel_num); if (rc) { pr_err("Invalid channel num\n"); return -EINVAL; } rc = of_property_read_u32(child, "qcom,decimation", &decimation); if (rc) { pr_err("Invalid channel decimation property\n"); return -EINVAL; } rc = of_property_read_u32(child, "qcom,pre-div-channel-scaling", &scaling); if (rc) { pr_err("Invalid channel scaling property\n"); return -EINVAL; } rc = of_property_read_u32(child, "qcom,scale-function", &post_scaling); if (rc) { pr_err("Invalid channel post scaling property\n"); return -EINVAL; } rc = of_property_read_u32(child, "qcom,hw-settle-time", &hw_settle_time); if (rc) { pr_err("Invalid channel hw settle time property\n"); return -EINVAL; } rc = of_property_read_u32(child, "qcom,fast-avg-setup", &fast_avg_setup); if (rc) { pr_err("Invalid channel fast average setup\n"); return -EINVAL; } calibration_param = of_get_property(child, "qcom,calibration-type", NULL); if (!strncmp(calibration_param, "absolute", 8)) calib_type = CALIB_ABSOLUTE; else if (!strncmp(calibration_param, "ratiometric", 11)) calib_type = CALIB_RATIOMETRIC; else { pr_err("%s: Invalid calibration property\n", __func__); return -EINVAL; } /* Individual channel properties */ adc_channel_list[i].name = (char *)channel_name; adc_channel_list[i].channel_num = channel_num; adc_channel_list[i].chan_path_prescaling = scaling; adc_channel_list[i].adc_decimation = decimation; adc_channel_list[i].adc_scale_fn = post_scaling; adc_channel_list[i].hw_settle_time = hw_settle_time; adc_channel_list[i].fast_avg_setup = fast_avg_setup; i++; } /* Get the ADC VDD reference voltage and ADC bit resolution */ rc = of_property_read_u32(node, "qcom,adc-vdd-reference", &adc_prop->adc_vdd_reference); if (rc) { pr_err("Invalid adc vdd reference property\n"); return -EINVAL; } rc = of_property_read_u32(node, "qcom,adc-bit-resolution", &adc_prop->bitresolution); if 
(rc) { pr_err("Invalid adc bit resolution property\n"); return -EINVAL; } adc_qpnp->adc_prop = adc_prop; /* Get the peripheral address */ res = spmi_get_resource(spmi, 0, IORESOURCE_MEM, 0); if (!res) { pr_err("No base address definition\n"); return -EINVAL; } adc_qpnp->slave = spmi->sid; adc_qpnp->offset = res->start; /* Register the ADC peripheral interrupt */ adc_qpnp->adc_irq = spmi_get_irq(spmi, 0, 0); if (adc_qpnp->adc_irq < 0) { pr_err("Invalid irq\n"); return -ENXIO; } init_completion(&adc_qpnp->adc_rslt_completion); mutex_init(&adc_qpnp->adc_lock); return 0; } EXPORT_SYMBOL(qpnp_adc_get_devicetree_data);
gpl-2.0
justindriggers/android_kernel_glass_glass-1
fs/xfs/xfs_iops.c
1197
29639
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_acl.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_vnodeops.h"
#include "xfs_inode_item.h"
#include "xfs_trace.h"

#include <linux/capability.h>
#include <linux/xattr.h>
#include <linux/namei.h>
#include <linux/posix_acl.h>
#include <linux/security.h>
#include <linux/fiemap.h>
#include <linux/slab.h>

/*
 * Callback for security_inode_init_security(): store each security
 * xattr supplied by the LSM on the new inode via xfs_attr_set().
 * Stops at the first failure and returns that (negative) error.
 */
static int
xfs_initxattrs(
	struct inode		*inode,
	const struct xattr	*xattr_array,
	void			*fs_info)
{
	const struct xattr	*xattr;
	struct xfs_inode	*ip = XFS_I(inode);
	int			error = 0;

	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
		error = xfs_attr_set(ip, xattr->name, xattr->value,
				     xattr->value_len, ATTR_SECURE);
		if (error < 0)
			break;
	}
	return error;
}

/*
 * Hook in SELinux.  This is not quite correct yet, what we really need
 * here (as we do for default ACLs) is a mechanism by which creation of
 * these attrs can be journalled at inode creation time (along with the
 * inode, of course, such that log replay can't cause these to be lost).
 */
STATIC int
xfs_init_security(
	struct inode	*inode,
	struct inode	*dir,
	const struct qstr *qstr)
{
	return security_inode_init_security(inode, dir, qstr,
					    &xfs_initxattrs, NULL);
}

/* Fill an xfs_name from a dentry's name/len (no copy, borrows memory). */
static void
xfs_dentry_to_name(
	struct xfs_name	*namep,
	struct dentry	*dentry)
{
	namep->name = dentry->d_name.name;
	namep->len = dentry->d_name.len;
}

/*
 * Undo a partially-completed create: remove the directory entry again
 * and drop the inode reference.
 */
STATIC void
xfs_cleanup_inode(
	struct inode	*dir,
	struct inode	*inode,
	struct dentry	*dentry)
{
	struct xfs_name	teardown;

	/* Oh, the horror.
	 * If we can't add the ACL or we fail in
	 * xfs_init_security we must back out.
	 * ENOSPC can hit here, among other things.
	 */
	xfs_dentry_to_name(&teardown, dentry);

	xfs_remove(XFS_I(dir), &teardown, XFS_I(inode));
	iput(inode);
}

/*
 * Create a regular file, directory or special node.  Handles default
 * ACL inheritance and LSM security attrs; on any failure after the
 * on-disk create, the entry is torn down via xfs_cleanup_inode().
 * Returns a negative errno (XFS-internal errors are positive, hence
 * the -error returns).
 */
STATIC int
xfs_vn_mknod(
	struct inode	*dir,
	struct dentry	*dentry,
	umode_t		mode,
	dev_t		rdev)
{
	struct inode	*inode;
	struct xfs_inode *ip = NULL;
	struct posix_acl *default_acl = NULL;
	struct xfs_name	name;
	int		error;

	/*
	 * Irix uses Missed'em'V split, but doesn't want to see
	 * the upper 5 bits of (14bit) major.
	 */
	if (S_ISCHR(mode) || S_ISBLK(mode)) {
		if (unlikely(!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff))
			return -EINVAL;
		rdev = sysv_encode_dev(rdev);
	} else {
		rdev = 0;
	}

	if (IS_POSIXACL(dir)) {
		default_acl = xfs_get_acl(dir, ACL_TYPE_DEFAULT);
		if (IS_ERR(default_acl))
			return PTR_ERR(default_acl);

		if (!default_acl)
			mode &= ~current_umask();
	}

	xfs_dentry_to_name(&name, dentry);
	error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
	if (unlikely(error))
		goto out_free_acl;

	inode = VFS_I(ip);

	error = xfs_init_security(inode, dir, &dentry->d_name);
	if (unlikely(error))
		goto out_cleanup_inode;

	if (default_acl) {
		error = -xfs_inherit_acl(inode, default_acl);
		default_acl = NULL;
		if (unlikely(error))
			goto out_cleanup_inode;
	}

	d_instantiate(dentry, inode);
	return -error;

 out_cleanup_inode:
	xfs_cleanup_inode(dir, inode, dentry);
 out_free_acl:
	posix_acl_release(default_acl);
	return -error;
}

/* ->create: a regular file is just mknod with rdev == 0 */
STATIC int
xfs_vn_create(
	struct inode	*dir,
	struct dentry	*dentry,
	umode_t		mode,
	struct nameidata *nd)
{
	return xfs_vn_mknod(dir, dentry, mode, 0);
}

/* ->mkdir: mknod with S_IFDIR or'd into the mode */
STATIC int
xfs_vn_mkdir(
	struct inode	*dir,
	struct dentry	*dentry,
	umode_t		mode)
{
	return xfs_vn_mknod(dir, dentry, mode|S_IFDIR, 0);
}

/*
 * ->lookup for case-sensitive directories.  ENOENT becomes a hashed
 * negative dentry (d_add with NULL inode); other errors propagate.
 */
STATIC struct dentry *
xfs_vn_lookup(
	struct inode	*dir,
	struct dentry	*dentry,
	struct nameidata *nd)
{
	struct xfs_inode *cip;
	struct xfs_name	name;
	int		error;

	if (dentry->d_name.len >= MAXNAMELEN)
		return ERR_PTR(-ENAMETOOLONG);

	xfs_dentry_to_name(&name, dentry);
	error = xfs_lookup(XFS_I(dir), &name, &cip, NULL);
	if (unlikely(error)) {
		if (unlikely(error != ENOENT))
			return ERR_PTR(-error);
		d_add(dentry, NULL);
		return NULL;
	}

	return d_splice_alias(VFS_I(cip), dentry);
}

/*
 * ->lookup for case-insensitive (ASCII-CI) directories.  On a
 * case-insensitive match the dentry is re-added under the on-disk
 * name via d_add_ci().  Negative dentries are deliberately NOT
 * cached (see comment below).
 */
STATIC struct dentry *
xfs_vn_ci_lookup(
	struct inode	*dir,
	struct dentry	*dentry,
	struct nameidata *nd)
{
	struct xfs_inode *ip;
	struct xfs_name	xname;
	struct xfs_name ci_name;
	struct qstr	dname;
	int		error;

	if (dentry->d_name.len >= MAXNAMELEN)
		return ERR_PTR(-ENAMETOOLONG);

	xfs_dentry_to_name(&xname, dentry);
	error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name);
	if (unlikely(error)) {
		if (unlikely(error != ENOENT))
			return ERR_PTR(-error);
		/*
		 * call d_add(dentry, NULL) here when d_drop_negative_children
		 * is called in xfs_vn_mknod (ie. allow negative dentries
		 * with CI filesystems).
		 */
		return NULL;
	}

	/* if exact match, just splice and exit */
	if (!ci_name.name)
		return d_splice_alias(VFS_I(ip), dentry);

	/* else case-insensitive match... */
	dname.name = ci_name.name;
	dname.len = ci_name.len;
	dentry = d_add_ci(dentry, VFS_I(ip), &dname);
	kmem_free(ci_name.name);
	return dentry;
}

/* ->link: add a hard link; takes an extra inode ref for the new dentry. */
STATIC int
xfs_vn_link(
	struct dentry	*old_dentry,
	struct inode	*dir,
	struct dentry	*dentry)
{
	struct inode	*inode = old_dentry->d_inode;
	struct xfs_name	name;
	int		error;

	xfs_dentry_to_name(&name, dentry);

	error = xfs_link(XFS_I(dir), XFS_I(inode), &name);
	if (unlikely(error))
		return -error;

	ihold(inode);
	d_instantiate(dentry, inode);
	return 0;
}

/* ->unlink: remove a directory entry; see CI note inside. */
STATIC int
xfs_vn_unlink(
	struct inode	*dir,
	struct dentry	*dentry)
{
	struct xfs_name	name;
	int		error;

	xfs_dentry_to_name(&name, dentry);

	error = -xfs_remove(XFS_I(dir), &name, XFS_I(dentry->d_inode));
	if (error)
		return error;

	/*
	 * With unlink, the VFS makes the dentry "negative": no inode,
	 * but still hashed. This is incompatible with case-insensitive
	 * mode, so invalidate (unhash) the dentry in CI-mode.
	 */
	if (xfs_sb_version_hasasciici(&XFS_M(dir->i_sb)->m_sb))
		d_invalidate(dentry);
	return 0;
}

/*
 * ->symlink: create a symlink, applying irix_symlink_mode policy to the
 * permission bits, then attach security attrs; cleans up on failure.
 */
STATIC int
xfs_vn_symlink(
	struct inode	*dir,
	struct dentry	*dentry,
	const char	*symname)
{
	struct inode	*inode;
	struct xfs_inode *cip = NULL;
	struct xfs_name	name;
	int		error;
	umode_t		mode;

	mode = S_IFLNK |
		(irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO);
	xfs_dentry_to_name(&name, dentry);

	error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip);
	if (unlikely(error))
		goto out;

	inode = VFS_I(cip);

	error = xfs_init_security(inode, dir, &dentry->d_name);
	if (unlikely(error))
		goto out_cleanup_inode;

	d_instantiate(dentry, inode);
	return 0;

 out_cleanup_inode:
	xfs_cleanup_inode(dir, inode, dentry);
 out:
	return -error;
}

/* ->rename: thin wrapper converting dentries to xfs_names for xfs_rename */
STATIC int
xfs_vn_rename(
	struct inode	*odir,
	struct dentry	*odentry,
	struct inode	*ndir,
	struct dentry	*ndentry)
{
	struct inode	*new_inode = ndentry->d_inode;
	struct xfs_name	oname;
	struct xfs_name	nname;

	xfs_dentry_to_name(&oname, odentry);
	xfs_dentry_to_name(&nname, ndentry);

	return -xfs_rename(XFS_I(odir), &oname, XFS_I(odentry->d_inode),
			   XFS_I(ndir), &nname, new_inode ?
						XFS_I(new_inode) : NULL);
}

/*
 * careful here - this function can get called recursively, so
 * we need to be very careful about how much stack we use.
 * uio is kmalloced for this reason...
 */
STATIC void *
xfs_vn_follow_link(
	struct dentry		*dentry,
	struct nameidata	*nd)
{
	char			*link;
	int			error = -ENOMEM;

	link = kmalloc(MAXPATHLEN+1, GFP_KERNEL);
	if (!link)
		goto out_err;

	error = -xfs_readlink(XFS_I(dentry->d_inode), link);
	if (unlikely(error))
		goto out_kfree;

	nd_set_link(nd, link);
	return NULL;

 out_kfree:
	kfree(link);
 out_err:
	/* stash the error in the nameidata; put_link won't free it */
	nd_set_link(nd, ERR_PTR(error));
	return NULL;
}

/* ->put_link: free the buffer allocated by follow_link (if not an error) */
STATIC void
xfs_vn_put_link(
	struct dentry	*dentry,
	struct nameidata *nd,
	void		*p)
{
	char		*s = nd_get_link(nd);

	if (!IS_ERR(s))
		kfree(s);
}

/*
 * ->getattr: fill kstat from the xfs on-disk inode fields; device nodes
 * decode rdev from the sysv-encoded form stored in the data fork, and
 * blksize reflects realtime extent size for realtime inodes.
 */
STATIC int
xfs_vn_getattr(
	struct vfsmount		*mnt,
	struct dentry		*dentry,
	struct kstat		*stat)
{
	struct inode		*inode = dentry->d_inode;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	trace_xfs_getattr(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	stat->size = XFS_ISIZE(ip);
	stat->dev = inode->i_sb->s_dev;
	stat->mode = ip->i_d.di_mode;
	stat->nlink = ip->i_d.di_nlink;
	stat->uid = ip->i_d.di_uid;
	stat->gid = ip->i_d.di_gid;
	stat->ino = ip->i_ino;
	stat->atime = inode->i_atime;
	stat->mtime = inode->i_mtime;
	stat->ctime = inode->i_ctime;
	stat->blocks =
		XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);


	switch (inode->i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		stat->blksize = BLKDEV_IOSIZE;
		stat->rdev = MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
				   sysv_minor(ip->i_df.if_u2.if_rdev));
		break;
	default:
		if (XFS_IS_REALTIME_INODE(ip)) {
			/*
			 * If the file blocks are being allocated from a
			 * realtime volume, then return the inode's realtime
			 * extent size or the realtime volume's extent size.
			 */
			stat->blksize =
				xfs_get_extsz_hint(ip) << mp->m_sb.sb_blocklog;
		} else
			stat->blksize = xfs_preferred_iosize(mp);
		stat->rdev = 0;
		break;
	}

	return 0;
}

/*
 * Apply a mode change to both the xfs on-disk inode and the VFS inode,
 * clearing SGID when the caller isn't in the group and lacks CAP_FSETID.
 * Caller must hold the ilock exclusively inside a transaction.
 */
static void
xfs_setattr_mode(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct iattr		*iattr)
{
	struct inode	*inode = VFS_I(ip);
	umode_t		mode = iattr->ia_mode;

	ASSERT(tp);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
		mode &= ~S_ISGID;

	ip->i_d.di_mode &= S_IFMT;
	ip->i_d.di_mode |= mode & ~S_IFMT;

	inode->i_mode &= S_IFMT;
	inode->i_mode |= mode & ~S_IFMT;
}

/*
 * Handle all non-size attribute changes (owner, group, mode, times) in
 * one transaction, including quota reservation/chown bookkeeping.
 * Returns a positive XFS error code on failure.
 */
int
xfs_setattr_nonsize(
	struct xfs_inode	*ip,
	struct iattr		*iattr,
	int			flags)
{
	xfs_mount_t		*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	int			mask = iattr->ia_valid;
	xfs_trans_t		*tp;
	int			error;
	uid_t			uid = 0, iuid = 0;
	gid_t			gid = 0, igid = 0;
	struct xfs_dquot	*udqp = NULL, *gdqp = NULL;
	struct xfs_dquot	*olddquot1 = NULL, *olddquot2 = NULL;

	trace_xfs_setattr(ip);

	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return XFS_ERROR(EROFS);

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	error = -inode_change_ok(inode, iattr);
	if (error)
		return XFS_ERROR(error);

	ASSERT((mask & ATTR_SIZE) == 0);

	/*
	 * If disk quotas is on, we make sure that the dquots do exist on disk,
	 * before we start any other transactions. Trying to do this later
	 * is messy. We don't care to take a readlock to look at the ids
	 * in inode here, because we can't hold it across the trans_reserve.
	 * If the IDs do change before we take the ilock, we're covered
	 * because the i_*dquot fields will get updated anyway.
	 */
	if (XFS_IS_QUOTA_ON(mp) && (mask & (ATTR_UID|ATTR_GID))) {
		uint	qflags = 0;

		if ((mask & ATTR_UID) && XFS_IS_UQUOTA_ON(mp)) {
			uid = iattr->ia_uid;
			qflags |= XFS_QMOPT_UQUOTA;
		} else {
			uid = ip->i_d.di_uid;
		}
		if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp)) {
			gid = iattr->ia_gid;
			qflags |= XFS_QMOPT_GQUOTA;
		} else {
			gid = ip->i_d.di_gid;
		}

		/*
		 * We take a reference when we initialize udqp and gdqp,
		 * so it is important that we never blindly double trip on
		 * the same variable. See xfs_create() for an example.
		 */
		ASSERT(udqp == NULL);
		ASSERT(gdqp == NULL);
		error = xfs_qm_vop_dqalloc(ip, uid, gid, xfs_get_projid(ip),
					 qflags, &udqp, &gdqp);
		if (error)
			return error;
	}

	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
	error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
	if (error)
		goto out_dqrele;

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	/*
	 * Change file ownership.  Must be the owner or privileged.
	 */
	if (mask & (ATTR_UID|ATTR_GID)) {
		/*
		 * These IDs could have changed since we last looked at them.
		 * But, we're assured that if the ownership did change
		 * while we didn't have the inode locked, inode's dquot(s)
		 * would have changed also.
		 */
		iuid = ip->i_d.di_uid;
		igid = ip->i_d.di_gid;
		gid = (mask & ATTR_GID) ? iattr->ia_gid : igid;
		uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid;

		/*
		 * Do a quota reservation only if uid/gid is actually
		 * going to change.
		 */
		if (XFS_IS_QUOTA_RUNNING(mp) &&
		    ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) ||
		     (XFS_IS_GQUOTA_ON(mp) && igid != gid))) {
			ASSERT(tp);
			error = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
						capable(CAP_FOWNER) ?
						XFS_QMOPT_FORCE_RES : 0);
			if (error)	/* out of quota */
				goto out_trans_cancel;
		}
	}

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Change file ownership.  Must be the owner or privileged.
	 */
	if (mask & (ATTR_UID|ATTR_GID)) {
		/*
		 * CAP_FSETID overrides the following restrictions:
		 *
		 * The set-user-ID and set-group-ID bits of a file will be
		 * cleared upon successful return from chown()
		 */
		if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
		    !capable(CAP_FSETID))
			ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);

		/*
		 * Change the ownerships and register quota modifications
		 * in the transaction.
		 */
		if (iuid != uid) {
			if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_UQUOTA_ON(mp)) {
				ASSERT(mask & ATTR_UID);
				ASSERT(udqp);
				olddquot1 = xfs_qm_vop_chown(tp, ip,
							&ip->i_udquot, udqp);
			}
			ip->i_d.di_uid = uid;
			inode->i_uid = uid;
		}
		if (igid != gid) {
			if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) {
				ASSERT(!XFS_IS_PQUOTA_ON(mp));
				ASSERT(mask & ATTR_GID);
				ASSERT(gdqp);
				olddquot2 = xfs_qm_vop_chown(tp, ip,
							&ip->i_gdquot, gdqp);
			}
			ip->i_d.di_gid = gid;
			inode->i_gid = gid;
		}
	}

	/*
	 * Change file access modes.
	 */
	if (mask & ATTR_MODE)
		xfs_setattr_mode(tp, ip, iattr);

	/*
	 * Change file access or modified times.
	 */
	if (mask & ATTR_ATIME) {
		inode->i_atime = iattr->ia_atime;
		ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
		ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
	}
	if (mask & ATTR_CTIME) {
		inode->i_ctime = iattr->ia_ctime;
		ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
		ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec;
	}
	if (mask & ATTR_MTIME) {
		inode->i_mtime = iattr->ia_mtime;
		ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
		ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
	}

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	XFS_STATS_INC(xs_ig_attrchg);

	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp, 0);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	/*
	 * Release any dquot(s) the inode had kept before chown.
	 */
	xfs_qm_dqrele(olddquot1);
	xfs_qm_dqrele(olddquot2);
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);

	if (error)
		return XFS_ERROR(error);

	/*
	 * XXX(hch): Updating the ACL entries is not atomic vs the i_mode
	 * 	     update.  We could avoid this with linked transactions
	 * 	     and passing down the transaction pointer all the way
	 * 	     to attr_set.  No previous user of the generic
	 * 	     Posix ACL code seems to care about this issue either.
	 */
	if ((mask & ATTR_MODE) && !(flags & XFS_ATTR_NOACL)) {
		error = -xfs_acl_chmod(inode);
		if (error)
			return XFS_ERROR(error);
	}

	return 0;

out_trans_cancel:
	xfs_trans_cancel(tp, 0);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_dqrele:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	return error;
}

/*
 * Truncate file.  Must have write permission and not be a directory.
 */
int
xfs_setattr_size(
	struct xfs_inode	*ip,
	struct iattr		*iattr,
	int			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	int			mask = iattr->ia_valid;
	xfs_off_t		oldsize, newsize;
	struct xfs_trans	*tp;
	int			error;
	uint			lock_flags;
	uint			commit_flags = 0;

	trace_xfs_setattr(ip);

	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return XFS_ERROR(EROFS);

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	error = -inode_change_ok(inode, iattr);
	if (error)
		return XFS_ERROR(error);

	ASSERT(S_ISREG(ip->i_d.di_mode));
	ASSERT((mask & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
			ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);

	lock_flags = XFS_ILOCK_EXCL;
	if (!(flags & XFS_ATTR_NOLOCK))
		lock_flags |= XFS_IOLOCK_EXCL;
	xfs_ilock(ip, lock_flags);

	oldsize = inode->i_size;
	newsize = iattr->ia_size;

	/*
	 * Short circuit the truncate case for zero length files.
	 */
	if (newsize == 0 && oldsize == 0 && ip->i_d.di_nextents == 0) {
		if (!(mask & (ATTR_CTIME|ATTR_MTIME)))
			goto out_unlock;

		/*
		 * Use the regular setattr path to update the timestamps.
		 */
		xfs_iunlock(ip, lock_flags);
		iattr->ia_valid &= ~ATTR_SIZE;
		return xfs_setattr_nonsize(ip, iattr, 0);
	}

	/*
	 * Make sure that the dquots are attached to the inode.
	 */
	error = xfs_qm_dqattach_locked(ip, 0);
	if (error)
		goto out_unlock;

	/*
	 * Now we can make the changes.  Before we join the inode to the
	 * transaction, take care of the part of the truncation that must be
	 * done without the inode lock.  This needs to be done before joining
	 * the inode to the transaction, because the inode cannot be unlocked
	 * once it is a part of the transaction.
	 */
	if (newsize > oldsize) {
		/*
		 * Do the first part of growing a file: zero any data in the
		 * last block that is beyond the old EOF.  We need to do this
		 * before the inode is joined to the transaction to modify
		 * i_size.
		 */
		error = xfs_zero_eof(ip, newsize, oldsize);
		if (error)
			goto out_unlock;
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	lock_flags &= ~XFS_ILOCK_EXCL;

	/*
	 * We are going to log the inode size change in this transaction so
	 * any previous writes that are beyond the on disk EOF and the new
	 * EOF that have not been written out need to be written here.  If we
	 * do not write the data out, we expose ourselves to the null files
	 * problem.
	 *
	 * Only flush from the on disk size to the smaller of the in memory
	 * file size or the new size as that's the range we really care about
	 * here and prevents waiting for other data not within the range we
	 * care about here.
	 */
	if (oldsize != ip->i_d.di_size && newsize > ip->i_d.di_size) {
		error = xfs_flush_pages(ip, ip->i_d.di_size, newsize, 0,
					FI_NONE);
		if (error)
			goto out_unlock;
	}

	/*
	 * Wait for all direct I/O to complete.
	 */
	inode_dio_wait(inode);

	error = -block_truncate_page(inode->i_mapping, newsize, xfs_get_blocks);
	if (error)
		goto out_unlock;

	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
				  XFS_TRANS_PERM_LOG_RES,
				  XFS_ITRUNCATE_LOG_COUNT);
	if (error)
		goto out_trans_cancel;

	truncate_setsize(inode, newsize);

	commit_flags = XFS_TRANS_RELEASE_LOG_RES;
	lock_flags |= XFS_ILOCK_EXCL;

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Only change the c/mtime if we are changing the size or we are
	 * explicitly asked to change it.  This handles the semantic difference
	 * between truncate() and ftruncate() as implemented in the VFS.
	 *
	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
	 * special case where we need to update the times despite not having
	 * these flags set.  For all other operations the VFS set these flags
	 * explicitly if it wants a timestamp update.
	 */
	if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME)))) {
		iattr->ia_ctime = iattr->ia_mtime =
			current_fs_time(inode->i_sb);
		mask |= ATTR_CTIME | ATTR_MTIME;
	}

	/*
	 * The first thing we do is set the size to new_size permanently on
	 * disk.  This way we don't have to worry about anyone ever being able
	 * to look at the data being freed even in the face of a crash.
	 * What we're getting around here is the case where we free a block, it
	 * is allocated to another file, it is written to, and then we crash.
	 * If the new data gets written to the file but the log buffers
	 * containing the free and reallocation don't, then we'd end up with
	 * garbage in the blocks being freed.  As long as we make the new size
	 * permanent before actually freeing any blocks it doesn't matter if
	 * they get written to.
	 */
	ip->i_d.di_size = newsize;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (newsize <= oldsize) {
		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, newsize);
		if (error)
			goto out_trans_abort;

		/*
		 * Truncated "down", so we're removing references to old data
		 * here - if we delay flushing for a long time, we expose
		 * ourselves unduly to the notorious NULL files problem.  So,
		 * we mark this inode and flush it when the file is closed,
		 * and do not wait the usual (long) time for writeout.
		 */
		xfs_iflags_set(ip, XFS_ITRUNCATED);
	}

	/*
	 * Change file access modes.
	 */
	if (mask & ATTR_MODE)
		xfs_setattr_mode(tp, ip, iattr);

	if (mask & ATTR_CTIME) {
		inode->i_ctime = iattr->ia_ctime;
		ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
		ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec;
	}
	if (mask & ATTR_MTIME) {
		inode->i_mtime = iattr->ia_mtime;
		ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
		ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
	}

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	XFS_STATS_INC(xs_ig_attrchg);

	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
out_unlock:
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
	return error;

out_trans_abort:
	commit_flags |= XFS_TRANS_ABORT;
out_trans_cancel:
	xfs_trans_cancel(tp, commit_flags);
	goto out_unlock;
}

/*
 * ->setattr: dispatch to the size or non-size path; XFS errors are
 * positive, so negate for the VFS.
 */
STATIC int
xfs_vn_setattr(
	struct dentry	*dentry,
	struct iattr	*iattr)
{
	if (iattr->ia_valid & ATTR_SIZE)
		return -xfs_setattr_size(XFS_I(dentry->d_inode), iattr, 0);
	return -xfs_setattr_nonsize(XFS_I(dentry->d_inode), iattr, 0);
}

#define XFS_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)

/*
 * Call fiemap helper to fill in user data.
 * Returns positive errors to xfs_getbmap.
*/ STATIC int xfs_fiemap_format( void **arg, struct getbmapx *bmv, int *full) { int error; struct fiemap_extent_info *fieinfo = *arg; u32 fiemap_flags = 0; u64 logical, physical, length; /* Do nothing for a hole */ if (bmv->bmv_block == -1LL) return 0; logical = BBTOB(bmv->bmv_offset); physical = BBTOB(bmv->bmv_block); length = BBTOB(bmv->bmv_length); if (bmv->bmv_oflags & BMV_OF_PREALLOC) fiemap_flags |= FIEMAP_EXTENT_UNWRITTEN; else if (bmv->bmv_oflags & BMV_OF_DELALLOC) { fiemap_flags |= FIEMAP_EXTENT_DELALLOC; physical = 0; /* no block yet */ } if (bmv->bmv_oflags & BMV_OF_LAST) fiemap_flags |= FIEMAP_EXTENT_LAST; error = fiemap_fill_next_extent(fieinfo, logical, physical, length, fiemap_flags); if (error > 0) { error = 0; *full = 1; /* user array now full */ } return -error; } STATIC int xfs_vn_fiemap( struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 length) { xfs_inode_t *ip = XFS_I(inode); struct getbmapx bm; int error; error = fiemap_check_flags(fieinfo, XFS_FIEMAP_FLAGS); if (error) return error; /* Set up bmap header for xfs internal routine */ bm.bmv_offset = BTOBB(start); /* Special case for whole file */ if (length == FIEMAP_MAX_OFFSET) bm.bmv_length = -1LL; else bm.bmv_length = BTOBB(length); /* We add one because in getbmap world count includes the header */ bm.bmv_count = !fieinfo->fi_extents_max ? 
MAXEXTNUM : fieinfo->fi_extents_max + 1; bm.bmv_count = min_t(__s32, bm.bmv_count, (PAGE_SIZE * 16 / sizeof(struct getbmapx))); bm.bmv_iflags = BMV_IF_PREALLOC | BMV_IF_NO_HOLES; if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) bm.bmv_iflags |= BMV_IF_ATTRFORK; if (!(fieinfo->fi_flags & FIEMAP_FLAG_SYNC)) bm.bmv_iflags |= BMV_IF_DELALLOC; error = xfs_getbmap(ip, &bm, xfs_fiemap_format, fieinfo); if (error) return -error; return 0; } static const struct inode_operations xfs_inode_operations = { .get_acl = xfs_get_acl, .getattr = xfs_vn_getattr, .setattr = xfs_vn_setattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, .removexattr = generic_removexattr, .listxattr = xfs_vn_listxattr, .fiemap = xfs_vn_fiemap, }; static const struct inode_operations xfs_dir_inode_operations = { .create = xfs_vn_create, .lookup = xfs_vn_lookup, .link = xfs_vn_link, .unlink = xfs_vn_unlink, .symlink = xfs_vn_symlink, .mkdir = xfs_vn_mkdir, /* * Yes, XFS uses the same method for rmdir and unlink. * * There are some subtile differences deeper in the code, * but we use S_ISDIR to check for those. */ .rmdir = xfs_vn_unlink, .mknod = xfs_vn_mknod, .rename = xfs_vn_rename, .get_acl = xfs_get_acl, .getattr = xfs_vn_getattr, .setattr = xfs_vn_setattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, .removexattr = generic_removexattr, .listxattr = xfs_vn_listxattr, }; static const struct inode_operations xfs_dir_ci_inode_operations = { .create = xfs_vn_create, .lookup = xfs_vn_ci_lookup, .link = xfs_vn_link, .unlink = xfs_vn_unlink, .symlink = xfs_vn_symlink, .mkdir = xfs_vn_mkdir, /* * Yes, XFS uses the same method for rmdir and unlink. * * There are some subtile differences deeper in the code, * but we use S_ISDIR to check for those. 
*/ .rmdir = xfs_vn_unlink, .mknod = xfs_vn_mknod, .rename = xfs_vn_rename, .get_acl = xfs_get_acl, .getattr = xfs_vn_getattr, .setattr = xfs_vn_setattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, .removexattr = generic_removexattr, .listxattr = xfs_vn_listxattr, }; static const struct inode_operations xfs_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = xfs_vn_follow_link, .put_link = xfs_vn_put_link, .get_acl = xfs_get_acl, .getattr = xfs_vn_getattr, .setattr = xfs_vn_setattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, .removexattr = generic_removexattr, .listxattr = xfs_vn_listxattr, }; STATIC void xfs_diflags_to_iflags( struct inode *inode, struct xfs_inode *ip) { if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE) inode->i_flags |= S_IMMUTABLE; else inode->i_flags &= ~S_IMMUTABLE; if (ip->i_d.di_flags & XFS_DIFLAG_APPEND) inode->i_flags |= S_APPEND; else inode->i_flags &= ~S_APPEND; if (ip->i_d.di_flags & XFS_DIFLAG_SYNC) inode->i_flags |= S_SYNC; else inode->i_flags &= ~S_SYNC; if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME) inode->i_flags |= S_NOATIME; else inode->i_flags &= ~S_NOATIME; } /* * Initialize the Linux inode, set up the operation vectors and * unlock the inode. * * When reading existing inodes from disk this is called directly * from xfs_iget, when creating a new inode it is called from * xfs_ialloc after setting up the inode. * * We are always called with an uninitialised linux inode here. * We need to initialise the necessary fields and take a reference * on it. 
*/ void xfs_setup_inode( struct xfs_inode *ip) { struct inode *inode = &ip->i_vnode; inode->i_ino = ip->i_ino; inode->i_state = I_NEW; inode_sb_list_add(inode); /* make the inode look hashed for the writeback code */ hlist_add_fake(&inode->i_hash); inode->i_mode = ip->i_d.di_mode; set_nlink(inode, ip->i_d.di_nlink); inode->i_uid = ip->i_d.di_uid; inode->i_gid = ip->i_d.di_gid; switch (inode->i_mode & S_IFMT) { case S_IFBLK: case S_IFCHR: inode->i_rdev = MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff, sysv_minor(ip->i_df.if_u2.if_rdev)); break; default: inode->i_rdev = 0; break; } inode->i_generation = ip->i_d.di_gen; i_size_write(inode, ip->i_d.di_size); inode->i_atime.tv_sec = ip->i_d.di_atime.t_sec; inode->i_atime.tv_nsec = ip->i_d.di_atime.t_nsec; inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec; inode->i_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec; inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec; inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec; xfs_diflags_to_iflags(inode, ip); switch (inode->i_mode & S_IFMT) { case S_IFREG: inode->i_op = &xfs_inode_operations; inode->i_fop = &xfs_file_operations; inode->i_mapping->a_ops = &xfs_address_space_operations; break; case S_IFDIR: if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb)) inode->i_op = &xfs_dir_ci_inode_operations; else inode->i_op = &xfs_dir_inode_operations; inode->i_fop = &xfs_dir_file_operations; break; case S_IFLNK: inode->i_op = &xfs_symlink_inode_operations; if (!(ip->i_df.if_flags & XFS_IFINLINE)) inode->i_mapping->a_ops = &xfs_address_space_operations; break; default: inode->i_op = &xfs_inode_operations; init_special_inode(inode, inode->i_mode, inode->i_rdev); break; } /* * If there is no attribute fork no ACL can exist on this inode, * and it can't have any file capabilities attached to it either. */ if (!XFS_IFORK_Q(ip)) { inode_has_no_xattr(inode); cache_no_acl(inode); } xfs_iflags_clear(ip, XFS_INEW); barrier(); unlock_new_inode(inode); }
gpl-2.0
cmenard/android_kernel_samsung_espresso10
arch/avr32/mach-at32ap/clock.c
2733
6426
/* * Clock management for AT32AP CPUs * * Copyright (C) 2006 Atmel Corporation * * Based on arch/arm/mach-at91/clock.c * Copyright (C) 2005 David Brownell * Copyright (C) 2005 Ivan Kokshaysky * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/clk.h> #include <linux/err.h> #include <linux/device.h> #include <linux/string.h> #include <linux/list.h> #include <mach/chip.h> #include "clock.h" /* at32 clock list */ static LIST_HEAD(at32_clock_list); static DEFINE_SPINLOCK(clk_lock); static DEFINE_SPINLOCK(clk_list_lock); void at32_clk_register(struct clk *clk) { spin_lock(&clk_list_lock); /* add the new item to the end of the list */ list_add_tail(&clk->list, &at32_clock_list); spin_unlock(&clk_list_lock); } static struct clk *__clk_get(struct device *dev, const char *id) { struct clk *clk; list_for_each_entry(clk, &at32_clock_list, list) { if (clk->dev == dev && strcmp(id, clk->name) == 0) { return clk; } } return ERR_PTR(-ENOENT); } struct clk *clk_get(struct device *dev, const char *id) { struct clk *clk; spin_lock(&clk_list_lock); clk = __clk_get(dev, id); spin_unlock(&clk_list_lock); return clk; } EXPORT_SYMBOL(clk_get); void clk_put(struct clk *clk) { /* clocks are static for now, we can't free them */ } EXPORT_SYMBOL(clk_put); static void __clk_enable(struct clk *clk) { if (clk->parent) __clk_enable(clk->parent); if (clk->users++ == 0 && clk->mode) clk->mode(clk, 1); } int clk_enable(struct clk *clk) { unsigned long flags; spin_lock_irqsave(&clk_lock, flags); __clk_enable(clk); spin_unlock_irqrestore(&clk_lock, flags); return 0; } EXPORT_SYMBOL(clk_enable); static void __clk_disable(struct clk *clk) { if (clk->users == 0) { printk(KERN_ERR "%s: mismatched disable\n", clk->name); WARN_ON(1); return; } if (--clk->users == 0 && clk->mode) clk->mode(clk, 0); if (clk->parent) __clk_disable(clk->parent); } void 
clk_disable(struct clk *clk) { unsigned long flags; spin_lock_irqsave(&clk_lock, flags); __clk_disable(clk); spin_unlock_irqrestore(&clk_lock, flags); } EXPORT_SYMBOL(clk_disable); unsigned long clk_get_rate(struct clk *clk) { unsigned long flags; unsigned long rate; spin_lock_irqsave(&clk_lock, flags); rate = clk->get_rate(clk); spin_unlock_irqrestore(&clk_lock, flags); return rate; } EXPORT_SYMBOL(clk_get_rate); long clk_round_rate(struct clk *clk, unsigned long rate) { unsigned long flags, actual_rate; if (!clk->set_rate) return -ENOSYS; spin_lock_irqsave(&clk_lock, flags); actual_rate = clk->set_rate(clk, rate, 0); spin_unlock_irqrestore(&clk_lock, flags); return actual_rate; } EXPORT_SYMBOL(clk_round_rate); int clk_set_rate(struct clk *clk, unsigned long rate) { unsigned long flags; long ret; if (!clk->set_rate) return -ENOSYS; spin_lock_irqsave(&clk_lock, flags); ret = clk->set_rate(clk, rate, 1); spin_unlock_irqrestore(&clk_lock, flags); return (ret < 0) ? ret : 0; } EXPORT_SYMBOL(clk_set_rate); int clk_set_parent(struct clk *clk, struct clk *parent) { unsigned long flags; int ret; if (!clk->set_parent) return -ENOSYS; spin_lock_irqsave(&clk_lock, flags); ret = clk->set_parent(clk, parent); spin_unlock_irqrestore(&clk_lock, flags); return ret; } EXPORT_SYMBOL(clk_set_parent); struct clk *clk_get_parent(struct clk *clk) { return clk->parent; } EXPORT_SYMBOL(clk_get_parent); #ifdef CONFIG_DEBUG_FS /* /sys/kernel/debug/at32ap_clk */ #include <linux/io.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include "pm.h" #define NEST_DELTA 2 #define NEST_MAX 6 struct clkinf { struct seq_file *s; unsigned nest; }; static void dump_clock(struct clk *parent, struct clkinf *r) { unsigned nest = r->nest; char buf[16 + NEST_MAX]; struct clk *clk; unsigned i; /* skip clocks coupled to devices that aren't registered */ if (parent->dev && !dev_name(parent->dev) && !parent->users) return; /* <nest spaces> name <pad to end> */ memset(buf, ' ', sizeof(buf) - 1); 
buf[sizeof(buf) - 1] = 0; i = strlen(parent->name); memcpy(buf + nest, parent->name, min(i, (unsigned)(sizeof(buf) - 1 - nest))); seq_printf(r->s, "%s%c users=%2d %-3s %9ld Hz", buf, parent->set_parent ? '*' : ' ', parent->users, parent->users ? "on" : "off", /* NOTE: not-paranoid!! */ clk_get_rate(parent)); if (parent->dev) seq_printf(r->s, ", for %s", dev_name(parent->dev)); seq_printf(r->s, "\n"); /* cost of this scan is small, but not linear... */ r->nest = nest + NEST_DELTA; list_for_each_entry(clk, &at32_clock_list, list) { if (clk->parent == parent) dump_clock(clk, r); } r->nest = nest; } static int clk_show(struct seq_file *s, void *unused) { struct clkinf r; int i; struct clk *clk; /* show all the power manager registers */ seq_printf(s, "MCCTRL = %8x\n", pm_readl(MCCTRL)); seq_printf(s, "CKSEL = %8x\n", pm_readl(CKSEL)); seq_printf(s, "CPUMASK = %8x\n", pm_readl(CPU_MASK)); seq_printf(s, "HSBMASK = %8x\n", pm_readl(HSB_MASK)); seq_printf(s, "PBAMASK = %8x\n", pm_readl(PBA_MASK)); seq_printf(s, "PBBMASK = %8x\n", pm_readl(PBB_MASK)); seq_printf(s, "PLL0 = %8x\n", pm_readl(PLL0)); seq_printf(s, "PLL1 = %8x\n", pm_readl(PLL1)); seq_printf(s, "IMR = %8x\n", pm_readl(IMR)); for (i = 0; i < 8; i++) { if (i == 5) continue; seq_printf(s, "GCCTRL%d = %8x\n", i, pm_readl(GCCTRL(i))); } seq_printf(s, "\n"); r.s = s; r.nest = 0; /* protected from changes on the list while dumping */ spin_lock(&clk_list_lock); /* show clock tree as derived from the three oscillators */ clk = __clk_get(NULL, "osc32k"); dump_clock(clk, &r); clk_put(clk); clk = __clk_get(NULL, "osc0"); dump_clock(clk, &r); clk_put(clk); clk = __clk_get(NULL, "osc1"); dump_clock(clk, &r); clk_put(clk); spin_unlock(&clk_list_lock); return 0; } static int clk_open(struct inode *inode, struct file *file) { return single_open(file, clk_show, NULL); } static const struct file_operations clk_operations = { .open = clk_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init 
clk_debugfs_init(void) { (void) debugfs_create_file("at32ap_clk", S_IFREG | S_IRUGO, NULL, NULL, &clk_operations); return 0; } postcore_initcall(clk_debugfs_init); #endif
gpl-2.0
jied83/kernel_presto
drivers/w1/slaves/w1_smem.c
4525
1792
/* * w1_smem.c * * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> * * * This program is free software; you can redistribute it and/or modify * it under the smems of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <asm/types.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/device.h> #include <linux/types.h> #include "../w1.h" #include "../w1_int.h" #include "../w1_family.h" MODULE_LICENSE("GPL"); MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, 64bit memory family."); static struct w1_family w1_smem_family_01 = { .fid = W1_FAMILY_SMEM_01, }; static struct w1_family w1_smem_family_81 = { .fid = W1_FAMILY_SMEM_81, }; static int __init w1_smem_init(void) { int err; err = w1_register_family(&w1_smem_family_01); if (err) return err; err = w1_register_family(&w1_smem_family_81); if (err) { w1_unregister_family(&w1_smem_family_01); return err; } return 0; } static void __exit w1_smem_fini(void) { w1_unregister_family(&w1_smem_family_01); w1_unregister_family(&w1_smem_family_81); } module_init(w1_smem_init); module_exit(w1_smem_fini);
gpl-2.0
jmztaylor/android_kernel_htc_a5dug
security/smack/smackfs.c
4781
37449
/* * Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 2. * * Authors: * Casey Schaufler <casey@schaufler-ca.com> * Ahmed S. Darwish <darwish.07@gmail.com> * * Special thanks to the authors of selinuxfs. * * Karl MacMillan <kmacmillan@tresys.com> * James Morris <jmorris@redhat.com> * */ #include <linux/kernel.h> #include <linux/vmalloc.h> #include <linux/security.h> #include <linux/mutex.h> #include <linux/slab.h> #include <net/net_namespace.h> #include <net/netlabel.h> #include <net/cipso_ipv4.h> #include <linux/seq_file.h> #include <linux/ctype.h> #include <linux/audit.h> #include "smack.h" /* * smackfs pseudo filesystem. */ enum smk_inos { SMK_ROOT_INO = 2, SMK_LOAD = 3, /* load policy */ SMK_CIPSO = 4, /* load label -> CIPSO mapping */ SMK_DOI = 5, /* CIPSO DOI */ SMK_DIRECT = 6, /* CIPSO level indicating direct label */ SMK_AMBIENT = 7, /* internet ambient label */ SMK_NETLBLADDR = 8, /* single label hosts */ SMK_ONLYCAP = 9, /* the only "capable" label */ SMK_LOGGING = 10, /* logging */ SMK_LOAD_SELF = 11, /* task specific rules */ SMK_ACCESSES = 12, /* access policy */ }; /* * List locks */ static DEFINE_MUTEX(smack_list_lock); static DEFINE_MUTEX(smack_cipso_lock); static DEFINE_MUTEX(smack_ambient_lock); static DEFINE_MUTEX(smk_netlbladdr_lock); /* * This is the "ambient" label for network traffic. * If it isn't somehow marked, use this. * It can be reset via smackfs/ambient */ char *smack_net_ambient = smack_known_floor.smk_known; /* * This is the level in a CIPSO header that indicates a * smack label is contained directly in the category set. 
* It can be reset via smackfs/direct */ int smack_cipso_direct = SMACK_CIPSO_DIRECT_DEFAULT; /* * Unless a process is running with this label even * having CAP_MAC_OVERRIDE isn't enough to grant * privilege to violate MAC policy. If no label is * designated (the NULL case) capabilities apply to * everyone. It is expected that the hat (^) label * will be used if any label is used. */ char *smack_onlycap; /* * Certain IP addresses may be designated as single label hosts. * Packets are sent there unlabeled, but only from tasks that * can write to the specified label. */ LIST_HEAD(smk_netlbladdr_list); /* * Rule lists are maintained for each label. * This master list is just for reading /smack/load. */ struct smack_master_list { struct list_head list; struct smack_rule *smk_rule; }; LIST_HEAD(smack_rule_list); static int smk_cipso_doi_value = SMACK_CIPSO_DOI_DEFAULT; const char *smack_cipso_option = SMACK_CIPSO_OPTION; /* * Values for parsing cipso rules * SMK_DIGITLEN: Length of a digit field in a rule. * SMK_CIPSOMIN: Minimum possible cipso rule length. * SMK_CIPSOMAX: Maximum possible cipso rule length. 
*/ #define SMK_DIGITLEN 4 #define SMK_CIPSOMIN (SMK_LABELLEN + 2 * SMK_DIGITLEN) #define SMK_CIPSOMAX (SMK_CIPSOMIN + SMACK_CIPSO_MAXCATNUM * SMK_DIGITLEN) /* * Values for parsing MAC rules * SMK_ACCESS: Maximum possible combination of access permissions * SMK_ACCESSLEN: Maximum length for a rule access field * SMK_LOADLEN: Smack rule length */ #define SMK_OACCESS "rwxa" #define SMK_ACCESS "rwxat" #define SMK_OACCESSLEN (sizeof(SMK_OACCESS) - 1) #define SMK_ACCESSLEN (sizeof(SMK_ACCESS) - 1) #define SMK_OLOADLEN (SMK_LABELLEN + SMK_LABELLEN + SMK_OACCESSLEN) #define SMK_LOADLEN (SMK_LABELLEN + SMK_LABELLEN + SMK_ACCESSLEN) /** * smk_netlabel_audit_set - fill a netlbl_audit struct * @nap: structure to fill */ static void smk_netlabel_audit_set(struct netlbl_audit *nap) { nap->loginuid = audit_get_loginuid(current); nap->sessionid = audit_get_sessionid(current); nap->secid = smack_to_secid(smk_of_current()); } /* * Values for parsing single label host rules * "1.2.3.4 X" * "192.168.138.129/32 abcdefghijklmnopqrstuvw" */ #define SMK_NETLBLADDRMIN 9 #define SMK_NETLBLADDRMAX 42 /** * smk_set_access - add a rule to the rule list * @srp: the new rule to add * @rule_list: the list of rules * @rule_lock: the rule list lock * * Looks through the current subject/object/access list for * the subject/object pair and replaces the access that was * there. If the pair isn't found add it with the specified * access. * * Returns 1 if a rule was found to exist already, 0 if it is new * Returns 0 if nothing goes wrong or -ENOMEM if it fails * during the allocation of the new pair to add. 
*/ static int smk_set_access(struct smack_rule *srp, struct list_head *rule_list, struct mutex *rule_lock) { struct smack_rule *sp; int found = 0; mutex_lock(rule_lock); /* * Because the object label is less likely to match * than the subject label check it first */ list_for_each_entry_rcu(sp, rule_list, list) { if (sp->smk_object == srp->smk_object && sp->smk_subject == srp->smk_subject) { found = 1; sp->smk_access = srp->smk_access; break; } } if (found == 0) list_add_rcu(&srp->list, rule_list); mutex_unlock(rule_lock); return found; } /** * smk_parse_rule - parse Smack rule from load string * @data: string to be parsed whose size is SMK_LOADLEN * @rule: Smack rule * @import: if non-zero, import labels */ static int smk_parse_rule(const char *data, struct smack_rule *rule, int import) { char smack[SMK_LABELLEN]; struct smack_known *skp; if (import) { rule->smk_subject = smk_import(data, 0); if (rule->smk_subject == NULL) return -1; rule->smk_object = smk_import(data + SMK_LABELLEN, 0); if (rule->smk_object == NULL) return -1; } else { smk_parse_smack(data, 0, smack); skp = smk_find_entry(smack); if (skp == NULL) return -1; rule->smk_subject = skp->smk_known; smk_parse_smack(data + SMK_LABELLEN, 0, smack); skp = smk_find_entry(smack); if (skp == NULL) return -1; rule->smk_object = skp->smk_known; } rule->smk_access = 0; switch (data[SMK_LABELLEN + SMK_LABELLEN]) { case '-': break; case 'r': case 'R': rule->smk_access |= MAY_READ; break; default: return -1; } switch (data[SMK_LABELLEN + SMK_LABELLEN + 1]) { case '-': break; case 'w': case 'W': rule->smk_access |= MAY_WRITE; break; default: return -1; } switch (data[SMK_LABELLEN + SMK_LABELLEN + 2]) { case '-': break; case 'x': case 'X': rule->smk_access |= MAY_EXEC; break; default: return -1; } switch (data[SMK_LABELLEN + SMK_LABELLEN + 3]) { case '-': break; case 'a': case 'A': rule->smk_access |= MAY_APPEND; break; default: return -1; } switch (data[SMK_LABELLEN + SMK_LABELLEN + 4]) { case '-': break; case 't': 
case 'T': rule->smk_access |= MAY_TRANSMUTE; break; default: return -1; } return 0; } /** * smk_write_load_list - write() for any /smack/load * @file: file pointer, not actually used * @buf: where to get the data from * @count: bytes sent * @ppos: where to start - must be 0 * @rule_list: the list of rules to write to * @rule_lock: lock for the rule list * * Get one smack access rule from above. * The format is exactly: * char subject[SMK_LABELLEN] * char object[SMK_LABELLEN] * char access[SMK_ACCESSLEN] * * writes must be SMK_LABELLEN+SMK_LABELLEN+SMK_ACCESSLEN bytes. */ static ssize_t smk_write_load_list(struct file *file, const char __user *buf, size_t count, loff_t *ppos, struct list_head *rule_list, struct mutex *rule_lock) { struct smack_master_list *smlp; struct smack_known *skp; struct smack_rule *rule; char *data; int rc = -EINVAL; int load = 0; /* * No partial writes. * Enough data must be present. */ if (*ppos != 0) return -EINVAL; /* * Minor hack for backward compatibility */ if (count < (SMK_OLOADLEN) || count > SMK_LOADLEN) return -EINVAL; data = kzalloc(SMK_LOADLEN, GFP_KERNEL); if (data == NULL) return -ENOMEM; if (copy_from_user(data, buf, count) != 0) { rc = -EFAULT; goto out; } /* * More on the minor hack for backward compatibility */ if (count == (SMK_OLOADLEN)) data[SMK_OLOADLEN] = '-'; rule = kzalloc(sizeof(*rule), GFP_KERNEL); if (rule == NULL) { rc = -ENOMEM; goto out; } if (smk_parse_rule(data, rule, 1)) goto out_free_rule; if (rule_list == NULL) { load = 1; skp = smk_find_entry(rule->smk_subject); rule_list = &skp->smk_rules; rule_lock = &skp->smk_rules_lock; } rc = count; /* * If this is "load" as opposed to "load-self" and a new rule * it needs to get added for reporting. * smk_set_access returns true if there was already a rule * for the subject/object pair, and false if it was new. 
*/ if (load && !smk_set_access(rule, rule_list, rule_lock)) { smlp = kzalloc(sizeof(*smlp), GFP_KERNEL); if (smlp != NULL) { smlp->smk_rule = rule; list_add_rcu(&smlp->list, &smack_rule_list); } else rc = -ENOMEM; goto out; } out_free_rule: kfree(rule); out: kfree(data); return rc; } /* * Core logic for smackfs seq list operations. */ static void *smk_seq_start(struct seq_file *s, loff_t *pos, struct list_head *head) { struct list_head *list; /* * This is 0 the first time through. */ if (s->index == 0) s->private = head; if (s->private == NULL) return NULL; list = s->private; if (list_empty(list)) return NULL; if (s->index == 0) return list->next; return list; } static void *smk_seq_next(struct seq_file *s, void *v, loff_t *pos, struct list_head *head) { struct list_head *list = v; if (list_is_last(list, head)) { s->private = NULL; return NULL; } s->private = list->next; return list->next; } static void smk_seq_stop(struct seq_file *s, void *v) { /* No-op */ } /* * Seq_file read operations for /smack/load */ static void *load_seq_start(struct seq_file *s, loff_t *pos) { return smk_seq_start(s, pos, &smack_rule_list); } static void *load_seq_next(struct seq_file *s, void *v, loff_t *pos) { return smk_seq_next(s, v, pos, &smack_rule_list); } static int load_seq_show(struct seq_file *s, void *v) { struct list_head *list = v; struct smack_master_list *smlp = list_entry(list, struct smack_master_list, list); struct smack_rule *srp = smlp->smk_rule; seq_printf(s, "%s %s", (char *)srp->smk_subject, (char *)srp->smk_object); seq_putc(s, ' '); if (srp->smk_access & MAY_READ) seq_putc(s, 'r'); if (srp->smk_access & MAY_WRITE) seq_putc(s, 'w'); if (srp->smk_access & MAY_EXEC) seq_putc(s, 'x'); if (srp->smk_access & MAY_APPEND) seq_putc(s, 'a'); if (srp->smk_access & MAY_TRANSMUTE) seq_putc(s, 't'); if (srp->smk_access == 0) seq_putc(s, '-'); seq_putc(s, '\n'); return 0; } static const struct seq_operations load_seq_ops = { .start = load_seq_start, .next = load_seq_next, .show 
= load_seq_show, .stop = smk_seq_stop, }; /** * smk_open_load - open() for /smack/load * @inode: inode structure representing file * @file: "load" file pointer * * For reading, use load_seq_* seq_file reading operations. */ static int smk_open_load(struct inode *inode, struct file *file) { return seq_open(file, &load_seq_ops); } /** * smk_write_load - write() for /smack/load * @file: file pointer, not actually used * @buf: where to get the data from * @count: bytes sent * @ppos: where to start - must be 0 * */ static ssize_t smk_write_load(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { /* * Must have privilege. * No partial writes. * Enough data must be present. */ if (!capable(CAP_MAC_ADMIN)) return -EPERM; return smk_write_load_list(file, buf, count, ppos, NULL, NULL); } static const struct file_operations smk_load_ops = { .open = smk_open_load, .read = seq_read, .llseek = seq_lseek, .write = smk_write_load, .release = seq_release, }; /** * smk_cipso_doi - initialize the CIPSO domain */ static void smk_cipso_doi(void) { int rc; struct cipso_v4_doi *doip; struct netlbl_audit nai; smk_netlabel_audit_set(&nai); rc = netlbl_cfg_map_del(NULL, PF_INET, NULL, NULL, &nai); if (rc != 0) printk(KERN_WARNING "%s:%d remove rc = %d\n", __func__, __LINE__, rc); doip = kmalloc(sizeof(struct cipso_v4_doi), GFP_KERNEL); if (doip == NULL) panic("smack: Failed to initialize cipso DOI.\n"); doip->map.std = NULL; doip->doi = smk_cipso_doi_value; doip->type = CIPSO_V4_MAP_PASS; doip->tags[0] = CIPSO_V4_TAG_RBITMAP; for (rc = 1; rc < CIPSO_V4_TAG_MAXCNT; rc++) doip->tags[rc] = CIPSO_V4_TAG_INVALID; rc = netlbl_cfg_cipsov4_add(doip, &nai); if (rc != 0) { printk(KERN_WARNING "%s:%d cipso add rc = %d\n", __func__, __LINE__, rc); kfree(doip); return; } rc = netlbl_cfg_cipsov4_map_add(doip->doi, NULL, NULL, NULL, &nai); if (rc != 0) { printk(KERN_WARNING "%s:%d map add rc = %d\n", __func__, __LINE__, rc); kfree(doip); return; } } /** * smk_unlbl_ambient - 
initialize the unlabeled domain * @oldambient: previous domain string */ static void smk_unlbl_ambient(char *oldambient) { int rc; struct netlbl_audit nai; smk_netlabel_audit_set(&nai); if (oldambient != NULL) { rc = netlbl_cfg_map_del(oldambient, PF_INET, NULL, NULL, &nai); if (rc != 0) printk(KERN_WARNING "%s:%d remove rc = %d\n", __func__, __LINE__, rc); } rc = netlbl_cfg_unlbl_map_add(smack_net_ambient, PF_INET, NULL, NULL, &nai); if (rc != 0) printk(KERN_WARNING "%s:%d add rc = %d\n", __func__, __LINE__, rc); } /* * Seq_file read operations for /smack/cipso */ static void *cipso_seq_start(struct seq_file *s, loff_t *pos) { return smk_seq_start(s, pos, &smack_known_list); } static void *cipso_seq_next(struct seq_file *s, void *v, loff_t *pos) { return smk_seq_next(s, v, pos, &smack_known_list); } /* * Print cipso labels in format: * label level[/cat[,cat]] */ static int cipso_seq_show(struct seq_file *s, void *v) { struct list_head *list = v; struct smack_known *skp = list_entry(list, struct smack_known, list); struct smack_cipso *scp = skp->smk_cipso; char *cbp; char sep = '/'; int cat = 1; int i; unsigned char m; if (scp == NULL) return 0; seq_printf(s, "%s %3d", (char *)&skp->smk_known, scp->smk_level); cbp = scp->smk_catset; for (i = 0; i < SMK_LABELLEN; i++) for (m = 0x80; m != 0; m >>= 1) { if (m & cbp[i]) { seq_printf(s, "%c%d", sep, cat); sep = ','; } cat++; } seq_putc(s, '\n'); return 0; } static const struct seq_operations cipso_seq_ops = { .start = cipso_seq_start, .next = cipso_seq_next, .show = cipso_seq_show, .stop = smk_seq_stop, }; /** * smk_open_cipso - open() for /smack/cipso * @inode: inode structure representing file * @file: "cipso" file pointer * * Connect our cipso_seq_* operations with /smack/cipso * file_operations */ static int smk_open_cipso(struct inode *inode, struct file *file) { return seq_open(file, &cipso_seq_ops); } /** * smk_write_cipso - write() for /smack/cipso * @file: file pointer, not actually used * @buf: where to get 
the data from * @count: bytes sent * @ppos: where to start * * Accepts only one cipso rule per write call. * Returns number of bytes written or error code, as appropriate */ static ssize_t smk_write_cipso(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct smack_known *skp; struct smack_cipso *scp = NULL; char mapcatset[SMK_LABELLEN]; int maplevel; int cat; int catlen; ssize_t rc = -EINVAL; char *data = NULL; char *rule; int ret; int i; /* * Must have privilege. * No partial writes. * Enough data must be present. */ if (!capable(CAP_MAC_ADMIN)) return -EPERM; if (*ppos != 0) return -EINVAL; if (count < SMK_CIPSOMIN || count > SMK_CIPSOMAX) return -EINVAL; data = kzalloc(count + 1, GFP_KERNEL); if (data == NULL) return -ENOMEM; if (copy_from_user(data, buf, count) != 0) { rc = -EFAULT; goto unlockedout; } /* labels cannot begin with a '-' */ if (data[0] == '-') { rc = -EINVAL; goto unlockedout; } data[count] = '\0'; rule = data; /* * Only allow one writer at a time. Writes should be * quite rare and small in any case. 
*/ mutex_lock(&smack_cipso_lock); skp = smk_import_entry(rule, 0); if (skp == NULL) goto out; rule += SMK_LABELLEN; ret = sscanf(rule, "%d", &maplevel); if (ret != 1 || maplevel > SMACK_CIPSO_MAXLEVEL) goto out; rule += SMK_DIGITLEN; ret = sscanf(rule, "%d", &catlen); if (ret != 1 || catlen > SMACK_CIPSO_MAXCATNUM) goto out; if (count != (SMK_CIPSOMIN + catlen * SMK_DIGITLEN)) goto out; memset(mapcatset, 0, sizeof(mapcatset)); for (i = 0; i < catlen; i++) { rule += SMK_DIGITLEN; ret = sscanf(rule, "%d", &cat); if (ret != 1 || cat > SMACK_CIPSO_MAXCATVAL) goto out; smack_catset_bit(cat, mapcatset); } if (skp->smk_cipso == NULL) { scp = kzalloc(sizeof(struct smack_cipso), GFP_KERNEL); if (scp == NULL) { rc = -ENOMEM; goto out; } } spin_lock_bh(&skp->smk_cipsolock); if (scp == NULL) scp = skp->smk_cipso; else skp->smk_cipso = scp; scp->smk_level = maplevel; memcpy(scp->smk_catset, mapcatset, sizeof(mapcatset)); spin_unlock_bh(&skp->smk_cipsolock); rc = count; out: mutex_unlock(&smack_cipso_lock); unlockedout: kfree(data); return rc; } static const struct file_operations smk_cipso_ops = { .open = smk_open_cipso, .read = seq_read, .llseek = seq_lseek, .write = smk_write_cipso, .release = seq_release, }; /* * Seq_file read operations for /smack/netlabel */ static void *netlbladdr_seq_start(struct seq_file *s, loff_t *pos) { return smk_seq_start(s, pos, &smk_netlbladdr_list); } static void *netlbladdr_seq_next(struct seq_file *s, void *v, loff_t *pos) { return smk_seq_next(s, v, pos, &smk_netlbladdr_list); } #define BEBITS (sizeof(__be32) * 8) /* * Print host/label pairs */ static int netlbladdr_seq_show(struct seq_file *s, void *v) { struct list_head *list = v; struct smk_netlbladdr *skp = list_entry(list, struct smk_netlbladdr, list); unsigned char *hp = (char *) &skp->smk_host.sin_addr.s_addr; int maskn; u32 temp_mask = be32_to_cpu(skp->smk_mask.s_addr); for (maskn = 0; temp_mask; temp_mask <<= 1, maskn++); seq_printf(s, "%u.%u.%u.%u/%d %s\n", hp[0], hp[1], hp[2], 
hp[3], maskn, skp->smk_label); return 0; } static const struct seq_operations netlbladdr_seq_ops = { .start = netlbladdr_seq_start, .next = netlbladdr_seq_next, .show = netlbladdr_seq_show, .stop = smk_seq_stop, }; /** * smk_open_netlbladdr - open() for /smack/netlabel * @inode: inode structure representing file * @file: "netlabel" file pointer * * Connect our netlbladdr_seq_* operations with /smack/netlabel * file_operations */ static int smk_open_netlbladdr(struct inode *inode, struct file *file) { return seq_open(file, &netlbladdr_seq_ops); } /** * smk_netlbladdr_insert * @new : netlabel to insert * * This helper insert netlabel in the smack_netlbladdrs list * sorted by netmask length (longest to smallest) * locked by &smk_netlbladdr_lock in smk_write_netlbladdr * */ static void smk_netlbladdr_insert(struct smk_netlbladdr *new) { struct smk_netlbladdr *m, *m_next; if (list_empty(&smk_netlbladdr_list)) { list_add_rcu(&new->list, &smk_netlbladdr_list); return; } m = list_entry_rcu(smk_netlbladdr_list.next, struct smk_netlbladdr, list); /* the comparison '>' is a bit hacky, but works */ if (new->smk_mask.s_addr > m->smk_mask.s_addr) { list_add_rcu(&new->list, &smk_netlbladdr_list); return; } list_for_each_entry_rcu(m, &smk_netlbladdr_list, list) { if (list_is_last(&m->list, &smk_netlbladdr_list)) { list_add_rcu(&new->list, &m->list); return; } m_next = list_entry_rcu(m->list.next, struct smk_netlbladdr, list); if (new->smk_mask.s_addr > m_next->smk_mask.s_addr) { list_add_rcu(&new->list, &m->list); return; } } } /** * smk_write_netlbladdr - write() for /smack/netlabel * @file: file pointer, not actually used * @buf: where to get the data from * @count: bytes sent * @ppos: where to start * * Accepts only one netlbladdr per write call. 
* Returns number of bytes written or error code, as appropriate */ static ssize_t smk_write_netlbladdr(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct smk_netlbladdr *skp; struct sockaddr_in newname; char smack[SMK_LABELLEN]; char *sp; char data[SMK_NETLBLADDRMAX + 1]; char *host = (char *)&newname.sin_addr.s_addr; int rc; struct netlbl_audit audit_info; struct in_addr mask; unsigned int m; int found; u32 mask_bits = (1<<31); __be32 nsa; u32 temp_mask; /* * Must have privilege. * No partial writes. * Enough data must be present. * "<addr/mask, as a.b.c.d/e><space><label>" * "<addr, as a.b.c.d><space><label>" */ if (!capable(CAP_MAC_ADMIN)) return -EPERM; if (*ppos != 0) return -EINVAL; if (count < SMK_NETLBLADDRMIN || count > SMK_NETLBLADDRMAX) return -EINVAL; if (copy_from_user(data, buf, count) != 0) return -EFAULT; data[count] = '\0'; rc = sscanf(data, "%hhd.%hhd.%hhd.%hhd/%d %s", &host[0], &host[1], &host[2], &host[3], &m, smack); if (rc != 6) { rc = sscanf(data, "%hhd.%hhd.%hhd.%hhd %s", &host[0], &host[1], &host[2], &host[3], smack); if (rc != 5) return -EINVAL; m = BEBITS; } if (m > BEBITS) return -EINVAL; /* if smack begins with '-', its an option, don't import it */ if (smack[0] != '-') { sp = smk_import(smack, 0); if (sp == NULL) return -EINVAL; } else { /* check known options */ if (strcmp(smack, smack_cipso_option) == 0) sp = (char *)smack_cipso_option; else return -EINVAL; } for (temp_mask = 0; m > 0; m--) { temp_mask |= mask_bits; mask_bits >>= 1; } mask.s_addr = cpu_to_be32(temp_mask); newname.sin_addr.s_addr &= mask.s_addr; /* * Only allow one writer at a time. Writes should be * quite rare and small in any case. 
*/ mutex_lock(&smk_netlbladdr_lock); nsa = newname.sin_addr.s_addr; /* try to find if the prefix is already in the list */ found = 0; list_for_each_entry_rcu(skp, &smk_netlbladdr_list, list) { if (skp->smk_host.sin_addr.s_addr == nsa && skp->smk_mask.s_addr == mask.s_addr) { found = 1; break; } } smk_netlabel_audit_set(&audit_info); if (found == 0) { skp = kzalloc(sizeof(*skp), GFP_KERNEL); if (skp == NULL) rc = -ENOMEM; else { rc = 0; skp->smk_host.sin_addr.s_addr = newname.sin_addr.s_addr; skp->smk_mask.s_addr = mask.s_addr; skp->smk_label = sp; smk_netlbladdr_insert(skp); } } else { /* we delete the unlabeled entry, only if the previous label * wasn't the special CIPSO option */ if (skp->smk_label != smack_cipso_option) rc = netlbl_cfg_unlbl_static_del(&init_net, NULL, &skp->smk_host.sin_addr, &skp->smk_mask, PF_INET, &audit_info); else rc = 0; skp->smk_label = sp; } /* * Now tell netlabel about the single label nature of * this host so that incoming packets get labeled. * but only if we didn't get the special CIPSO option */ if (rc == 0 && sp != smack_cipso_option) rc = netlbl_cfg_unlbl_static_add(&init_net, NULL, &skp->smk_host.sin_addr, &skp->smk_mask, PF_INET, smack_to_secid(skp->smk_label), &audit_info); if (rc == 0) rc = count; mutex_unlock(&smk_netlbladdr_lock); return rc; } static const struct file_operations smk_netlbladdr_ops = { .open = smk_open_netlbladdr, .read = seq_read, .llseek = seq_lseek, .write = smk_write_netlbladdr, .release = seq_release, }; /** * smk_read_doi - read() for /smack/doi * @filp: file pointer, not actually used * @buf: where to put the result * @count: maximum to send along * @ppos: where to start * * Returns number of bytes read or error code, as appropriate */ static ssize_t smk_read_doi(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { char temp[80]; ssize_t rc; if (*ppos != 0) return 0; sprintf(temp, "%d", smk_cipso_doi_value); rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp)); return rc; } 
/** * smk_write_doi - write() for /smack/doi * @file: file pointer, not actually used * @buf: where to get the data from * @count: bytes sent * @ppos: where to start * * Returns number of bytes written or error code, as appropriate */ static ssize_t smk_write_doi(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { char temp[80]; int i; if (!capable(CAP_MAC_ADMIN)) return -EPERM; if (count >= sizeof(temp) || count == 0) return -EINVAL; if (copy_from_user(temp, buf, count) != 0) return -EFAULT; temp[count] = '\0'; if (sscanf(temp, "%d", &i) != 1) return -EINVAL; smk_cipso_doi_value = i; smk_cipso_doi(); return count; } static const struct file_operations smk_doi_ops = { .read = smk_read_doi, .write = smk_write_doi, .llseek = default_llseek, }; /** * smk_read_direct - read() for /smack/direct * @filp: file pointer, not actually used * @buf: where to put the result * @count: maximum to send along * @ppos: where to start * * Returns number of bytes read or error code, as appropriate */ static ssize_t smk_read_direct(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { char temp[80]; ssize_t rc; if (*ppos != 0) return 0; sprintf(temp, "%d", smack_cipso_direct); rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp)); return rc; } /** * smk_write_direct - write() for /smack/direct * @file: file pointer, not actually used * @buf: where to get the data from * @count: bytes sent * @ppos: where to start * * Returns number of bytes written or error code, as appropriate */ static ssize_t smk_write_direct(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { char temp[80]; int i; if (!capable(CAP_MAC_ADMIN)) return -EPERM; if (count >= sizeof(temp) || count == 0) return -EINVAL; if (copy_from_user(temp, buf, count) != 0) return -EFAULT; temp[count] = '\0'; if (sscanf(temp, "%d", &i) != 1) return -EINVAL; smack_cipso_direct = i; return count; } static const struct file_operations smk_direct_ops = { .read = 
smk_read_direct, .write = smk_write_direct, .llseek = default_llseek, }; /** * smk_read_ambient - read() for /smack/ambient * @filp: file pointer, not actually used * @buf: where to put the result * @cn: maximum to send along * @ppos: where to start * * Returns number of bytes read or error code, as appropriate */ static ssize_t smk_read_ambient(struct file *filp, char __user *buf, size_t cn, loff_t *ppos) { ssize_t rc; int asize; if (*ppos != 0) return 0; /* * Being careful to avoid a problem in the case where * smack_net_ambient gets changed in midstream. */ mutex_lock(&smack_ambient_lock); asize = strlen(smack_net_ambient) + 1; if (cn >= asize) rc = simple_read_from_buffer(buf, cn, ppos, smack_net_ambient, asize); else rc = -EINVAL; mutex_unlock(&smack_ambient_lock); return rc; } /** * smk_write_ambient - write() for /smack/ambient * @file: file pointer, not actually used * @buf: where to get the data from * @count: bytes sent * @ppos: where to start * * Returns number of bytes written or error code, as appropriate */ static ssize_t smk_write_ambient(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { char in[SMK_LABELLEN]; char *oldambient; char *smack; if (!capable(CAP_MAC_ADMIN)) return -EPERM; if (count >= SMK_LABELLEN) return -EINVAL; if (copy_from_user(in, buf, count) != 0) return -EFAULT; smack = smk_import(in, count); if (smack == NULL) return -EINVAL; mutex_lock(&smack_ambient_lock); oldambient = smack_net_ambient; smack_net_ambient = smack; smk_unlbl_ambient(oldambient); mutex_unlock(&smack_ambient_lock); return count; } static const struct file_operations smk_ambient_ops = { .read = smk_read_ambient, .write = smk_write_ambient, .llseek = default_llseek, }; /** * smk_read_onlycap - read() for /smack/onlycap * @filp: file pointer, not actually used * @buf: where to put the result * @cn: maximum to send along * @ppos: where to start * * Returns number of bytes read or error code, as appropriate */ static ssize_t 
smk_read_onlycap(struct file *filp, char __user *buf, size_t cn, loff_t *ppos) { char *smack = ""; ssize_t rc = -EINVAL; int asize; if (*ppos != 0) return 0; if (smack_onlycap != NULL) smack = smack_onlycap; asize = strlen(smack) + 1; if (cn >= asize) rc = simple_read_from_buffer(buf, cn, ppos, smack, asize); return rc; } /** * smk_write_onlycap - write() for /smack/onlycap * @file: file pointer, not actually used * @buf: where to get the data from * @count: bytes sent * @ppos: where to start * * Returns number of bytes written or error code, as appropriate */ static ssize_t smk_write_onlycap(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { char in[SMK_LABELLEN]; char *sp = smk_of_task(current->cred->security); if (!capable(CAP_MAC_ADMIN)) return -EPERM; /* * This can be done using smk_access() but is done * explicitly for clarity. The smk_access() implementation * would use smk_access(smack_onlycap, MAY_WRITE) */ if (smack_onlycap != NULL && smack_onlycap != sp) return -EPERM; if (count >= SMK_LABELLEN) return -EINVAL; if (copy_from_user(in, buf, count) != 0) return -EFAULT; /* * Should the null string be passed in unset the onlycap value. * This seems like something to be careful with as usually * smk_import only expects to return NULL for errors. It * is usually the case that a nullstring or "\n" would be * bad to pass to smk_import but in fact this is useful here. 
*/ smack_onlycap = smk_import(in, count); return count; } static const struct file_operations smk_onlycap_ops = { .read = smk_read_onlycap, .write = smk_write_onlycap, .llseek = default_llseek, }; /** * smk_read_logging - read() for /smack/logging * @filp: file pointer, not actually used * @buf: where to put the result * @cn: maximum to send along * @ppos: where to start * * Returns number of bytes read or error code, as appropriate */ static ssize_t smk_read_logging(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { char temp[32]; ssize_t rc; if (*ppos != 0) return 0; sprintf(temp, "%d\n", log_policy); rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp)); return rc; } /** * smk_write_logging - write() for /smack/logging * @file: file pointer, not actually used * @buf: where to get the data from * @count: bytes sent * @ppos: where to start * * Returns number of bytes written or error code, as appropriate */ static ssize_t smk_write_logging(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { char temp[32]; int i; if (!capable(CAP_MAC_ADMIN)) return -EPERM; if (count >= sizeof(temp) || count == 0) return -EINVAL; if (copy_from_user(temp, buf, count) != 0) return -EFAULT; temp[count] = '\0'; if (sscanf(temp, "%d", &i) != 1) return -EINVAL; if (i < 0 || i > 3) return -EINVAL; log_policy = i; return count; } static const struct file_operations smk_logging_ops = { .read = smk_read_logging, .write = smk_write_logging, .llseek = default_llseek, }; /* * Seq_file read operations for /smack/load-self */ static void *load_self_seq_start(struct seq_file *s, loff_t *pos) { struct task_smack *tsp = current_security(); return smk_seq_start(s, pos, &tsp->smk_rules); } static void *load_self_seq_next(struct seq_file *s, void *v, loff_t *pos) { struct task_smack *tsp = current_security(); return smk_seq_next(s, v, pos, &tsp->smk_rules); } static int load_self_seq_show(struct seq_file *s, void *v) { struct list_head *list = v; struct 
smack_rule *srp = list_entry(list, struct smack_rule, list); seq_printf(s, "%s %s", (char *)srp->smk_subject, (char *)srp->smk_object); seq_putc(s, ' '); if (srp->smk_access & MAY_READ) seq_putc(s, 'r'); if (srp->smk_access & MAY_WRITE) seq_putc(s, 'w'); if (srp->smk_access & MAY_EXEC) seq_putc(s, 'x'); if (srp->smk_access & MAY_APPEND) seq_putc(s, 'a'); if (srp->smk_access & MAY_TRANSMUTE) seq_putc(s, 't'); if (srp->smk_access == 0) seq_putc(s, '-'); seq_putc(s, '\n'); return 0; } static const struct seq_operations load_self_seq_ops = { .start = load_self_seq_start, .next = load_self_seq_next, .show = load_self_seq_show, .stop = smk_seq_stop, }; /** * smk_open_load_self - open() for /smack/load-self * @inode: inode structure representing file * @file: "load" file pointer * * For reading, use load_seq_* seq_file reading operations. */ static int smk_open_load_self(struct inode *inode, struct file *file) { return seq_open(file, &load_self_seq_ops); } /** * smk_write_load_self - write() for /smack/load-self * @file: file pointer, not actually used * @buf: where to get the data from * @count: bytes sent * @ppos: where to start - must be 0 * */ static ssize_t smk_write_load_self(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct task_smack *tsp = current_security(); return smk_write_load_list(file, buf, count, ppos, &tsp->smk_rules, &tsp->smk_rules_lock); } static const struct file_operations smk_load_self_ops = { .open = smk_open_load_self, .read = seq_read, .llseek = seq_lseek, .write = smk_write_load_self, .release = seq_release, }; /** * smk_write_access - handle access check transaction * @file: file pointer * @buf: data from user space * @count: bytes sent * @ppos: where to start - must be 0 */ static ssize_t smk_write_access(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct smack_rule rule; char *data; int res; data = simple_transaction_get(file, buf, count); if (IS_ERR(data)) return PTR_ERR(data); if 
(count < SMK_LOADLEN || smk_parse_rule(data, &rule, 0)) return -EINVAL; res = smk_access(rule.smk_subject, rule.smk_object, rule.smk_access, NULL); data[0] = res == 0 ? '1' : '0'; data[1] = '\0'; simple_transaction_set(file, 2); return SMK_LOADLEN; } static const struct file_operations smk_access_ops = { .write = smk_write_access, .read = simple_transaction_read, .release = simple_transaction_release, .llseek = generic_file_llseek, }; /** * smk_fill_super - fill the /smackfs superblock * @sb: the empty superblock * @data: unused * @silent: unused * * Fill in the well known entries for /smack * * Returns 0 on success, an error code on failure */ static int smk_fill_super(struct super_block *sb, void *data, int silent) { int rc; struct inode *root_inode; static struct tree_descr smack_files[] = { [SMK_LOAD] = { "load", &smk_load_ops, S_IRUGO|S_IWUSR}, [SMK_CIPSO] = { "cipso", &smk_cipso_ops, S_IRUGO|S_IWUSR}, [SMK_DOI] = { "doi", &smk_doi_ops, S_IRUGO|S_IWUSR}, [SMK_DIRECT] = { "direct", &smk_direct_ops, S_IRUGO|S_IWUSR}, [SMK_AMBIENT] = { "ambient", &smk_ambient_ops, S_IRUGO|S_IWUSR}, [SMK_NETLBLADDR] = { "netlabel", &smk_netlbladdr_ops, S_IRUGO|S_IWUSR}, [SMK_ONLYCAP] = { "onlycap", &smk_onlycap_ops, S_IRUGO|S_IWUSR}, [SMK_LOGGING] = { "logging", &smk_logging_ops, S_IRUGO|S_IWUSR}, [SMK_LOAD_SELF] = { "load-self", &smk_load_self_ops, S_IRUGO|S_IWUGO}, [SMK_ACCESSES] = { "access", &smk_access_ops, S_IRUGO|S_IWUGO}, /* last one */ {""} }; rc = simple_fill_super(sb, SMACK_MAGIC, smack_files); if (rc != 0) { printk(KERN_ERR "%s failed %d while creating inodes\n", __func__, rc); return rc; } root_inode = sb->s_root->d_inode; root_inode->i_security = new_inode_smack(smack_known_floor.smk_known); return 0; } /** * smk_mount - get the smackfs superblock * @fs_type: passed along without comment * @flags: passed along without comment * @dev_name: passed along without comment * @data: passed along without comment * * Just passes everything along. 
* * Returns what the lower level code does. */ static struct dentry *smk_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_single(fs_type, flags, data, smk_fill_super); } static struct file_system_type smk_fs_type = { .name = "smackfs", .mount = smk_mount, .kill_sb = kill_litter_super, }; static struct vfsmount *smackfs_mount; /** * init_smk_fs - get the smackfs superblock * * register the smackfs * * Do not register smackfs if Smack wasn't enabled * on boot. We can not put this method normally under the * smack_init() code path since the security subsystem get * initialized before the vfs caches. * * Returns true if we were not chosen on boot or if * we were chosen and filesystem registration succeeded. */ static int __init init_smk_fs(void) { int err; if (!security_module_enable(&smack_ops)) return 0; err = register_filesystem(&smk_fs_type); if (!err) { smackfs_mount = kern_mount(&smk_fs_type); if (IS_ERR(smackfs_mount)) { printk(KERN_ERR "smackfs: could not mount!\n"); err = PTR_ERR(smackfs_mount); smackfs_mount = NULL; } } smk_cipso_doi(); smk_unlbl_ambient(NULL); return err; } __initcall(init_smk_fs);
gpl-2.0
kennethlyn/enclustra_zynq_linux
arch/ia64/hp/common/aml_nfw.c
4781
5604
/* * OpRegion handler to allow AML to call native firmware * * (c) Copyright 2007 Hewlett-Packard Development Company, L.P. * Bjorn Helgaas <bjorn.helgaas@hp.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This driver implements HP Open Source Review Board proposal 1842, * which was approved on 9/20/2006. * * For technical documentation, see the HP SPPA Firmware EAS, Appendix F. * * ACPI does not define a mechanism for AML methods to call native firmware * interfaces such as PAL or SAL. This OpRegion handler adds such a mechanism. * After the handler is installed, an AML method can call native firmware by * storing the arguments and firmware entry point to specific offsets in the * OpRegion. When AML reads the "return value" offset from the OpRegion, this * handler loads up the arguments, makes the firmware call, and returns the * result. */ #include <linux/module.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> #include <asm/sal.h> MODULE_AUTHOR("Bjorn Helgaas <bjorn.helgaas@hp.com>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("ACPI opregion handler for native firmware calls"); static bool force_register; module_param_named(force, force_register, bool, 0); MODULE_PARM_DESC(force, "Install opregion handler even without HPQ5001 device"); #define AML_NFW_SPACE 0xA1 struct ia64_pdesc { void *ip; void *gp; }; /* * N.B. The layout of this structure is defined in the HP SPPA FW EAS, and * the member offsets are embedded in AML methods. 
*/ struct ia64_nfw_context { u64 arg[8]; struct ia64_sal_retval ret; u64 ip; u64 gp; u64 pad[2]; }; static void *virt_map(u64 address) { if (address & (1UL << 63)) return (void *) (__IA64_UNCACHED_OFFSET | address); return __va(address); } static void aml_nfw_execute(struct ia64_nfw_context *c) { struct ia64_pdesc virt_entry; ia64_sal_handler entry; virt_entry.ip = virt_map(c->ip); virt_entry.gp = virt_map(c->gp); entry = (ia64_sal_handler) &virt_entry; IA64_FW_CALL(entry, c->ret, c->arg[0], c->arg[1], c->arg[2], c->arg[3], c->arg[4], c->arg[5], c->arg[6], c->arg[7]); } static void aml_nfw_read_arg(u8 *offset, u32 bit_width, u64 *value) { switch (bit_width) { case 8: *value = *(u8 *)offset; break; case 16: *value = *(u16 *)offset; break; case 32: *value = *(u32 *)offset; break; case 64: *value = *(u64 *)offset; break; } } static void aml_nfw_write_arg(u8 *offset, u32 bit_width, u64 *value) { switch (bit_width) { case 8: *(u8 *) offset = *value; break; case 16: *(u16 *) offset = *value; break; case 32: *(u32 *) offset = *value; break; case 64: *(u64 *) offset = *value; break; } } static acpi_status aml_nfw_handler(u32 function, acpi_physical_address address, u32 bit_width, u64 *value, void *handler_context, void *region_context) { struct ia64_nfw_context *context = handler_context; u8 *offset = (u8 *) context + address; if (bit_width != 8 && bit_width != 16 && bit_width != 32 && bit_width != 64) return AE_BAD_PARAMETER; if (address + (bit_width >> 3) > sizeof(struct ia64_nfw_context)) return AE_BAD_PARAMETER; switch (function) { case ACPI_READ: if (address == offsetof(struct ia64_nfw_context, ret)) aml_nfw_execute(context); aml_nfw_read_arg(offset, bit_width, value); break; case ACPI_WRITE: aml_nfw_write_arg(offset, bit_width, value); break; } return AE_OK; } static struct ia64_nfw_context global_context; static int global_handler_registered; static int aml_nfw_add_global_handler(void) { acpi_status status; if (global_handler_registered) return 0; status = 
acpi_install_address_space_handler(ACPI_ROOT_OBJECT, AML_NFW_SPACE, aml_nfw_handler, NULL, &global_context); if (ACPI_FAILURE(status)) return -ENODEV; global_handler_registered = 1; printk(KERN_INFO "Global 0x%02X opregion handler registered\n", AML_NFW_SPACE); return 0; } static int aml_nfw_remove_global_handler(void) { acpi_status status; if (!global_handler_registered) return 0; status = acpi_remove_address_space_handler(ACPI_ROOT_OBJECT, AML_NFW_SPACE, aml_nfw_handler); if (ACPI_FAILURE(status)) return -ENODEV; global_handler_registered = 0; printk(KERN_INFO "Global 0x%02X opregion handler removed\n", AML_NFW_SPACE); return 0; } static int aml_nfw_add(struct acpi_device *device) { /* * We would normally allocate a new context structure and install * the address space handler for the specific device we found. * But the HP-UX implementation shares a single global context * and always puts the handler at the root, so we'll do the same. */ return aml_nfw_add_global_handler(); } static int aml_nfw_remove(struct acpi_device *device, int type) { return aml_nfw_remove_global_handler(); } static const struct acpi_device_id aml_nfw_ids[] = { {"HPQ5001", 0}, {"", 0} }; static struct acpi_driver acpi_aml_nfw_driver = { .name = "native firmware", .ids = aml_nfw_ids, .ops = { .add = aml_nfw_add, .remove = aml_nfw_remove, }, }; static int __init aml_nfw_init(void) { int result; if (force_register) aml_nfw_add_global_handler(); result = acpi_bus_register_driver(&acpi_aml_nfw_driver); if (result < 0) { aml_nfw_remove_global_handler(); return result; } return 0; } static void __exit aml_nfw_exit(void) { acpi_bus_unregister_driver(&acpi_aml_nfw_driver); aml_nfw_remove_global_handler(); } module_init(aml_nfw_init); module_exit(aml_nfw_exit);
gpl-2.0
byzvulture/android_kernel_zte_nx503a
arch/mips/alchemy/board-gpr.c
4781
6505
/* * GPR board platform device registration (Au1550) * * Copyright (C) 2010 Wolfgang Grandegger <wg@denx.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/leds.h> #include <linux/gpio.h> #include <linux/i2c.h> #include <linux/i2c-gpio.h> #include <asm/bootinfo.h> #include <asm/reboot.h> #include <asm/mach-au1x00/au1000.h> #include <prom.h> const char *get_system_type(void) { return "GPR"; } void __init prom_init(void) { unsigned char *memsize_str; unsigned long memsize; prom_argc = fw_arg0; prom_argv = (char **)fw_arg1; prom_envp = (char **)fw_arg2; prom_init_cmdline(); memsize_str = prom_getenv("memsize"); if (!memsize_str) memsize = 0x04000000; else strict_strtoul(memsize_str, 0, &memsize); add_memory_region(0, memsize, BOOT_MEM_RAM); } void prom_putchar(unsigned char c) { alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); } static void gpr_reset(char *c) { /* switch System-LED to orange (red# and green# on) */ alchemy_gpio_direction_output(4, 0); alchemy_gpio_direction_output(5, 0); /* trigger watchdog to reset board in 200ms */ printk(KERN_EMERG "Triggering watchdog soft 
reset...\n"); raw_local_irq_disable(); alchemy_gpio_direction_output(1, 0); udelay(1); alchemy_gpio_set_value(1, 1); while (1) cpu_wait(); } static void gpr_power_off(void) { while (1) cpu_wait(); } void __init board_setup(void) { printk(KERN_INFO "Trapeze ITS GPR board\n"); pm_power_off = gpr_power_off; _machine_halt = gpr_power_off; _machine_restart = gpr_reset; /* Enable UART1/3 */ alchemy_uart_enable(AU1000_UART3_PHYS_ADDR); alchemy_uart_enable(AU1000_UART1_PHYS_ADDR); /* Take away Reset of UMTS-card */ alchemy_gpio_direction_output(215, 1); } /* * Watchdog */ static struct resource gpr_wdt_resource[] = { [0] = { .start = 1, .end = 1, .name = "gpr-adm6320-wdt", .flags = IORESOURCE_IRQ, } }; static struct platform_device gpr_wdt_device = { .name = "adm6320-wdt", .id = 0, .num_resources = ARRAY_SIZE(gpr_wdt_resource), .resource = gpr_wdt_resource, }; /* * FLASH * * 0x00000000-0x00200000 : "kernel" * 0x00200000-0x00a00000 : "rootfs" * 0x01d00000-0x01f00000 : "config" * 0x01c00000-0x01d00000 : "yamon" * 0x01d00000-0x01d40000 : "yamon env vars" * 0x00000000-0x00a00000 : "kernel+rootfs" */ static struct mtd_partition gpr_mtd_partitions[] = { { .name = "kernel", .size = 0x00200000, .offset = 0, }, { .name = "rootfs", .size = 0x00800000, .offset = MTDPART_OFS_APPEND, .mask_flags = MTD_WRITEABLE, }, { .name = "config", .size = 0x00200000, .offset = 0x01d00000, }, { .name = "yamon", .size = 0x00100000, .offset = 0x01c00000, }, { .name = "yamon env vars", .size = 0x00040000, .offset = MTDPART_OFS_APPEND, }, { .name = "kernel+rootfs", .size = 0x00a00000, .offset = 0, }, }; static struct physmap_flash_data gpr_flash_data = { .width = 4, .nr_parts = ARRAY_SIZE(gpr_mtd_partitions), .parts = gpr_mtd_partitions, }; static struct resource gpr_mtd_resource = { .start = 0x1e000000, .end = 0x1fffffff, .flags = IORESOURCE_MEM, }; static struct platform_device gpr_mtd_device = { .name = "physmap-flash", .dev = { .platform_data = &gpr_flash_data, }, .num_resources = 1, .resource = 
&gpr_mtd_resource, }; /* * LEDs */ static struct gpio_led gpr_gpio_leds[] = { { /* green */ .name = "gpr:green", .gpio = 4, .active_low = 1, }, { /* red */ .name = "gpr:red", .gpio = 5, .active_low = 1, } }; static struct gpio_led_platform_data gpr_led_data = { .num_leds = ARRAY_SIZE(gpr_gpio_leds), .leds = gpr_gpio_leds, }; static struct platform_device gpr_led_devices = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &gpr_led_data, } }; /* * I2C */ static struct i2c_gpio_platform_data gpr_i2c_data = { .sda_pin = 209, .sda_is_open_drain = 1, .scl_pin = 210, .scl_is_open_drain = 1, .udelay = 2, /* ~100 kHz */ .timeout = HZ, }; static struct platform_device gpr_i2c_device = { .name = "i2c-gpio", .id = -1, .dev.platform_data = &gpr_i2c_data, }; static struct i2c_board_info gpr_i2c_info[] __initdata = { { I2C_BOARD_INFO("lm83", 0x18), .type = "lm83" } }; static struct resource alchemy_pci_host_res[] = { [0] = { .start = AU1500_PCI_PHYS_ADDR, .end = AU1500_PCI_PHYS_ADDR + 0xfff, .flags = IORESOURCE_MEM, }, }; static int gpr_map_pci_irq(const struct pci_dev *d, u8 slot, u8 pin) { if ((slot == 0) && (pin == 1)) return AU1550_PCI_INTA; else if ((slot == 0) && (pin == 2)) return AU1550_PCI_INTB; return 0xff; } static struct alchemy_pci_platdata gpr_pci_pd = { .board_map_irq = gpr_map_pci_irq, .pci_cfg_set = PCI_CONFIG_AEN | PCI_CONFIG_R2H | PCI_CONFIG_R1H | PCI_CONFIG_CH | #if defined(__MIPSEB__) PCI_CONFIG_SIC_HWA_DAT | PCI_CONFIG_SM, #else 0, #endif }; static struct platform_device gpr_pci_host_dev = { .dev.platform_data = &gpr_pci_pd, .name = "alchemy-pci", .id = 0, .num_resources = ARRAY_SIZE(alchemy_pci_host_res), .resource = alchemy_pci_host_res, }; static struct platform_device *gpr_devices[] __initdata = { &gpr_wdt_device, &gpr_mtd_device, &gpr_i2c_device, &gpr_led_devices, }; static int __init gpr_pci_init(void) { return platform_device_register(&gpr_pci_host_dev); } /* must be arch_initcall; MIPS PCI scans busses in a subsys_initcall */ 
arch_initcall(gpr_pci_init); static int __init gpr_dev_init(void) { i2c_register_board_info(0, gpr_i2c_info, ARRAY_SIZE(gpr_i2c_info)); return platform_add_devices(gpr_devices, ARRAY_SIZE(gpr_devices)); } device_initcall(gpr_dev_init);
gpl-2.0
willizambranoback/evolution_CM13
arch/mips/kernel/cevt-ds1287.c
4781
2853
/* * DS1287 clockevent driver * * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/clockchips.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/mc146818rtc.h> #include <linux/irq.h> #include <asm/time.h> int ds1287_timer_state(void) { return (CMOS_READ(RTC_REG_C) & RTC_PF) != 0; } int ds1287_set_base_clock(unsigned int hz) { u8 rate; switch (hz) { case 128: rate = 0x9; break; case 256: rate = 0x8; break; case 1024: rate = 0x6; break; default: return -EINVAL; } CMOS_WRITE(RTC_REF_CLCK_32KHZ | rate, RTC_REG_A); return 0; } static int ds1287_set_next_event(unsigned long delta, struct clock_event_device *evt) { return -EINVAL; } static void ds1287_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { u8 val; spin_lock(&rtc_lock); val = CMOS_READ(RTC_REG_B); switch (mode) { case CLOCK_EVT_MODE_PERIODIC: val |= RTC_PIE; break; default: val &= ~RTC_PIE; break; } CMOS_WRITE(val, RTC_REG_B); spin_unlock(&rtc_lock); } static void ds1287_event_handler(struct clock_event_device *dev) { } static struct clock_event_device ds1287_clockevent = { .name = "ds1287", .features = CLOCK_EVT_FEAT_PERIODIC, .set_next_event = ds1287_set_next_event, .set_mode = ds1287_set_mode, .event_handler = ds1287_event_handler, }; static irqreturn_t 
ds1287_interrupt(int irq, void *dev_id) { struct clock_event_device *cd = &ds1287_clockevent; /* Ack the RTC interrupt. */ CMOS_READ(RTC_REG_C); cd->event_handler(cd); return IRQ_HANDLED; } static struct irqaction ds1287_irqaction = { .handler = ds1287_interrupt, .flags = IRQF_PERCPU | IRQF_TIMER, .name = "ds1287", }; int __init ds1287_clockevent_init(int irq) { struct clock_event_device *cd; cd = &ds1287_clockevent; cd->rating = 100; cd->irq = irq; clockevent_set_clock(cd, 32768); cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); cd->min_delta_ns = clockevent_delta2ns(0x300, cd); cd->cpumask = cpumask_of(0); clockevents_register_device(&ds1287_clockevent); return setup_irq(irq, &ds1287_irqaction); }
gpl-2.0
CyanogenMod/android_kernel_nvidia_shieldtablet
drivers/xen/manage.c
6573
6957
/* * Handle extern requests for shutdown, reboot and sysrq */ #include <linux/kernel.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/reboot.h> #include <linux/sysrq.h> #include <linux/stop_machine.h> #include <linux/freezer.h> #include <linux/syscore_ops.h> #include <linux/export.h> #include <xen/xen.h> #include <xen/xenbus.h> #include <xen/grant_table.h> #include <xen/events.h> #include <xen/hvc-console.h> #include <xen/xen-ops.h> #include <asm/xen/hypercall.h> #include <asm/xen/page.h> #include <asm/xen/hypervisor.h> enum shutdown_state { SHUTDOWN_INVALID = -1, SHUTDOWN_POWEROFF = 0, SHUTDOWN_SUSPEND = 2, /* Code 3 is SHUTDOWN_CRASH, which we don't use because the domain can only report a crash, not be instructed to crash! HALT is the same as POWEROFF, as far as we're concerned. The tools use the distinction when we return the reason code to them. */ SHUTDOWN_HALT = 4, }; /* Ignore multiple shutdown requests. */ static enum shutdown_state shutting_down = SHUTDOWN_INVALID; struct suspend_info { int cancelled; unsigned long arg; /* extra hypercall argument */ void (*pre)(void); void (*post)(int cancelled); }; static void xen_hvm_post_suspend(int cancelled) { xen_arch_hvm_post_suspend(cancelled); gnttab_resume(); } static void xen_pre_suspend(void) { xen_mm_pin_all(); gnttab_suspend(); xen_arch_pre_suspend(); } static void xen_post_suspend(int cancelled) { xen_arch_post_suspend(cancelled); gnttab_resume(); xen_mm_unpin_all(); } #ifdef CONFIG_HIBERNATE_CALLBACKS static int xen_suspend(void *data) { struct suspend_info *si = data; int err; BUG_ON(!irqs_disabled()); err = syscore_suspend(); if (err) { printk(KERN_ERR "xen_suspend: system core suspend failed: %d\n", err); return err; } if (si->pre) si->pre(); /* * This hypercall returns 1 if suspend was cancelled * or the domain was merely checkpointed, and 0 if it * is resuming in a new domain. 
*/ si->cancelled = HYPERVISOR_suspend(si->arg); if (si->post) si->post(si->cancelled); if (!si->cancelled) { xen_irq_resume(); xen_console_resume(); xen_timer_resume(); } syscore_resume(); return 0; } static void do_suspend(void) { int err; struct suspend_info si; shutting_down = SHUTDOWN_SUSPEND; #ifdef CONFIG_PREEMPT /* If the kernel is preemptible, we need to freeze all the processes to prevent them from being in the middle of a pagetable update during suspend. */ err = freeze_processes(); if (err) { printk(KERN_ERR "xen suspend: freeze failed %d\n", err); goto out; } #endif err = dpm_suspend_start(PMSG_FREEZE); if (err) { printk(KERN_ERR "xen suspend: dpm_suspend_start %d\n", err); goto out_thaw; } printk(KERN_DEBUG "suspending xenstore...\n"); xs_suspend(); err = dpm_suspend_end(PMSG_FREEZE); if (err) { printk(KERN_ERR "dpm_suspend_end failed: %d\n", err); si.cancelled = 0; goto out_resume; } si.cancelled = 1; if (xen_hvm_domain()) { si.arg = 0UL; si.pre = NULL; si.post = &xen_hvm_post_suspend; } else { si.arg = virt_to_mfn(xen_start_info); si.pre = &xen_pre_suspend; si.post = &xen_post_suspend; } err = stop_machine(xen_suspend, &si, cpumask_of(0)); dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE); if (err) { printk(KERN_ERR "failed to start xen_suspend: %d\n", err); si.cancelled = 1; } out_resume: if (!si.cancelled) { xen_arch_resume(); xs_resume(); } else xs_suspend_cancel(); dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE); /* Make sure timer events get retriggered on all CPUs */ clock_was_set(); out_thaw: #ifdef CONFIG_PREEMPT thaw_processes(); out: #endif shutting_down = SHUTDOWN_INVALID; } #endif /* CONFIG_HIBERNATE_CALLBACKS */ struct shutdown_handler { const char *command; void (*cb)(void); }; static void do_poweroff(void) { shutting_down = SHUTDOWN_POWEROFF; orderly_poweroff(false); } static void do_reboot(void) { shutting_down = SHUTDOWN_POWEROFF; /* ? 
*/ ctrl_alt_del(); } static void shutdown_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { char *str; struct xenbus_transaction xbt; int err; static struct shutdown_handler handlers[] = { { "poweroff", do_poweroff }, { "halt", do_poweroff }, { "reboot", do_reboot }, #ifdef CONFIG_HIBERNATE_CALLBACKS { "suspend", do_suspend }, #endif {NULL, NULL}, }; static struct shutdown_handler *handler; if (shutting_down != SHUTDOWN_INVALID) return; again: err = xenbus_transaction_start(&xbt); if (err) return; str = (char *)xenbus_read(xbt, "control", "shutdown", NULL); /* Ignore read errors and empty reads. */ if (XENBUS_IS_ERR_READ(str)) { xenbus_transaction_end(xbt, 1); return; } for (handler = &handlers[0]; handler->command; handler++) { if (strcmp(str, handler->command) == 0) break; } /* Only acknowledge commands which we are prepared to handle. */ if (handler->cb) xenbus_write(xbt, "control", "shutdown", ""); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) { kfree(str); goto again; } if (handler->cb) { handler->cb(); } else { printk(KERN_INFO "Ignoring shutdown request: %s\n", str); shutting_down = SHUTDOWN_INVALID; } kfree(str); } #ifdef CONFIG_MAGIC_SYSRQ static void sysrq_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { char sysrq_key = '\0'; struct xenbus_transaction xbt; int err; again: err = xenbus_transaction_start(&xbt); if (err) return; if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) { printk(KERN_ERR "Unable to read sysrq code in " "control/sysrq\n"); xenbus_transaction_end(xbt, 1); return; } if (sysrq_key != '\0') xenbus_printf(xbt, "control", "sysrq", "%c", '\0'); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) goto again; if (sysrq_key != '\0') handle_sysrq(sysrq_key); } static struct xenbus_watch sysrq_watch = { .node = "control/sysrq", .callback = sysrq_handler }; #endif static struct xenbus_watch shutdown_watch = { .node = "control/shutdown", .callback = shutdown_handler }; 
static int setup_shutdown_watcher(void) { int err; err = register_xenbus_watch(&shutdown_watch); if (err) { printk(KERN_ERR "Failed to set shutdown watcher\n"); return err; } #ifdef CONFIG_MAGIC_SYSRQ err = register_xenbus_watch(&sysrq_watch); if (err) { printk(KERN_ERR "Failed to set sysrq watcher\n"); return err; } #endif return 0; } static int shutdown_event(struct notifier_block *notifier, unsigned long event, void *data) { setup_shutdown_watcher(); return NOTIFY_DONE; } int xen_setup_shutdown_event(void) { static struct notifier_block xenstore_notifier = { .notifier_call = shutdown_event }; if (!xen_domain()) return -ENODEV; register_xenstore_notifier(&xenstore_notifier); return 0; } EXPORT_SYMBOL_GPL(xen_setup_shutdown_event); subsys_initcall(xen_setup_shutdown_event);
gpl-2.0
renesas/onekernel
arch/x86/kernel/paravirt_patch_64.c
10669
2303
#include <asm/paravirt.h> #include <asm/asm-offsets.h> #include <linux/stringify.h> DEF_NATIVE(pv_irq_ops, irq_disable, "cli"); DEF_NATIVE(pv_irq_ops, irq_enable, "sti"); DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq"); DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax"); DEF_NATIVE(pv_cpu_ops, iret, "iretq"); DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax"); DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax"); DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3"); DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)"); DEF_NATIVE(pv_cpu_ops, clts, "clts"); DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd"); DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "swapgs; sti; sysexit"); DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq"); DEF_NATIVE(pv_cpu_ops, usergs_sysret32, "swapgs; sysretl"); DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs"); DEF_NATIVE(, mov32, "mov %edi, %eax"); DEF_NATIVE(, mov64, "mov %rdi, %rax"); unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len) { return paravirt_patch_insns(insnbuf, len, start__mov32, end__mov32); } unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len) { return paravirt_patch_insns(insnbuf, len, start__mov64, end__mov64); } unsigned native_patch(u8 type, u16 clobbers, void *ibuf, unsigned long addr, unsigned len) { const unsigned char *start, *end; unsigned ret; #define PATCH_SITE(ops, x) \ case PARAVIRT_PATCH(ops.x): \ start = start_##ops##_##x; \ end = end_##ops##_##x; \ goto patch_site switch(type) { PATCH_SITE(pv_irq_ops, restore_fl); PATCH_SITE(pv_irq_ops, save_fl); PATCH_SITE(pv_irq_ops, irq_enable); PATCH_SITE(pv_irq_ops, irq_disable); PATCH_SITE(pv_cpu_ops, iret); PATCH_SITE(pv_cpu_ops, irq_enable_sysexit); PATCH_SITE(pv_cpu_ops, usergs_sysret32); PATCH_SITE(pv_cpu_ops, usergs_sysret64); PATCH_SITE(pv_cpu_ops, swapgs); PATCH_SITE(pv_mmu_ops, read_cr2); PATCH_SITE(pv_mmu_ops, read_cr3); PATCH_SITE(pv_mmu_ops, write_cr3); PATCH_SITE(pv_cpu_ops, clts); PATCH_SITE(pv_mmu_ops, flush_tlb_single); 
PATCH_SITE(pv_cpu_ops, wbinvd); patch_site: ret = paravirt_patch_insns(ibuf, len, start, end); break; default: ret = paravirt_patch_default(type, clobbers, ibuf, addr, len); break; } #undef PATCH_SITE return ret; }
gpl-2.0
nnvt/android_kernel_oneplus_msm8974
arch/powerpc/boot/4xx.c
12973
20641
/* * Copyright 2007 David Gibson, IBM Corporation. * * Based on earlier code: * Matt Porter <mporter@kernel.crashing.org> * Copyright 2002-2005 MontaVista Software Inc. * * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> * Copyright (c) 2003, 2004 Zultys Technologies * * Copyright (C) 2009 Wind River Systems, Inc. * Updated for supporting PPC405EX on Kilauea. * Tiejun Chen <tiejun.chen@windriver.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <stddef.h> #include "types.h" #include "string.h" #include "stdio.h" #include "ops.h" #include "reg.h" #include "dcr.h" static unsigned long chip_11_errata(unsigned long memsize) { unsigned long pvr; pvr = mfpvr(); switch (pvr & 0xf0000ff0) { case 0x40000850: case 0x400008d0: case 0x200008d0: memsize -= 4096; break; default: break; } return memsize; } /* Read the 4xx SDRAM controller to get size of system memory. */ void ibm4xx_sdram_fixup_memsize(void) { int i; unsigned long memsize, bank_config; memsize = 0; for (i = 0; i < ARRAY_SIZE(sdram_bxcr); i++) { bank_config = SDRAM0_READ(sdram_bxcr[i]); if (bank_config & SDRAM_CONFIG_BANK_ENABLE) memsize += SDRAM_CONFIG_BANK_SIZE(bank_config); } memsize = chip_11_errata(memsize); dt_fixup_memory(0, memsize); } /* Read the 440SPe MQ controller to get size of system memory. 
*/ #define DCRN_MQ0_B0BAS 0x40 #define DCRN_MQ0_B1BAS 0x41 #define DCRN_MQ0_B2BAS 0x42 #define DCRN_MQ0_B3BAS 0x43 static u64 ibm440spe_decode_bas(u32 bas) { u64 base = ((u64)(bas & 0xFFE00000u)) << 2; /* open coded because I'm paranoid about invalid values */ switch ((bas >> 4) & 0xFFF) { case 0: return 0; case 0xffc: return base + 0x000800000ull; case 0xff8: return base + 0x001000000ull; case 0xff0: return base + 0x002000000ull; case 0xfe0: return base + 0x004000000ull; case 0xfc0: return base + 0x008000000ull; case 0xf80: return base + 0x010000000ull; case 0xf00: return base + 0x020000000ull; case 0xe00: return base + 0x040000000ull; case 0xc00: return base + 0x080000000ull; case 0x800: return base + 0x100000000ull; } printf("Memory BAS value 0x%08x unsupported !\n", bas); return 0; } void ibm440spe_fixup_memsize(void) { u64 banktop, memsize = 0; /* Ultimately, we should directly construct the memory node * so we are able to handle holes in the memory address space */ banktop = ibm440spe_decode_bas(mfdcr(DCRN_MQ0_B0BAS)); if (banktop > memsize) memsize = banktop; banktop = ibm440spe_decode_bas(mfdcr(DCRN_MQ0_B1BAS)); if (banktop > memsize) memsize = banktop; banktop = ibm440spe_decode_bas(mfdcr(DCRN_MQ0_B2BAS)); if (banktop > memsize) memsize = banktop; banktop = ibm440spe_decode_bas(mfdcr(DCRN_MQ0_B3BAS)); if (banktop > memsize) memsize = banktop; dt_fixup_memory(0, memsize); } /* 4xx DDR1/2 Denali memory controller support */ /* DDR0 registers */ #define DDR0_02 2 #define DDR0_08 8 #define DDR0_10 10 #define DDR0_14 14 #define DDR0_42 42 #define DDR0_43 43 /* DDR0_02 */ #define DDR_START 0x1 #define DDR_START_SHIFT 0 #define DDR_MAX_CS_REG 0x3 #define DDR_MAX_CS_REG_SHIFT 24 #define DDR_MAX_COL_REG 0xf #define DDR_MAX_COL_REG_SHIFT 16 #define DDR_MAX_ROW_REG 0xf #define DDR_MAX_ROW_REG_SHIFT 8 /* DDR0_08 */ #define DDR_DDR2_MODE 0x1 #define DDR_DDR2_MODE_SHIFT 0 /* DDR0_10 */ #define DDR_CS_MAP 0x3 #define DDR_CS_MAP_SHIFT 8 /* DDR0_14 */ #define DDR_REDUC 0x1 
#define DDR_REDUC_SHIFT 16 /* DDR0_42 */ #define DDR_APIN 0x7 #define DDR_APIN_SHIFT 24 /* DDR0_43 */ #define DDR_COL_SZ 0x7 #define DDR_COL_SZ_SHIFT 8 #define DDR_BANK8 0x1 #define DDR_BANK8_SHIFT 0 #define DDR_GET_VAL(val, mask, shift) (((val) >> (shift)) & (mask)) /* * Some U-Boot versions set the number of chipselects to two * for Sequoia/Rainier boards while they only have one chipselect * hardwired. Hardcode the number of chipselects to one * for sequioa/rainer board models or read the actual value * from the memory controller register DDR0_10 otherwise. */ static inline u32 ibm4xx_denali_get_cs(void) { void *devp; char model[64]; u32 val, cs; devp = finddevice("/"); if (!devp) goto read_cs; if (getprop(devp, "model", model, sizeof(model)) <= 0) goto read_cs; model[sizeof(model)-1] = 0; if (!strcmp(model, "amcc,sequoia") || !strcmp(model, "amcc,rainier")) return 1; read_cs: /* get CS value */ val = SDRAM0_READ(DDR0_10); val = DDR_GET_VAL(val, DDR_CS_MAP, DDR_CS_MAP_SHIFT); cs = 0; while (val) { if (val & 0x1) cs++; val = val >> 1; } return cs; } void ibm4xx_denali_fixup_memsize(void) { u32 val, max_cs, max_col, max_row; u32 cs, col, row, bank, dpath; unsigned long memsize; val = SDRAM0_READ(DDR0_02); if (!DDR_GET_VAL(val, DDR_START, DDR_START_SHIFT)) fatal("DDR controller is not initialized\n"); /* get maximum cs col and row values */ max_cs = DDR_GET_VAL(val, DDR_MAX_CS_REG, DDR_MAX_CS_REG_SHIFT); max_col = DDR_GET_VAL(val, DDR_MAX_COL_REG, DDR_MAX_COL_REG_SHIFT); max_row = DDR_GET_VAL(val, DDR_MAX_ROW_REG, DDR_MAX_ROW_REG_SHIFT); cs = ibm4xx_denali_get_cs(); if (!cs) fatal("No memory installed\n"); if (cs > max_cs) fatal("DDR wrong CS configuration\n"); /* get data path bytes */ val = SDRAM0_READ(DDR0_14); if (DDR_GET_VAL(val, DDR_REDUC, DDR_REDUC_SHIFT)) dpath = 4; /* 32 bits */ else dpath = 8; /* 64 bits */ /* get address pins (rows) */ val = SDRAM0_READ(DDR0_42); row = DDR_GET_VAL(val, DDR_APIN, DDR_APIN_SHIFT); if (row > max_row) fatal("DDR wrong APIN 
configuration\n"); row = max_row - row; /* get collomn size and banks */ val = SDRAM0_READ(DDR0_43); col = DDR_GET_VAL(val, DDR_COL_SZ, DDR_COL_SZ_SHIFT); if (col > max_col) fatal("DDR wrong COL configuration\n"); col = max_col - col; if (DDR_GET_VAL(val, DDR_BANK8, DDR_BANK8_SHIFT)) bank = 8; /* 8 banks */ else bank = 4; /* 4 banks */ memsize = cs * (1 << (col+row)) * bank * dpath; memsize = chip_11_errata(memsize); dt_fixup_memory(0, memsize); } #define SPRN_DBCR0_40X 0x3F2 #define SPRN_DBCR0_44X 0x134 #define DBCR0_RST_SYSTEM 0x30000000 void ibm44x_dbcr_reset(void) { unsigned long tmp; asm volatile ( "mfspr %0,%1\n" "oris %0,%0,%2@h\n" "mtspr %1,%0" : "=&r"(tmp) : "i"(SPRN_DBCR0_44X), "i"(DBCR0_RST_SYSTEM) ); } void ibm40x_dbcr_reset(void) { unsigned long tmp; asm volatile ( "mfspr %0,%1\n" "oris %0,%0,%2@h\n" "mtspr %1,%0" : "=&r"(tmp) : "i"(SPRN_DBCR0_40X), "i"(DBCR0_RST_SYSTEM) ); } #define EMAC_RESET 0x20000000 void ibm4xx_quiesce_eth(u32 *emac0, u32 *emac1) { /* Quiesce the MAL and EMAC(s) since PIBS/OpenBIOS don't * do this for us */ if (emac0) *emac0 = EMAC_RESET; if (emac1) *emac1 = EMAC_RESET; mtdcr(DCRN_MAL0_CFG, MAL_RESET); while (mfdcr(DCRN_MAL0_CFG) & MAL_RESET) ; /* loop until reset takes effect */ } /* Read 4xx EBC bus bridge registers to get mappings of the peripheral * banks into the OPB address space */ void ibm4xx_fixup_ebc_ranges(const char *ebc) { void *devp; u32 bxcr; u32 ranges[EBC_NUM_BANKS*4]; u32 *p = ranges; int i; for (i = 0; i < EBC_NUM_BANKS; i++) { mtdcr(DCRN_EBC0_CFGADDR, EBC_BXCR(i)); bxcr = mfdcr(DCRN_EBC0_CFGDATA); if ((bxcr & EBC_BXCR_BU) != EBC_BXCR_BU_OFF) { *p++ = i; *p++ = 0; *p++ = bxcr & EBC_BXCR_BAS; *p++ = EBC_BXCR_BANK_SIZE(bxcr); } } devp = finddevice(ebc); if (! 
devp) fatal("Couldn't locate EBC node %s\n\r", ebc); setprop(devp, "ranges", ranges, (p - ranges) * sizeof(u32)); } /* Calculate 440GP clocks */ void ibm440gp_fixup_clocks(unsigned int sys_clk, unsigned int ser_clk) { u32 sys0 = mfdcr(DCRN_CPC0_SYS0); u32 cr0 = mfdcr(DCRN_CPC0_CR0); u32 cpu, plb, opb, ebc, tb, uart0, uart1, m; u32 opdv = CPC0_SYS0_OPDV(sys0); u32 epdv = CPC0_SYS0_EPDV(sys0); if (sys0 & CPC0_SYS0_BYPASS) { /* Bypass system PLL */ cpu = plb = sys_clk; } else { if (sys0 & CPC0_SYS0_EXTSL) /* PerClk */ m = CPC0_SYS0_FWDVB(sys0) * opdv * epdv; else /* CPU clock */ m = CPC0_SYS0_FBDV(sys0) * CPC0_SYS0_FWDVA(sys0); cpu = sys_clk * m / CPC0_SYS0_FWDVA(sys0); plb = sys_clk * m / CPC0_SYS0_FWDVB(sys0); } opb = plb / opdv; ebc = opb / epdv; /* FIXME: Check if this is for all 440GP, or just Ebony */ if ((mfpvr() & 0xf0000fff) == 0x40000440) /* Rev. B 440GP, use external system clock */ tb = sys_clk; else /* Rev. C 440GP, errata force us to use internal clock */ tb = cpu; if (cr0 & CPC0_CR0_U0EC) /* External UART clock */ uart0 = ser_clk; else /* Internal UART clock */ uart0 = plb / CPC0_CR0_UDIV(cr0); if (cr0 & CPC0_CR0_U1EC) /* External UART clock */ uart1 = ser_clk; else /* Internal UART clock */ uart1 = plb / CPC0_CR0_UDIV(cr0); printf("PPC440GP: SysClk = %dMHz (%x)\n\r", (sys_clk + 500000) / 1000000, sys_clk); dt_fixup_cpu_clocks(cpu, tb, 0); dt_fixup_clock("/plb", plb); dt_fixup_clock("/plb/opb", opb); dt_fixup_clock("/plb/opb/ebc", ebc); dt_fixup_clock("/plb/opb/serial@40000200", uart0); dt_fixup_clock("/plb/opb/serial@40000300", uart1); } #define SPRN_CCR1 0x378 static inline u32 __fix_zero(u32 v, u32 def) { return v ? 
v : def; } static unsigned int __ibm440eplike_fixup_clocks(unsigned int sys_clk, unsigned int tmr_clk, int per_clk_from_opb) { /* PLL config */ u32 pllc = CPR0_READ(DCRN_CPR0_PLLC); u32 plld = CPR0_READ(DCRN_CPR0_PLLD); /* Dividers */ u32 fbdv = __fix_zero((plld >> 24) & 0x1f, 32); u32 fwdva = __fix_zero((plld >> 16) & 0xf, 16); u32 fwdvb = __fix_zero((plld >> 8) & 7, 8); u32 lfbdv = __fix_zero(plld & 0x3f, 64); u32 pradv0 = __fix_zero((CPR0_READ(DCRN_CPR0_PRIMAD) >> 24) & 7, 8); u32 prbdv0 = __fix_zero((CPR0_READ(DCRN_CPR0_PRIMBD) >> 24) & 7, 8); u32 opbdv0 = __fix_zero((CPR0_READ(DCRN_CPR0_OPBD) >> 24) & 3, 4); u32 perdv0 = __fix_zero((CPR0_READ(DCRN_CPR0_PERD) >> 24) & 3, 4); /* Input clocks for primary dividers */ u32 clk_a, clk_b; /* Resulting clocks */ u32 cpu, plb, opb, ebc, vco; /* Timebase */ u32 ccr1, tb = tmr_clk; if (pllc & 0x40000000) { u32 m; /* Feedback path */ switch ((pllc >> 24) & 7) { case 0: /* PLLOUTx */ m = ((pllc & 0x20000000) ? fwdvb : fwdva) * lfbdv; break; case 1: /* CPU */ m = fwdva * pradv0; break; case 5: /* PERClk */ m = fwdvb * prbdv0 * opbdv0 * perdv0; break; default: printf("WARNING ! Invalid PLL feedback source !\n"); goto bypass; } m *= fbdv; vco = sys_clk * m; clk_a = vco / fwdva; clk_b = vco / fwdvb; } else { bypass: /* Bypass system PLL */ vco = 0; clk_a = clk_b = sys_clk; } cpu = clk_a / pradv0; plb = clk_b / prbdv0; opb = plb / opbdv0; ebc = (per_clk_from_opb ? opb : plb) / perdv0; /* Figure out timebase. 
Either CPU or default TmrClk */ ccr1 = mfspr(SPRN_CCR1); /* If passed a 0 tmr_clk, force CPU clock */ if (tb == 0) { ccr1 &= ~0x80u; mtspr(SPRN_CCR1, ccr1); } if ((ccr1 & 0x0080) == 0) tb = cpu; dt_fixup_cpu_clocks(cpu, tb, 0); dt_fixup_clock("/plb", plb); dt_fixup_clock("/plb/opb", opb); dt_fixup_clock("/plb/opb/ebc", ebc); return plb; } static void eplike_fixup_uart_clk(int index, const char *path, unsigned int ser_clk, unsigned int plb_clk) { unsigned int sdr; unsigned int clock; switch (index) { case 0: sdr = SDR0_READ(DCRN_SDR0_UART0); break; case 1: sdr = SDR0_READ(DCRN_SDR0_UART1); break; case 2: sdr = SDR0_READ(DCRN_SDR0_UART2); break; case 3: sdr = SDR0_READ(DCRN_SDR0_UART3); break; default: return; } if (sdr & 0x00800000u) clock = ser_clk; else clock = plb_clk / __fix_zero(sdr & 0xff, 256); dt_fixup_clock(path, clock); } void ibm440ep_fixup_clocks(unsigned int sys_clk, unsigned int ser_clk, unsigned int tmr_clk) { unsigned int plb_clk = __ibm440eplike_fixup_clocks(sys_clk, tmr_clk, 0); /* serial clocks need fixup based on int/ext */ eplike_fixup_uart_clk(0, "/plb/opb/serial@ef600300", ser_clk, plb_clk); eplike_fixup_uart_clk(1, "/plb/opb/serial@ef600400", ser_clk, plb_clk); eplike_fixup_uart_clk(2, "/plb/opb/serial@ef600500", ser_clk, plb_clk); eplike_fixup_uart_clk(3, "/plb/opb/serial@ef600600", ser_clk, plb_clk); } void ibm440gx_fixup_clocks(unsigned int sys_clk, unsigned int ser_clk, unsigned int tmr_clk) { unsigned int plb_clk = __ibm440eplike_fixup_clocks(sys_clk, tmr_clk, 1); /* serial clocks need fixup based on int/ext */ eplike_fixup_uart_clk(0, "/plb/opb/serial@40000200", ser_clk, plb_clk); eplike_fixup_uart_clk(1, "/plb/opb/serial@40000300", ser_clk, plb_clk); } void ibm440spe_fixup_clocks(unsigned int sys_clk, unsigned int ser_clk, unsigned int tmr_clk) { unsigned int plb_clk = __ibm440eplike_fixup_clocks(sys_clk, tmr_clk, 1); /* serial clocks need fixup based on int/ext */ eplike_fixup_uart_clk(0, "/plb/opb/serial@f0000200", ser_clk, plb_clk); 
eplike_fixup_uart_clk(1, "/plb/opb/serial@f0000300", ser_clk, plb_clk); eplike_fixup_uart_clk(2, "/plb/opb/serial@f0000600", ser_clk, plb_clk); } void ibm405gp_fixup_clocks(unsigned int sys_clk, unsigned int ser_clk) { u32 pllmr = mfdcr(DCRN_CPC0_PLLMR); u32 cpc0_cr0 = mfdcr(DCRN_405_CPC0_CR0); u32 cpc0_cr1 = mfdcr(DCRN_405_CPC0_CR1); u32 psr = mfdcr(DCRN_405_CPC0_PSR); u32 cpu, plb, opb, ebc, tb, uart0, uart1, m; u32 fwdv, fwdvb, fbdv, cbdv, opdv, epdv, ppdv, udiv; fwdv = (8 - ((pllmr & 0xe0000000) >> 29)); fbdv = (pllmr & 0x1e000000) >> 25; if (fbdv == 0) fbdv = 16; cbdv = ((pllmr & 0x00060000) >> 17) + 1; /* CPU:PLB */ opdv = ((pllmr & 0x00018000) >> 15) + 1; /* PLB:OPB */ ppdv = ((pllmr & 0x00001800) >> 13) + 1; /* PLB:PCI */ epdv = ((pllmr & 0x00001800) >> 11) + 2; /* PLB:EBC */ udiv = ((cpc0_cr0 & 0x3e) >> 1) + 1; /* check for 405GPr */ if ((mfpvr() & 0xfffffff0) == (0x50910951 & 0xfffffff0)) { fwdvb = 8 - (pllmr & 0x00000007); if (!(psr & 0x00001000)) /* PCI async mode enable == 0 */ if (psr & 0x00000020) /* New mode enable */ m = fwdvb * 2 * ppdv; else m = fwdvb * cbdv * ppdv; else if (psr & 0x00000020) /* New mode enable */ if (psr & 0x00000800) /* PerClk synch mode */ m = fwdvb * 2 * epdv; else m = fbdv * fwdv; else if (epdv == fbdv) m = fbdv * cbdv * epdv; else m = fbdv * fwdvb * cbdv; cpu = sys_clk * m / fwdv; plb = sys_clk * m / (fwdvb * cbdv); } else { m = fwdv * fbdv * cbdv; cpu = sys_clk * m / fwdv; plb = cpu / cbdv; } opb = plb / opdv; ebc = plb / epdv; if (cpc0_cr0 & 0x80) /* uart0 uses the external clock */ uart0 = ser_clk; else uart0 = cpu / udiv; if (cpc0_cr0 & 0x40) /* uart1 uses the external clock */ uart1 = ser_clk; else uart1 = cpu / udiv; /* setup the timebase clock to tick at the cpu frequency */ cpc0_cr1 = cpc0_cr1 & ~0x00800000; mtdcr(DCRN_405_CPC0_CR1, cpc0_cr1); tb = cpu; dt_fixup_cpu_clocks(cpu, tb, 0); dt_fixup_clock("/plb", plb); dt_fixup_clock("/plb/opb", opb); dt_fixup_clock("/plb/ebc", ebc); 
dt_fixup_clock("/plb/opb/serial@ef600300", uart0); dt_fixup_clock("/plb/opb/serial@ef600400", uart1); } void ibm405ep_fixup_clocks(unsigned int sys_clk) { u32 pllmr0 = mfdcr(DCRN_CPC0_PLLMR0); u32 pllmr1 = mfdcr(DCRN_CPC0_PLLMR1); u32 cpc0_ucr = mfdcr(DCRN_CPC0_UCR); u32 cpu, plb, opb, ebc, uart0, uart1; u32 fwdva, fwdvb, fbdv, cbdv, opdv, epdv; u32 pllmr0_ccdv, tb, m; fwdva = 8 - ((pllmr1 & 0x00070000) >> 16); fwdvb = 8 - ((pllmr1 & 0x00007000) >> 12); fbdv = (pllmr1 & 0x00f00000) >> 20; if (fbdv == 0) fbdv = 16; cbdv = ((pllmr0 & 0x00030000) >> 16) + 1; /* CPU:PLB */ epdv = ((pllmr0 & 0x00000300) >> 8) + 2; /* PLB:EBC */ opdv = ((pllmr0 & 0x00003000) >> 12) + 1; /* PLB:OPB */ m = fbdv * fwdvb; pllmr0_ccdv = ((pllmr0 & 0x00300000) >> 20) + 1; if (pllmr1 & 0x80000000) cpu = sys_clk * m / (fwdva * pllmr0_ccdv); else cpu = sys_clk / pllmr0_ccdv; plb = cpu / cbdv; opb = plb / opdv; ebc = plb / epdv; tb = cpu; uart0 = cpu / (cpc0_ucr & 0x0000007f); uart1 = cpu / ((cpc0_ucr & 0x00007f00) >> 8); dt_fixup_cpu_clocks(cpu, tb, 0); dt_fixup_clock("/plb", plb); dt_fixup_clock("/plb/opb", opb); dt_fixup_clock("/plb/ebc", ebc); dt_fixup_clock("/plb/opb/serial@ef600300", uart0); dt_fixup_clock("/plb/opb/serial@ef600400", uart1); } static u8 ibm405ex_fwdv_multi_bits[] = { /* values for: 1 - 16 */ 0x01, 0x02, 0x0e, 0x09, 0x04, 0x0b, 0x10, 0x0d, 0x0c, 0x05, 0x06, 0x0f, 0x0a, 0x07, 0x08, 0x03 }; u32 ibm405ex_get_fwdva(unsigned long cpr_fwdv) { u32 index; for (index = 0; index < ARRAY_SIZE(ibm405ex_fwdv_multi_bits); index++) if (cpr_fwdv == (u32)ibm405ex_fwdv_multi_bits[index]) return index + 1; return 0; } static u8 ibm405ex_fbdv_multi_bits[] = { /* values for: 1 - 100 */ 0x00, 0xff, 0x7e, 0xfd, 0x7a, 0xf5, 0x6a, 0xd5, 0x2a, 0xd4, 0x29, 0xd3, 0x26, 0xcc, 0x19, 0xb3, 0x67, 0xce, 0x1d, 0xbb, 0x77, 0xee, 0x5d, 0xba, 0x74, 0xe9, 0x52, 0xa5, 0x4b, 0x96, 0x2c, 0xd8, 0x31, 0xe3, 0x46, 0x8d, 0x1b, 0xb7, 0x6f, 0xde, 0x3d, 0xfb, 0x76, 0xed, 0x5a, 0xb5, 0x6b, 0xd6, 0x2d, 0xdb, 0x36, 0xec, 
0x59, 0xb2, 0x64, 0xc9, 0x12, 0xa4, 0x48, 0x91, 0x23, 0xc7, 0x0e, 0x9c, 0x38, 0xf0, 0x61, 0xc2, 0x05, 0x8b, 0x17, 0xaf, 0x5f, 0xbe, 0x7c, 0xf9, 0x72, 0xe5, 0x4a, 0x95, 0x2b, 0xd7, 0x2e, 0xdc, 0x39, 0xf3, 0x66, 0xcd, 0x1a, 0xb4, 0x68, 0xd1, 0x22, 0xc4, 0x09, 0x93, 0x27, 0xcf, 0x1e, 0xbc, /* values for: 101 - 200 */ 0x78, 0xf1, 0x62, 0xc5, 0x0a, 0x94, 0x28, 0xd0, 0x21, 0xc3, 0x06, 0x8c, 0x18, 0xb0, 0x60, 0xc1, 0x02, 0x84, 0x08, 0x90, 0x20, 0xc0, 0x01, 0x83, 0x07, 0x8f, 0x1f, 0xbf, 0x7f, 0xfe, 0x7d, 0xfa, 0x75, 0xea, 0x55, 0xaa, 0x54, 0xa9, 0x53, 0xa6, 0x4c, 0x99, 0x33, 0xe7, 0x4e, 0x9d, 0x3b, 0xf7, 0x6e, 0xdd, 0x3a, 0xf4, 0x69, 0xd2, 0x25, 0xcb, 0x16, 0xac, 0x58, 0xb1, 0x63, 0xc6, 0x0d, 0x9b, 0x37, 0xef, 0x5e, 0xbd, 0x7b, 0xf6, 0x6d, 0xda, 0x35, 0xeb, 0x56, 0xad, 0x5b, 0xb6, 0x6c, 0xd9, 0x32, 0xe4, 0x49, 0x92, 0x24, 0xc8, 0x11, 0xa3, 0x47, 0x8e, 0x1c, 0xb8, 0x70, 0xe1, 0x42, 0x85, 0x0b, 0x97, 0x2f, 0xdf, /* values for: 201 - 255 */ 0x3e, 0xfc, 0x79, 0xf2, 0x65, 0xca, 0x15, 0xab, 0x57, 0xae, 0x5c, 0xb9, 0x73, 0xe6, 0x4d, 0x9a, 0x34, 0xe8, 0x51, 0xa2, 0x44, 0x89, 0x13, 0xa7, 0x4f, 0x9e, 0x3c, 0xf8, 0x71, 0xe2, 0x45, 0x8a, 0x14, 0xa8, 0x50, 0xa1, 0x43, 0x86, 0x0c, 0x98, 0x30, 0xe0, 0x41, 0x82, 0x04, 0x88, 0x10, 0xa0, 0x40, 0x81, 0x03, 0x87, 0x0f, 0x9f, 0x3f /* END */ }; u32 ibm405ex_get_fbdv(unsigned long cpr_fbdv) { u32 index; for (index = 0; index < ARRAY_SIZE(ibm405ex_fbdv_multi_bits); index++) if (cpr_fbdv == (u32)ibm405ex_fbdv_multi_bits[index]) return index + 1; return 0; } void ibm405ex_fixup_clocks(unsigned int sys_clk, unsigned int uart_clk) { /* PLL config */ u32 pllc = CPR0_READ(DCRN_CPR0_PLLC); u32 plld = CPR0_READ(DCRN_CPR0_PLLD); u32 cpud = CPR0_READ(DCRN_CPR0_PRIMAD); u32 plbd = CPR0_READ(DCRN_CPR0_PRIMBD); u32 opbd = CPR0_READ(DCRN_CPR0_OPBD); u32 perd = CPR0_READ(DCRN_CPR0_PERD); /* Dividers */ u32 fbdv = ibm405ex_get_fbdv(__fix_zero((plld >> 24) & 0xff, 1)); u32 fwdva = ibm405ex_get_fwdva(__fix_zero((plld >> 16) & 0x0f, 1)); u32 cpudv0 = 
__fix_zero((cpud >> 24) & 7, 8); /* PLBDV0 is hardwared to 010. */ u32 plbdv0 = 2; u32 plb2xdv0 = __fix_zero((plbd >> 16) & 7, 8); u32 opbdv0 = __fix_zero((opbd >> 24) & 3, 4); u32 perdv0 = __fix_zero((perd >> 24) & 3, 4); /* Resulting clocks */ u32 cpu, plb, opb, ebc, vco, tb, uart0, uart1; /* PLL's VCO is the source for primary forward ? */ if (pllc & 0x40000000) { u32 m; /* Feedback path */ switch ((pllc >> 24) & 7) { case 0: /* PLLOUTx */ m = fbdv; break; case 1: /* CPU */ m = fbdv * fwdva * cpudv0; break; case 5: /* PERClk */ m = fbdv * fwdva * plb2xdv0 * plbdv0 * opbdv0 * perdv0; break; default: printf("WARNING ! Invalid PLL feedback source !\n"); goto bypass; } vco = (unsigned int)(sys_clk * m); } else { bypass: /* Bypass system PLL */ vco = 0; } /* CPU = VCO / ( FWDVA x CPUDV0) */ cpu = vco / (fwdva * cpudv0); /* PLB = VCO / ( FWDVA x PLB2XDV0 x PLBDV0) */ plb = vco / (fwdva * plb2xdv0 * plbdv0); /* OPB = PLB / OPBDV0 */ opb = plb / opbdv0; /* EBC = OPB / PERDV0 */ ebc = opb / perdv0; tb = cpu; uart0 = uart1 = uart_clk; dt_fixup_cpu_clocks(cpu, tb, 0); dt_fixup_clock("/plb", plb); dt_fixup_clock("/plb/opb", opb); dt_fixup_clock("/plb/opb/ebc", ebc); dt_fixup_clock("/plb/opb/serial@ef600200", uart0); dt_fixup_clock("/plb/opb/serial@ef600300", uart1); }
gpl-2.0
zhjwpku/gsoc
drivers/tty/hvc/hvcs.c
430
46872
/* * IBM eServer Hypervisor Virtual Console Server Device Driver * Copyright (C) 2003, 2004 IBM Corp. * Ryan S. Arnold (rsa@us.ibm.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Author(s) : Ryan S. Arnold <rsa@us.ibm.com> * * This is the device driver for the IBM Hypervisor Virtual Console Server, * "hvcs". The IBM hvcs provides a tty driver interface to allow Linux * user space applications access to the system consoles of logically * partitioned operating systems, e.g. Linux, running on the same partitioned * Power5 ppc64 system. Physical hardware consoles per partition are not * practical on this hardware so system consoles are accessed by this driver * using inter-partition firmware interfaces to virtual terminal devices. * * A vty is known to the HMC as a "virtual serial server adapter". It is a * virtual terminal device that is created by firmware upon partition creation * to act as a partitioned OS's console device. * * Firmware dynamically (via hotplug) exposes vty-servers to a running ppc64 * Linux system upon their creation by the HMC or their exposure during boot. * The non-user interactive backend of this driver is implemented as a vio * device driver so that it can receive notification of vty-server lifetimes * after it registers with the vio bus to handle vty-server probe and remove * callbacks. 
* * Many vty-servers can be configured to connect to one vty, but a vty can * only be actively connected to by a single vty-server, in any manner, at one * time. If the HMC is currently hosting the console for a target Linux * partition; attempts to open the tty device to the partition's console using * the hvcs on any partition will return -EBUSY with every open attempt until * the HMC frees the connection between its vty-server and the desired * partition's vty device. Conversely, a vty-server may only be connected to * a single vty at one time even though it may have several configured vty * partner possibilities. * * Firmware does not provide notification of vty partner changes to this * driver. This means that an HMC Super Admin may add or remove partner vtys * from a vty-server's partner list but the changes will not be signaled to * the vty-server. Firmware only notifies the driver when a vty-server is * added or removed from the system. To compensate for this deficiency, this * driver implements a sysfs update attribute which provides a method for * rescanning partner information upon a user's request. * * Each vty-server, prior to being exposed to this driver is reference counted * using the 2.6 Linux kernel kref construct. * * For direction on installation and usage of this driver please reference * Documentation/powerpc/hvcs.txt. */ #include <linux/device.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/kref.h> #include <linux/kthread.h> #include <linux/list.h> #include <linux/major.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/stat.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <asm/hvconsole.h> #include <asm/hvcserver.h> #include <asm/uaccess.h> #include <asm/vio.h> /* * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00). 
* Removed braces around single statements following conditionals. Removed '= * 0' after static int declarations since these default to zero. Removed * list_for_each_safe() and replaced with list_for_each_entry() in * hvcs_get_by_index(). The 'safe' version is un-needed now that the driver is * using spinlocks. Changed spin_lock_irqsave() to spin_lock() when locking * hvcs_structs_lock and hvcs_pi_lock since these are not touched in an int * handler. Initialized hvcs_structs_lock and hvcs_pi_lock to * SPIN_LOCK_UNLOCKED at declaration time rather than in hvcs_module_init(). * Added spin_lock around list_del() in destroy_hvcs_struct() to protect the * list traversals from a deletion. Removed '= NULL' from pointer declaration * statements since they are initialized NULL by default. Removed wmb() * instances from hvcs_try_write(). They probably aren't needed with locking in * place. Added check and cleanup for hvcs_pi_buff = kmalloc() in * hvcs_module_init(). Exposed hvcs_struct.index via a sysfs attribute so that * the coupling between /dev/hvcs* and a vty-server can be automatically * determined. Moved kobject_put() in hvcs_open outside of the * spin_unlock_irqrestore(). * * 1.3.1 -> 1.3.2 Changed method for determining hvcs_struct->index and had it * align with how the tty layer always assigns the lowest index available. This * change resulted in a list of ints that denotes which indexes are available. * Device additions and removals use the new hvcs_get_index() and * hvcs_return_index() helper functions. The list is created with * hvsc_alloc_index_list() and it is destroyed with hvcs_free_index_list(). * Without these fixes hotplug vty-server adapter support goes crazy with this * driver if the user removes a vty-server adapter. Moved free_irq() outside of * the hvcs_final_close() function in order to get it out of the spinlock. * Rearranged hvcs_close(). Cleaned up some printks and did some housekeeping * on the changelog. 
Removed local CLC_LENGTH and used HVCS_CLC_LENGTH from
 * arch/powerpc/include/asm/hvcserver.h
 *
 * 1.3.2 -> 1.3.3 Replaced yield() in hvcs_close() with tty_wait_until_sent() to
 * prevent possible lockup with realtime scheduling as similarly pointed out by
 * akpm in hvc_console.  Changed resulted in the removal of hvcs_final_close()
 * to reorder cleanup operations and prevent discarding of pending data during
 * an hvcs_close().  Removed spinlock protection of hvcs_struct data members in
 * hvcs_write_room() and hvcs_chars_in_buffer() because they aren't needed.
 */

#define HVCS_DRIVER_VERSION "1.3.3"

MODULE_AUTHOR("Ryan S. Arnold <rsa@us.ibm.com>");
MODULE_DESCRIPTION("IBM hvcs (Hypervisor Virtual Console Server) Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(HVCS_DRIVER_VERSION);

/*
 * Wait this long per iteration while trying to push buffered data to the
 * hypervisor before allowing the tty to complete a close operation.
 * (HZ/100 == 10ms, i.e. 1/100 of a second per iteration.)
 */
#define HVCS_CLOSE_WAIT (HZ/100) /* 1/100 of a second */

/*
 * Since the Linux TTY code does not currently (2-04-2004) support dynamic
 * addition of tty derived devices and we shouldn't allocate thousands of
 * tty_device pointers when the number of vty-server & vty partner connections
 * will most often be much lower than this, we'll arbitrarily allocate
 * HVCS_DEFAULT_SERVER_ADAPTERS tty_structs and cdev's by default when we
 * register the tty_driver. This can be overridden using an insmod parameter.
 */
#define HVCS_DEFAULT_SERVER_ADAPTERS 64

/*
 * The user can't insmod with more than HVCS_MAX_SERVER_ADAPTERS hvcs device
 * nodes as a sanity check.  Theoretically there can be over 1 Billion
 * vty-server & vty partner connections.
 */
#define HVCS_MAX_SERVER_ADAPTERS 1024

/*
 * We let Linux assign us a major number and we start the minors at zero.  There
 * is no intuitive mapping between minor number and the target vty-server
 * adapter except that each new vty-server adapter is always assigned to the
 * smallest minor number available.
 */
#define HVCS_MINOR_START 0

/*
 * The hcall interface involves putting 8 chars into each of two registers.
 * We load up those 2 registers (in arch/powerpc/platforms/pseries/hvconsole.c)
 * by casting char[16] to long[2].  It would work without __ALIGNED__, but a
 * little (tiny) bit slower because an unaligned load is slower than aligned
 * load.
 */
#define __ALIGNED__ __attribute__((__aligned__(8)))

/*
 * How much data can firmware send with each hvc_put_chars()?  Maybe this
 * should be moved into an architecture specific area.
 */
#define HVCS_BUFF_LEN 16

/*
 * This is the maximum amount of data we'll let the user send us (hvcs_write) at
 * once in a chunk as a sanity check.
 */
#define HVCS_MAX_FROM_USER 4096

/*
 * Be careful when adding flags to this line discipline.  Don't add anything
 * that will cause echoing or we'll go into recursive loop echoing chars back
 * and forth with the console drivers.
 */
static struct ktermios hvcs_tty_termios = {
	.c_iflag = IGNBRK | IGNPAR,
	.c_oflag = OPOST,
	.c_cflag = B38400 | CS8 | CREAD | HUPCL,
	.c_cc = INIT_C_CC,
	.c_ispeed = 38400,
	.c_ospeed = 38400
};

/*
 * This value is used to take the place of a command line parameter when the
 * module is inserted.  It starts as -1 and stays as such if the user doesn't
 * specify a module insmod parameter.  If they DO specify one then it is set to
 * the value of the integer passed in.
 */
static int hvcs_parm_num_devs = -1;
module_param(hvcs_parm_num_devs, int, 0);

static const char hvcs_driver_name[] = "hvcs";
static const char hvcs_device_node[] = "hvcs";
static const char hvcs_driver_string[]
	= "IBM hvcs (Hypervisor Virtual Console Server) Driver";

/* Status of partner info rescan triggered via sysfs. */
static int hvcs_rescan_status;

static struct tty_driver *hvcs_tty_driver;

/*
 * In order to be somewhat sane this driver always associates the hvcs_struct
 * index element with the numerically equal tty->index.  This means that a
 * hotplugged vty-server adapter will always map to the lowest index valued
 * device node.  If vty-servers were hotplug removed from the system and then
 * new ones added the new vty-server may have the largest slot number of all
 * the vty-server adapters in the partition but it may have the lowest dev node
 * index of all the adapters due to the hole left by the hotplug removed
 * adapter.  There are a set of functions provided to get the lowest index for
 * a new device as well as return the index to the list.  This list is allocated
 * with a number of elements equal to the number of device nodes requested when
 * the module was inserted.
 */
static int *hvcs_index_list;

/*
 * How large is the list?  This is kept for traversal since the list is
 * dynamically created.
 */
static int hvcs_index_count;

/*
 * Used by the khvcsd to pick up I/O operations when the kernel_thread is
 * already awake but potentially shifted to TASK_INTERRUPTIBLE state.
 */
static int hvcs_kicked;

/*
 * Use by the kthread construct for task operations like waking the sleeping
 * thread and stopping the kthread.
 */
static struct task_struct *hvcs_task;

/*
 * We allocate this for the use of all of the hvcs_structs when they fetch
 * partner info.
 */
static unsigned long *hvcs_pi_buff;

/* Only allow one hvcs_struct to use the hvcs_pi_buff at a time. */
static DEFINE_SPINLOCK(hvcs_pi_lock);

/* One vty-server per hvcs_struct */
struct hvcs_struct {
	/* tty_port embeds the kref that governs this struct's lifetime. */
	struct tty_port port;

	/* Protects the mutable fields below against the irq handler/kthread. */
	spinlock_t lock;

	/*
	 * This index identifies this hvcs device as the complement to a
	 * specific tty index.
	 */
	unsigned int index;

	/*
	 * Used to tell the driver kernel_thread what operations need to take
	 * place upon this hvcs_struct instance.
	 */
	int todo_mask;

	/*
	 * This buffer is required so that when hvcs_write_room() reports that
	 * it can send HVCS_BUFF_LEN characters that it will buffer the full
	 * HVCS_BUFF_LEN characters if need be.  This is essential for opost
	 * writes since they do not do high level buffering and expect to be
	 * able to send what the driver commits to sending buffering
	 * [e.g. tab to space conversions in n_tty.c opost()].
	 */
	char buffer[HVCS_BUFF_LEN];
	int chars_in_buffer;

	/*
	 * Any variable below is valid before a tty is connected and
	 * stays valid after the tty is disconnected.  These shouldn't be
	 * whacked until the kobject refcount reaches zero though some entries
	 * may be changed via sysfs initiatives.
	 */
	int connected;		/* is the vty-server currently connected to a vty? */
	uint32_t p_unit_address;	/* partner unit address */
	uint32_t p_partition_ID;	/* partner partition ID */
	char p_location_code[HVCS_CLC_LENGTH + 1];	/* CLC + Null Term */
	struct list_head next;	/* list management */
	struct vio_dev *vdev;
};

/* All live hvcs_structs, guarded by hvcs_structs_lock. */
static LIST_HEAD(hvcs_structs);
static DEFINE_SPINLOCK(hvcs_structs_lock);
/* NOTE(review): presumably serializes one-time setup in hvcs_initialize(),
 * which is invoked from hvcs_probe() — confirm against hvcs_initialize(). */
static DEFINE_MUTEX(hvcs_init_mutex);

/* Forward declarations for the tty ops and internal helpers defined below. */
static void hvcs_unthrottle(struct tty_struct *tty);
static void hvcs_throttle(struct tty_struct *tty);
static irqreturn_t hvcs_handle_interrupt(int irq, void *dev_instance);

static int hvcs_write(struct tty_struct *tty,
		const unsigned char *buf, int count);
static int hvcs_write_room(struct tty_struct *tty);
static int hvcs_chars_in_buffer(struct tty_struct *tty);

static int hvcs_has_pi(struct hvcs_struct *hvcsd);
static void hvcs_set_pi(struct hvcs_partner_info *pi,
		struct hvcs_struct *hvcsd);
static int hvcs_get_pi(struct hvcs_struct *hvcsd);
static int hvcs_rescan_devices_list(void);

static int hvcs_partner_connect(struct hvcs_struct *hvcsd);
static void hvcs_partner_free(struct hvcs_struct *hvcsd);

static int hvcs_enable_device(struct hvcs_struct *hvcsd,
		uint32_t unit_address, unsigned int irq, struct vio_dev *dev);

static int hvcs_open(struct tty_struct *tty, struct file *filp);
static void hvcs_close(struct tty_struct *tty, struct file *filp);
static void hvcs_hangup(struct tty_struct * tty);

static int hvcs_probe(struct vio_dev *dev, const struct
vio_device_id *id);
static int hvcs_remove(struct vio_dev *dev);
static int __init hvcs_module_init(void);
static void __exit hvcs_module_exit(void);
static int hvcs_initialize(void);

/* Bits for hvcs_struct.todo_mask, consumed by the khvcsd kthread. */
#define HVCS_SCHED_READ	0x00000001
#define HVCS_QUICK_READ	0x00000002
#define HVCS_TRY_WRITE	0x00000004
#define HVCS_READ_MASK	(HVCS_SCHED_READ | HVCS_QUICK_READ)

static inline struct hvcs_struct *from_vio_dev(struct vio_dev *viod)
{
	return dev_get_drvdata(&viod->dev);
}
/* The sysfs interface for the driver and devices */

/* sysfs read: partner vty unit address, printed in hex. */
static ssize_t hvcs_partner_vtys_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct vio_dev *viod = to_vio_dev(dev);
	struct hvcs_struct *hvcsd = from_vio_dev(viod);
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&hvcsd->lock, flags);
	retval = sprintf(buf, "%X\n", hvcsd->p_unit_address);
	spin_unlock_irqrestore(&hvcsd->lock, flags);
	return retval;
}
static DEVICE_ATTR(partner_vtys, S_IRUGO, hvcs_partner_vtys_show, NULL);

/* sysfs read: partner converged location code (CLC). */
static ssize_t hvcs_partner_clcs_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct vio_dev *viod = to_vio_dev(dev);
	struct hvcs_struct *hvcsd = from_vio_dev(viod);
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&hvcsd->lock, flags);
	retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
	spin_unlock_irqrestore(&hvcsd->lock, flags);
	return retval;
}
static DEVICE_ATTR(partner_clcs, S_IRUGO, hvcs_partner_clcs_show, NULL);

/* sysfs write: always rejected — selecting among multiple partner vtys is
 * unsupported by firmware at this time. */
static ssize_t hvcs_current_vty_store(struct device *dev,
		struct device_attribute *attr, const char * buf, size_t count)
{
	/*
	 * Don't need this feature at the present time because firmware doesn't
	 * yet support multiple partners.
	 */
	printk(KERN_INFO "HVCS: Denied current_vty change: -EPERM.\n");
	return -EPERM;
}

/* sysfs read: the currently selected partner's location code. */
static ssize_t hvcs_current_vty_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct vio_dev *viod = to_vio_dev(dev);
	struct hvcs_struct *hvcsd = from_vio_dev(viod);
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&hvcsd->lock, flags);
	retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
	spin_unlock_irqrestore(&hvcsd->lock, flags);
	return retval;
}

static DEVICE_ATTR(current_vty,
	S_IRUGO | S_IWUSR, hvcs_current_vty_show, hvcs_current_vty_store);

/* sysfs write: "0" forcibly frees the vty-server/vty connection, but only
 * when the device node is unopened and a connection actually exists. */
static ssize_t hvcs_vterm_state_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct vio_dev *viod = to_vio_dev(dev);
	struct hvcs_struct *hvcsd = from_vio_dev(viod);
	unsigned long flags;

	/* writing a '0' to this sysfs entry will result in the disconnect. */
	if (simple_strtol(buf, NULL, 0) != 0)
		return -EINVAL;

	spin_lock_irqsave(&hvcsd->lock, flags);

	if (hvcsd->port.count > 0) {
		spin_unlock_irqrestore(&hvcsd->lock, flags);
		printk(KERN_INFO "HVCS: vterm state unchanged.  "
				"The hvcs device node is still in use.\n");
		return -EPERM;
	}

	if (hvcsd->connected == 0) {
		spin_unlock_irqrestore(&hvcsd->lock, flags);
		printk(KERN_INFO "HVCS: vterm state unchanged. The"
				" vty-server is not connected to a vty.\n");
		return -EPERM;
	}

	hvcs_partner_free(hvcsd);
	printk(KERN_INFO "HVCS: Closed vty-server@%X and"
			" partner vty@%X:%d connection.\n",
			hvcsd->vdev->unit_address,
			hvcsd->p_unit_address,
			(uint32_t)hvcsd->p_partition_ID);

	spin_unlock_irqrestore(&hvcsd->lock, flags);
	return count;
}

/* sysfs read: 1 if connected to a partner vty, 0 otherwise. */
static ssize_t hvcs_vterm_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct vio_dev *viod = to_vio_dev(dev);
	struct hvcs_struct *hvcsd = from_vio_dev(viod);
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&hvcsd->lock, flags);
	retval = sprintf(buf, "%d\n", hvcsd->connected);
	spin_unlock_irqrestore(&hvcsd->lock, flags);
	return retval;
}
static DEVICE_ATTR(vterm_state, S_IRUGO | S_IWUSR,
		hvcs_vterm_state_show, hvcs_vterm_state_store);

/* sysfs read: the /dev/hvcs* index coupled to this vty-server. */
static ssize_t hvcs_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct vio_dev *viod = to_vio_dev(dev);
	struct hvcs_struct *hvcsd = from_vio_dev(viod);
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&hvcsd->lock, flags);
	retval = sprintf(buf, "%d\n", hvcsd->index);
	spin_unlock_irqrestore(&hvcsd->lock, flags);
	return retval;
}

static DEVICE_ATTR(index, S_IRUGO, hvcs_index_show, NULL);

static struct attribute *hvcs_attrs[] = {
	&dev_attr_partner_vtys.attr,
	&dev_attr_partner_clcs.attr,
	&dev_attr_current_vty.attr,
	&dev_attr_vterm_state.attr,
	&dev_attr_index.attr,
	NULL,
};

static struct attribute_group hvcs_attr_group = {
	.attrs = hvcs_attrs,
};

static ssize_t hvcs_rescan_show(struct device_driver *ddp, char *buf)
{
	/* A 1 means it is updating, a 0 means it is done updating */
	return snprintf(buf, PAGE_SIZE, "%d\n", hvcs_rescan_status);
}

/* Driver-level sysfs write: trigger a partner-info rescan of every
 * vty-server, compensating for firmware not signaling partner changes. */
static ssize_t hvcs_rescan_store(struct device_driver *ddp, const char * buf,
		size_t count)
{
	if ((simple_strtol(buf, NULL, 0) != 1)
		&& (hvcs_rescan_status != 0))
		return -EINVAL;

	hvcs_rescan_status = 1;
	printk(KERN_INFO "HVCS: rescanning partner info for all"
		" vty-servers.\n");
	hvcs_rescan_devices_list();
	hvcs_rescan_status = 0;
	return count;
}

static DRIVER_ATTR(rescan, S_IRUGO | S_IWUSR, hvcs_rescan_show,
		hvcs_rescan_store);

/* Wake khvcsd; hvcs_kicked is re-checked by khvcsd() before it sleeps, so
 * a kick is never lost even if the thread is mid-transition to sleep. */
static void hvcs_kick(void)
{
	hvcs_kicked = 1;
	wmb();
	wake_up_process(hvcs_task);
}

/* tty op: resume reads by rescheduling a read and kicking the kthread. */
static void hvcs_unthrottle(struct tty_struct *tty)
{
	struct hvcs_struct *hvcsd = tty->driver_data;
	unsigned long flags;

	spin_lock_irqsave(&hvcsd->lock, flags);
	hvcsd->todo_mask |= HVCS_SCHED_READ;
	spin_unlock_irqrestore(&hvcsd->lock, flags);
	hvcs_kick();
}

/* tty op: stop the inbound flood by masking the vio device's interrupts;
 * hvcs_io() re-enables them once data is drained. */
static void hvcs_throttle(struct tty_struct *tty)
{
	struct hvcs_struct *hvcsd = tty->driver_data;
	unsigned long flags;

	spin_lock_irqsave(&hvcsd->lock, flags);
	vio_disable_interrupts(hvcsd->vdev);
	spin_unlock_irqrestore(&hvcsd->lock, flags);
}

/*
 * If the device is being removed we don't have to worry about this interrupt
 * handler taking any further interrupts because they are disabled which means
 * the hvcs_struct will always be valid in this handler.
 */
static irqreturn_t hvcs_handle_interrupt(int irq, void *dev_instance)
{
	struct hvcs_struct *hvcsd = dev_instance;

	/* Defer the actual I/O to khvcsd; just note that a read is pending. */
	spin_lock(&hvcsd->lock);
	vio_disable_interrupts(hvcsd->vdev);
	hvcsd->todo_mask |= HVCS_SCHED_READ;
	spin_unlock(&hvcsd->lock);
	hvcs_kick();

	return IRQ_HANDLED;
}

/* This function must be called with the hvcsd->lock held */
static void hvcs_try_write(struct hvcs_struct *hvcsd)
{
	uint32_t unit_address = hvcsd->vdev->unit_address;
	struct tty_struct *tty = hvcsd->port.tty;
	int sent;

	if (hvcsd->todo_mask & HVCS_TRY_WRITE) {
		/* won't send partial writes */
		sent = hvc_put_chars(unit_address,
				&hvcsd->buffer[0],
				hvcsd->chars_in_buffer );
		if (sent > 0) {
			hvcsd->chars_in_buffer = 0;
			/* wmb(); */
			hvcsd->todo_mask &= ~(HVCS_TRY_WRITE);
			/* wmb(); */

			/*
			 * We are still obligated to deliver the data to the
			 * hypervisor even if the tty has been closed because
			 * we committed to delivering it.  But don't try to wake
			 * a non-existent tty.
			 */
			if (tty) {
				tty_wakeup(tty);
			}
		}
	}
}

/*
 * Push any buffered output to the hypervisor and pull available input into
 * the tty flip buffer.  Returns the (possibly updated) todo_mask so khvcsd
 * knows whether more work is pending on this adapter.
 */
static int hvcs_io(struct hvcs_struct *hvcsd)
{
	uint32_t unit_address;
	struct tty_struct *tty;
	char buf[HVCS_BUFF_LEN] __ALIGNED__;
	unsigned long flags;
	int got = 0;

	spin_lock_irqsave(&hvcsd->lock, flags);

	unit_address = hvcsd->vdev->unit_address;
	tty = hvcsd->port.tty;

	hvcs_try_write(hvcsd);

	if (!tty || test_bit(TTY_THROTTLED, &tty->flags)) {
		hvcsd->todo_mask &= ~(HVCS_READ_MASK);
		goto bail;
	} else if (!(hvcsd->todo_mask & (HVCS_READ_MASK)))
		goto bail;

	/* remove the read masks */
	hvcsd->todo_mask &= ~(HVCS_READ_MASK);

	if (tty_buffer_request_room(&hvcsd->port, HVCS_BUFF_LEN) >= HVCS_BUFF_LEN) {
		got = hvc_get_chars(unit_address,
				&buf[0],
				HVCS_BUFF_LEN);
		tty_insert_flip_string(&hvcsd->port, buf, got);
	}

	/* Give the TTY time to process the data we just sent. */
	if (got)
		hvcsd->todo_mask |= HVCS_QUICK_READ;

	spin_unlock_irqrestore(&hvcsd->lock, flags);
	/* This is synch because tty->low_latency == 1 */
	if(got)
		tty_flip_buffer_push(&hvcsd->port);

	if (!got) {
		/* Do this _after_ the flip_buffer_push */
		spin_lock_irqsave(&hvcsd->lock, flags);
		vio_enable_interrupts(hvcsd->vdev);
		spin_unlock_irqrestore(&hvcsd->lock, flags);
	}

	return hvcsd->todo_mask;

 bail:
	spin_unlock_irqrestore(&hvcsd->lock, flags);
	return hvcsd->todo_mask;
}

/*
 * The driver kernel thread: loops over every adapter servicing pending
 * reads/writes, then sleeps until hvcs_kick() wakes it or it is stopped.
 */
static int khvcsd(void *unused)
{
	struct hvcs_struct *hvcsd;
	int hvcs_todo_mask;

	__set_current_state(TASK_RUNNING);

	do {
		hvcs_todo_mask = 0;
		hvcs_kicked = 0;
		wmb();

		spin_lock(&hvcs_structs_lock);
		list_for_each_entry(hvcsd, &hvcs_structs, next) {
			hvcs_todo_mask |= hvcs_io(hvcsd);
		}
		spin_unlock(&hvcs_structs_lock);

		/*
		 * If any of the hvcs adapters want to try a write or quick read
		 * don't schedule(), yield a smidgen then execute the hvcs_io
		 * thread again for those that want the write.
		 */
		if (hvcs_todo_mask & (HVCS_TRY_WRITE | HVCS_QUICK_READ)) {
			yield();
			continue;
		}

		set_current_state(TASK_INTERRUPTIBLE);
		if (!hvcs_kicked)
			schedule();
		__set_current_state(TASK_RUNNING);
	} while (!kthread_should_stop());

	return 0;
}

static struct vio_device_id hvcs_driver_table[] = {
	{"serial-server", "hvterm2"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, hvcs_driver_table);

/* Hand a tty index back to the free pool (-1 marks a slot as free). */
static void hvcs_return_index(int index)
{
	/* Paranoia check */
	if (!hvcs_index_list)
		return;
	if (index < 0 || index >= hvcs_index_count)
		return;
	if (hvcs_index_list[index] == -1)
		return;
	else
		hvcs_index_list[index] = -1;
}

/*
 * tty_port destructor: runs when the last tty_port_put() drops the final
 * reference.  Unlinks the adapter, frees its partner connection if still
 * connected, releases its index, removes sysfs attrs and frees the struct.
 */
static void hvcs_destruct_port(struct tty_port *p)
{
	struct hvcs_struct *hvcsd = container_of(p, struct hvcs_struct, port);
	struct vio_dev *vdev;
	unsigned long flags;

	spin_lock(&hvcs_structs_lock);
	spin_lock_irqsave(&hvcsd->lock, flags);

	/* the list_del poisons the pointers */
	list_del(&(hvcsd->next));

	if (hvcsd->connected == 1) {
		hvcs_partner_free(hvcsd);
		printk(KERN_INFO "HVCS: Closed vty-server@%X and"
				" partner vty@%X:%d connection.\n",
				hvcsd->vdev->unit_address,
				hvcsd->p_unit_address,
				(uint32_t)hvcsd->p_partition_ID);
	}
	printk(KERN_INFO "HVCS: Destroyed hvcs_struct for vty-server@%X.\n",
			hvcsd->vdev->unit_address);

	vdev = hvcsd->vdev;
	hvcsd->vdev = NULL;

	hvcsd->p_unit_address = 0;
	hvcsd->p_partition_ID = 0;
	hvcs_return_index(hvcsd->index);
	memset(&hvcsd->p_location_code[0], 0x00, HVCS_CLC_LENGTH + 1);

	spin_unlock_irqrestore(&hvcsd->lock, flags);
	spin_unlock(&hvcs_structs_lock);

	sysfs_remove_group(&vdev->dev.kobj, &hvcs_attr_group);

	kfree(hvcsd);
}

static const struct tty_port_operations hvcs_port_ops = {
	.destruct = hvcs_destruct_port,
};

/* Claim and return the lowest free tty index, or a negative value if the
 * pool is missing (-EFAULT) or exhausted (-1). */
static int hvcs_get_index(void)
{
	int i;
	/* Paranoia check */
	if (!hvcs_index_list) {
		printk(KERN_ERR "HVCS: hvcs_index_list NOT valid!.\n");
		return -EFAULT;
	}
	/* Find the numerically lowest first free index. */
	for(i = 0; i < hvcs_index_count; i++) {
		if (hvcs_index_list[i] == -1) {
			hvcs_index_list[i] = 0;
			return i;
		}
	}
	return -1;
}

/*
 * vio bus probe callback: allocate and initialize an hvcs_struct for a
 * newly exposed vty-server adapter and publish its sysfs attributes.
 */
static int hvcs_probe(
	struct vio_dev *dev,
	const struct vio_device_id *id)
{
	struct hvcs_struct *hvcsd;
	int index, rc;
	int retval;

	if (!dev || !id) {
		printk(KERN_ERR "HVCS: probed with invalid parameter.\n");
		return -EPERM;
	}

	/* Make sure we are properly initialized */
	rc = hvcs_initialize();
	if (rc) {
		pr_err("HVCS: Failed to initialize core driver.\n");
		return rc;
	}

	/* early to avoid cleanup on failure */
	index = hvcs_get_index();
	if (index < 0) {
		return -EFAULT;
	}

	hvcsd = kzalloc(sizeof(*hvcsd), GFP_KERNEL);
	if (!hvcsd)
		return -ENODEV;

	tty_port_init(&hvcsd->port);
	hvcsd->port.ops = &hvcs_port_ops;
	spin_lock_init(&hvcsd->lock);

	hvcsd->vdev = dev;
	dev_set_drvdata(&dev->dev, hvcsd);

	hvcsd->index = index;

	/* hvcsd->index = ++hvcs_struct_count; */
	hvcsd->chars_in_buffer = 0;
	hvcsd->todo_mask = 0;
	hvcsd->connected = 0;

	/*
	 * This will populate the hvcs_struct's partner info fields for the
	 * first time.
	 */
	if (hvcs_get_pi(hvcsd)) {
		printk(KERN_ERR "HVCS: Failed to fetch partner"
			" info for vty-server@%X on device probe.\n",
			hvcsd->vdev->unit_address);
	}

	/*
	 * If a user app opens a tty that corresponds to this vty-server before
	 * the hvcs_struct has been added to the devices list then the user app
	 * will get -ENODEV.
	 */
	spin_lock(&hvcs_structs_lock);
	list_add_tail(&(hvcsd->next), &hvcs_structs);
	spin_unlock(&hvcs_structs_lock);

	retval = sysfs_create_group(&dev->dev.kobj, &hvcs_attr_group);
	if (retval) {
		/*
		 * NOTE(review): on this failure path hvcsd remains on
		 * hvcs_structs, the claimed index is not returned and the
		 * tty_port reference is not dropped — looks like a leak;
		 * confirm whether the vio core compensates on probe failure.
		 */
		printk(KERN_ERR "HVCS: Can't create sysfs attrs for vty-server@%X\n",
		       hvcsd->vdev->unit_address);
		return retval;
	}

	printk(KERN_INFO "HVCS: vty-server@%X added to the vio bus.\n", dev->unit_address);

	/*
	 * DON'T enable interrupts here because there is no user to receive the
	 * data.
	 */
	return 0;
}

/*
 * vio bus remove callback: drop our reference and hang up any attached tty;
 * the final tty_port_put() triggers hvcs_destruct_port() for real teardown.
 */
static int hvcs_remove(struct vio_dev *dev)
{
	struct hvcs_struct *hvcsd = dev_get_drvdata(&dev->dev);
	unsigned long flags;
	struct tty_struct *tty;

	if (!hvcsd)
		return -ENODEV;

	/* By this time the vty-server won't be getting any more interrupts */

	spin_lock_irqsave(&hvcsd->lock, flags);

	tty = hvcsd->port.tty;

	spin_unlock_irqrestore(&hvcsd->lock, flags);

	/*
	 * Let the last holder of this object cause it to be removed, which
	 * would probably be tty_hangup below.
	 */
	tty_port_put(&hvcsd->port);

	/*
	 * The hangup is a scheduled function which will auto chain call
	 * hvcs_hangup.  The tty should always be valid at this time unless a
	 * simultaneous tty close already cleaned up the hvcs_struct.
	 */
	if (tty)
		tty_hangup(tty);

	printk(KERN_INFO "HVCS: vty-server@%X removed from the"
			" vio bus.\n", dev->unit_address);
	return 0;
};

static struct vio_driver hvcs_vio_driver = {
	.id_table	= hvcs_driver_table,
	.probe		= hvcs_probe,
	.remove		= hvcs_remove,
	.name		= hvcs_driver_name,
};

/* Only called from hvcs_get_pi please */
static void hvcs_set_pi(struct hvcs_partner_info *pi, struct hvcs_struct *hvcsd)
{
	hvcsd->p_unit_address = pi->unit_address;
	hvcsd->p_partition_ID = pi->partition_ID;

	/* copy the null-term char too */
	strlcpy(&hvcsd->p_location_code[0],
			&pi->location_code[0],
			sizeof(hvcsd->p_location_code));
}

/*
 * Traverse the list and add the partner info that is found to the hvcs_struct
 * struct entry. NOTE: At this time I know that partner info will return a
 * single entry but in the future there may be multiple partner info entries per
 * vty-server and you'll want to zero out that list and reset it.  If for some
 * reason you have an old version of this driver but there IS more than one
 * partner info then hvcsd->p_* will hold the last partner info data from the
 * firmware query.  A good way to update this code would be to replace the three
 * partner info fields in hvcs_struct with a list of hvcs_partner_info
 * instances.
 *
 * This function must be called with the hvcsd->lock held.
 */
static int hvcs_get_pi(struct hvcs_struct *hvcsd)
{
	struct hvcs_partner_info *pi;
	uint32_t unit_address = hvcsd->vdev->unit_address;
	struct list_head head;
	int retval;

	/* hvcs_pi_buff is shared by all adapters; serialize its use. */
	spin_lock(&hvcs_pi_lock);
	if (!hvcs_pi_buff) {
		spin_unlock(&hvcs_pi_lock);
		return -EFAULT;
	}
	retval = hvcs_get_partner_info(unit_address, &head, hvcs_pi_buff);
	spin_unlock(&hvcs_pi_lock);
	if (retval) {
		printk(KERN_ERR "HVCS: Failed to fetch partner"
			" info for vty-server@%x.\n", unit_address);
		return retval;
	}

	/* nixes the values if the partner vty went away */
	hvcsd->p_unit_address = 0;
	hvcsd->p_partition_ID = 0;

	list_for_each_entry(pi, &head, node)
		hvcs_set_pi(pi, hvcsd);

	hvcs_free_partner_info(&head);
	return 0;
}

/*
 * This function is executed by the driver "rescan" sysfs entry.  It shouldn't
 * be executed elsewhere, in order to prevent deadlock issues.
 */
static int hvcs_rescan_devices_list(void)
{
	struct hvcs_struct *hvcsd;
	unsigned long flags;

	spin_lock(&hvcs_structs_lock);

	list_for_each_entry(hvcsd, &hvcs_structs, next) {
		spin_lock_irqsave(&hvcsd->lock, flags);
		hvcs_get_pi(hvcsd);
		spin_unlock_irqrestore(&hvcsd->lock, flags);
	}

	spin_unlock(&hvcs_structs_lock);

	return 0;
}

/*
 * Farm this off into its own function because it could be more complex once
 * multiple partners support is added. This function should be called with
 * the hvcsd->lock held.
 */
static int hvcs_has_pi(struct hvcs_struct *hvcsd)
{
	if ((!hvcsd->p_unit_address) || (!hvcsd->p_partition_ID))
		return 0;
	return 1;
}

/*
 * NOTE: It is possible that the super admin removed a partner vty and then
 * added a different vty as the new partner.
 *
 * This function must be called with the hvcsd->lock held.
 */
static int hvcs_partner_connect(struct hvcs_struct *hvcsd)
{
	int retval;
	unsigned int unit_address = hvcsd->vdev->unit_address;

	/*
	 * If there wasn't any pi when the device was added it doesn't meant
This driver isn't notified when a new partner * vty is added to a vty-server so we discover changes on our own. * Please see comments in hvcs_register_connection() for justification * of this bizarre code. */ retval = hvcs_register_connection(unit_address, hvcsd->p_partition_ID, hvcsd->p_unit_address); if (!retval) { hvcsd->connected = 1; return 0; } else if (retval != -EINVAL) return retval; /* * As per the spec re-get the pi and try again if -EINVAL after the * first connection attempt. */ if (hvcs_get_pi(hvcsd)) return -ENOMEM; if (!hvcs_has_pi(hvcsd)) return -ENODEV; retval = hvcs_register_connection(unit_address, hvcsd->p_partition_ID, hvcsd->p_unit_address); if (retval != -EINVAL) { hvcsd->connected = 1; return retval; } /* * EBUSY is the most likely scenario though the vty could have been * removed or there really could be an hcall error due to the parameter * data but thanks to ambiguous firmware return codes we can't really * tell. */ printk(KERN_INFO "HVCS: vty-server or partner" " vty is busy. Try again later.\n"); return -EBUSY; } /* This function must be called with the hvcsd->lock held */ static void hvcs_partner_free(struct hvcs_struct *hvcsd) { int retval; do { retval = hvcs_free_connection(hvcsd->vdev->unit_address); } while (retval == -EBUSY); hvcsd->connected = 0; } /* This helper function must be called WITHOUT the hvcsd->lock held */ static int hvcs_enable_device(struct hvcs_struct *hvcsd, uint32_t unit_address, unsigned int irq, struct vio_dev *vdev) { unsigned long flags; int rc; /* * It is possible that the vty-server was removed between the time that * the conn was registered and now. */ if (!(rc = request_irq(irq, &hvcs_handle_interrupt, 0, "ibmhvcs", hvcsd))) { /* * It is possible the vty-server was removed after the irq was * requested but before we have time to enable interrupts. 
*/ if (vio_enable_interrupts(vdev) == H_SUCCESS) return 0; else { printk(KERN_ERR "HVCS: int enable failed for" " vty-server@%X.\n", unit_address); free_irq(irq, hvcsd); } } else printk(KERN_ERR "HVCS: irq req failed for" " vty-server@%X.\n", unit_address); spin_lock_irqsave(&hvcsd->lock, flags); hvcs_partner_free(hvcsd); spin_unlock_irqrestore(&hvcsd->lock, flags); return rc; } /* * This always increments the kref ref count if the call is successful. * Please remember to dec when you are done with the instance. * * NOTICE: Do NOT hold either the hvcs_struct.lock or hvcs_structs_lock when * calling this function or you will get deadlock. */ static struct hvcs_struct *hvcs_get_by_index(int index) { struct hvcs_struct *hvcsd; unsigned long flags; spin_lock(&hvcs_structs_lock); list_for_each_entry(hvcsd, &hvcs_structs, next) { spin_lock_irqsave(&hvcsd->lock, flags); if (hvcsd->index == index) { tty_port_get(&hvcsd->port); spin_unlock_irqrestore(&hvcsd->lock, flags); spin_unlock(&hvcs_structs_lock); return hvcsd; } spin_unlock_irqrestore(&hvcsd->lock, flags); } spin_unlock(&hvcs_structs_lock); return NULL; } static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty) { struct hvcs_struct *hvcsd; struct vio_dev *vdev; unsigned long unit_address, flags; unsigned int irq; int retval; /* * Is there a vty-server that shares the same index? * This function increments the kref index. 
*/ hvcsd = hvcs_get_by_index(tty->index); if (!hvcsd) { printk(KERN_WARNING "HVCS: open failed, no device associated" " with tty->index %d.\n", tty->index); return -ENODEV; } spin_lock_irqsave(&hvcsd->lock, flags); if (hvcsd->connected == 0) { retval = hvcs_partner_connect(hvcsd); if (retval) { spin_unlock_irqrestore(&hvcsd->lock, flags); printk(KERN_WARNING "HVCS: partner connect failed.\n"); goto err_put; } } hvcsd->port.count = 0; hvcsd->port.tty = tty; tty->driver_data = hvcsd; memset(&hvcsd->buffer[0], 0x00, HVCS_BUFF_LEN); /* * Save these in the spinlock for the enable operations that need them * outside of the spinlock. */ irq = hvcsd->vdev->irq; vdev = hvcsd->vdev; unit_address = hvcsd->vdev->unit_address; hvcsd->todo_mask |= HVCS_SCHED_READ; spin_unlock_irqrestore(&hvcsd->lock, flags); /* * This must be done outside of the spinlock because it requests irqs * and will grab the spinlock and free the connection if it fails. */ retval = hvcs_enable_device(hvcsd, unit_address, irq, vdev); if (retval) { printk(KERN_WARNING "HVCS: enable device failed.\n"); goto err_put; } retval = tty_port_install(&hvcsd->port, driver, tty); if (retval) goto err_irq; return 0; err_irq: spin_lock_irqsave(&hvcsd->lock, flags); vio_disable_interrupts(hvcsd->vdev); spin_unlock_irqrestore(&hvcsd->lock, flags); free_irq(irq, hvcsd); err_put: tty_port_put(&hvcsd->port); return retval; } /* * This is invoked via the tty_open interface when a user app connects to the * /dev node. 
 */
static int hvcs_open(struct tty_struct *tty, struct file *filp)
{
	struct hvcs_struct *hvcsd = tty->driver_data;
	unsigned long flags;

	spin_lock_irqsave(&hvcsd->lock, flags);
	hvcsd->port.count++;
	hvcsd->todo_mask |= HVCS_SCHED_READ;
	spin_unlock_irqrestore(&hvcsd->lock, flags);

	hvcs_kick();

	printk(KERN_INFO "HVCS: vty-server@%X connection opened.\n",
		hvcsd->vdev->unit_address );

	return 0;
}

static void hvcs_close(struct tty_struct *tty, struct file *filp)
{
	struct hvcs_struct *hvcsd;
	unsigned long flags;
	int irq;

	/*
	 * Is someone trying to close the file associated with this device after
	 * we have hung up?  If so tty->driver_data wouldn't be valid.
	 */
	if (tty_hung_up_p(filp))
		return;

	/*
	 * No driver_data means that this close was probably issued after a
	 * failed hvcs_open by the tty layer's release_dev() api and we can just
	 * exit cleanly.
	 */
	if (!tty->driver_data)
		return;

	hvcsd = tty->driver_data;

	spin_lock_irqsave(&hvcsd->lock, flags);
	if (--hvcsd->port.count == 0) {

		vio_disable_interrupts(hvcsd->vdev);

		/*
		 * NULL this early so that the kernel_thread doesn't try to
		 * execute any operations on the TTY even though it is obligated
		 * to deliver any pending I/O to the hypervisor.
		 */
		hvcsd->port.tty = NULL;

		irq = hvcsd->vdev->irq;
		spin_unlock_irqrestore(&hvcsd->lock, flags);

		/* Let khvcsd flush any committed output before we tear down. */
		tty_wait_until_sent_from_close(tty, HVCS_CLOSE_WAIT);

		/*
		 * This line is important because it tells hvcs_open that this
		 * device needs to be re-configured the next time hvcs_open is
		 * called.
		 */
		tty->driver_data = NULL;

		free_irq(irq, hvcsd);
		return;
	} else if (hvcsd->port.count < 0) {
		printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
				" is missmanaged.\n",
				hvcsd->vdev->unit_address, hvcsd->port.count);
	}

	spin_unlock_irqrestore(&hvcsd->lock, flags);
}

/* tty cleanup op: drop the reference taken in hvcs_install(). */
static void hvcs_cleanup(struct tty_struct * tty)
{
	struct hvcs_struct *hvcsd = tty->driver_data;

	tty_port_put(&hvcsd->port);
}

static void hvcs_hangup(struct tty_struct * tty)
{
	struct hvcs_struct *hvcsd = tty->driver_data;
	unsigned long flags;
	int temp_open_count;
	int irq;

	spin_lock_irqsave(&hvcsd->lock, flags);
	/* Preserve this so that we know how many kref refs to put */
	temp_open_count = hvcsd->port.count;

	/*
	 * Don't kref put inside the spinlock because the destruction
	 * callback may use the spinlock and it may get called before the
	 * spinlock has been released.
	 */
	vio_disable_interrupts(hvcsd->vdev);

	hvcsd->todo_mask = 0;

	/* I don't think the tty needs the hvcs_struct pointer after a hangup */
	tty->driver_data = NULL;
	hvcsd->port.tty = NULL;

	hvcsd->port.count = 0;

	/* This will drop any buffered data on the floor which is OK in a hangup
	 * scenario. */
	memset(&hvcsd->buffer[0], 0x00, HVCS_BUFF_LEN);
	hvcsd->chars_in_buffer = 0;

	irq = hvcsd->vdev->irq;

	spin_unlock_irqrestore(&hvcsd->lock, flags);

	free_irq(irq, hvcsd);

	/*
	 * We need to kref_put() for every open_count we have since the
	 * tty_hangup() function doesn't invoke a close per open connection on a
	 * non-console device.
	 */
	while(temp_open_count) {
		--temp_open_count;
		/*
		 * The final put will trigger destruction of the hvcs_struct.
		 * NOTE: If this hangup was signaled from user space then the
		 * final put will never happen.
		 */
		tty_port_put(&hvcsd->port);
	}
}

/*
 * NOTE: This is almost always from_user since user level apps interact with the
I'm trusting that if hvcs_write gets called and interrupted by * hvcs_remove (which removes the target device and executes tty_hangup()) that * tty_hangup will allow hvcs_write time to complete execution before it * terminates our device. */ static int hvcs_write(struct tty_struct *tty, const unsigned char *buf, int count) { struct hvcs_struct *hvcsd = tty->driver_data; unsigned int unit_address; const unsigned char *charbuf; unsigned long flags; int total_sent = 0; int tosend = 0; int result = 0; /* * If they don't check the return code off of their open they may * attempt this even if there is no connected device. */ if (!hvcsd) return -ENODEV; /* Reasonable size to prevent user level flooding */ if (count > HVCS_MAX_FROM_USER) { printk(KERN_WARNING "HVCS write: count being truncated to" " HVCS_MAX_FROM_USER.\n"); count = HVCS_MAX_FROM_USER; } charbuf = buf; spin_lock_irqsave(&hvcsd->lock, flags); /* * Somehow an open succeeded but the device was removed or the * connection terminated between the vty-server and partner vty during * the middle of a write operation? This is a crummy place to do this * but we want to keep it all in the spinlock. */ if (hvcsd->port.count <= 0) { spin_unlock_irqrestore(&hvcsd->lock, flags); return -ENODEV; } unit_address = hvcsd->vdev->unit_address; while (count > 0) { tosend = min(count, (HVCS_BUFF_LEN - hvcsd->chars_in_buffer)); /* * No more space, this probably means that the last call to * hvcs_write() didn't succeed and the buffer was filled up. */ if (!tosend) break; memcpy(&hvcsd->buffer[hvcsd->chars_in_buffer], &charbuf[total_sent], tosend); hvcsd->chars_in_buffer += tosend; result = 0; /* * If this is true then we don't want to try writing to the * hypervisor because that is the kernel_threads job now. We'll * just add to the buffer. 
*/ if (!(hvcsd->todo_mask & HVCS_TRY_WRITE)) /* won't send partial writes */ result = hvc_put_chars(unit_address, &hvcsd->buffer[0], hvcsd->chars_in_buffer); /* * Since we know we have enough room in hvcsd->buffer for * tosend we record that it was sent regardless of whether the * hypervisor actually took it because we have it buffered. */ total_sent+=tosend; count-=tosend; if (result == 0) { hvcsd->todo_mask |= HVCS_TRY_WRITE; hvcs_kick(); break; } hvcsd->chars_in_buffer = 0; /* * Test after the chars_in_buffer reset otherwise this could * deadlock our writes if hvc_put_chars fails. */ if (result < 0) break; } spin_unlock_irqrestore(&hvcsd->lock, flags); if (result == -1) return -EIO; else return total_sent; } /* * This is really asking how much can we guarantee that we can send or that we * absolutely WILL BUFFER if we can't send it. This driver MUST honor the * return value, hence the reason for hvcs_struct buffering. */ static int hvcs_write_room(struct tty_struct *tty) { struct hvcs_struct *hvcsd = tty->driver_data; if (!hvcsd || hvcsd->port.count <= 0) return 0; return HVCS_BUFF_LEN - hvcsd->chars_in_buffer; } static int hvcs_chars_in_buffer(struct tty_struct *tty) { struct hvcs_struct *hvcsd = tty->driver_data; return hvcsd->chars_in_buffer; } static const struct tty_operations hvcs_ops = { .install = hvcs_install, .open = hvcs_open, .close = hvcs_close, .cleanup = hvcs_cleanup, .hangup = hvcs_hangup, .write = hvcs_write, .write_room = hvcs_write_room, .chars_in_buffer = hvcs_chars_in_buffer, .unthrottle = hvcs_unthrottle, .throttle = hvcs_throttle, }; static int hvcs_alloc_index_list(int n) { int i; hvcs_index_list = kmalloc(n * sizeof(hvcs_index_count),GFP_KERNEL); if (!hvcs_index_list) return -ENOMEM; hvcs_index_count = n; for (i = 0; i < hvcs_index_count; i++) hvcs_index_list[i] = -1; return 0; } static void hvcs_free_index_list(void) { /* Paranoia check to be thorough. 
*/ kfree(hvcs_index_list); hvcs_index_list = NULL; hvcs_index_count = 0; } static int hvcs_initialize(void) { int rc, num_ttys_to_alloc; mutex_lock(&hvcs_init_mutex); if (hvcs_task) { mutex_unlock(&hvcs_init_mutex); return 0; } /* Has the user specified an overload with an insmod param? */ if (hvcs_parm_num_devs <= 0 || (hvcs_parm_num_devs > HVCS_MAX_SERVER_ADAPTERS)) { num_ttys_to_alloc = HVCS_DEFAULT_SERVER_ADAPTERS; } else num_ttys_to_alloc = hvcs_parm_num_devs; hvcs_tty_driver = alloc_tty_driver(num_ttys_to_alloc); if (!hvcs_tty_driver) { mutex_unlock(&hvcs_init_mutex); return -ENOMEM; } if (hvcs_alloc_index_list(num_ttys_to_alloc)) { rc = -ENOMEM; goto index_fail; } hvcs_tty_driver->driver_name = hvcs_driver_name; hvcs_tty_driver->name = hvcs_device_node; /* * We'll let the system assign us a major number, indicated by leaving * it blank. */ hvcs_tty_driver->minor_start = HVCS_MINOR_START; hvcs_tty_driver->type = TTY_DRIVER_TYPE_SYSTEM; /* * We role our own so that we DONT ECHO. We can't echo because the * device we are connecting to already echoes by default and this would * throw us into a horrible recursive echo-echo-echo loop. */ hvcs_tty_driver->init_termios = hvcs_tty_termios; hvcs_tty_driver->flags = TTY_DRIVER_REAL_RAW; tty_set_operations(hvcs_tty_driver, &hvcs_ops); /* * The following call will result in sysfs entries that denote the * dynamically assigned major and minor numbers for our devices. 
*/ if (tty_register_driver(hvcs_tty_driver)) { printk(KERN_ERR "HVCS: registration as a tty driver failed.\n"); rc = -EIO; goto register_fail; } hvcs_pi_buff = (unsigned long *) __get_free_page(GFP_KERNEL); if (!hvcs_pi_buff) { rc = -ENOMEM; goto buff_alloc_fail; } hvcs_task = kthread_run(khvcsd, NULL, "khvcsd"); if (IS_ERR(hvcs_task)) { printk(KERN_ERR "HVCS: khvcsd creation failed.\n"); rc = -EIO; goto kthread_fail; } mutex_unlock(&hvcs_init_mutex); return 0; kthread_fail: free_page((unsigned long)hvcs_pi_buff); buff_alloc_fail: tty_unregister_driver(hvcs_tty_driver); register_fail: hvcs_free_index_list(); index_fail: put_tty_driver(hvcs_tty_driver); hvcs_tty_driver = NULL; mutex_unlock(&hvcs_init_mutex); return rc; } static int __init hvcs_module_init(void) { int rc = vio_register_driver(&hvcs_vio_driver); if (rc) { printk(KERN_ERR "HVCS: can't register vio driver\n"); return rc; } pr_info("HVCS: Driver registered.\n"); /* This needs to be done AFTER the vio_register_driver() call or else * the kobjects won't be initialized properly. */ rc = driver_create_file(&(hvcs_vio_driver.driver), &driver_attr_rescan); if (rc) pr_warning("HVCS: Failed to create rescan file (err %d)\n", rc); return 0; } static void __exit hvcs_module_exit(void) { /* * This driver receives hvcs_remove callbacks for each device upon * module removal. */ vio_unregister_driver(&hvcs_vio_driver); if (!hvcs_task) return; /* * This synchronous operation will wake the khvcsd kthread if it is * asleep and will return when khvcsd has terminated. */ kthread_stop(hvcs_task); spin_lock(&hvcs_pi_lock); free_page((unsigned long)hvcs_pi_buff); hvcs_pi_buff = NULL; spin_unlock(&hvcs_pi_lock); driver_remove_file(&hvcs_vio_driver.driver, &driver_attr_rescan); tty_unregister_driver(hvcs_tty_driver); hvcs_free_index_list(); put_tty_driver(hvcs_tty_driver); printk(KERN_INFO "HVCS: driver module removed.\n"); } module_init(hvcs_module_init); module_exit(hvcs_module_exit);
gpl-2.0
nismoryco/kernel-msm
arch/arm/mach-msm/ocmem_api.c
1198
11099
/* Copyright (c) 2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/slab.h> #include <mach/ocmem_priv.h> static DEFINE_MUTEX(ocmem_eviction_lock); static DECLARE_BITMAP(evicted, OCMEM_CLIENT_MAX); static struct ocmem_handle *generate_handle(void) { struct ocmem_handle *handle = NULL; handle = kzalloc(sizeof(struct ocmem_handle), GFP_KERNEL); if (!handle) { pr_err("ocmem: Unable to generate buffer handle\n"); return NULL; } mutex_init(&handle->handle_mutex); return handle; } static int free_handle(struct ocmem_handle *handle) { if (!handle) return -EINVAL; mutex_destroy(&handle->handle_mutex); kfree(handle); handle = NULL; return 0; } static int __ocmem_free(int id, struct ocmem_buf *buf) { int ret = 0; struct ocmem_handle *handle = buffer_to_handle(buf); if (!handle) return -EINVAL; mutex_lock(&handle->handle_mutex); ret = process_free(id, handle); mutex_unlock(&handle->handle_mutex); if (ret) return -EINVAL; free_handle(handle); return 0; } static int __ocmem_shrink(int id, struct ocmem_buf *buf, unsigned long len) { int ret = 0; struct ocmem_handle *handle = buffer_to_handle(buf); if (!handle) return -EINVAL; mutex_lock(&handle->handle_mutex); ret = process_shrink(id, handle, len); mutex_unlock(&handle->handle_mutex); if (ret) return -EINVAL; return 0; } static struct ocmem_buf *__ocmem_allocate_range(int id, unsigned long min, unsigned long max, unsigned long step, bool block, bool wait) { struct ocmem_handle *handle = NULL; int ret = 0; handle = generate_handle(); if (!handle) { pr_err("ocmem: Unable to 
generate handle\n"); return NULL; } mutex_lock(&handle->handle_mutex); ret = process_allocate(id, handle, min, max, step, block, wait); mutex_unlock(&handle->handle_mutex); if (ret) { pr_err("ocmem allocation failed\n"); free_handle(handle); return NULL; } else return handle_to_buffer(handle); } struct ocmem_buf *ocmem_allocate(int client_id, unsigned long size) { bool can_block = false; bool can_wait = true; if (!check_id(client_id)) { pr_err("ocmem: Invalid client id: %d\n", client_id); return NULL; } if (!zone_active(client_id)) { pr_err("ocmem: Client %s (id: %d) not allowed to use OCMEM\n", get_name(client_id), client_id); return NULL; } if (size < OCMEM_MIN_ALLOC) { pr_err("ocmem: requested size %lx must be at least %x\n", size, OCMEM_MIN_ALLOC); return NULL; } if (!IS_ALIGNED(size, OCMEM_MIN_ALIGN)) { pr_err("ocmem: Invalid alignment, size must be %x aligned\n", OCMEM_MIN_ALIGN); return NULL; } return __ocmem_allocate_range(client_id, size, size, size, can_block, can_wait); } EXPORT_SYMBOL(ocmem_allocate); struct ocmem_buf *ocmem_allocate_nowait(int client_id, unsigned long size) { bool can_block = false; bool can_wait = false; if (!check_id(client_id)) { pr_err("ocmem: Invalid client id: %d\n", client_id); return NULL; } if (!zone_active(client_id)) { pr_err("ocmem: Client %s (id: %d) not allowed to use OCMEM\n", get_name(client_id), client_id); return NULL; } if (size < OCMEM_MIN_ALLOC) { pr_err("ocmem: requested size %lx must be at least %x\n", size, OCMEM_MIN_ALLOC); return NULL; } if (!IS_ALIGNED(size, OCMEM_MIN_ALIGN)) { pr_err("ocmem: Invalid alignment, size must be %x aligned\n", OCMEM_MIN_ALIGN); return NULL; } return __ocmem_allocate_range(client_id, size, size, size, can_block, can_wait); } EXPORT_SYMBOL(ocmem_allocate_nowait); struct ocmem_buf *ocmem_allocate_range(int client_id, unsigned long min, unsigned long goal, unsigned long step) { bool can_block = true; bool can_wait = false; if (!check_id(client_id)) { pr_err("ocmem: Invalid client id: 
%d\n", client_id); return NULL; } if (!zone_active(client_id)) { pr_err("ocmem: Client %s (id: %d) not allowed to use OCMEM\n", get_name(client_id), client_id); return NULL; } /* Asynchronous API requires notifier registration */ if (!check_notifier(client_id)) { pr_err("ocmem: No notifier registered for client %d\n", client_id); return NULL; } if (min < OCMEM_MIN_ALLOC) { pr_err("ocmem: requested min size %lx must be at least %x\n", min, OCMEM_MIN_ALLOC); return NULL; } if (!IS_ALIGNED(min | goal | step, OCMEM_MIN_ALIGN)) { pr_err("ocmem: Invalid alignment, args must be %x aligned\n", OCMEM_MIN_ALIGN); return NULL; } return __ocmem_allocate_range(client_id, min, goal, step, can_block, can_wait); } EXPORT_SYMBOL(ocmem_allocate_range); struct ocmem_buf *ocmem_allocate_nb(int client_id, unsigned long size) { bool can_block = true; bool can_wait = false; if (!check_id(client_id)) { pr_err("ocmem: Invalid client id: %d\n", client_id); return NULL; } /* Asynchronous API requires notifier registration */ if (!check_notifier(client_id)) { pr_err("ocmem: No notifier registered for client %d\n", client_id); return NULL; } if (!zone_active(client_id)) { pr_err("ocmem: Client %s (id: %d) not allowed to use OCMEM\n", get_name(client_id), client_id); return NULL; } if (size < OCMEM_MIN_ALLOC) { pr_err("ocmem: requested size %lx must be at least %x\n", size, OCMEM_MIN_ALLOC); return NULL; } if (!IS_ALIGNED(size, OCMEM_MIN_ALIGN)) { pr_err("ocmem: Invalid alignment, args must be %x aligned\n", OCMEM_MIN_ALIGN); return NULL; } return __ocmem_allocate_range(client_id, 0, size, size, can_block, can_wait); } EXPORT_SYMBOL(ocmem_allocate_nb); int ocmem_free(int client_id, struct ocmem_buf *buffer) { if (!check_id(client_id)) { pr_err("ocmem: Invalid client id: %d\n", client_id); return -EINVAL; } if (!zone_active(client_id)) { pr_err("ocmem: Client %s (id: %d) not allowed to use OCMEM\n", get_name(client_id), client_id); return -EINVAL; } if (!buffer) { pr_err("ocmem: Invalid 
buffer\n"); return -EINVAL; } return __ocmem_free(client_id, buffer); } EXPORT_SYMBOL(ocmem_free); int ocmem_shrink(int client_id, struct ocmem_buf *buffer, unsigned long len) { if (!buffer) return -EINVAL; if (len >= buffer->len) return -EINVAL; if (!zone_active(client_id)) { pr_err("ocmem: Client id: %s (id: %d) not allowed to use OCMEM\n", get_name(client_id), client_id); return -EINVAL; } return __ocmem_shrink(client_id, buffer, len); } EXPORT_SYMBOL(ocmem_shrink); int pre_validate_chunk_list(struct ocmem_map_list *list) { int i = 0; struct ocmem_chunk *chunks; if (!list) return -EINVAL; if (list->num_chunks > OCMEM_MAX_CHUNKS || list->num_chunks == 0) return -EINVAL; chunks = list->chunks; if (!chunks) return -EINVAL; for (i = 0; i < list->num_chunks; i++) { if (!chunks[i].ddr_paddr || chunks[i].size < MIN_CHUNK_SIZE || !IS_ALIGNED(chunks[i].size, MIN_CHUNK_SIZE)) { pr_err("Invalid ocmem chunk at index %d (p: %lx, size %lx)\n", i, chunks[i].ddr_paddr, chunks[i].size); return -EINVAL; } } return 0; } int ocmem_map(int client_id, struct ocmem_buf *buffer, struct ocmem_map_list *list) { int ret = 0; struct ocmem_handle *handle = NULL; if (!check_id(client_id)) { pr_err("ocmem: Invalid client id: %d\n", client_id); return -EINVAL; } if (!zone_active(client_id)) { pr_err("ocmem: Client id: %s (id: %d) not allowed to use OCMEM\n", get_name(client_id), client_id); return -EINVAL; } /* Asynchronous API requires notifier registration */ if (!check_notifier(client_id)) { pr_err("ocmem: No notifier registered for client %d\n", client_id); return -EINVAL; } if (!buffer) { pr_err("ocmem: Invalid buffer\n"); return -EINVAL; } if (pre_validate_chunk_list(list) != 0) return -EINVAL; handle = buffer_to_handle(buffer); if (!handle) return -EINVAL; mutex_lock(&handle->handle_mutex); ret = process_xfer(client_id, handle, list, TO_OCMEM); mutex_unlock(&handle->handle_mutex); return ret; } EXPORT_SYMBOL(ocmem_map); int ocmem_unmap(int client_id, struct ocmem_buf *buffer, struct 
ocmem_map_list *list) { int ret = 0; struct ocmem_handle *handle = NULL; if (!check_id(client_id)) { pr_err("ocmem: Invalid client id: %d\n", client_id); return -EINVAL; } if (!zone_active(client_id)) { pr_err("ocmem: Client id: %s (id: %d) not allowed to use OCMEM\n", get_name(client_id), client_id); return -EINVAL; } /* Asynchronous API requires notifier registration */ if (!check_notifier(client_id)) { pr_err("ocmem: No notifier registered for client %d\n", client_id); return -EINVAL; } if (!buffer) { pr_err("ocmem: Invalid buffer\n"); return -EINVAL; } if (pre_validate_chunk_list(list) != 0) return -EINVAL; handle = buffer_to_handle(buffer); mutex_lock(&handle->handle_mutex); ret = process_xfer(client_id, handle, list, TO_DDR); mutex_unlock(&handle->handle_mutex); return ret; } EXPORT_SYMBOL(ocmem_unmap); unsigned long get_max_quota(int client_id) { if (!check_id(client_id)) { pr_err("ocmem: Invalid client id: %d\n", client_id); return 0x0; } return process_quota(client_id); } /* Synchronous eviction/restore calls */ /* Only a single eviction or restoration is allowed */ /* Evictions/Restorations cannot be concurrent with other maps */ int ocmem_evict(int client_id) { int ret = 0; if (!check_id(client_id)) { pr_err("ocmem: Invalid client id: %d\n", client_id); return -EINVAL; } mutex_lock(&ocmem_eviction_lock); if (test_bit(client_id, evicted)) { pr_err("ocmem: Previous eviction was not restored by %d\n", client_id); mutex_unlock(&ocmem_eviction_lock); return -EINVAL; } ret = process_evict(client_id); if (ret == 0) set_bit(client_id, evicted); mutex_unlock(&ocmem_eviction_lock); return ret; } EXPORT_SYMBOL(ocmem_evict); int ocmem_restore(int client_id) { int ret = 0; if (!check_id(client_id)) { pr_err("ocmem: Invalid client id: %d\n", client_id); return -EINVAL; } mutex_lock(&ocmem_eviction_lock); if (!test_bit(client_id, evicted)) { pr_err("ocmem: No previous eviction by %d\n", client_id); mutex_unlock(&ocmem_eviction_lock); return -EINVAL; } ret = 
process_restore(client_id); clear_bit(client_id, evicted); mutex_unlock(&ocmem_eviction_lock); return ret; } EXPORT_SYMBOL(ocmem_restore); /* Wrappers until power control is transitioned to clients */ enum ocmem_power_state ocmem_get_power_state(int client_id, struct ocmem_buf *buffer) { return 0; } int ocmem_set_power_state(int client_id, struct ocmem_buf *buffer, enum ocmem_power_state new_state) { return 0; } struct ocmem_vectors *ocmem_get_vectors(int client_id, struct ocmem_buf *buffer) { return NULL; }
gpl-2.0
Radium-Devices/Radium_shamu
arch/sh/boards/board-sh7757lcr.c
2222
16338
/* * Renesas R0P7757LC0012RL Support. * * Copyright (C) 2009 - 2010 Renesas Solutions Corp. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/irq.h> #include <linux/regulator/fixed.h> #include <linux/regulator/machine.h> #include <linux/spi/spi.h> #include <linux/spi/flash.h> #include <linux/io.h> #include <linux/mmc/host.h> #include <linux/mmc/sh_mmcif.h> #include <linux/mmc/sh_mobile_sdhi.h> #include <linux/sh_eth.h> #include <linux/sh_intc.h> #include <linux/usb/renesas_usbhs.h> #include <cpu/sh7757.h> #include <asm/heartbeat.h> static struct resource heartbeat_resource = { .start = 0xffec005c, /* PUDR */ .end = 0xffec005c, .flags = IORESOURCE_MEM | IORESOURCE_MEM_8BIT, }; static unsigned char heartbeat_bit_pos[] = { 0, 1, 2, 3 }; static struct heartbeat_data heartbeat_data = { .bit_pos = heartbeat_bit_pos, .nr_bits = ARRAY_SIZE(heartbeat_bit_pos), .flags = HEARTBEAT_INVERTED, }; static struct platform_device heartbeat_device = { .name = "heartbeat", .id = -1, .dev = { .platform_data = &heartbeat_data, }, .num_resources = 1, .resource = &heartbeat_resource, }; /* Fast Ethernet */ #define GBECONT 0xffc10100 #define GBECONT_RMII1 BIT(17) #define GBECONT_RMII0 BIT(16) static void sh7757_eth_set_mdio_gate(void *addr) { if (((unsigned long)addr & 0x00000fff) < 0x0800) writel(readl(GBECONT) | GBECONT_RMII0, GBECONT); else writel(readl(GBECONT) | GBECONT_RMII1, GBECONT); } static struct resource sh_eth0_resources[] = { { .start = 0xfef00000, .end = 0xfef001ff, .flags = IORESOURCE_MEM, }, { .start = evt2irq(0xc80), .end = evt2irq(0xc80), .flags = IORESOURCE_IRQ, }, }; static struct sh_eth_plat_data sh7757_eth0_pdata = { .phy = 1, .edmac_endian = EDMAC_LITTLE_ENDIAN, .register_type = SH_ETH_REG_FAST_SH4, .set_mdio_gate = sh7757_eth_set_mdio_gate, }; 
static struct platform_device sh7757_eth0_device = { .name = "sh-eth", .resource = sh_eth0_resources, .id = 0, .num_resources = ARRAY_SIZE(sh_eth0_resources), .dev = { .platform_data = &sh7757_eth0_pdata, }, }; static struct resource sh_eth1_resources[] = { { .start = 0xfef00800, .end = 0xfef009ff, .flags = IORESOURCE_MEM, }, { .start = evt2irq(0xc80), .end = evt2irq(0xc80), .flags = IORESOURCE_IRQ, }, }; static struct sh_eth_plat_data sh7757_eth1_pdata = { .phy = 1, .edmac_endian = EDMAC_LITTLE_ENDIAN, .register_type = SH_ETH_REG_FAST_SH4, .set_mdio_gate = sh7757_eth_set_mdio_gate, }; static struct platform_device sh7757_eth1_device = { .name = "sh-eth", .resource = sh_eth1_resources, .id = 1, .num_resources = ARRAY_SIZE(sh_eth1_resources), .dev = { .platform_data = &sh7757_eth1_pdata, }, }; static void sh7757_eth_giga_set_mdio_gate(void *addr) { if (((unsigned long)addr & 0x00000fff) < 0x0800) { gpio_set_value(GPIO_PTT4, 1); writel(readl(GBECONT) & ~GBECONT_RMII0, GBECONT); } else { gpio_set_value(GPIO_PTT4, 0); writel(readl(GBECONT) & ~GBECONT_RMII1, GBECONT); } } static struct resource sh_eth_giga0_resources[] = { { .start = 0xfee00000, .end = 0xfee007ff, .flags = IORESOURCE_MEM, }, { /* TSU */ .start = 0xfee01800, .end = 0xfee01fff, .flags = IORESOURCE_MEM, }, { .start = evt2irq(0x2960), .end = evt2irq(0x2960), .flags = IORESOURCE_IRQ, }, }; static struct sh_eth_plat_data sh7757_eth_giga0_pdata = { .phy = 18, .edmac_endian = EDMAC_LITTLE_ENDIAN, .register_type = SH_ETH_REG_GIGABIT, .set_mdio_gate = sh7757_eth_giga_set_mdio_gate, .phy_interface = PHY_INTERFACE_MODE_RGMII_ID, }; static struct platform_device sh7757_eth_giga0_device = { .name = "sh-eth", .resource = sh_eth_giga0_resources, .id = 2, .num_resources = ARRAY_SIZE(sh_eth_giga0_resources), .dev = { .platform_data = &sh7757_eth_giga0_pdata, }, }; static struct resource sh_eth_giga1_resources[] = { { .start = 0xfee00800, .end = 0xfee00fff, .flags = IORESOURCE_MEM, }, { /* TSU */ .start = 0xfee01800, .end 
= 0xfee01fff, .flags = IORESOURCE_MEM, }, { .start = evt2irq(0x2980), .end = evt2irq(0x2980), .flags = IORESOURCE_IRQ, }, }; static struct sh_eth_plat_data sh7757_eth_giga1_pdata = { .phy = 19, .edmac_endian = EDMAC_LITTLE_ENDIAN, .register_type = SH_ETH_REG_GIGABIT, .set_mdio_gate = sh7757_eth_giga_set_mdio_gate, .phy_interface = PHY_INTERFACE_MODE_RGMII_ID, }; static struct platform_device sh7757_eth_giga1_device = { .name = "sh-eth", .resource = sh_eth_giga1_resources, .id = 3, .num_resources = ARRAY_SIZE(sh_eth_giga1_resources), .dev = { .platform_data = &sh7757_eth_giga1_pdata, }, }; /* Fixed 3.3V regulator to be used by SDHI0, MMCIF */ static struct regulator_consumer_supply fixed3v3_power_consumers[] = { REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"), REGULATOR_SUPPLY("vqmmc", "sh_mmcif.0"), }; /* SH_MMCIF */ static struct resource sh_mmcif_resources[] = { [0] = { .start = 0xffcb0000, .end = 0xffcb00ff, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0x1c60), .flags = IORESOURCE_IRQ, }, [2] = { .start = evt2irq(0x1c80), .flags = IORESOURCE_IRQ, }, }; static struct sh_mmcif_plat_data sh_mmcif_plat = { .sup_pclk = 0x0f, .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE, .ocr = MMC_VDD_32_33 | MMC_VDD_33_34, .slave_id_tx = SHDMA_SLAVE_MMCIF_TX, .slave_id_rx = SHDMA_SLAVE_MMCIF_RX, }; static struct platform_device sh_mmcif_device = { .name = "sh_mmcif", .id = 0, .dev = { .platform_data = &sh_mmcif_plat, }, .num_resources = ARRAY_SIZE(sh_mmcif_resources), .resource = sh_mmcif_resources, }; /* SDHI0 */ static struct sh_mobile_sdhi_info sdhi_info = { .dma_slave_tx = SHDMA_SLAVE_SDHI_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI_RX, .tmio_caps = MMC_CAP_SD_HIGHSPEED, }; static struct resource sdhi_resources[] = { [0] = { .start = 0xffe50000, .end = 0xffe501ff, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0x480), .flags = IORESOURCE_IRQ, }, }; static struct 
platform_device sdhi_device = { .name = "sh_mobile_sdhi", .num_resources = ARRAY_SIZE(sdhi_resources), .resource = sdhi_resources, .id = 0, .dev = { .platform_data = &sdhi_info, }, }; static int usbhs0_get_id(struct platform_device *pdev) { return USBHS_GADGET; } static struct renesas_usbhs_platform_info usb0_data = { .platform_callback = { .get_id = usbhs0_get_id, }, .driver_param = { .buswait_bwait = 5, } }; static struct resource usb0_resources[] = { [0] = { .start = 0xfe450000, .end = 0xfe4501ff, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0x840), .end = evt2irq(0x840), .flags = IORESOURCE_IRQ, }, }; static struct platform_device usb0_device = { .name = "renesas_usbhs", .id = 0, .dev = { .platform_data = &usb0_data, }, .num_resources = ARRAY_SIZE(usb0_resources), .resource = usb0_resources, }; static struct platform_device *sh7757lcr_devices[] __initdata = { &heartbeat_device, &sh7757_eth0_device, &sh7757_eth1_device, &sh7757_eth_giga0_device, &sh7757_eth_giga1_device, &sh_mmcif_device, &sdhi_device, &usb0_device, }; static struct flash_platform_data spi_flash_data = { .name = "m25p80", .type = "m25px64", }; static struct spi_board_info spi_board_info[] = { { .modalias = "m25p80", .max_speed_hz = 25000000, .bus_num = 0, .chip_select = 1, .platform_data = &spi_flash_data, }, }; static int __init sh7757lcr_devices_setup(void) { regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers, ARRAY_SIZE(fixed3v3_power_consumers), 3300000); /* RGMII (PTA) */ gpio_request(GPIO_FN_ET0_MDC, NULL); gpio_request(GPIO_FN_ET0_MDIO, NULL); gpio_request(GPIO_FN_ET1_MDC, NULL); gpio_request(GPIO_FN_ET1_MDIO, NULL); /* ONFI (PTB, PTZ) */ gpio_request(GPIO_FN_ON_NRE, NULL); gpio_request(GPIO_FN_ON_NWE, NULL); gpio_request(GPIO_FN_ON_NWP, NULL); gpio_request(GPIO_FN_ON_NCE0, NULL); gpio_request(GPIO_FN_ON_R_B0, NULL); gpio_request(GPIO_FN_ON_ALE, NULL); gpio_request(GPIO_FN_ON_CLE, NULL); gpio_request(GPIO_FN_ON_DQ7, NULL); gpio_request(GPIO_FN_ON_DQ6, NULL); 
gpio_request(GPIO_FN_ON_DQ5, NULL); gpio_request(GPIO_FN_ON_DQ4, NULL); gpio_request(GPIO_FN_ON_DQ3, NULL); gpio_request(GPIO_FN_ON_DQ2, NULL); gpio_request(GPIO_FN_ON_DQ1, NULL); gpio_request(GPIO_FN_ON_DQ0, NULL); /* IRQ8 to 0 (PTB, PTC) */ gpio_request(GPIO_FN_IRQ8, NULL); gpio_request(GPIO_FN_IRQ7, NULL); gpio_request(GPIO_FN_IRQ6, NULL); gpio_request(GPIO_FN_IRQ5, NULL); gpio_request(GPIO_FN_IRQ4, NULL); gpio_request(GPIO_FN_IRQ3, NULL); gpio_request(GPIO_FN_IRQ2, NULL); gpio_request(GPIO_FN_IRQ1, NULL); gpio_request(GPIO_FN_IRQ0, NULL); /* SPI0 (PTD) */ gpio_request(GPIO_FN_SP0_MOSI, NULL); gpio_request(GPIO_FN_SP0_MISO, NULL); gpio_request(GPIO_FN_SP0_SCK, NULL); gpio_request(GPIO_FN_SP0_SCK_FB, NULL); gpio_request(GPIO_FN_SP0_SS0, NULL); gpio_request(GPIO_FN_SP0_SS1, NULL); gpio_request(GPIO_FN_SP0_SS2, NULL); gpio_request(GPIO_FN_SP0_SS3, NULL); /* RMII 0/1 (PTE, PTF) */ gpio_request(GPIO_FN_RMII0_CRS_DV, NULL); gpio_request(GPIO_FN_RMII0_TXD1, NULL); gpio_request(GPIO_FN_RMII0_TXD0, NULL); gpio_request(GPIO_FN_RMII0_TXEN, NULL); gpio_request(GPIO_FN_RMII0_REFCLK, NULL); gpio_request(GPIO_FN_RMII0_RXD1, NULL); gpio_request(GPIO_FN_RMII0_RXD0, NULL); gpio_request(GPIO_FN_RMII0_RX_ER, NULL); gpio_request(GPIO_FN_RMII1_CRS_DV, NULL); gpio_request(GPIO_FN_RMII1_TXD1, NULL); gpio_request(GPIO_FN_RMII1_TXD0, NULL); gpio_request(GPIO_FN_RMII1_TXEN, NULL); gpio_request(GPIO_FN_RMII1_REFCLK, NULL); gpio_request(GPIO_FN_RMII1_RXD1, NULL); gpio_request(GPIO_FN_RMII1_RXD0, NULL); gpio_request(GPIO_FN_RMII1_RX_ER, NULL); /* eMMC (PTG) */ gpio_request(GPIO_FN_MMCCLK, NULL); gpio_request(GPIO_FN_MMCCMD, NULL); gpio_request(GPIO_FN_MMCDAT7, NULL); gpio_request(GPIO_FN_MMCDAT6, NULL); gpio_request(GPIO_FN_MMCDAT5, NULL); gpio_request(GPIO_FN_MMCDAT4, NULL); gpio_request(GPIO_FN_MMCDAT3, NULL); gpio_request(GPIO_FN_MMCDAT2, NULL); gpio_request(GPIO_FN_MMCDAT1, NULL); gpio_request(GPIO_FN_MMCDAT0, NULL); /* LPC (PTG, PTH, PTQ, PTU) */ gpio_request(GPIO_FN_SERIRQ, NULL); 
gpio_request(GPIO_FN_LPCPD, NULL); gpio_request(GPIO_FN_LDRQ, NULL); gpio_request(GPIO_FN_WP, NULL); gpio_request(GPIO_FN_FMS0, NULL); gpio_request(GPIO_FN_LAD3, NULL); gpio_request(GPIO_FN_LAD2, NULL); gpio_request(GPIO_FN_LAD1, NULL); gpio_request(GPIO_FN_LAD0, NULL); gpio_request(GPIO_FN_LFRAME, NULL); gpio_request(GPIO_FN_LRESET, NULL); gpio_request(GPIO_FN_LCLK, NULL); gpio_request(GPIO_FN_LGPIO7, NULL); gpio_request(GPIO_FN_LGPIO6, NULL); gpio_request(GPIO_FN_LGPIO5, NULL); gpio_request(GPIO_FN_LGPIO4, NULL); /* SPI1 (PTH) */ gpio_request(GPIO_FN_SP1_MOSI, NULL); gpio_request(GPIO_FN_SP1_MISO, NULL); gpio_request(GPIO_FN_SP1_SCK, NULL); gpio_request(GPIO_FN_SP1_SCK_FB, NULL); gpio_request(GPIO_FN_SP1_SS0, NULL); gpio_request(GPIO_FN_SP1_SS1, NULL); /* SDHI (PTI) */ gpio_request(GPIO_FN_SD_WP, NULL); gpio_request(GPIO_FN_SD_CD, NULL); gpio_request(GPIO_FN_SD_CLK, NULL); gpio_request(GPIO_FN_SD_CMD, NULL); gpio_request(GPIO_FN_SD_D3, NULL); gpio_request(GPIO_FN_SD_D2, NULL); gpio_request(GPIO_FN_SD_D1, NULL); gpio_request(GPIO_FN_SD_D0, NULL); /* SCIF3/4 (PTJ, PTW) */ gpio_request(GPIO_FN_RTS3, NULL); gpio_request(GPIO_FN_CTS3, NULL); gpio_request(GPIO_FN_TXD3, NULL); gpio_request(GPIO_FN_RXD3, NULL); gpio_request(GPIO_FN_RTS4, NULL); gpio_request(GPIO_FN_RXD4, NULL); gpio_request(GPIO_FN_TXD4, NULL); gpio_request(GPIO_FN_CTS4, NULL); /* SERMUX (PTK, PTL, PTO, PTV) */ gpio_request(GPIO_FN_COM2_TXD, NULL); gpio_request(GPIO_FN_COM2_RXD, NULL); gpio_request(GPIO_FN_COM2_RTS, NULL); gpio_request(GPIO_FN_COM2_CTS, NULL); gpio_request(GPIO_FN_COM2_DTR, NULL); gpio_request(GPIO_FN_COM2_DSR, NULL); gpio_request(GPIO_FN_COM2_DCD, NULL); gpio_request(GPIO_FN_COM2_RI, NULL); gpio_request(GPIO_FN_RAC_RXD, NULL); gpio_request(GPIO_FN_RAC_RTS, NULL); gpio_request(GPIO_FN_RAC_CTS, NULL); gpio_request(GPIO_FN_RAC_DTR, NULL); gpio_request(GPIO_FN_RAC_DSR, NULL); gpio_request(GPIO_FN_RAC_DCD, NULL); gpio_request(GPIO_FN_RAC_TXD, NULL); gpio_request(GPIO_FN_COM1_TXD, NULL); 
gpio_request(GPIO_FN_COM1_RXD, NULL); gpio_request(GPIO_FN_COM1_RTS, NULL); gpio_request(GPIO_FN_COM1_CTS, NULL); writeb(0x10, 0xfe470000); /* SMR0: SerMux mode 0 */ /* IIC (PTM, PTR, PTS) */ gpio_request(GPIO_FN_SDA7, NULL); gpio_request(GPIO_FN_SCL7, NULL); gpio_request(GPIO_FN_SDA6, NULL); gpio_request(GPIO_FN_SCL6, NULL); gpio_request(GPIO_FN_SDA5, NULL); gpio_request(GPIO_FN_SCL5, NULL); gpio_request(GPIO_FN_SDA4, NULL); gpio_request(GPIO_FN_SCL4, NULL); gpio_request(GPIO_FN_SDA3, NULL); gpio_request(GPIO_FN_SCL3, NULL); gpio_request(GPIO_FN_SDA2, NULL); gpio_request(GPIO_FN_SCL2, NULL); gpio_request(GPIO_FN_SDA1, NULL); gpio_request(GPIO_FN_SCL1, NULL); gpio_request(GPIO_FN_SDA0, NULL); gpio_request(GPIO_FN_SCL0, NULL); /* USB (PTN) */ gpio_request(GPIO_FN_VBUS_EN, NULL); gpio_request(GPIO_FN_VBUS_OC, NULL); /* SGPIO1/0 (PTN, PTO) */ gpio_request(GPIO_FN_SGPIO1_CLK, NULL); gpio_request(GPIO_FN_SGPIO1_LOAD, NULL); gpio_request(GPIO_FN_SGPIO1_DI, NULL); gpio_request(GPIO_FN_SGPIO1_DO, NULL); gpio_request(GPIO_FN_SGPIO0_CLK, NULL); gpio_request(GPIO_FN_SGPIO0_LOAD, NULL); gpio_request(GPIO_FN_SGPIO0_DI, NULL); gpio_request(GPIO_FN_SGPIO0_DO, NULL); /* WDT (PTN) */ gpio_request(GPIO_FN_SUB_CLKIN, NULL); /* System (PTT) */ gpio_request(GPIO_FN_STATUS1, NULL); gpio_request(GPIO_FN_STATUS0, NULL); /* PWMX (PTT) */ gpio_request(GPIO_FN_PWMX1, NULL); gpio_request(GPIO_FN_PWMX0, NULL); /* R-SPI (PTV) */ gpio_request(GPIO_FN_R_SPI_MOSI, NULL); gpio_request(GPIO_FN_R_SPI_MISO, NULL); gpio_request(GPIO_FN_R_SPI_RSPCK, NULL); gpio_request(GPIO_FN_R_SPI_SSL0, NULL); gpio_request(GPIO_FN_R_SPI_SSL1, NULL); /* EVC (PTV, PTW) */ gpio_request(GPIO_FN_EVENT7, NULL); gpio_request(GPIO_FN_EVENT6, NULL); gpio_request(GPIO_FN_EVENT5, NULL); gpio_request(GPIO_FN_EVENT4, NULL); gpio_request(GPIO_FN_EVENT3, NULL); gpio_request(GPIO_FN_EVENT2, NULL); gpio_request(GPIO_FN_EVENT1, NULL); gpio_request(GPIO_FN_EVENT0, NULL); /* LED for heartbeat */ gpio_request(GPIO_PTU3, NULL); 
gpio_direction_output(GPIO_PTU3, 1); gpio_request(GPIO_PTU2, NULL); gpio_direction_output(GPIO_PTU2, 1); gpio_request(GPIO_PTU1, NULL); gpio_direction_output(GPIO_PTU1, 1); gpio_request(GPIO_PTU0, NULL); gpio_direction_output(GPIO_PTU0, 1); /* control for MDIO of Gigabit Ethernet */ gpio_request(GPIO_PTT4, NULL); gpio_direction_output(GPIO_PTT4, 1); /* control for eMMC */ gpio_request(GPIO_PTT7, NULL); /* eMMC_RST# */ gpio_direction_output(GPIO_PTT7, 0); gpio_request(GPIO_PTT6, NULL); /* eMMC_INDEX# */ gpio_direction_output(GPIO_PTT6, 0); gpio_request(GPIO_PTT5, NULL); /* eMMC_PRST# */ gpio_direction_output(GPIO_PTT5, 1); /* register SPI device information */ spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info)); /* General platform */ return platform_add_devices(sh7757lcr_devices, ARRAY_SIZE(sh7757lcr_devices)); } arch_initcall(sh7757lcr_devices_setup); /* Initialize IRQ setting */ void __init init_sh7757lcr_IRQ(void) { plat_irq_setup_pins(IRQ_MODE_IRQ7654); plat_irq_setup_pins(IRQ_MODE_IRQ3210); } /* Initialize the board */ static void __init sh7757lcr_setup(char **cmdline_p) { printk(KERN_INFO "Renesas R0P7757LC0012RL support.\n"); } static int sh7757lcr_mode_pins(void) { int value = 0; /* These are the factory default settings of S3 (Low active). * If you change these dip switches then you will need to * adjust the values below as well. */ value |= MODE_PIN0; /* Clock Mode: 1 */ return value; } /* The Machine Vector */ static struct sh_machine_vector mv_sh7757lcr __initmv = { .mv_name = "SH7757LCR", .mv_setup = sh7757lcr_setup, .mv_init_irq = init_sh7757lcr_IRQ, .mv_mode_pins = sh7757lcr_mode_pins, };
gpl-2.0
beroid/android_kernel_cyanogen_msm8916
arch/mips/alchemy/devboards/db1200.c
2478
23640
/*
 * DBAu1200/PBAu1200 board platform device registration
 *
 * Copyright (C) 2008-2011 Manuel Lauss
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/dma-mapping.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/leds.h>
#include <linux/mmc/host.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/smc91x.h>

#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1100_mmc.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1200fb.h>
#include <asm/mach-au1x00/au1550_spi.h>
#include <asm/mach-db1x00/bcsr.h>
#include <asm/mach-db1x00/db1200.h>

#include "platform.h"

const char *get_system_type(void);

/*
 * Probe for the actual board by poking the BCSR CPLD: first map the
 * DB1200 BCSR location and check the WHOAMI register; verify the CPLD
 * is really there by writing the hexled register and reading it back
 * (a floating bus would echo the written value).  If that fails, retry
 * at the PB1200 BCSR location.
 *
 * Returns 0 if a DB1200 or PB1200 was found, 1 if neither.
 */
static int __init db1200_detect_board(void)
{
	int bid;

	/* try the DB1200 first */
	bcsr_init(DB1200_BCSR_PHYS_ADDR,
		  DB1200_BCSR_PHYS_ADDR + DB1200_BCSR_HEXLED_OFS);
	if (BCSR_WHOAMI_DB1200 == BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI))) {
		unsigned short t = bcsr_read(BCSR_HEXLEDS);
		bcsr_write(BCSR_HEXLEDS, ~t);
		if (bcsr_read(BCSR_HEXLEDS) != t) {
			bcsr_write(BCSR_HEXLEDS, t);
			return 0;
		}
	}

	/* okay, try the PB1200 then */
	bcsr_init(PB1200_BCSR_PHYS_ADDR,
		  PB1200_BCSR_PHYS_ADDR + PB1200_BCSR_HEXLED_OFS);
	bid = BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI));
	if ((bid == BCSR_WHOAMI_PB1200_DDR1) ||
	    (bid == BCSR_WHOAMI_PB1200_DDR2)) {
		unsigned short t = bcsr_read(BCSR_HEXLEDS);
		bcsr_write(BCSR_HEXLEDS, ~t);
		if (bcsr_read(BCSR_HEXLEDS) != t) {
			bcsr_write(BCSR_HEXLEDS, t);
			return 0;
		}
	}

	return 1;	/* it's neither */
}

/*
 * Early board setup: detect the board, route PSC0 (SMBus/SPI) and
 * PSC1 (audio) onto the pins, and program clock generator 0 to a
 * PSC0 clock of roughly 50 MHz derived from the CPU clock.
 *
 * Returns 0 on success, -ENODEV if this is not a DB1200/PB1200.
 */
int __init db1200_board_setup(void)
{
	unsigned long freq0, clksrc, div, pfc;
	unsigned short whoami;

	if (db1200_detect_board())
		return -ENODEV;

	whoami = bcsr_read(BCSR_WHOAMI);
	printk(KERN_INFO "Alchemy/AMD/RMI %s Board, CPLD Rev %d"
		" Board-ID %d Daughtercard ID %d\n", get_system_type(),
		(whoami >> 4) & 0xf, (whoami >> 8) & 0xf, whoami & 0xf);

	/* SMBus/SPI on PSC0, Audio on PSC1 */
	pfc = __raw_readl((void __iomem *)SYS_PINFUNC);
	pfc &= ~(SYS_PINFUNC_P0A | SYS_PINFUNC_P0B);
	pfc &= ~(SYS_PINFUNC_P1A | SYS_PINFUNC_P1B | SYS_PINFUNC_FS3);
	pfc |= SYS_PINFUNC_P1C;	/* SPI is configured later */
	__raw_writel(pfc, (void __iomem *)SYS_PINFUNC);
	wmb();

	/* Clock configurations: PSC0: ~50MHz via Clkgen0, derived from
	 * CPU clock; all other clock generators off/unused.
	 */
	div = (get_au1x00_speed() + 25000000) / 50000000;
	if (div & 1)
		div++;
	div = ((div >> 1) - 1) & 0xff;

	freq0 = div << SYS_FC_FRDIV0_BIT;
	__raw_writel(freq0, (void __iomem *)SYS_FREQCTRL0);
	wmb();
	freq0 |= SYS_FC_FE0;	/* enable F0 */
	__raw_writel(freq0, (void __iomem *)SYS_FREQCTRL0);
	wmb();

	/* psc0_intclk comes 1:1 from F0 */
	clksrc = SYS_CS_MUX_FQ0 << SYS_CS_ME0_BIT;
	__raw_writel(clksrc, (void __iomem *)SYS_CLKSRC);
	wmb();

	return 0;
}

/******************************************************************************/

static struct mtd_partition db1200_spiflash_parts[] = {
	{
		.name	= "spi_flash",
		.offset	= 0,
		.size	= MTDPART_SIZ_FULL,
	},
};

static struct flash_platform_data db1200_spiflash_data = {
	.name		= "s25fl001",
	.parts		= db1200_spiflash_parts,
	.nr_parts	= ARRAY_SIZE(db1200_spiflash_parts),
	.type		= "m25p10",
};

static struct spi_board_info db1200_spi_devs[] __initdata = {
	{
		/* TI TMP121AIDBVR temp sensor */
		.modalias	= "tmp121",
		.max_speed_hz	= 2000000,
		.bus_num	= 0,
		.chip_select	= 0,
		.mode		= 0,
	},
	{
		/* Spansion S25FL001D0FMA SPI flash */
		.modalias	= "m25p80",
		.max_speed_hz	= 50000000,
		.bus_num	= 0,
		.chip_select	= 1,
		.mode		= 0,
		.platform_data	= &db1200_spiflash_data,
	},
};

static struct i2c_board_info db1200_i2c_devs[] __initdata = {
	{ I2C_BOARD_INFO("24c04", 0x52), },	/* AT24C04-10 I2C eeprom */
	{ I2C_BOARD_INFO("ne1619", 0x2d), },	/* adm1025-compat hwmon */
	{ I2C_BOARD_INFO("wm8731", 0x1b), },	/* I2S audio codec WM8731 */
};

/**********************************************************************/

/*
 * Static-bus NAND: command/address/data phases are distinguished by
 * the low byte of the access address (MEM_STNAND_* offsets).
 */
static void au1200_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
				 unsigned int ctrl)
{
	struct nand_chip *this = mtd->priv;
	unsigned long ioaddr = (unsigned long)this->IO_ADDR_W;

	ioaddr &= 0xffffff00;

	if (ctrl & NAND_CLE) {
		ioaddr += MEM_STNAND_CMD;
	} else if (ctrl & NAND_ALE) {
		ioaddr += MEM_STNAND_ADDR;
	} else {
		/* assume we want to r/w real data by default */
		ioaddr += MEM_STNAND_DATA;
	}
	this->IO_ADDR_R = this->IO_ADDR_W = (void __iomem *)ioaddr;
	if (cmd != NAND_CMD_NONE) {
		__raw_writeb(cmd, this->IO_ADDR_W);
		wmb();
	}
}

static int au1200_nand_device_ready(struct mtd_info *mtd)
{
	return __raw_readl((void __iomem *)MEM_STSTAT) & 1;
}

static struct mtd_partition db1200_nand_parts[] = {
	{
		.name	= "NAND FS 0",
		.offset	= 0,
		.size	= 8 * 1024 * 1024,
	},
	{
		.name	= "NAND FS 1",
		.offset	= MTDPART_OFS_APPEND,
		.size	= MTDPART_SIZ_FULL
	},
};

struct platform_nand_data db1200_nand_platdata = {
	.chip = {
		.nr_chips	= 1,
		.chip_offset	= 0,
		.nr_partitions	= ARRAY_SIZE(db1200_nand_parts),
		.partitions	= db1200_nand_parts,
		.chip_delay	= 20,
	},
	.ctrl = {
		.dev_ready	= au1200_nand_device_ready,
		.cmd_ctrl	= au1200_nand_cmd_ctrl,
	},
};

static struct resource db1200_nand_res[] = {
	[0] = {
		.start	= DB1200_NAND_PHYS_ADDR,
		.end	= DB1200_NAND_PHYS_ADDR + 0xff,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device db1200_nand_dev = {
	.name		= "gen_nand",
	.num_resources	= ARRAY_SIZE(db1200_nand_res),
	.resource	= db1200_nand_res,
	.id		= -1,
	.dev		= {
		.platform_data = &db1200_nand_platdata,
	}
};

/**********************************************************************/

static struct smc91x_platdata db1200_eth_data = {
	.flags	= SMC91X_NOWAIT | SMC91X_USE_16BIT,
	.leda	= RPC_LED_100_10,
	.ledb	= RPC_LED_TX_RX,
};

static struct resource db1200_eth_res[] = {
	[0] = {
		.start	= DB1200_ETH_PHYS_ADDR,
		.end	= DB1200_ETH_PHYS_ADDR + 0xf,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= DB1200_ETH_INT,
		.end	= DB1200_ETH_INT,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device db1200_eth_dev = {
	.dev = {
		.platform_data	= &db1200_eth_data,
	},
	.name		= "smc91x",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(db1200_eth_res),
	.resource	= db1200_eth_res,
};

/**********************************************************************/

static struct resource db1200_ide_res[] = {
	[0] = {
		.start	= DB1200_IDE_PHYS_ADDR,
		.end	= DB1200_IDE_PHYS_ADDR + DB1200_IDE_PHYS_LEN - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= DB1200_IDE_INT,
		.end	= DB1200_IDE_INT,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		.start	= AU1200_DSCR_CMD0_DMA_REQ1,
		.end	= AU1200_DSCR_CMD0_DMA_REQ1,
		.flags	= IORESOURCE_DMA,
	},
};

static u64 au1200_ide_dmamask = DMA_BIT_MASK(32);

static struct platform_device db1200_ide_dev = {
	.name		= "au1200-ide",
	.id		= 0,
	.dev = {
		.dma_mask		= &au1200_ide_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
	.num_resources	= ARRAY_SIZE(db1200_ide_res),
	.resource	= db1200_ide_res,
};

/**********************************************************************/

/* SD carddetects:  they're supposed to be edge-triggered, but ack
 * doesn't seem to work (CPLD Rev 2).  Instead, the screaming one
 * is disabled and its counterpart enabled.  The 500ms timeout is
 * because the carddetect isn't debounced in hardware.
 */
static irqreturn_t db1200_mmc_cd(int irq, void *ptr)
{
	void(*mmc_cd)(struct mmc_host *, unsigned long);

	if (irq == DB1200_SD0_INSERT_INT) {
		disable_irq_nosync(DB1200_SD0_INSERT_INT);
		enable_irq(DB1200_SD0_EJECT_INT);
	} else {
		disable_irq_nosync(DB1200_SD0_EJECT_INT);
		enable_irq(DB1200_SD0_INSERT_INT);
	}

	/* link against CONFIG_MMC=m */
	mmc_cd = symbol_get(mmc_detect_change);
	if (mmc_cd) {
		mmc_cd(ptr, msecs_to_jiffies(500));
		symbol_put(mmc_detect_change);
	}

	return IRQ_HANDLED;
}

static int db1200_mmc_cd_setup(void *mmc_host, int en)
{
	int ret;

	if (en) {
		ret = request_irq(DB1200_SD0_INSERT_INT, db1200_mmc_cd, 0,
				  "sd_insert", mmc_host);
		if (ret)
			goto out;

		ret = request_irq(DB1200_SD0_EJECT_INT, db1200_mmc_cd, 0,
				  "sd_eject", mmc_host);
		if (ret) {
			free_irq(DB1200_SD0_INSERT_INT, mmc_host);
			goto out;
		}

		if (bcsr_read(BCSR_SIGSTAT) & BCSR_INT_SD0INSERT)
			enable_irq(DB1200_SD0_EJECT_INT);
		else
			enable_irq(DB1200_SD0_INSERT_INT);

	} else {
		free_irq(DB1200_SD0_INSERT_INT, mmc_host);
		free_irq(DB1200_SD0_EJECT_INT, mmc_host);
	}
	ret = 0;
out:
	return ret;
}

static void db1200_mmc_set_power(void *mmc_host, int state)
{
	if (state) {
		bcsr_mod(BCSR_BOARD, 0, BCSR_BOARD_SD0PWR);
		msleep(400);	/* stabilization time */
	} else
		bcsr_mod(BCSR_BOARD, BCSR_BOARD_SD0PWR, 0);
}

static int db1200_mmc_card_readonly(void *mmc_host)
{
	return (bcsr_read(BCSR_STATUS) & BCSR_STATUS_SD0WP) ? 1 : 0;
}

static int db1200_mmc_card_inserted(void *mmc_host)
{
	return (bcsr_read(BCSR_SIGSTAT) & BCSR_INT_SD0INSERT) ? 1 : 0;
}

static void db1200_mmcled_set(struct led_classdev *led,
			      enum led_brightness brightness)
{
	if (brightness != LED_OFF)
		bcsr_mod(BCSR_LEDS, BCSR_LEDS_LED0, 0);
	else
		bcsr_mod(BCSR_LEDS, 0, BCSR_LEDS_LED0);
}

static struct led_classdev db1200_mmc_led = {
	.brightness_set	= db1200_mmcled_set,
};

/* -- */

static irqreturn_t pb1200_mmc1_cd(int irq, void *ptr)
{
	void(*mmc_cd)(struct mmc_host *, unsigned long);

	if (irq == PB1200_SD1_INSERT_INT) {
		disable_irq_nosync(PB1200_SD1_INSERT_INT);
		enable_irq(PB1200_SD1_EJECT_INT);
	} else {
		disable_irq_nosync(PB1200_SD1_EJECT_INT);
		enable_irq(PB1200_SD1_INSERT_INT);
	}

	/* link against CONFIG_MMC=m */
	mmc_cd = symbol_get(mmc_detect_change);
	if (mmc_cd) {
		mmc_cd(ptr, msecs_to_jiffies(500));
		symbol_put(mmc_detect_change);
	}

	return IRQ_HANDLED;
}

static int pb1200_mmc1_cd_setup(void *mmc_host, int en)
{
	int ret;

	if (en) {
		ret = request_irq(PB1200_SD1_INSERT_INT, pb1200_mmc1_cd, 0,
				  "sd1_insert", mmc_host);
		if (ret)
			goto out;

		ret = request_irq(PB1200_SD1_EJECT_INT, pb1200_mmc1_cd, 0,
				  "sd1_eject", mmc_host);
		if (ret) {
			free_irq(PB1200_SD1_INSERT_INT, mmc_host);
			goto out;
		}

		if (bcsr_read(BCSR_SIGSTAT) & BCSR_INT_SD1INSERT)
			enable_irq(PB1200_SD1_EJECT_INT);
		else
			enable_irq(PB1200_SD1_INSERT_INT);

	} else {
		free_irq(PB1200_SD1_INSERT_INT, mmc_host);
		free_irq(PB1200_SD1_EJECT_INT, mmc_host);
	}
	ret = 0;
out:
	return ret;
}

static void pb1200_mmc1led_set(struct led_classdev *led,
			       enum led_brightness brightness)
{
	if (brightness != LED_OFF)
		bcsr_mod(BCSR_LEDS, BCSR_LEDS_LED1, 0);
	else
		bcsr_mod(BCSR_LEDS, 0, BCSR_LEDS_LED1);
}

static struct led_classdev pb1200_mmc1_led = {
	.brightness_set	= pb1200_mmc1led_set,
};

static void pb1200_mmc1_set_power(void *mmc_host, int state)
{
	if (state) {
		bcsr_mod(BCSR_BOARD, 0, BCSR_BOARD_SD1PWR);
		msleep(400);	/* stabilization time */
	} else
		bcsr_mod(BCSR_BOARD, BCSR_BOARD_SD1PWR, 0);
}

static int pb1200_mmc1_card_readonly(void *mmc_host)
{
	return (bcsr_read(BCSR_STATUS) & BCSR_STATUS_SD1WP) ? 1 : 0;
}

static int pb1200_mmc1_card_inserted(void *mmc_host)
{
	return (bcsr_read(BCSR_SIGSTAT) & BCSR_INT_SD1INSERT) ? 1 : 0;
}

static struct au1xmmc_platform_data db1200_mmc_platdata[2] = {
	[0] = {
		.cd_setup	= db1200_mmc_cd_setup,
		.set_power	= db1200_mmc_set_power,
		.card_inserted	= db1200_mmc_card_inserted,
		.card_readonly	= db1200_mmc_card_readonly,
		.led		= &db1200_mmc_led,
	},
	[1] = {
		.cd_setup	= pb1200_mmc1_cd_setup,
		.set_power	= pb1200_mmc1_set_power,
		.card_inserted	= pb1200_mmc1_card_inserted,
		.card_readonly	= pb1200_mmc1_card_readonly,
		.led		= &pb1200_mmc1_led,
	},
};

static struct resource au1200_mmc0_resources[] = {
	[0] = {
		.start	= AU1100_SD0_PHYS_ADDR,
		.end	= AU1100_SD0_PHYS_ADDR + 0xfff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AU1200_SD_INT,
		.end	= AU1200_SD_INT,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		.start	= AU1200_DSCR_CMD0_SDMS_TX0,
		.end	= AU1200_DSCR_CMD0_SDMS_TX0,
		.flags	= IORESOURCE_DMA,
	},
	[3] = {
		.start	= AU1200_DSCR_CMD0_SDMS_RX0,
		.end	= AU1200_DSCR_CMD0_SDMS_RX0,
		.flags	= IORESOURCE_DMA,
	}
};

static u64 au1xxx_mmc_dmamask = DMA_BIT_MASK(32);

static struct platform_device db1200_mmc0_dev = {
	.name		= "au1xxx-mmc",
	.id		= 0,
	.dev = {
		.dma_mask		= &au1xxx_mmc_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &db1200_mmc_platdata[0],
	},
	.num_resources	= ARRAY_SIZE(au1200_mmc0_resources),
	.resource	= au1200_mmc0_resources,
};

static struct resource au1200_mmc1_res[] = {
	[0] = {
		.start	= AU1100_SD1_PHYS_ADDR,
		.end	= AU1100_SD1_PHYS_ADDR + 0xfff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AU1200_SD_INT,
		.end	= AU1200_SD_INT,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		.start	= AU1200_DSCR_CMD0_SDMS_TX1,
		.end	= AU1200_DSCR_CMD0_SDMS_TX1,
		.flags	= IORESOURCE_DMA,
	},
	[3] = {
		.start	= AU1200_DSCR_CMD0_SDMS_RX1,
		.end	= AU1200_DSCR_CMD0_SDMS_RX1,
		.flags	= IORESOURCE_DMA,
	}
};

static struct platform_device pb1200_mmc1_dev = {
	.name		= "au1xxx-mmc",
	.id		= 1,
	.dev = {
		.dma_mask		= &au1xxx_mmc_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &db1200_mmc_platdata[1],
	},
	.num_resources	= ARRAY_SIZE(au1200_mmc1_res),
	.resource	= au1200_mmc1_res,
};

/**********************************************************************/

/* LCD panel index is selected via board DIP switches (upper SWITCHES byte) */
static int db1200fb_panel_index(void)
{
	return (bcsr_read(BCSR_SWITCHES) >> 8) & 0x0f;
}

static int db1200fb_panel_init(void)
{
	/* Apply power */
	bcsr_mod(BCSR_BOARD, 0, BCSR_BOARD_LCDVEE | BCSR_BOARD_LCDVDD |
				BCSR_BOARD_LCDBL);
	return 0;
}

static int db1200fb_panel_shutdown(void)
{
	/* Remove power */
	bcsr_mod(BCSR_BOARD, BCSR_BOARD_LCDVEE | BCSR_BOARD_LCDVDD |
			     BCSR_BOARD_LCDBL, 0);
	return 0;
}

static struct au1200fb_platdata db1200fb_pd = {
	.panel_index	= db1200fb_panel_index,
	.panel_init	= db1200fb_panel_init,
	.panel_shutdown	= db1200fb_panel_shutdown,
};

static struct resource au1200_lcd_res[] = {
	[0] = {
		.start	= AU1200_LCD_PHYS_ADDR,
		.end	= AU1200_LCD_PHYS_ADDR + 0x800 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AU1200_LCD_INT,
		.end	= AU1200_LCD_INT,
		.flags	= IORESOURCE_IRQ,
	}
};

static u64 au1200_lcd_dmamask = DMA_BIT_MASK(32);

static struct platform_device au1200_lcd_dev = {
	.name		= "au1200-lcd",
	.id		= 0,
	.dev = {
		.dma_mask		= &au1200_lcd_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &db1200fb_pd,
	},
	.num_resources	= ARRAY_SIZE(au1200_lcd_res),
	.resource	= au1200_lcd_res,
};

/**********************************************************************/

static struct resource au1200_psc0_res[] = {
	[0] = {
		.start	= AU1550_PSC0_PHYS_ADDR,
		.end	= AU1550_PSC0_PHYS_ADDR + 0xfff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AU1200_PSC0_INT,
		.end	= AU1200_PSC0_INT,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		.start	= AU1200_DSCR_CMD0_PSC0_TX,
		.end	= AU1200_DSCR_CMD0_PSC0_TX,
		.flags	= IORESOURCE_DMA,
	},
	[3] = {
		.start	= AU1200_DSCR_CMD0_PSC0_RX,
		.end	= AU1200_DSCR_CMD0_PSC0_RX,
		.flags	= IORESOURCE_DMA,
	},
};

static struct platform_device db1200_i2c_dev = {
	.name		= "au1xpsc_smbus",
	.id		= 0,	/* bus number */
	.num_resources	= ARRAY_SIZE(au1200_psc0_res),
	.resource	= au1200_psc0_res,
};

/* SPI chipselect 1 is routed through the BCSR SPISEL bit */
static void db1200_spi_cs_en(struct au1550_spi_info *spi, int cs, int pol)
{
	if (cs)
		bcsr_mod(BCSR_RESETS, 0, BCSR_RESETS_SPISEL);
	else
		bcsr_mod(BCSR_RESETS, BCSR_RESETS_SPISEL, 0);
}

static struct au1550_spi_info db1200_spi_platdata = {
	.mainclk_hz	= 50000000,	/* PSC0 clock */
	.num_chipselect	= 2,
	.activate_cs	= db1200_spi_cs_en,
};

static u64 spi_dmamask = DMA_BIT_MASK(32);

static struct platform_device db1200_spi_dev = {
	.dev = {
		.dma_mask		= &spi_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &db1200_spi_platdata,
	},
	.name		= "au1550-spi",
	.id		= 0,	/* bus number */
	.num_resources	= ARRAY_SIZE(au1200_psc0_res),
	.resource	= au1200_psc0_res,
};

static struct resource au1200_psc1_res[] = {
	[0] = {
		.start	= AU1550_PSC1_PHYS_ADDR,
		.end	= AU1550_PSC1_PHYS_ADDR + 0xfff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AU1200_PSC1_INT,
		.end	= AU1200_PSC1_INT,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		.start	= AU1200_DSCR_CMD0_PSC1_TX,
		.end	= AU1200_DSCR_CMD0_PSC1_TX,
		.flags	= IORESOURCE_DMA,
	},
	[3] = {
		.start	= AU1200_DSCR_CMD0_PSC1_RX,
		.end	= AU1200_DSCR_CMD0_PSC1_RX,
		.flags	= IORESOURCE_DMA,
	},
};

/* AC97 or I2S device */
static struct platform_device db1200_audio_dev = {
	/* name assigned later based on switch setting */
	.id		= 1,	/* PSC ID */
	.num_resources	= ARRAY_SIZE(au1200_psc1_res),
	.resource	= au1200_psc1_res,
};

/* DB1200 ASoC card device */
static struct platform_device db1200_sound_dev = {
	/* name assigned later based on switch setting */
	.id	= 1,	/* PSC ID */
};

static struct platform_device db1200_stac_dev = {
	.name	= "ac97-codec",
	.id	= 1,	/* on PSC1 */
};

static struct platform_device db1200_audiodma_dev = {
	.name	= "au1xpsc-pcm",
	.id	= 1,	/* PSC ID */
};

static struct platform_device *db1200_devs[] __initdata = {
	NULL,		/* PSC0, selected by S6.8 */
	&db1200_ide_dev,
	&db1200_mmc0_dev,
	&au1200_lcd_dev,
	&db1200_eth_dev,
	&db1200_nand_dev,
	&db1200_audiodma_dev,
	&db1200_audio_dev,
	&db1200_stac_dev,
	&db1200_sound_dev,
};

static struct platform_device *pb1200_devs[] __initdata = {
	&pb1200_mmc1_dev,
};

/* Some peripheral base addresses differ on the PB1200 */
static int __init pb1200_res_fixup(void)
{
	/* CPLD Revs earlier than 4 cause problems */
	if (BCSR_WHOAMI_CPLD(bcsr_read(BCSR_WHOAMI)) <= 3) {
		printk(KERN_ERR "WARNING!!!\n");
		printk(KERN_ERR "WARNING!!!\n");
		printk(KERN_ERR "PB1200 must be at CPLD rev 4. Please have\n");
		printk(KERN_ERR "the board updated to latest revisions.\n");
		printk(KERN_ERR "This software will not work reliably\n");
		printk(KERN_ERR "on anything older than CPLD rev 4.!\n");
		printk(KERN_ERR "WARNING!!!\n");
		printk(KERN_ERR "WARNING!!!\n");
		return 1;
	}

	db1200_nand_res[0].start = PB1200_NAND_PHYS_ADDR;
	db1200_nand_res[0].end	 = PB1200_NAND_PHYS_ADDR + 0xff;
	db1200_ide_res[0].start	 = PB1200_IDE_PHYS_ADDR;
	db1200_ide_res[0].end	 = PB1200_IDE_PHYS_ADDR + DB1200_IDE_PHYS_LEN - 1;
	db1200_eth_res[0].start	 = PB1200_ETH_PHYS_ADDR;
	db1200_eth_res[0].end	 = PB1200_ETH_PHYS_ADDR + 0xff;

	return 0;
}

/*
 * Register all board peripherals: cascade IRQ, I2C/SPI devices,
 * PCMCIA sockets, NOR flash and the platform device set.  PSC0 mode
 * (I2C vs SPI) and PSC1 mode (AC97 vs I2S) are chosen by DIP switches.
 */
int __init db1200_dev_setup(void)
{
	unsigned long pfc;
	unsigned short sw;
	int swapped, bid;

	bid = BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI));
	if ((bid == BCSR_WHOAMI_PB1200_DDR1) ||
	    (bid == BCSR_WHOAMI_PB1200_DDR2)) {
		if (pb1200_res_fixup())
			return -ENODEV;
	}

	/* GPIO7 is low-level triggered CPLD cascade */
	irq_set_irq_type(AU1200_GPIO7_INT, IRQ_TYPE_LEVEL_LOW);
	bcsr_init_irq(DB1200_INT_BEGIN, DB1200_INT_END, AU1200_GPIO7_INT);

	/* insert/eject pairs: one of both is always screaming.  To avoid
	 * issues they must not be automatically enabled when initially
	 * requested.
	 */
	irq_set_status_flags(DB1200_SD0_INSERT_INT, IRQ_NOAUTOEN);
	irq_set_status_flags(DB1200_SD0_EJECT_INT, IRQ_NOAUTOEN);
	irq_set_status_flags(DB1200_PC0_INSERT_INT, IRQ_NOAUTOEN);
	irq_set_status_flags(DB1200_PC0_EJECT_INT, IRQ_NOAUTOEN);
	irq_set_status_flags(DB1200_PC1_INSERT_INT, IRQ_NOAUTOEN);
	irq_set_status_flags(DB1200_PC1_EJECT_INT, IRQ_NOAUTOEN);

	i2c_register_board_info(0, db1200_i2c_devs,
				ARRAY_SIZE(db1200_i2c_devs));
	/* BUGFIX: was ARRAY_SIZE(db1200_i2c_devs) (3 entries), which
	 * registered a bogus SPI device read from past the end of the
	 * 2-entry db1200_spi_devs[] array.
	 */
	spi_register_board_info(db1200_spi_devs,
				ARRAY_SIZE(db1200_spi_devs));

	/* SWITCHES:	S6.8 I2C/SPI selector (OFF=I2C	ON=SPI)
	 *		S6.7 AC97/I2S selector (OFF=AC97 ON=I2S)
	 *		or S12 on the PB1200.
	 */

	/* NOTE: GPIO215 controls OTG VBUS supply.  In SPI mode however
	 * this pin is claimed by PSC0 (unused though, but pinmux doesn't
	 * allow to free it without crippling the SPI interface).
	 * As a result, in SPI mode, OTG simply won't work (PSC0 uses
	 * it as an input pin which is pulled high on the boards).
	 */
	pfc = __raw_readl((void __iomem *)SYS_PINFUNC) & ~SYS_PINFUNC_P0A;

	/* switch off OTG VBUS supply */
	gpio_request(215, "otg-vbus");
	gpio_direction_output(215, 1);

	printk(KERN_INFO "%s device configuration:\n", get_system_type());

	sw = bcsr_read(BCSR_SWITCHES);
	if (sw & BCSR_SWITCHES_DIP_8) {
		db1200_devs[0] = &db1200_i2c_dev;
		bcsr_mod(BCSR_RESETS, BCSR_RESETS_PSC0MUX, 0);

		pfc |= (2 << 17);	/* GPIO2 block owns GPIO215 */

		printk(KERN_INFO " S6.8 OFF: PSC0 mode I2C\n");
		printk(KERN_INFO "   OTG port VBUS supply available!\n");
	} else {
		db1200_devs[0] = &db1200_spi_dev;
		bcsr_mod(BCSR_RESETS, 0, BCSR_RESETS_PSC0MUX);

		pfc |= (1 << 17);	/* PSC0 owns GPIO215 */

		printk(KERN_INFO " S6.8 ON : PSC0 mode SPI\n");
		printk(KERN_INFO "   OTG port VBUS supply disabled\n");
	}
	__raw_writel(pfc, (void __iomem *)SYS_PINFUNC);
	wmb();

	/* Audio: DIP7 selects I2S(0)/AC97(1), but need I2C for I2S!
	 * so: DIP7=1 || DIP8=0 => AC97, DIP7=0 && DIP8=1 => I2S
	 */
	sw &= BCSR_SWITCHES_DIP_8 | BCSR_SWITCHES_DIP_7;
	if (sw == BCSR_SWITCHES_DIP_8) {
		bcsr_mod(BCSR_RESETS, 0, BCSR_RESETS_PSC1MUX);
		db1200_audio_dev.name = "au1xpsc_i2s";
		db1200_sound_dev.name = "db1200-i2s";
		printk(KERN_INFO " S6.7 ON : PSC1 mode I2S\n");
	} else {
		bcsr_mod(BCSR_RESETS, BCSR_RESETS_PSC1MUX, 0);
		db1200_audio_dev.name = "au1xpsc_ac97";
		db1200_sound_dev.name = "db1200-ac97";
		printk(KERN_INFO " S6.7 OFF: PSC1 mode AC97\n");
	}

	/* Audio PSC clock is supplied externally. (FIXME: platdata!!) */
	__raw_writel(PSC_SEL_CLK_SERCLK,
	    (void __iomem *)KSEG1ADDR(AU1550_PSC1_PHYS_ADDR) + PSC_SEL_OFFSET);
	wmb();

	db1x_register_pcmcia_socket(
		AU1000_PCMCIA_ATTR_PHYS_ADDR,
		AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1,
		AU1000_PCMCIA_MEM_PHYS_ADDR,
		AU1000_PCMCIA_MEM_PHYS_ADDR  + 0x000400000 - 1,
		AU1000_PCMCIA_IO_PHYS_ADDR,
		AU1000_PCMCIA_IO_PHYS_ADDR   + 0x000010000 - 1,
		DB1200_PC0_INT, DB1200_PC0_INSERT_INT,
		/*DB1200_PC0_STSCHG_INT*/0, DB1200_PC0_EJECT_INT, 0);

	db1x_register_pcmcia_socket(
		AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x004000000,
		AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x004400000 - 1,
		AU1000_PCMCIA_MEM_PHYS_ADDR  + 0x004000000,
		AU1000_PCMCIA_MEM_PHYS_ADDR  + 0x004400000 - 1,
		AU1000_PCMCIA_IO_PHYS_ADDR   + 0x004000000,
		AU1000_PCMCIA_IO_PHYS_ADDR   + 0x004010000 - 1,
		DB1200_PC1_INT, DB1200_PC1_INSERT_INT,
		/*DB1200_PC1_STSCHG_INT*/0, DB1200_PC1_EJECT_INT, 1);

	swapped = bcsr_read(BCSR_STATUS) & BCSR_STATUS_DB1200_SWAPBOOT;
	db1x_register_norflash(64 << 20, 2, swapped);

	platform_add_devices(db1200_devs, ARRAY_SIZE(db1200_devs));

	/* PB1200 is a DB1200 with a 2nd MMC and Camera connector */
	if ((bid == BCSR_WHOAMI_PB1200_DDR1) ||
	    (bid == BCSR_WHOAMI_PB1200_DDR2))
		platform_add_devices(pb1200_devs, ARRAY_SIZE(pb1200_devs));

	return 0;
}
gpl-2.0
rofehr/linux-wetek
sound/soc/codecs/wm8988.c
2478
27373
/* * wm8988.c -- WM8988 ALSA SoC audio driver * * Copyright 2009 Wolfson Microelectronics plc * Copyright 2005 Openedhand Ltd. * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/i2c.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/tlv.h> #include <sound/soc.h> #include <sound/initval.h> #include "wm8988.h" /* * wm8988 register cache * We can't read the WM8988 register space when we * are using 2 wire for device control, so we cache them instead. */ static const struct reg_default wm8988_reg_defaults[] = { { 0, 0x0097 }, { 1, 0x0097 }, { 2, 0x0079 }, { 3, 0x0079 }, { 5, 0x0008 }, { 7, 0x000a }, { 8, 0x0000 }, { 10, 0x00ff }, { 11, 0x00ff }, { 12, 0x000f }, { 13, 0x000f }, { 16, 0x0000 }, { 17, 0x007b }, { 18, 0x0000 }, { 19, 0x0032 }, { 20, 0x0000 }, { 21, 0x00c3 }, { 22, 0x00c3 }, { 23, 0x00c0 }, { 24, 0x0000 }, { 25, 0x0000 }, { 26, 0x0000 }, { 27, 0x0000 }, { 31, 0x0000 }, { 32, 0x0000 }, { 33, 0x0000 }, { 34, 0x0050 }, { 35, 0x0050 }, { 36, 0x0050 }, { 37, 0x0050 }, { 40, 0x0079 }, { 41, 0x0079 }, { 42, 0x0079 }, }; static bool wm8988_writeable(struct device *dev, unsigned int reg) { switch (reg) { case WM8988_LINVOL: case WM8988_RINVOL: case WM8988_LOUT1V: case WM8988_ROUT1V: case WM8988_ADCDAC: case WM8988_IFACE: case WM8988_SRATE: case WM8988_LDAC: case WM8988_RDAC: case WM8988_BASS: case WM8988_TREBLE: case WM8988_RESET: case WM8988_3D: case WM8988_ALC1: case WM8988_ALC2: case WM8988_ALC3: case WM8988_NGATE: case WM8988_LADC: case WM8988_RADC: case WM8988_ADCTL1: case WM8988_ADCTL2: case WM8988_PWR1: case WM8988_PWR2: case 
WM8988_ADCTL3: case WM8988_ADCIN: case WM8988_LADCIN: case WM8988_RADCIN: case WM8988_LOUTM1: case WM8988_LOUTM2: case WM8988_ROUTM1: case WM8988_ROUTM2: case WM8988_LOUT2V: case WM8988_ROUT2V: case WM8988_LPPB: return true; default: return false; } } /* codec private data */ struct wm8988_priv { struct regmap *regmap; unsigned int sysclk; struct snd_pcm_hw_constraint_list *sysclk_constraints; }; #define wm8988_reset(c) snd_soc_write(c, WM8988_RESET, 0) /* * WM8988 Controls */ static const char *bass_boost_txt[] = {"Linear Control", "Adaptive Boost"}; static const struct soc_enum bass_boost = SOC_ENUM_SINGLE(WM8988_BASS, 7, 2, bass_boost_txt); static const char *bass_filter_txt[] = { "130Hz @ 48kHz", "200Hz @ 48kHz" }; static const struct soc_enum bass_filter = SOC_ENUM_SINGLE(WM8988_BASS, 6, 2, bass_filter_txt); static const char *treble_txt[] = {"8kHz", "4kHz"}; static const struct soc_enum treble = SOC_ENUM_SINGLE(WM8988_TREBLE, 6, 2, treble_txt); static const char *stereo_3d_lc_txt[] = {"200Hz", "500Hz"}; static const struct soc_enum stereo_3d_lc = SOC_ENUM_SINGLE(WM8988_3D, 5, 2, stereo_3d_lc_txt); static const char *stereo_3d_uc_txt[] = {"2.2kHz", "1.5kHz"}; static const struct soc_enum stereo_3d_uc = SOC_ENUM_SINGLE(WM8988_3D, 6, 2, stereo_3d_uc_txt); static const char *stereo_3d_func_txt[] = {"Capture", "Playback"}; static const struct soc_enum stereo_3d_func = SOC_ENUM_SINGLE(WM8988_3D, 7, 2, stereo_3d_func_txt); static const char *alc_func_txt[] = {"Off", "Right", "Left", "Stereo"}; static const struct soc_enum alc_func = SOC_ENUM_SINGLE(WM8988_ALC1, 7, 4, alc_func_txt); static const char *ng_type_txt[] = {"Constant PGA Gain", "Mute ADC Output"}; static const struct soc_enum ng_type = SOC_ENUM_SINGLE(WM8988_NGATE, 1, 2, ng_type_txt); static const char *deemph_txt[] = {"None", "32Khz", "44.1Khz", "48Khz"}; static const struct soc_enum deemph = SOC_ENUM_SINGLE(WM8988_ADCDAC, 1, 4, deemph_txt); static const char *adcpol_txt[] = {"Normal", "L Invert", "R 
Invert", "L + R Invert"}; static const struct soc_enum adcpol = SOC_ENUM_SINGLE(WM8988_ADCDAC, 5, 4, adcpol_txt); static const DECLARE_TLV_DB_SCALE(pga_tlv, -1725, 75, 0); static const DECLARE_TLV_DB_SCALE(adc_tlv, -9750, 50, 1); static const DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 1); static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1); static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0); static const struct snd_kcontrol_new wm8988_snd_controls[] = { SOC_ENUM("Bass Boost", bass_boost), SOC_ENUM("Bass Filter", bass_filter), SOC_SINGLE("Bass Volume", WM8988_BASS, 0, 15, 1), SOC_SINGLE("Treble Volume", WM8988_TREBLE, 0, 15, 0), SOC_ENUM("Treble Cut-off", treble), SOC_SINGLE("3D Switch", WM8988_3D, 0, 1, 0), SOC_SINGLE("3D Volume", WM8988_3D, 1, 15, 0), SOC_ENUM("3D Lower Cut-off", stereo_3d_lc), SOC_ENUM("3D Upper Cut-off", stereo_3d_uc), SOC_ENUM("3D Mode", stereo_3d_func), SOC_SINGLE("ALC Capture Target Volume", WM8988_ALC1, 0, 7, 0), SOC_SINGLE("ALC Capture Max Volume", WM8988_ALC1, 4, 7, 0), SOC_ENUM("ALC Capture Function", alc_func), SOC_SINGLE("ALC Capture ZC Switch", WM8988_ALC2, 7, 1, 0), SOC_SINGLE("ALC Capture Hold Time", WM8988_ALC2, 0, 15, 0), SOC_SINGLE("ALC Capture Decay Time", WM8988_ALC3, 4, 15, 0), SOC_SINGLE("ALC Capture Attack Time", WM8988_ALC3, 0, 15, 0), SOC_SINGLE("ALC Capture NG Threshold", WM8988_NGATE, 3, 31, 0), SOC_ENUM("ALC Capture NG Type", ng_type), SOC_SINGLE("ALC Capture NG Switch", WM8988_NGATE, 0, 1, 0), SOC_SINGLE("ZC Timeout Switch", WM8988_ADCTL1, 0, 1, 0), SOC_DOUBLE_R_TLV("Capture Digital Volume", WM8988_LADC, WM8988_RADC, 0, 255, 0, adc_tlv), SOC_DOUBLE_R_TLV("Capture Volume", WM8988_LINVOL, WM8988_RINVOL, 0, 63, 0, pga_tlv), SOC_DOUBLE_R("Capture ZC Switch", WM8988_LINVOL, WM8988_RINVOL, 6, 1, 0), SOC_DOUBLE_R("Capture Switch", WM8988_LINVOL, WM8988_RINVOL, 7, 1, 1), SOC_ENUM("Playback De-emphasis", deemph), SOC_ENUM("Capture Polarity", adcpol), SOC_SINGLE("Playback 6dB Attenuate", WM8988_ADCDAC, 7, 1, 0), 
/*
 * NOTE(review): this excerpt opens part-way through the wm8988_snd_controls
 * mixer-control table; the array's declaration and earlier entries lie
 * before this chunk.  The table is hooked up below via
 * soc_codec_dev_wm8988.controls.
 */
SOC_SINGLE("Capture 6dB Attenuate", WM8988_ADCDAC, 8, 1, 0),

SOC_DOUBLE_R_TLV("PCM Volume", WM8988_LDAC, WM8988_RDAC, 0, 255, 0, dac_tlv),

SOC_SINGLE_TLV("Left Mixer Left Bypass Volume", WM8988_LOUTM1, 4, 7, 1,
	       bypass_tlv),
SOC_SINGLE_TLV("Left Mixer Right Bypass Volume", WM8988_LOUTM2, 4, 7, 1,
	       bypass_tlv),
SOC_SINGLE_TLV("Right Mixer Left Bypass Volume", WM8988_ROUTM1, 4, 7, 1,
	       bypass_tlv),
SOC_SINGLE_TLV("Right Mixer Right Bypass Volume", WM8988_ROUTM2, 4, 7, 1,
	       bypass_tlv),

SOC_DOUBLE_R("Output 1 Playback ZC Switch", WM8988_LOUT1V, WM8988_ROUT1V,
	     7, 1, 0),
SOC_DOUBLE_R_TLV("Output 1 Playback Volume", WM8988_LOUT1V, WM8988_ROUT1V,
		 0, 127, 0, out_tlv),

SOC_DOUBLE_R("Output 2 Playback ZC Switch", WM8988_LOUT2V, WM8988_ROUT2V,
	     7, 1, 0),
SOC_DOUBLE_R_TLV("Output 2 Playback Volume", WM8988_LOUT2V, WM8988_ROUT2V,
		 0, 127, 0, out_tlv),

};

/*
 * DAPM Controls
 */

/*
 * DAPM post-sequence callback: choose which converter gates LRC.
 * If either DAC is powered (WM8988_PWR2 & 0x180) clear ADCTL2 bit 2 so
 * the DAC gates LRC, otherwise set it so the ADC does.
 */
static int wm8988_lrc_control(struct snd_soc_dapm_widget *w,
			      struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	u16 adctl2 = snd_soc_read(codec, WM8988_ADCTL2);

	/* Use the DAC to gate LRC if active, otherwise use ADC */
	if (snd_soc_read(codec, WM8988_PWR2) & 0x180)
		adctl2 &= ~0x4;
	else
		adctl2 |= 0x4;

	return snd_soc_write(codec, WM8988_ADCTL2, adctl2);
}

/* Output mixer input selection (values are sparse register codes) */
static const char *wm8988_line_texts[] = {
	"Line 1", "Line 2", "PGA", "Differential"};

static const unsigned int wm8988_line_values[] = {
	0, 1, 3, 4};

static const struct soc_enum wm8988_lline_enum =
	SOC_VALUE_ENUM_SINGLE(WM8988_LOUTM1, 0, 7,
			      ARRAY_SIZE(wm8988_line_texts),
			      wm8988_line_texts,
			      wm8988_line_values);
static const struct snd_kcontrol_new wm8988_left_line_controls =
	SOC_DAPM_VALUE_ENUM("Route", wm8988_lline_enum);

static const struct soc_enum wm8988_rline_enum =
	SOC_VALUE_ENUM_SINGLE(WM8988_ROUTM1, 0, 7,
			      ARRAY_SIZE(wm8988_line_texts),
			      wm8988_line_texts,
			      wm8988_line_values);
/*
 * NOTE(review): this uses wm8988_lline_enum (left, WM8988_LOUTM1) rather
 * than the wm8988_rline_enum defined just above, which is otherwise unused.
 * Looks like a copy/paste slip -- confirm against the datasheet before
 * changing, since it alters which register the right "Route" control hits.
 */
static const struct snd_kcontrol_new wm8988_right_line_controls =
	SOC_DAPM_VALUE_ENUM("Route", wm8988_lline_enum);

/* Left Mixer */
static const struct snd_kcontrol_new wm8988_left_mixer_controls[] = {
	SOC_DAPM_SINGLE("Playback Switch", WM8988_LOUTM1, 8, 1, 0),
	SOC_DAPM_SINGLE("Left Bypass Switch", WM8988_LOUTM1, 7, 1, 0),
	SOC_DAPM_SINGLE("Right Playback Switch", WM8988_LOUTM2, 8, 1, 0),
	SOC_DAPM_SINGLE("Right Bypass Switch", WM8988_LOUTM2, 7, 1, 0),
};

/* Right Mixer */
static const struct snd_kcontrol_new wm8988_right_mixer_controls[] = {
	SOC_DAPM_SINGLE("Left Playback Switch", WM8988_ROUTM1, 8, 1, 0),
	SOC_DAPM_SINGLE("Left Bypass Switch", WM8988_ROUTM1, 7, 1, 0),
	SOC_DAPM_SINGLE("Playback Switch", WM8988_ROUTM2, 8, 1, 0),
	SOC_DAPM_SINGLE("Right Bypass Switch", WM8988_ROUTM2, 7, 1, 0),
};

/* ADC PGA input selection (value 2 is unused by the hardware encoding) */
static const char *wm8988_pga_sel[] = {"Line 1", "Line 2", "Differential"};
static const unsigned int wm8988_pga_val[] = { 0, 1, 3 };

/* Left PGA Mux */
static const struct soc_enum wm8988_lpga_enum =
	SOC_VALUE_ENUM_SINGLE(WM8988_LADCIN, 6, 3,
			      ARRAY_SIZE(wm8988_pga_sel),
			      wm8988_pga_sel,
			      wm8988_pga_val);
static const struct snd_kcontrol_new wm8988_left_pga_controls =
	SOC_DAPM_VALUE_ENUM("Route", wm8988_lpga_enum);

/* Right PGA Mux */
static const struct soc_enum wm8988_rpga_enum =
	SOC_VALUE_ENUM_SINGLE(WM8988_RADCIN, 6, 3,
			      ARRAY_SIZE(wm8988_pga_sel),
			      wm8988_pga_sel,
			      wm8988_pga_val);
static const struct snd_kcontrol_new wm8988_right_pga_controls =
	SOC_DAPM_VALUE_ENUM("Route", wm8988_rpga_enum);

/* Differential Mux */
static const char *wm8988_diff_sel[] = {"Line 1", "Line 2"};
static const struct soc_enum diffmux =
	SOC_ENUM_SINGLE(WM8988_ADCIN, 8, 2, wm8988_diff_sel);
static const struct snd_kcontrol_new wm8988_diffmux_controls =
	SOC_DAPM_ENUM("Route", diffmux);

/* Mono ADC Mux */
static const char *wm8988_mono_mux[] = {"Stereo", "Mono (Left)",
	"Mono (Right)", "Digital Mono"};
static const struct soc_enum monomux =
	SOC_ENUM_SINGLE(WM8988_ADCIN, 6, 4, wm8988_mono_mux);
static const struct snd_kcontrol_new wm8988_monomux_controls =
	SOC_DAPM_ENUM("Route", monomux);

static const struct snd_soc_dapm_widget wm8988_dapm_widgets[] = {
	SND_SOC_DAPM_SUPPLY("Mic Bias", WM8988_PWR1, 1, 0, NULL, 0),

	SND_SOC_DAPM_MUX("Differential Mux", SND_SOC_NOPM, 0, 0,
		&wm8988_diffmux_controls),
	SND_SOC_DAPM_MUX("Left ADC Mux", SND_SOC_NOPM, 0, 0,
		&wm8988_monomux_controls),
	SND_SOC_DAPM_MUX("Right ADC Mux", SND_SOC_NOPM, 0, 0,
		&wm8988_monomux_controls),

	SND_SOC_DAPM_MUX("Left PGA Mux", WM8988_PWR1, 5, 0,
		&wm8988_left_pga_controls),
	SND_SOC_DAPM_MUX("Right PGA Mux", WM8988_PWR1, 4, 0,
		&wm8988_right_pga_controls),
	SND_SOC_DAPM_MUX("Left Line Mux", SND_SOC_NOPM, 0, 0,
		&wm8988_left_line_controls),
	SND_SOC_DAPM_MUX("Right Line Mux", SND_SOC_NOPM, 0, 0,
		&wm8988_right_line_controls),

	SND_SOC_DAPM_ADC("Right ADC", "Right Capture", WM8988_PWR1, 2, 0),
	SND_SOC_DAPM_ADC("Left ADC", "Left Capture", WM8988_PWR1, 3, 0),

	SND_SOC_DAPM_DAC("Right DAC", "Right Playback", WM8988_PWR2, 7, 0),
	SND_SOC_DAPM_DAC("Left DAC", "Left Playback", WM8988_PWR2, 8, 0),

	SND_SOC_DAPM_MIXER("Left Mixer", SND_SOC_NOPM, 0, 0,
		&wm8988_left_mixer_controls[0],
		ARRAY_SIZE(wm8988_left_mixer_controls)),
	SND_SOC_DAPM_MIXER("Right Mixer", SND_SOC_NOPM, 0, 0,
		&wm8988_right_mixer_controls[0],
		ARRAY_SIZE(wm8988_right_mixer_controls)),

	SND_SOC_DAPM_PGA("Right Out 2", WM8988_PWR2, 3, 0, NULL, 0),
	SND_SOC_DAPM_PGA("Left Out 2", WM8988_PWR2, 4, 0, NULL, 0),
	SND_SOC_DAPM_PGA("Right Out 1", WM8988_PWR2, 5, 0, NULL, 0),
	SND_SOC_DAPM_PGA("Left Out 1", WM8988_PWR2, 6, 0, NULL, 0),

	/* Runs after each DAPM sequence to repoint LRC gating */
	SND_SOC_DAPM_POST("LRC control", wm8988_lrc_control),

	SND_SOC_DAPM_OUTPUT("LOUT1"),
	SND_SOC_DAPM_OUTPUT("ROUT1"),
	SND_SOC_DAPM_OUTPUT("LOUT2"),
	SND_SOC_DAPM_OUTPUT("ROUT2"),
	SND_SOC_DAPM_OUTPUT("VREF"),

	SND_SOC_DAPM_INPUT("LINPUT1"),
	SND_SOC_DAPM_INPUT("LINPUT2"),
	SND_SOC_DAPM_INPUT("RINPUT1"),
	SND_SOC_DAPM_INPUT("RINPUT2"),
};

static const struct snd_soc_dapm_route wm8988_dapm_routes[] = {
	{ "Left Line Mux", "Line 1", "LINPUT1" },
	{ "Left Line Mux", "Line 2", "LINPUT2" },
	{ "Left Line Mux", "PGA", "Left PGA Mux" },
	{ "Left Line Mux", "Differential", "Differential Mux" },

	{ "Right Line Mux", "Line 1", "RINPUT1" },
	{ "Right Line Mux", "Line 2", "RINPUT2" },
	{ "Right Line Mux", "PGA", "Right PGA Mux" },
	{ "Right Line Mux", "Differential", "Differential Mux" },

	{ "Left PGA Mux", "Line 1", "LINPUT1" },
	{ "Left PGA Mux", "Line 2", "LINPUT2" },
	{ "Left PGA Mux", "Differential", "Differential Mux" },

	{ "Right PGA Mux", "Line 1", "RINPUT1" },
	{ "Right PGA Mux", "Line 2", "RINPUT2" },
	{ "Right PGA Mux", "Differential", "Differential Mux" },

	{ "Differential Mux", "Line 1", "LINPUT1" },
	{ "Differential Mux", "Line 1", "RINPUT1" },
	{ "Differential Mux", "Line 2", "LINPUT2" },
	{ "Differential Mux", "Line 2", "RINPUT2" },

	{ "Left ADC Mux", "Stereo", "Left PGA Mux" },
	{ "Left ADC Mux", "Mono (Left)", "Left PGA Mux" },
	{ "Left ADC Mux", "Digital Mono", "Left PGA Mux" },

	{ "Right ADC Mux", "Stereo", "Right PGA Mux" },
	{ "Right ADC Mux", "Mono (Right)", "Right PGA Mux" },
	{ "Right ADC Mux", "Digital Mono", "Right PGA Mux" },

	{ "Left ADC", NULL, "Left ADC Mux" },
	{ "Right ADC", NULL, "Right ADC Mux" },

	/*
	 * NOTE(review): the eight Line Mux routes below duplicate the
	 * entries at the top of this table -- presumably a merge artifact;
	 * verify before removing.
	 */
	{ "Left Line Mux", "Line 1", "LINPUT1" },
	{ "Left Line Mux", "Line 2", "LINPUT2" },
	{ "Left Line Mux", "PGA", "Left PGA Mux" },
	{ "Left Line Mux", "Differential", "Differential Mux" },

	{ "Right Line Mux", "Line 1", "RINPUT1" },
	{ "Right Line Mux", "Line 2", "RINPUT2" },
	{ "Right Line Mux", "PGA", "Right PGA Mux" },
	{ "Right Line Mux", "Differential", "Differential Mux" },

	{ "Left Mixer", "Playback Switch", "Left DAC" },
	{ "Left Mixer", "Left Bypass Switch", "Left Line Mux" },
	{ "Left Mixer", "Right Playback Switch", "Right DAC" },
	{ "Left Mixer", "Right Bypass Switch", "Right Line Mux" },

	{ "Right Mixer", "Left Playback Switch", "Left DAC" },
	{ "Right Mixer", "Left Bypass Switch", "Left Line Mux" },
	{ "Right Mixer", "Playback Switch", "Right DAC" },
	{ "Right Mixer", "Right Bypass Switch", "Right Line Mux" },

	{ "Left Out 1", NULL, "Left Mixer" },
	{ "LOUT1", NULL, "Left Out 1" },
	{ "Right Out 1", NULL, "Right Mixer" },
	{ "ROUT1", NULL, "Right Out 1" },

	{ "Left Out 2", NULL, "Left Mixer" },
	{ "LOUT2", NULL, "Left Out 2" },
	{ "Right Out 2", NULL, "Right Mixer" },
	{ "ROUT2", NULL, "Right Out 2" },
};

/* One MCLK/sample-rate combination and its SRATE register encoding */
struct _coeff_div {
	u32 mclk;	/* MCLK frequency in Hz */
	u32 rate;	/* sample rate in Hz */
	u16 fs;		/* mclk/rate ratio (informational) */
	u8 sr:5;	/* SRATE register sample rate field */
	u8 usb:1;	/* USB (12MHz-derived) mode flag */
};

/* codec hifi mclk clock divider coefficients */
static const struct _coeff_div coeff_div[] = {
	/* 8k */
	{12288000, 8000, 1536, 0x6, 0x0},
	{11289600, 8000, 1408, 0x16, 0x0},
	{18432000, 8000, 2304, 0x7, 0x0},
	{16934400, 8000, 2112, 0x17, 0x0},
	{12000000, 8000, 1500, 0x6, 0x1},

	/* 11.025k */
	{11289600, 11025, 1024, 0x18, 0x0},
	{16934400, 11025, 1536, 0x19, 0x0},
	{12000000, 11025, 1088, 0x19, 0x1},

	/* 16k */
	{12288000, 16000, 768, 0xa, 0x0},
	{18432000, 16000, 1152, 0xb, 0x0},
	{12000000, 16000, 750, 0xa, 0x1},

	/* 22.05k */
	{11289600, 22050, 512, 0x1a, 0x0},
	{16934400, 22050, 768, 0x1b, 0x0},
	{12000000, 22050, 544, 0x1b, 0x1},

	/* 32k */
	{12288000, 32000, 384, 0xc, 0x0},
	{18432000, 32000, 576, 0xd, 0x0},
	{12000000, 32000, 375, 0xa, 0x1},

	/* 44.1k */
	{11289600, 44100, 256, 0x10, 0x0},
	{16934400, 44100, 384, 0x11, 0x0},
	{12000000, 44100, 272, 0x11, 0x1},

	/* 48k */
	{12288000, 48000, 256, 0x0, 0x0},
	{18432000, 48000, 384, 0x1, 0x0},
	{12000000, 48000, 250, 0x0, 0x1},

	/* 88.2k */
	{11289600, 88200, 128, 0x1e, 0x0},
	{16934400, 88200, 192, 0x1f, 0x0},
	{12000000, 88200, 136, 0x1f, 0x1},

	/* 96k */
	{12288000, 96000, 128, 0xe, 0x0},
	{18432000, 96000, 192, 0xf, 0x0},
	{12000000, 96000, 125, 0xe, 0x1},
};

/*
 * Return the coeff_div[] index matching an exact (mclk, rate) pair,
 * or -EINVAL if the combination is not in the table.
 */
static inline int get_coeff(int mclk, int rate)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(coeff_div); i++) {
		if (coeff_div[i].rate == rate && coeff_div[i].mclk == mclk)
			return i;
	}

	return -EINVAL;
}

/* The set of rates we can generate from the above for each SYSCLK */

/*
 * NOTE(review): 24000 is listed twice below and 12000/24000 have no
 * coeff_div[] entry for the 12.288MHz family -- looks inherited from
 * an older driver; verify against coeff_div before relying on it.
 */
static unsigned int rates_12288[] = {
	8000, 12000, 16000, 24000, 24000, 32000, 48000, 96000,
};

static struct snd_pcm_hw_constraint_list constraints_12288 = {
	.count	= ARRAY_SIZE(rates_12288),
	.list	= rates_12288,
};

static unsigned int rates_112896[] = {
	8000, 11025, 22050, 44100,
};

static struct snd_pcm_hw_constraint_list constraints_112896 = {
	.count	= ARRAY_SIZE(rates_112896),
	.list	= rates_112896,
};

/*
 * NOTE(review): several entries here look like typos ("2400" for 24000,
 * "41100" for 44100, a duplicated 48000); 88235 is 12MHz/136 so it may be
 * intentional.  Confirm before changing -- they affect constraint matching
 * only, not the register setup.
 */
static unsigned int rates_12[] = {
	8000, 11025, 12000, 16000, 22050, 2400,
	32000, 41100, 48000, 48000, 88235, 96000,
};

static struct snd_pcm_hw_constraint_list constraints_12 = {
	.count	= ARRAY_SIZE(rates_12),
	.list	= rates_12,
};

/*
 * Record the MCLK frequency and select the matching sample-rate
 * constraint list for later stream startup.
 *
 * Note that this should be called from init rather than from hw_params.
 */
static int wm8988_set_dai_sysclk(struct snd_soc_dai *codec_dai,
		int clk_id, unsigned int freq, int dir)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec);

	switch (freq) {
	case 11289600:
	case 18432000:
	case 22579200:
	case 36864000:
		wm8988->sysclk_constraints = &constraints_112896;
		wm8988->sysclk = freq;
		return 0;

	case 12288000:
	case 16934400:
	case 24576000:
	case 33868800:
		wm8988->sysclk_constraints = &constraints_12288;
		wm8988->sysclk = freq;
		return 0;

	case 12000000:
	case 24000000:
		wm8988->sysclk_constraints = &constraints_12;
		wm8988->sysclk = freq;
		return 0;
	}
	return -EINVAL;
}

/*
 * Program the audio interface register (master/slave, data format,
 * clock inversion) from a SND_SOC_DAIFMT_* descriptor.
 */
static int wm8988_set_dai_fmt(struct snd_soc_dai *codec_dai,
		unsigned int fmt)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	u16 iface = 0;

	/* set master/slave audio interface */
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBM_CFM:
		iface = 0x0040;
		break;
	case SND_SOC_DAIFMT_CBS_CFS:
		break;
	default:
		return -EINVAL;
	}

	/* interface format */
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		iface |= 0x0002;
		break;
	case SND_SOC_DAIFMT_RIGHT_J:
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		iface |= 0x0001;
		break;
	case SND_SOC_DAIFMT_DSP_A:
		iface |= 0x0003;
		break;
	case SND_SOC_DAIFMT_DSP_B:
		iface |= 0x0013;
		break;
	default:
		return -EINVAL;
	}

	/* clock inversion */
	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
		break;
	case SND_SOC_DAIFMT_IB_IF:
		iface |= 0x0090;
		break;
	case SND_SOC_DAIFMT_IB_NF:
		iface |= 0x0080;
		break;
	case SND_SOC_DAIFMT_NB_IF:
		iface |= 0x0010;
		break;
	default:
		return -EINVAL;
	}

	snd_soc_write(codec, WM8988_IFACE, iface);
	return 0;
}

/*
 * Stream open: require that set_sysclk() was already called so the
 * rate constraint list chosen there can be applied to the runtime.
 */
static int wm8988_pcm_startup(struct snd_pcm_substream *substream,
			      struct snd_soc_dai *dai)
{
	struct snd_soc_codec *codec = dai->codec;
	struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec);

	/* The set of sample rates that can be supported depends on the
	 * MCLK supplied to the CODEC - enforce this.
	 */
	if (!wm8988->sysclk) {
		dev_err(codec->dev,
			"No MCLK configured, call set_sysclk() on init\n");
		return -EINVAL;
	}

	snd_pcm_hw_constraint_list(substream->runtime, 0,
				   SNDRV_PCM_HW_PARAM_RATE,
				   wm8988->sysclk_constraints);

	return 0;
}

/*
 * Configure word length and sample-rate divider.  If the full MCLK has
 * no table entry, retry at MCLK/2 and set the clock-divide bit (0x40).
 */
static int wm8988_pcm_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *params,
				struct snd_soc_dai *dai)
{
	struct snd_soc_codec *codec = dai->codec;
	struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec);
	u16 iface = snd_soc_read(codec, WM8988_IFACE) & 0x1f3;
	u16 srate = snd_soc_read(codec, WM8988_SRATE) & 0x180;
	int coeff;

	coeff = get_coeff(wm8988->sysclk, params_rate(params));
	if (coeff < 0) {
		coeff = get_coeff(wm8988->sysclk / 2, params_rate(params));
		srate |= 0x40;
	}
	if (coeff < 0) {
		dev_err(codec->dev,
			"Unable to configure sample rate %dHz with %dHz MCLK\n",
			params_rate(params), wm8988->sysclk);
		return coeff;
	}

	/* bit size */
	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S16_LE:
		break;
	case SNDRV_PCM_FORMAT_S20_3LE:
		iface |= 0x0004;
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		iface |= 0x0008;
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
		iface |= 0x000c;
		break;
	}

	/* set iface & srate */
	snd_soc_write(codec, WM8988_IFACE, iface);
	/* coeff is always >= 0 here after the error check above */
	if (coeff >= 0)
		snd_soc_write(codec, WM8988_SRATE, srate |
			(coeff_div[coeff].sr << 1) | coeff_div[coeff].usb);

	return 0;
}

/* Toggle the DAC soft-mute bit (ADCDAC bit 3) */
static int wm8988_mute(struct snd_soc_dai *dai, int mute)
{
	struct snd_soc_codec *codec = dai->codec;
	u16 mute_reg = snd_soc_read(codec, WM8988_ADCDAC) & 0xfff7;

	if (mute)
		snd_soc_write(codec, WM8988_ADCDAC, mute_reg | 0x8);
	else
		snd_soc_write(codec, WM8988_ADCDAC, mute_reg);
	return 0;
}

/*
 * Sequence VREF/VMID power in PWR1.  Coming out of OFF the register
 * cache is resynced and VMID is first driven hard (2x5k) to charge the
 * reference capacitors before settling at the low-power divider.
 */
static int wm8988_set_bias_level(struct snd_soc_codec *codec,
				 enum snd_soc_bias_level level)
{
	struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec);
	u16 pwr_reg = snd_soc_read(codec, WM8988_PWR1) & ~0x1c1;

	switch (level) {
	case SND_SOC_BIAS_ON:
		break;

	case SND_SOC_BIAS_PREPARE:
		/* VREF, VMID=2x50k, digital enabled */
		snd_soc_write(codec, WM8988_PWR1, pwr_reg | 0x00c0);
		break;

	case SND_SOC_BIAS_STANDBY:
		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
			regcache_sync(wm8988->regmap);

			/* VREF, VMID=2x5k */
			snd_soc_write(codec, WM8988_PWR1, pwr_reg | 0x1c1);

			/* Charge caps */
			msleep(100);
		}

		/* VREF, VMID=2*500k, digital stopped */
		snd_soc_write(codec, WM8988_PWR1, pwr_reg | 0x0141);
		break;

	case SND_SOC_BIAS_OFF:
		snd_soc_write(codec, WM8988_PWR1, 0x0000);
		break;
	}
	codec->dapm.bias_level = level;
	return 0;
}

#define WM8988_RATES SNDRV_PCM_RATE_8000_96000

#define WM8988_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
	SNDRV_PCM_FMTBIT_S24_LE)

static const struct snd_soc_dai_ops wm8988_ops = {
	.startup = wm8988_pcm_startup,
	.hw_params = wm8988_pcm_hw_params,
	.set_fmt = wm8988_set_dai_fmt,
	.set_sysclk = wm8988_set_dai_sysclk,
	.digital_mute = wm8988_mute,
};

static struct snd_soc_dai_driver wm8988_dai = {
	.name = "wm8988-hifi",
	.playback = {
		.stream_name = "Playback",
		.channels_min = 1,
		.channels_max = 2,
		.rates = WM8988_RATES,
		.formats = WM8988_FORMATS,
	},
	.capture = {
		.stream_name = "Capture",
		.channels_min = 1,
		.channels_max = 2,
		.rates = WM8988_RATES,
		.formats = WM8988_FORMATS,
	},
	.ops = &wm8988_ops,
	.symmetric_rates = 1,
};

/* Power down and mark the cache dirty so resume resyncs every register */
static int wm8988_suspend(struct snd_soc_codec *codec)
{
	struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec);

	wm8988_set_bias_level(codec, SND_SOC_BIAS_OFF);
	regcache_mark_dirty(wm8988->regmap);
	return 0;
}

static int wm8988_resume(struct snd_soc_codec *codec)
{
	wm8988_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
	return 0;
}

/*
 * Codec-level probe: wire up regmap I/O, reset the chip and latch the
 * "update" bits so left/right volume pairs take effect together.
 */
static int wm8988_probe(struct snd_soc_codec *codec)
{
	struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec);
	int ret = 0;

	codec->control_data = wm8988->regmap;
	ret = snd_soc_codec_set_cache_io(codec, 7, 9, SND_SOC_REGMAP);
	if (ret < 0) {
		dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
		return ret;
	}

	ret = wm8988_reset(codec);
	if (ret < 0) {
		dev_err(codec->dev, "Failed to issue reset\n");
		return ret;
	}

	/* set the update bits (we always update left then right) */
	snd_soc_update_bits(codec, WM8988_RADC, 0x0100, 0x0100);
	snd_soc_update_bits(codec, WM8988_RDAC, 0x0100, 0x0100);
	snd_soc_update_bits(codec, WM8988_ROUT1V, 0x0100, 0x0100);
	snd_soc_update_bits(codec, WM8988_ROUT2V, 0x0100, 0x0100);
	snd_soc_update_bits(codec, WM8988_RINVOL, 0x0100, 0x0100);

	wm8988_set_bias_level(codec, SND_SOC_BIAS_STANDBY);

	return 0;
}

static int wm8988_remove(struct snd_soc_codec *codec)
{
	wm8988_set_bias_level(codec, SND_SOC_BIAS_OFF);
	return 0;
}

static struct snd_soc_codec_driver soc_codec_dev_wm8988 = {
	.probe =	wm8988_probe,
	.remove =	wm8988_remove,
	.suspend =	wm8988_suspend,
	.resume =	wm8988_resume,
	.set_bias_level = wm8988_set_bias_level,

	.controls = wm8988_snd_controls,
	.num_controls = ARRAY_SIZE(wm8988_snd_controls),
	.dapm_widgets = wm8988_dapm_widgets,
	.num_dapm_widgets = ARRAY_SIZE(wm8988_dapm_widgets),
	.dapm_routes = wm8988_dapm_routes,
	.num_dapm_routes = ARRAY_SIZE(wm8988_dapm_routes),
};

/* 7-bit register address, 9-bit values (standard Wolfson control format) */
static struct regmap_config wm8988_regmap = {
	.reg_bits = 7,
	.val_bits = 9,

	.max_register = WM8988_LPPB,
	.writeable_reg = wm8988_writeable,

	.cache_type = REGCACHE_RBTREE,
	.reg_defaults = wm8988_reg_defaults,
	.num_reg_defaults = ARRAY_SIZE(wm8988_reg_defaults),
};

#if defined(CONFIG_SPI_MASTER)
static int wm8988_spi_probe(struct spi_device *spi)
{
	struct wm8988_priv *wm8988;
	int ret;

	wm8988 = devm_kzalloc(&spi->dev, sizeof(struct wm8988_priv),
			      GFP_KERNEL);
	if (wm8988 == NULL)
		return -ENOMEM;

	wm8988->regmap = devm_regmap_init_spi(spi, &wm8988_regmap);
	if (IS_ERR(wm8988->regmap)) {
		ret = PTR_ERR(wm8988->regmap);
		dev_err(&spi->dev, "Failed to init regmap: %d\n", ret);
		return ret;
	}

	spi_set_drvdata(spi, wm8988);

	ret = snd_soc_register_codec(&spi->dev,
			&soc_codec_dev_wm8988, &wm8988_dai, 1);
	return ret;
}

static int wm8988_spi_remove(struct spi_device *spi)
{
	snd_soc_unregister_codec(&spi->dev);
	return 0;
}

static struct spi_driver wm8988_spi_driver = {
	.driver = {
		.name	= "wm8988",
		.owner	= THIS_MODULE,
	},
	.probe		= wm8988_spi_probe,
	.remove		= wm8988_spi_remove,
};
#endif /* CONFIG_SPI_MASTER */

#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
static int wm8988_i2c_probe(struct i2c_client *i2c,
			    const struct i2c_device_id *id)
{
	struct wm8988_priv *wm8988;
	int ret;

	wm8988 = devm_kzalloc(&i2c->dev, sizeof(struct wm8988_priv),
			      GFP_KERNEL);
	if (wm8988 == NULL)
		return -ENOMEM;

	i2c_set_clientdata(i2c, wm8988);

	wm8988->regmap = devm_regmap_init_i2c(i2c, &wm8988_regmap);
	if (IS_ERR(wm8988->regmap)) {
		ret = PTR_ERR(wm8988->regmap);
		dev_err(&i2c->dev, "Failed to init regmap: %d\n", ret);
		return ret;
	}

	ret = snd_soc_register_codec(&i2c->dev,
			&soc_codec_dev_wm8988, &wm8988_dai, 1);
	return ret;
}

static int wm8988_i2c_remove(struct i2c_client *client)
{
	snd_soc_unregister_codec(&client->dev);
	return 0;
}

static const struct i2c_device_id wm8988_i2c_id[] = {
	{ "wm8988", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, wm8988_i2c_id);

static struct i2c_driver wm8988_i2c_driver = {
	.driver = {
		.name = "wm8988",
		.owner = THIS_MODULE,
	},
	.probe = wm8988_i2c_probe,
	.remove = wm8988_i2c_remove,
	.id_table = wm8988_i2c_id,
};
#endif

static int __init wm8988_modinit(void)
{
	int ret = 0;
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
	ret = i2c_add_driver(&wm8988_i2c_driver);
	if (ret != 0) {
		printk(KERN_ERR "Failed to register WM8988 I2C driver: %d\n",
		       ret);
	}
#endif
#if defined(CONFIG_SPI_MASTER)
	ret = spi_register_driver(&wm8988_spi_driver);
	if (ret != 0) {
		printk(KERN_ERR "Failed to register WM8988 SPI driver: %d\n",
		       ret);
	}
#endif
	return ret;
}
module_init(wm8988_modinit);

static void __exit wm8988_exit(void)
{
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
	i2c_del_driver(&wm8988_i2c_driver);
#endif
#if defined(CONFIG_SPI_MASTER)
	spi_unregister_driver(&wm8988_spi_driver);
#endif
}
module_exit(wm8988_exit);

MODULE_DESCRIPTION("ASoC WM8988 driver");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
gpl-2.0
robertdolca/linux
arch/powerpc/perf/e6500-pmu.c
2478
3269
/* * Performance counter support for e6500 family processors. * * Author: Priyanka Jain, Priyanka.Jain@freescale.com * Based on e500-pmu.c * Copyright 2013 Freescale Semiconductor, Inc. * Copyright 2008-2009 Paul Mackerras, IBM Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/string.h> #include <linux/perf_event.h> #include <asm/reg.h> #include <asm/cputable.h> /* * Map of generic hardware event types to hardware events * Zero if unsupported */ static int e6500_generic_events[] = { [PERF_COUNT_HW_CPU_CYCLES] = 1, [PERF_COUNT_HW_INSTRUCTIONS] = 2, [PERF_COUNT_HW_CACHE_MISSES] = 221, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 12, [PERF_COUNT_HW_BRANCH_MISSES] = 15, }; #define C(x) PERF_COUNT_HW_CACHE_##x /* * Table of generalized cache-related events. * 0 means not supported, -1 means nonsensical, other values * are event codes. */ static int e6500_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(L1D)] = { /*RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 27, 222 }, [C(OP_WRITE)] = { 28, 223 }, [C(OP_PREFETCH)] = { 29, 0 }, }, [C(L1I)] = { /*RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 2, 254 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { 37, 0 }, }, /* * Assuming LL means L2, it's not a good match for this model. * It does not have separate read/write events (but it does have * separate instruction/data events). */ [C(LL)] = { /*RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0, 0 }, [C(OP_WRITE)] = { 0, 0 }, [C(OP_PREFETCH)] = { 0, 0 }, }, /* * There are data/instruction MMU misses, but that's a miss on * the chip's internal level-one TLB which is probably not * what the user wants. Instead, unified level-two TLB misses * are reported here. 
*/ [C(DTLB)] = { /*RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 26, 66 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, [C(BPU)] = { /*RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 12, 15 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { -1, -1 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, }; static int num_events = 512; /* Upper half of event id is PMLCb, for threshold events */ static u64 e6500_xlate_event(u64 event_id) { u32 event_low = (u32)event_id; if (event_low >= num_events || (event_id & (FSL_EMB_EVENT_THRESHMUL | FSL_EMB_EVENT_THRESH))) return 0; return FSL_EMB_EVENT_VALID; } static struct fsl_emb_pmu e6500_pmu = { .name = "e6500 family", .n_counter = 6, .n_restricted = 0, .xlate_event = e6500_xlate_event, .n_generic = ARRAY_SIZE(e6500_generic_events), .generic_events = e6500_generic_events, .cache_events = &e6500_cache_events, }; static int init_e6500_pmu(void) { if (!cur_cpu_spec->oprofile_cpu_type || strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/e6500")) return -ENODEV; return register_fsl_emb_pmu(&e6500_pmu); } early_initcall(init_e6500_pmu);
gpl-2.0
KangDroid/android_kernel_moto_shamu
sound/soc/codecs/wm8523.c
2478
14191
/* * wm8523.c -- WM8523 ALSA SoC Audio driver * * Copyright 2009 Wolfson Microelectronics plc * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/i2c.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> #include <linux/of_device.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/initval.h> #include <sound/tlv.h> #include "wm8523.h" #define WM8523_NUM_SUPPLIES 2 static const char *wm8523_supply_names[WM8523_NUM_SUPPLIES] = { "AVDD", "LINEVDD", }; #define WM8523_NUM_RATES 7 /* codec private data */ struct wm8523_priv { struct regmap *regmap; struct regulator_bulk_data supplies[WM8523_NUM_SUPPLIES]; unsigned int sysclk; unsigned int rate_constraint_list[WM8523_NUM_RATES]; struct snd_pcm_hw_constraint_list rate_constraint; }; static const struct reg_default wm8523_reg_defaults[] = { { 2, 0x0000 }, /* R2 - PSCTRL1 */ { 3, 0x1812 }, /* R3 - AIF_CTRL1 */ { 4, 0x0000 }, /* R4 - AIF_CTRL2 */ { 5, 0x0001 }, /* R5 - DAC_CTRL3 */ { 6, 0x0190 }, /* R6 - DAC_GAINL */ { 7, 0x0190 }, /* R7 - DAC_GAINR */ { 8, 0x0000 }, /* R8 - ZERO_DETECT */ }; static bool wm8523_volatile_register(struct device *dev, unsigned int reg) { switch (reg) { case WM8523_DEVICE_ID: case WM8523_REVISION: return true; default: return false; } } static const DECLARE_TLV_DB_SCALE(dac_tlv, -10000, 25, 0); static const char *wm8523_zd_count_text[] = { "1024", "2048", }; static const struct soc_enum wm8523_zc_count = SOC_ENUM_SINGLE(WM8523_ZERO_DETECT, 0, 2, wm8523_zd_count_text); static const struct snd_kcontrol_new wm8523_controls[] = { SOC_DOUBLE_R_TLV("Playback 
Volume", WM8523_DAC_GAINL, WM8523_DAC_GAINR, 0, 448, 0, dac_tlv), SOC_SINGLE("ZC Switch", WM8523_DAC_CTRL3, 4, 1, 0), SOC_SINGLE("Playback Deemphasis Switch", WM8523_AIF_CTRL1, 8, 1, 0), SOC_DOUBLE("Playback Switch", WM8523_DAC_CTRL3, 2, 3, 1, 1), SOC_SINGLE("Volume Ramp Up Switch", WM8523_DAC_CTRL3, 1, 1, 0), SOC_SINGLE("Volume Ramp Down Switch", WM8523_DAC_CTRL3, 0, 1, 0), SOC_ENUM("Zero Detect Count", wm8523_zc_count), }; static const struct snd_soc_dapm_widget wm8523_dapm_widgets[] = { SND_SOC_DAPM_DAC("DAC", "Playback", SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_OUTPUT("LINEVOUTL"), SND_SOC_DAPM_OUTPUT("LINEVOUTR"), }; static const struct snd_soc_dapm_route wm8523_dapm_routes[] = { { "LINEVOUTL", NULL, "DAC" }, { "LINEVOUTR", NULL, "DAC" }, }; static struct { int value; int ratio; } lrclk_ratios[WM8523_NUM_RATES] = { { 1, 128 }, { 2, 192 }, { 3, 256 }, { 4, 384 }, { 5, 512 }, { 6, 768 }, { 7, 1152 }, }; static int wm8523_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; struct wm8523_priv *wm8523 = snd_soc_codec_get_drvdata(codec); /* The set of sample rates that can be supported depends on the * MCLK supplied to the CODEC - enforce this. 
*/ if (!wm8523->sysclk) { dev_err(codec->dev, "No MCLK configured, call set_sysclk() on init\n"); return -EINVAL; } snd_pcm_hw_constraint_list(substream->runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &wm8523->rate_constraint); return 0; } static int wm8523_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; struct wm8523_priv *wm8523 = snd_soc_codec_get_drvdata(codec); int i; u16 aifctrl1 = snd_soc_read(codec, WM8523_AIF_CTRL1); u16 aifctrl2 = snd_soc_read(codec, WM8523_AIF_CTRL2); /* Find a supported LRCLK ratio */ for (i = 0; i < ARRAY_SIZE(lrclk_ratios); i++) { if (wm8523->sysclk / params_rate(params) == lrclk_ratios[i].ratio) break; } /* Should never happen, should be handled by constraints */ if (i == ARRAY_SIZE(lrclk_ratios)) { dev_err(codec->dev, "MCLK/fs ratio %d unsupported\n", wm8523->sysclk / params_rate(params)); return -EINVAL; } aifctrl2 &= ~WM8523_SR_MASK; aifctrl2 |= lrclk_ratios[i].value; aifctrl1 &= ~WM8523_WL_MASK; switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: break; case SNDRV_PCM_FORMAT_S20_3LE: aifctrl1 |= 0x8; break; case SNDRV_PCM_FORMAT_S24_LE: aifctrl1 |= 0x10; break; case SNDRV_PCM_FORMAT_S32_LE: aifctrl1 |= 0x18; break; } snd_soc_write(codec, WM8523_AIF_CTRL1, aifctrl1); snd_soc_write(codec, WM8523_AIF_CTRL2, aifctrl2); return 0; } static int wm8523_set_dai_sysclk(struct snd_soc_dai *codec_dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = codec_dai->codec; struct wm8523_priv *wm8523 = snd_soc_codec_get_drvdata(codec); unsigned int val; int i; wm8523->sysclk = freq; wm8523->rate_constraint.count = 0; for (i = 0; i < ARRAY_SIZE(lrclk_ratios); i++) { val = freq / lrclk_ratios[i].ratio; /* Check that it's a standard rate since core can't * cope with others and having the odd rates confuses * constraint matching. 
*/ switch (val) { case 8000: case 11025: case 16000: case 22050: case 32000: case 44100: case 48000: case 64000: case 88200: case 96000: case 176400: case 192000: dev_dbg(codec->dev, "Supported sample rate: %dHz\n", val); wm8523->rate_constraint_list[i] = val; wm8523->rate_constraint.count++; break; default: dev_dbg(codec->dev, "Skipping sample rate: %dHz\n", val); } } /* Need at least one supported rate... */ if (wm8523->rate_constraint.count == 0) return -EINVAL; return 0; } static int wm8523_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; u16 aifctrl1 = snd_soc_read(codec, WM8523_AIF_CTRL1); aifctrl1 &= ~(WM8523_BCLK_INV_MASK | WM8523_LRCLK_INV_MASK | WM8523_FMT_MASK | WM8523_AIF_MSTR_MASK); switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: aifctrl1 |= WM8523_AIF_MSTR; break; case SND_SOC_DAIFMT_CBS_CFS: break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: aifctrl1 |= 0x0002; break; case SND_SOC_DAIFMT_RIGHT_J: break; case SND_SOC_DAIFMT_LEFT_J: aifctrl1 |= 0x0001; break; case SND_SOC_DAIFMT_DSP_A: aifctrl1 |= 0x0003; break; case SND_SOC_DAIFMT_DSP_B: aifctrl1 |= 0x0023; break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_IB_IF: aifctrl1 |= WM8523_BCLK_INV | WM8523_LRCLK_INV; break; case SND_SOC_DAIFMT_IB_NF: aifctrl1 |= WM8523_BCLK_INV; break; case SND_SOC_DAIFMT_NB_IF: aifctrl1 |= WM8523_LRCLK_INV; break; default: return -EINVAL; } snd_soc_write(codec, WM8523_AIF_CTRL1, aifctrl1); return 0; } static int wm8523_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { struct wm8523_priv *wm8523 = snd_soc_codec_get_drvdata(codec); int ret; switch (level) { case SND_SOC_BIAS_ON: break; case SND_SOC_BIAS_PREPARE: /* Full power on */ snd_soc_update_bits(codec, WM8523_PSCTRL1, WM8523_SYS_ENA_MASK, 3); break; case SND_SOC_BIAS_STANDBY: if 
(codec->dapm.bias_level == SND_SOC_BIAS_OFF) { ret = regulator_bulk_enable(ARRAY_SIZE(wm8523->supplies), wm8523->supplies); if (ret != 0) { dev_err(codec->dev, "Failed to enable supplies: %d\n", ret); return ret; } /* Sync back default/cached values */ regcache_sync(wm8523->regmap); /* Initial power up */ snd_soc_update_bits(codec, WM8523_PSCTRL1, WM8523_SYS_ENA_MASK, 1); msleep(100); } /* Power up to mute */ snd_soc_update_bits(codec, WM8523_PSCTRL1, WM8523_SYS_ENA_MASK, 2); break; case SND_SOC_BIAS_OFF: /* The chip runs through the power down sequence for us. */ snd_soc_update_bits(codec, WM8523_PSCTRL1, WM8523_SYS_ENA_MASK, 0); msleep(100); regulator_bulk_disable(ARRAY_SIZE(wm8523->supplies), wm8523->supplies); break; } codec->dapm.bias_level = level; return 0; } #define WM8523_RATES SNDRV_PCM_RATE_8000_192000 #define WM8523_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) static const struct snd_soc_dai_ops wm8523_dai_ops = { .startup = wm8523_startup, .hw_params = wm8523_hw_params, .set_sysclk = wm8523_set_dai_sysclk, .set_fmt = wm8523_set_dai_fmt, }; static struct snd_soc_dai_driver wm8523_dai = { .name = "wm8523-hifi", .playback = { .stream_name = "Playback", .channels_min = 2, /* Mono modes not yet supported */ .channels_max = 2, .rates = WM8523_RATES, .formats = WM8523_FORMATS, }, .ops = &wm8523_dai_ops, }; #ifdef CONFIG_PM static int wm8523_suspend(struct snd_soc_codec *codec) { wm8523_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static int wm8523_resume(struct snd_soc_codec *codec) { wm8523_set_bias_level(codec, SND_SOC_BIAS_STANDBY); return 0; } #else #define wm8523_suspend NULL #define wm8523_resume NULL #endif static int wm8523_probe(struct snd_soc_codec *codec) { struct wm8523_priv *wm8523 = snd_soc_codec_get_drvdata(codec); int ret; wm8523->rate_constraint.list = &wm8523->rate_constraint_list[0]; wm8523->rate_constraint.count = ARRAY_SIZE(wm8523->rate_constraint_list); ret = 
snd_soc_codec_set_cache_io(codec, 8, 16, SND_SOC_REGMAP); if (ret != 0) { dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret); return ret; } /* Change some default settings - latch VU and enable ZC */ snd_soc_update_bits(codec, WM8523_DAC_GAINR, WM8523_DACR_VU, WM8523_DACR_VU); snd_soc_update_bits(codec, WM8523_DAC_CTRL3, WM8523_ZC, WM8523_ZC); wm8523_set_bias_level(codec, SND_SOC_BIAS_STANDBY); return 0; } static int wm8523_remove(struct snd_soc_codec *codec) { wm8523_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static struct snd_soc_codec_driver soc_codec_dev_wm8523 = { .probe = wm8523_probe, .remove = wm8523_remove, .suspend = wm8523_suspend, .resume = wm8523_resume, .set_bias_level = wm8523_set_bias_level, .controls = wm8523_controls, .num_controls = ARRAY_SIZE(wm8523_controls), .dapm_widgets = wm8523_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(wm8523_dapm_widgets), .dapm_routes = wm8523_dapm_routes, .num_dapm_routes = ARRAY_SIZE(wm8523_dapm_routes), }; static const struct of_device_id wm8523_of_match[] = { { .compatible = "wlf,wm8523" }, { }, }; static const struct regmap_config wm8523_regmap = { .reg_bits = 8, .val_bits = 16, .max_register = WM8523_ZERO_DETECT, .reg_defaults = wm8523_reg_defaults, .num_reg_defaults = ARRAY_SIZE(wm8523_reg_defaults), .cache_type = REGCACHE_RBTREE, .volatile_reg = wm8523_volatile_register, }; #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) static int wm8523_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct wm8523_priv *wm8523; unsigned int val; int ret, i; wm8523 = devm_kzalloc(&i2c->dev, sizeof(struct wm8523_priv), GFP_KERNEL); if (wm8523 == NULL) return -ENOMEM; wm8523->regmap = devm_regmap_init_i2c(i2c, &wm8523_regmap); if (IS_ERR(wm8523->regmap)) { ret = PTR_ERR(wm8523->regmap); dev_err(&i2c->dev, "Failed to create regmap: %d\n", ret); return ret; } for (i = 0; i < ARRAY_SIZE(wm8523->supplies); i++) wm8523->supplies[i].supply = wm8523_supply_names[i]; ret = 
devm_regulator_bulk_get(&i2c->dev, ARRAY_SIZE(wm8523->supplies), wm8523->supplies); if (ret != 0) { dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret); return ret; } ret = regulator_bulk_enable(ARRAY_SIZE(wm8523->supplies), wm8523->supplies); if (ret != 0) { dev_err(&i2c->dev, "Failed to enable supplies: %d\n", ret); return ret; } ret = regmap_read(wm8523->regmap, WM8523_DEVICE_ID, &val); if (ret < 0) { dev_err(&i2c->dev, "Failed to read ID register\n"); goto err_enable; } if (val != 0x8523) { dev_err(&i2c->dev, "Device is not a WM8523, ID is %x\n", ret); ret = -EINVAL; goto err_enable; } ret = regmap_read(wm8523->regmap, WM8523_REVISION, &val); if (ret < 0) { dev_err(&i2c->dev, "Failed to read revision register\n"); goto err_enable; } dev_info(&i2c->dev, "revision %c\n", (val & WM8523_CHIP_REV_MASK) + 'A'); ret = regmap_write(wm8523->regmap, WM8523_DEVICE_ID, 0x8523); if (ret != 0) { dev_err(&i2c->dev, "Failed to reset device: %d\n", ret); goto err_enable; } regulator_bulk_disable(ARRAY_SIZE(wm8523->supplies), wm8523->supplies); i2c_set_clientdata(i2c, wm8523); ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_wm8523, &wm8523_dai, 1); return ret; err_enable: regulator_bulk_disable(ARRAY_SIZE(wm8523->supplies), wm8523->supplies); return ret; } static int wm8523_i2c_remove(struct i2c_client *client) { snd_soc_unregister_codec(&client->dev); return 0; } static const struct i2c_device_id wm8523_i2c_id[] = { { "wm8523", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, wm8523_i2c_id); static struct i2c_driver wm8523_i2c_driver = { .driver = { .name = "wm8523", .owner = THIS_MODULE, .of_match_table = wm8523_of_match, }, .probe = wm8523_i2c_probe, .remove = wm8523_i2c_remove, .id_table = wm8523_i2c_id, }; #endif static int __init wm8523_modinit(void) { int ret; #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) ret = i2c_add_driver(&wm8523_i2c_driver); if (ret != 0) { printk(KERN_ERR "Failed to register WM8523 I2C driver: %d\n", ret); } #endif return 0; } 
module_init(wm8523_modinit); static void __exit wm8523_exit(void) { #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) i2c_del_driver(&wm8523_i2c_driver); #endif } module_exit(wm8523_exit); MODULE_DESCRIPTION("ASoC WM8523 driver"); MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); MODULE_LICENSE("GPL");
gpl-2.0
mastero9017/expectus_kernel_hammerhead
fs/nilfs2/super.c
2734
36493
/* * super.c - NILFS module and super block management. * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Written by Ryusuke Konishi <ryusuke@osrg.net> */ /* * linux/fs/ext2/super.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/inode.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Big-endian to little-endian byte-swapping/bitmaps by * David S. 
Miller (davem@caip.rutgers.edu), 1995 */ #include <linux/module.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/parser.h> #include <linux/crc32.h> #include <linux/vfs.h> #include <linux/writeback.h> #include <linux/seq_file.h> #include <linux/mount.h> #include "nilfs.h" #include "export.h" #include "mdt.h" #include "alloc.h" #include "btree.h" #include "btnode.h" #include "page.h" #include "cpfile.h" #include "sufile.h" /* nilfs_sufile_resize(), nilfs_sufile_set_alloc_range() */ #include "ifile.h" #include "dat.h" #include "segment.h" #include "segbuf.h" MODULE_AUTHOR("NTT Corp."); MODULE_DESCRIPTION("A New Implementation of the Log-structured Filesystem " "(NILFS)"); MODULE_LICENSE("GPL"); static struct kmem_cache *nilfs_inode_cachep; struct kmem_cache *nilfs_transaction_cachep; struct kmem_cache *nilfs_segbuf_cachep; struct kmem_cache *nilfs_btree_path_cache; static int nilfs_setup_super(struct super_block *sb, int is_mount); static int nilfs_remount(struct super_block *sb, int *flags, char *data); static void nilfs_set_error(struct super_block *sb) { struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_super_block **sbp; down_write(&nilfs->ns_sem); if (!(nilfs->ns_mount_state & NILFS_ERROR_FS)) { nilfs->ns_mount_state |= NILFS_ERROR_FS; sbp = nilfs_prepare_super(sb, 0); if (likely(sbp)) { sbp[0]->s_state |= cpu_to_le16(NILFS_ERROR_FS); if (sbp[1]) sbp[1]->s_state |= cpu_to_le16(NILFS_ERROR_FS); nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL); } } up_write(&nilfs->ns_sem); } /** * nilfs_error() - report failure condition on a filesystem * * nilfs_error() sets an ERROR_FS flag on the superblock as well as * reporting an error message. It should be called when NILFS detects * incoherences or defects of meta data on disk. As for sustainable * errors such as a single-shot I/O error, nilfs_warning() or the printk() * function should be used instead. 
* * The segment constructor must not call this function because it can * kill itself. */ void nilfs_error(struct super_block *sb, const char *function, const char *fmt, ...) { struct the_nilfs *nilfs = sb->s_fs_info; struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_CRIT "NILFS error (device %s): %s: %pV\n", sb->s_id, function, &vaf); va_end(args); if (!(sb->s_flags & MS_RDONLY)) { nilfs_set_error(sb); if (nilfs_test_opt(nilfs, ERRORS_RO)) { printk(KERN_CRIT "Remounting filesystem read-only\n"); sb->s_flags |= MS_RDONLY; } } if (nilfs_test_opt(nilfs, ERRORS_PANIC)) panic("NILFS (device %s): panic forced after error\n", sb->s_id); } void nilfs_warning(struct super_block *sb, const char *function, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_WARNING "NILFS warning (device %s): %s: %pV\n", sb->s_id, function, &vaf); va_end(args); } struct inode *nilfs_alloc_inode(struct super_block *sb) { struct nilfs_inode_info *ii; ii = kmem_cache_alloc(nilfs_inode_cachep, GFP_NOFS); if (!ii) return NULL; ii->i_bh = NULL; ii->i_state = 0; ii->i_cno = 0; ii->vfs_inode.i_version = 1; nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode, sb->s_bdi); return &ii->vfs_inode; } static void nilfs_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); struct nilfs_mdt_info *mdi = NILFS_MDT(inode); if (mdi) { kfree(mdi->mi_bgl); /* kfree(NULL) is safe */ kfree(mdi); } kmem_cache_free(nilfs_inode_cachep, NILFS_I(inode)); } void nilfs_destroy_inode(struct inode *inode) { call_rcu(&inode->i_rcu, nilfs_i_callback); } static int nilfs_sync_super(struct super_block *sb, int flag) { struct the_nilfs *nilfs = sb->s_fs_info; int err; retry: set_buffer_dirty(nilfs->ns_sbh[0]); if (nilfs_test_opt(nilfs, BARRIER)) { err = __sync_dirty_buffer(nilfs->ns_sbh[0], WRITE_SYNC | WRITE_FLUSH_FUA); } else { err = 
sync_dirty_buffer(nilfs->ns_sbh[0]); } if (unlikely(err)) { printk(KERN_ERR "NILFS: unable to write superblock (err=%d)\n", err); if (err == -EIO && nilfs->ns_sbh[1]) { /* * sbp[0] points to newer log than sbp[1], * so copy sbp[0] to sbp[1] to take over sbp[0]. */ memcpy(nilfs->ns_sbp[1], nilfs->ns_sbp[0], nilfs->ns_sbsize); nilfs_fall_back_super_block(nilfs); goto retry; } } else { struct nilfs_super_block *sbp = nilfs->ns_sbp[0]; nilfs->ns_sbwcount++; /* * The latest segment becomes trailable from the position * written in superblock. */ clear_nilfs_discontinued(nilfs); /* update GC protection for recent segments */ if (nilfs->ns_sbh[1]) { if (flag == NILFS_SB_COMMIT_ALL) { set_buffer_dirty(nilfs->ns_sbh[1]); if (sync_dirty_buffer(nilfs->ns_sbh[1]) < 0) goto out; } if (le64_to_cpu(nilfs->ns_sbp[1]->s_last_cno) < le64_to_cpu(nilfs->ns_sbp[0]->s_last_cno)) sbp = nilfs->ns_sbp[1]; } spin_lock(&nilfs->ns_last_segment_lock); nilfs->ns_prot_seq = le64_to_cpu(sbp->s_last_seq); spin_unlock(&nilfs->ns_last_segment_lock); } out: return err; } void nilfs_set_log_cursor(struct nilfs_super_block *sbp, struct the_nilfs *nilfs) { sector_t nfreeblocks; /* nilfs->ns_sem must be locked by the caller. */ nilfs_count_free_blocks(nilfs, &nfreeblocks); sbp->s_free_blocks_count = cpu_to_le64(nfreeblocks); spin_lock(&nilfs->ns_last_segment_lock); sbp->s_last_seq = cpu_to_le64(nilfs->ns_last_seq); sbp->s_last_pseg = cpu_to_le64(nilfs->ns_last_pseg); sbp->s_last_cno = cpu_to_le64(nilfs->ns_last_cno); spin_unlock(&nilfs->ns_last_segment_lock); } struct nilfs_super_block **nilfs_prepare_super(struct super_block *sb, int flip) { struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_super_block **sbp = nilfs->ns_sbp; /* nilfs->ns_sem must be locked by the caller. 
*/ if (sbp[0]->s_magic != cpu_to_le16(NILFS_SUPER_MAGIC)) { if (sbp[1] && sbp[1]->s_magic == cpu_to_le16(NILFS_SUPER_MAGIC)) { memcpy(sbp[0], sbp[1], nilfs->ns_sbsize); } else { printk(KERN_CRIT "NILFS: superblock broke on dev %s\n", sb->s_id); return NULL; } } else if (sbp[1] && sbp[1]->s_magic != cpu_to_le16(NILFS_SUPER_MAGIC)) { memcpy(sbp[1], sbp[0], nilfs->ns_sbsize); } if (flip && sbp[1]) nilfs_swap_super_block(nilfs); return sbp; } int nilfs_commit_super(struct super_block *sb, int flag) { struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_super_block **sbp = nilfs->ns_sbp; time_t t; /* nilfs->ns_sem must be locked by the caller. */ t = get_seconds(); nilfs->ns_sbwtime = t; sbp[0]->s_wtime = cpu_to_le64(t); sbp[0]->s_sum = 0; sbp[0]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed, (unsigned char *)sbp[0], nilfs->ns_sbsize)); if (flag == NILFS_SB_COMMIT_ALL && sbp[1]) { sbp[1]->s_wtime = sbp[0]->s_wtime; sbp[1]->s_sum = 0; sbp[1]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed, (unsigned char *)sbp[1], nilfs->ns_sbsize)); } clear_nilfs_sb_dirty(nilfs); return nilfs_sync_super(sb, flag); } /** * nilfs_cleanup_super() - write filesystem state for cleanup * @sb: super block instance to be unmounted or degraded to read-only * * This function restores state flags in the on-disk super block. * This will set "clean" flag (i.e. NILFS_VALID_FS) unless the * filesystem was not clean previously. */ int nilfs_cleanup_super(struct super_block *sb) { struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_super_block **sbp; int flag = NILFS_SB_COMMIT; int ret = -EIO; sbp = nilfs_prepare_super(sb, 0); if (sbp) { sbp[0]->s_state = cpu_to_le16(nilfs->ns_mount_state); nilfs_set_log_cursor(sbp[0], nilfs); if (sbp[1] && sbp[0]->s_last_cno == sbp[1]->s_last_cno) { /* * make the "clean" flag also to the opposite * super block if both super blocks point to * the same checkpoint. 
*/ sbp[1]->s_state = sbp[0]->s_state; flag = NILFS_SB_COMMIT_ALL; } ret = nilfs_commit_super(sb, flag); } return ret; } /** * nilfs_move_2nd_super - relocate secondary super block * @sb: super block instance * @sb2off: new offset of the secondary super block (in bytes) */ static int nilfs_move_2nd_super(struct super_block *sb, loff_t sb2off) { struct the_nilfs *nilfs = sb->s_fs_info; struct buffer_head *nsbh; struct nilfs_super_block *nsbp; sector_t blocknr, newblocknr; unsigned long offset; int sb2i = -1; /* array index of the secondary superblock */ int ret = 0; /* nilfs->ns_sem must be locked by the caller. */ if (nilfs->ns_sbh[1] && nilfs->ns_sbh[1]->b_blocknr > nilfs->ns_first_data_block) { sb2i = 1; blocknr = nilfs->ns_sbh[1]->b_blocknr; } else if (nilfs->ns_sbh[0]->b_blocknr > nilfs->ns_first_data_block) { sb2i = 0; blocknr = nilfs->ns_sbh[0]->b_blocknr; } if (sb2i >= 0 && (u64)blocknr << nilfs->ns_blocksize_bits == sb2off) goto out; /* super block location is unchanged */ /* Get new super block buffer */ newblocknr = sb2off >> nilfs->ns_blocksize_bits; offset = sb2off & (nilfs->ns_blocksize - 1); nsbh = sb_getblk(sb, newblocknr); if (!nsbh) { printk(KERN_WARNING "NILFS warning: unable to move secondary superblock " "to block %llu\n", (unsigned long long)newblocknr); ret = -EIO; goto out; } nsbp = (void *)nsbh->b_data + offset; memset(nsbp, 0, nilfs->ns_blocksize); if (sb2i >= 0) { memcpy(nsbp, nilfs->ns_sbp[sb2i], nilfs->ns_sbsize); brelse(nilfs->ns_sbh[sb2i]); nilfs->ns_sbh[sb2i] = nsbh; nilfs->ns_sbp[sb2i] = nsbp; } else if (nilfs->ns_sbh[0]->b_blocknr < nilfs->ns_first_data_block) { /* secondary super block will be restored to index 1 */ nilfs->ns_sbh[1] = nsbh; nilfs->ns_sbp[1] = nsbp; } else { brelse(nsbh); } out: return ret; } /** * nilfs_resize_fs - resize the filesystem * @sb: super block instance * @newsize: new size of the filesystem (in bytes) */ int nilfs_resize_fs(struct super_block *sb, __u64 newsize) { struct the_nilfs *nilfs = sb->s_fs_info; 
struct nilfs_super_block **sbp; __u64 devsize, newnsegs; loff_t sb2off; int ret; ret = -ERANGE; devsize = i_size_read(sb->s_bdev->bd_inode); if (newsize > devsize) goto out; /* * Write lock is required to protect some functions depending * on the number of segments, the number of reserved segments, * and so forth. */ down_write(&nilfs->ns_segctor_sem); sb2off = NILFS_SB2_OFFSET_BYTES(newsize); newnsegs = sb2off >> nilfs->ns_blocksize_bits; do_div(newnsegs, nilfs->ns_blocks_per_segment); ret = nilfs_sufile_resize(nilfs->ns_sufile, newnsegs); up_write(&nilfs->ns_segctor_sem); if (ret < 0) goto out; ret = nilfs_construct_segment(sb); if (ret < 0) goto out; down_write(&nilfs->ns_sem); nilfs_move_2nd_super(sb, sb2off); ret = -EIO; sbp = nilfs_prepare_super(sb, 0); if (likely(sbp)) { nilfs_set_log_cursor(sbp[0], nilfs); /* * Drop NILFS_RESIZE_FS flag for compatibility with * mount-time resize which may be implemented in a * future release. */ sbp[0]->s_state = cpu_to_le16(le16_to_cpu(sbp[0]->s_state) & ~NILFS_RESIZE_FS); sbp[0]->s_dev_size = cpu_to_le64(newsize); sbp[0]->s_nsegments = cpu_to_le64(nilfs->ns_nsegments); if (sbp[1]) memcpy(sbp[1], sbp[0], nilfs->ns_sbsize); ret = nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL); } up_write(&nilfs->ns_sem); /* * Reset the range of allocatable segments last. This order * is important in the case of expansion because the secondary * superblock must be protected from log write until migration * completes. 
*/ if (!ret) nilfs_sufile_set_alloc_range(nilfs->ns_sufile, 0, newnsegs - 1); out: return ret; } static void nilfs_put_super(struct super_block *sb) { struct the_nilfs *nilfs = sb->s_fs_info; nilfs_detach_log_writer(sb); if (!(sb->s_flags & MS_RDONLY)) { down_write(&nilfs->ns_sem); nilfs_cleanup_super(sb); up_write(&nilfs->ns_sem); } iput(nilfs->ns_sufile); iput(nilfs->ns_cpfile); iput(nilfs->ns_dat); destroy_nilfs(nilfs); sb->s_fs_info = NULL; } static int nilfs_sync_fs(struct super_block *sb, int wait) { struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_super_block **sbp; int err = 0; /* This function is called when super block should be written back */ if (wait) err = nilfs_construct_segment(sb); down_write(&nilfs->ns_sem); if (nilfs_sb_dirty(nilfs)) { sbp = nilfs_prepare_super(sb, nilfs_sb_will_flip(nilfs)); if (likely(sbp)) { nilfs_set_log_cursor(sbp[0], nilfs); nilfs_commit_super(sb, NILFS_SB_COMMIT); } } up_write(&nilfs->ns_sem); return err; } int nilfs_attach_checkpoint(struct super_block *sb, __u64 cno, int curr_mnt, struct nilfs_root **rootp) { struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_root *root; struct nilfs_checkpoint *raw_cp; struct buffer_head *bh_cp; int err = -ENOMEM; root = nilfs_find_or_create_root( nilfs, curr_mnt ? 
NILFS_CPTREE_CURRENT_CNO : cno); if (!root) return err; if (root->ifile) goto reuse; /* already attached checkpoint */ down_read(&nilfs->ns_segctor_sem); err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, cno, 0, &raw_cp, &bh_cp); up_read(&nilfs->ns_segctor_sem); if (unlikely(err)) { if (err == -ENOENT || err == -EINVAL) { printk(KERN_ERR "NILFS: Invalid checkpoint " "(checkpoint number=%llu)\n", (unsigned long long)cno); err = -EINVAL; } goto failed; } err = nilfs_ifile_read(sb, root, nilfs->ns_inode_size, &raw_cp->cp_ifile_inode, &root->ifile); if (err) goto failed_bh; atomic_set(&root->inodes_count, le64_to_cpu(raw_cp->cp_inodes_count)); atomic_set(&root->blocks_count, le64_to_cpu(raw_cp->cp_blocks_count)); nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, cno, bh_cp); reuse: *rootp = root; return 0; failed_bh: nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, cno, bh_cp); failed: nilfs_put_root(root); return err; } static int nilfs_freeze(struct super_block *sb) { struct the_nilfs *nilfs = sb->s_fs_info; int err; if (sb->s_flags & MS_RDONLY) return 0; /* Mark super block clean */ down_write(&nilfs->ns_sem); err = nilfs_cleanup_super(sb); up_write(&nilfs->ns_sem); return err; } static int nilfs_unfreeze(struct super_block *sb) { struct the_nilfs *nilfs = sb->s_fs_info; if (sb->s_flags & MS_RDONLY) return 0; down_write(&nilfs->ns_sem); nilfs_setup_super(sb, false); up_write(&nilfs->ns_sem); return 0; } static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct nilfs_root *root = NILFS_I(dentry->d_inode)->i_root; struct the_nilfs *nilfs = root->nilfs; u64 id = huge_encode_dev(sb->s_bdev->bd_dev); unsigned long long blocks; unsigned long overhead; unsigned long nrsvblocks; sector_t nfreeblocks; int err; /* * Compute all of the segment blocks * * The blocks before first segment and after last segment * are excluded. 
*/ blocks = nilfs->ns_blocks_per_segment * nilfs->ns_nsegments - nilfs->ns_first_data_block; nrsvblocks = nilfs->ns_nrsvsegs * nilfs->ns_blocks_per_segment; /* * Compute the overhead * * When distributing meta data blocks outside segment structure, * We must count them as the overhead. */ overhead = 0; err = nilfs_count_free_blocks(nilfs, &nfreeblocks); if (unlikely(err)) return err; buf->f_type = NILFS_SUPER_MAGIC; buf->f_bsize = sb->s_blocksize; buf->f_blocks = blocks - overhead; buf->f_bfree = nfreeblocks; buf->f_bavail = (buf->f_bfree >= nrsvblocks) ? (buf->f_bfree - nrsvblocks) : 0; buf->f_files = atomic_read(&root->inodes_count); buf->f_ffree = 0; /* nilfs_count_free_inodes(sb); */ buf->f_namelen = NILFS_NAME_LEN; buf->f_fsid.val[0] = (u32)id; buf->f_fsid.val[1] = (u32)(id >> 32); return 0; } static int nilfs_show_options(struct seq_file *seq, struct dentry *dentry) { struct super_block *sb = dentry->d_sb; struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_root *root = NILFS_I(dentry->d_inode)->i_root; if (!nilfs_test_opt(nilfs, BARRIER)) seq_puts(seq, ",nobarrier"); if (root->cno != NILFS_CPTREE_CURRENT_CNO) seq_printf(seq, ",cp=%llu", (unsigned long long)root->cno); if (nilfs_test_opt(nilfs, ERRORS_PANIC)) seq_puts(seq, ",errors=panic"); if (nilfs_test_opt(nilfs, ERRORS_CONT)) seq_puts(seq, ",errors=continue"); if (nilfs_test_opt(nilfs, STRICT_ORDER)) seq_puts(seq, ",order=strict"); if (nilfs_test_opt(nilfs, NORECOVERY)) seq_puts(seq, ",norecovery"); if (nilfs_test_opt(nilfs, DISCARD)) seq_puts(seq, ",discard"); return 0; } static const struct super_operations nilfs_sops = { .alloc_inode = nilfs_alloc_inode, .destroy_inode = nilfs_destroy_inode, .dirty_inode = nilfs_dirty_inode, /* .write_inode = nilfs_write_inode, */ /* .put_inode = nilfs_put_inode, */ /* .drop_inode = nilfs_drop_inode, */ .evict_inode = nilfs_evict_inode, .put_super = nilfs_put_super, /* .write_super = nilfs_write_super, */ .sync_fs = nilfs_sync_fs, .freeze_fs = nilfs_freeze, 
.unfreeze_fs = nilfs_unfreeze, /* .write_super_lockfs */ /* .unlockfs */ .statfs = nilfs_statfs, .remount_fs = nilfs_remount, /* .umount_begin */ .show_options = nilfs_show_options }; enum { Opt_err_cont, Opt_err_panic, Opt_err_ro, Opt_barrier, Opt_nobarrier, Opt_snapshot, Opt_order, Opt_norecovery, Opt_discard, Opt_nodiscard, Opt_err, }; static match_table_t tokens = { {Opt_err_cont, "errors=continue"}, {Opt_err_panic, "errors=panic"}, {Opt_err_ro, "errors=remount-ro"}, {Opt_barrier, "barrier"}, {Opt_nobarrier, "nobarrier"}, {Opt_snapshot, "cp=%u"}, {Opt_order, "order=%s"}, {Opt_norecovery, "norecovery"}, {Opt_discard, "discard"}, {Opt_nodiscard, "nodiscard"}, {Opt_err, NULL} }; static int parse_options(char *options, struct super_block *sb, int is_remount) { struct the_nilfs *nilfs = sb->s_fs_info; char *p; substring_t args[MAX_OPT_ARGS]; if (!options) return 1; while ((p = strsep(&options, ",")) != NULL) { int token; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_barrier: nilfs_set_opt(nilfs, BARRIER); break; case Opt_nobarrier: nilfs_clear_opt(nilfs, BARRIER); break; case Opt_order: if (strcmp(args[0].from, "relaxed") == 0) /* Ordered data semantics */ nilfs_clear_opt(nilfs, STRICT_ORDER); else if (strcmp(args[0].from, "strict") == 0) /* Strict in-order semantics */ nilfs_set_opt(nilfs, STRICT_ORDER); else return 0; break; case Opt_err_panic: nilfs_write_opt(nilfs, ERROR_MODE, ERRORS_PANIC); break; case Opt_err_ro: nilfs_write_opt(nilfs, ERROR_MODE, ERRORS_RO); break; case Opt_err_cont: nilfs_write_opt(nilfs, ERROR_MODE, ERRORS_CONT); break; case Opt_snapshot: if (is_remount) { printk(KERN_ERR "NILFS: \"%s\" option is invalid " "for remount.\n", p); return 0; } break; case Opt_norecovery: nilfs_set_opt(nilfs, NORECOVERY); break; case Opt_discard: nilfs_set_opt(nilfs, DISCARD); break; case Opt_nodiscard: nilfs_clear_opt(nilfs, DISCARD); break; default: printk(KERN_ERR "NILFS: Unrecognized mount option \"%s\"\n", p); return 0; 
} } return 1; } static inline void nilfs_set_default_options(struct super_block *sb, struct nilfs_super_block *sbp) { struct the_nilfs *nilfs = sb->s_fs_info; nilfs->ns_mount_opt = NILFS_MOUNT_ERRORS_RO | NILFS_MOUNT_BARRIER; } static int nilfs_setup_super(struct super_block *sb, int is_mount) { struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_super_block **sbp; int max_mnt_count; int mnt_count; /* nilfs->ns_sem must be locked by the caller. */ sbp = nilfs_prepare_super(sb, 0); if (!sbp) return -EIO; if (!is_mount) goto skip_mount_setup; max_mnt_count = le16_to_cpu(sbp[0]->s_max_mnt_count); mnt_count = le16_to_cpu(sbp[0]->s_mnt_count); if (nilfs->ns_mount_state & NILFS_ERROR_FS) { printk(KERN_WARNING "NILFS warning: mounting fs with errors\n"); #if 0 } else if (max_mnt_count >= 0 && mnt_count >= max_mnt_count) { printk(KERN_WARNING "NILFS warning: maximal mount count reached\n"); #endif } if (!max_mnt_count) sbp[0]->s_max_mnt_count = cpu_to_le16(NILFS_DFL_MAX_MNT_COUNT); sbp[0]->s_mnt_count = cpu_to_le16(mnt_count + 1); sbp[0]->s_mtime = cpu_to_le64(get_seconds()); skip_mount_setup: sbp[0]->s_state = cpu_to_le16(le16_to_cpu(sbp[0]->s_state) & ~NILFS_VALID_FS); /* synchronize sbp[1] with sbp[0] */ if (sbp[1]) memcpy(sbp[1], sbp[0], nilfs->ns_sbsize); return nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL); } struct nilfs_super_block *nilfs_read_super_block(struct super_block *sb, u64 pos, int blocksize, struct buffer_head **pbh) { unsigned long long sb_index = pos; unsigned long offset; offset = do_div(sb_index, blocksize); *pbh = sb_bread(sb, sb_index); if (!*pbh) return NULL; return (struct nilfs_super_block *)((char *)(*pbh)->b_data + offset); } int nilfs_store_magic_and_option(struct super_block *sb, struct nilfs_super_block *sbp, char *data) { struct the_nilfs *nilfs = sb->s_fs_info; sb->s_magic = le16_to_cpu(sbp->s_magic); /* FS independent flags */ #ifdef NILFS_ATIME_DISABLE sb->s_flags |= MS_NOATIME; #endif nilfs_set_default_options(sb, sbp); 
nilfs->ns_resuid = le16_to_cpu(sbp->s_def_resuid); nilfs->ns_resgid = le16_to_cpu(sbp->s_def_resgid); nilfs->ns_interval = le32_to_cpu(sbp->s_c_interval); nilfs->ns_watermark = le32_to_cpu(sbp->s_c_block_max); return !parse_options(data, sb, 0) ? -EINVAL : 0 ; } int nilfs_check_feature_compatibility(struct super_block *sb, struct nilfs_super_block *sbp) { __u64 features; features = le64_to_cpu(sbp->s_feature_incompat) & ~NILFS_FEATURE_INCOMPAT_SUPP; if (features) { printk(KERN_ERR "NILFS: couldn't mount because of unsupported " "optional features (%llx)\n", (unsigned long long)features); return -EINVAL; } features = le64_to_cpu(sbp->s_feature_compat_ro) & ~NILFS_FEATURE_COMPAT_RO_SUPP; if (!(sb->s_flags & MS_RDONLY) && features) { printk(KERN_ERR "NILFS: couldn't mount RDWR because of " "unsupported optional features (%llx)\n", (unsigned long long)features); return -EINVAL; } return 0; } static int nilfs_get_root_dentry(struct super_block *sb, struct nilfs_root *root, struct dentry **root_dentry) { struct inode *inode; struct dentry *dentry; int ret = 0; inode = nilfs_iget(sb, root, NILFS_ROOT_INO); if (IS_ERR(inode)) { printk(KERN_ERR "NILFS: get root inode failed\n"); ret = PTR_ERR(inode); goto out; } if (!S_ISDIR(inode->i_mode) || !inode->i_blocks || !inode->i_size) { iput(inode); printk(KERN_ERR "NILFS: corrupt root inode.\n"); ret = -EINVAL; goto out; } if (root->cno == NILFS_CPTREE_CURRENT_CNO) { dentry = d_find_alias(inode); if (!dentry) { dentry = d_make_root(inode); if (!dentry) { ret = -ENOMEM; goto failed_dentry; } } else { iput(inode); } } else { dentry = d_obtain_alias(inode); if (IS_ERR(dentry)) { ret = PTR_ERR(dentry); goto failed_dentry; } } *root_dentry = dentry; out: return ret; failed_dentry: printk(KERN_ERR "NILFS: get root dentry failed\n"); goto out; } static int nilfs_attach_snapshot(struct super_block *s, __u64 cno, struct dentry **root_dentry) { struct the_nilfs *nilfs = s->s_fs_info; struct nilfs_root *root; int ret; 
down_read(&nilfs->ns_segctor_sem); ret = nilfs_cpfile_is_snapshot(nilfs->ns_cpfile, cno); up_read(&nilfs->ns_segctor_sem); if (ret < 0) { ret = (ret == -ENOENT) ? -EINVAL : ret; goto out; } else if (!ret) { printk(KERN_ERR "NILFS: The specified checkpoint is " "not a snapshot (checkpoint number=%llu).\n", (unsigned long long)cno); ret = -EINVAL; goto out; } ret = nilfs_attach_checkpoint(s, cno, false, &root); if (ret) { printk(KERN_ERR "NILFS: error loading snapshot " "(checkpoint number=%llu).\n", (unsigned long long)cno); goto out; } ret = nilfs_get_root_dentry(s, root, root_dentry); nilfs_put_root(root); out: return ret; } static int nilfs_tree_was_touched(struct dentry *root_dentry) { return root_dentry->d_count > 1; } /** * nilfs_try_to_shrink_tree() - try to shrink dentries of a checkpoint * @root_dentry: root dentry of the tree to be shrunk * * This function returns true if the tree was in-use. */ static int nilfs_try_to_shrink_tree(struct dentry *root_dentry) { if (have_submounts(root_dentry)) return true; shrink_dcache_parent(root_dentry); return nilfs_tree_was_touched(root_dentry); } int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno) { struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_root *root; struct inode *inode; struct dentry *dentry; int ret; if (cno < 0 || cno > nilfs->ns_cno) return false; if (cno >= nilfs_last_cno(nilfs)) return true; /* protect recent checkpoints */ ret = false; root = nilfs_lookup_root(nilfs, cno); if (root) { inode = nilfs_ilookup(sb, root, NILFS_ROOT_INO); if (inode) { dentry = d_find_alias(inode); if (dentry) { if (nilfs_tree_was_touched(dentry)) ret = nilfs_try_to_shrink_tree(dentry); dput(dentry); } iput(inode); } nilfs_put_root(root); } return ret; } /** * nilfs_fill_super() - initialize a super block instance * @sb: super_block * @data: mount options * @silent: silent mode flag * * This function is called exclusively by nilfs->ns_mount_mutex. 
* So, the recovery process is protected from other simultaneous mounts. */ static int nilfs_fill_super(struct super_block *sb, void *data, int silent) { struct the_nilfs *nilfs; struct nilfs_root *fsroot; struct backing_dev_info *bdi; __u64 cno; int err; nilfs = alloc_nilfs(sb->s_bdev); if (!nilfs) return -ENOMEM; sb->s_fs_info = nilfs; err = init_nilfs(nilfs, sb, (char *)data); if (err) goto failed_nilfs; sb->s_op = &nilfs_sops; sb->s_export_op = &nilfs_export_ops; sb->s_root = NULL; sb->s_time_gran = 1; sb->s_max_links = NILFS_LINK_MAX; bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info; sb->s_bdi = bdi ? : &default_backing_dev_info; err = load_nilfs(nilfs, sb); if (err) goto failed_nilfs; cno = nilfs_last_cno(nilfs); err = nilfs_attach_checkpoint(sb, cno, true, &fsroot); if (err) { printk(KERN_ERR "NILFS: error loading last checkpoint " "(checkpoint number=%llu).\n", (unsigned long long)cno); goto failed_unload; } if (!(sb->s_flags & MS_RDONLY)) { err = nilfs_attach_log_writer(sb, fsroot); if (err) goto failed_checkpoint; } err = nilfs_get_root_dentry(sb, fsroot, &sb->s_root); if (err) goto failed_segctor; nilfs_put_root(fsroot); if (!(sb->s_flags & MS_RDONLY)) { down_write(&nilfs->ns_sem); nilfs_setup_super(sb, true); up_write(&nilfs->ns_sem); } return 0; failed_segctor: nilfs_detach_log_writer(sb); failed_checkpoint: nilfs_put_root(fsroot); failed_unload: iput(nilfs->ns_sufile); iput(nilfs->ns_cpfile); iput(nilfs->ns_dat); failed_nilfs: destroy_nilfs(nilfs); return err; } static int nilfs_remount(struct super_block *sb, int *flags, char *data) { struct the_nilfs *nilfs = sb->s_fs_info; unsigned long old_sb_flags; unsigned long old_mount_opt; int err; old_sb_flags = sb->s_flags; old_mount_opt = nilfs->ns_mount_opt; if (!parse_options(data, sb, 1)) { err = -EINVAL; goto restore_opts; } sb->s_flags = (sb->s_flags & ~MS_POSIXACL); err = -EINVAL; if (!nilfs_valid_fs(nilfs)) { printk(KERN_WARNING "NILFS (device %s): couldn't " "remount because the filesystem is 
in an " "incomplete recovery state.\n", sb->s_id); goto restore_opts; } if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) goto out; if (*flags & MS_RDONLY) { /* Shutting down log writer */ nilfs_detach_log_writer(sb); sb->s_flags |= MS_RDONLY; /* * Remounting a valid RW partition RDONLY, so set * the RDONLY flag and then mark the partition as valid again. */ down_write(&nilfs->ns_sem); nilfs_cleanup_super(sb); up_write(&nilfs->ns_sem); } else { __u64 features; struct nilfs_root *root; /* * Mounting a RDONLY partition read-write, so reread and * store the current valid flag. (It may have been changed * by fsck since we originally mounted the partition.) */ down_read(&nilfs->ns_sem); features = le64_to_cpu(nilfs->ns_sbp[0]->s_feature_compat_ro) & ~NILFS_FEATURE_COMPAT_RO_SUPP; up_read(&nilfs->ns_sem); if (features) { printk(KERN_WARNING "NILFS (device %s): couldn't " "remount RDWR because of unsupported optional " "features (%llx)\n", sb->s_id, (unsigned long long)features); err = -EROFS; goto restore_opts; } sb->s_flags &= ~MS_RDONLY; root = NILFS_I(sb->s_root->d_inode)->i_root; err = nilfs_attach_log_writer(sb, root); if (err) goto restore_opts; down_write(&nilfs->ns_sem); nilfs_setup_super(sb, true); up_write(&nilfs->ns_sem); } out: return 0; restore_opts: sb->s_flags = old_sb_flags; nilfs->ns_mount_opt = old_mount_opt; return err; } struct nilfs_super_data { struct block_device *bdev; __u64 cno; int flags; }; /** * nilfs_identify - pre-read mount options needed to identify mount instance * @data: mount options * @sd: nilfs_super_data */ static int nilfs_identify(char *data, struct nilfs_super_data *sd) { char *p, *options = data; substring_t args[MAX_OPT_ARGS]; int token; int ret = 0; do { p = strsep(&options, ","); if (p != NULL && *p) { token = match_token(p, tokens, args); if (token == Opt_snapshot) { if (!(sd->flags & MS_RDONLY)) { ret++; } else { sd->cno = simple_strtoull(args[0].from, NULL, 0); /* * No need to see the end pointer; * match_token() has 
done syntax * checking. */ if (sd->cno == 0) ret++; } } if (ret) printk(KERN_ERR "NILFS: invalid mount option: %s\n", p); } if (!options) break; BUG_ON(options == data); *(options - 1) = ','; } while (!ret); return ret; } static int nilfs_set_bdev_super(struct super_block *s, void *data) { s->s_bdev = data; s->s_dev = s->s_bdev->bd_dev; return 0; } static int nilfs_test_bdev_super(struct super_block *s, void *data) { return (void *)s->s_bdev == data; } static struct dentry * nilfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { struct nilfs_super_data sd; struct super_block *s; fmode_t mode = FMODE_READ | FMODE_EXCL; struct dentry *root_dentry; int err, s_new = false; if (!(flags & MS_RDONLY)) mode |= FMODE_WRITE; sd.bdev = blkdev_get_by_path(dev_name, mode, fs_type); if (IS_ERR(sd.bdev)) return ERR_CAST(sd.bdev); sd.cno = 0; sd.flags = flags; if (nilfs_identify((char *)data, &sd)) { err = -EINVAL; goto failed; } /* * once the super is inserted into the list by sget, s_umount * will protect the lockfs code from trying to start a snapshot * while we are mounting */ mutex_lock(&sd.bdev->bd_fsfreeze_mutex); if (sd.bdev->bd_fsfreeze_count > 0) { mutex_unlock(&sd.bdev->bd_fsfreeze_mutex); err = -EBUSY; goto failed; } s = sget(fs_type, nilfs_test_bdev_super, nilfs_set_bdev_super, sd.bdev); mutex_unlock(&sd.bdev->bd_fsfreeze_mutex); if (IS_ERR(s)) { err = PTR_ERR(s); goto failed; } if (!s->s_root) { char b[BDEVNAME_SIZE]; s_new = true; /* New superblock instance created */ s->s_flags = flags; s->s_mode = mode; strlcpy(s->s_id, bdevname(sd.bdev, b), sizeof(s->s_id)); sb_set_blocksize(s, block_size(sd.bdev)); err = nilfs_fill_super(s, data, flags & MS_SILENT ? 
1 : 0); if (err) goto failed_super; s->s_flags |= MS_ACTIVE; } else if (!sd.cno) { int busy = false; if (nilfs_tree_was_touched(s->s_root)) { busy = nilfs_try_to_shrink_tree(s->s_root); if (busy && (flags ^ s->s_flags) & MS_RDONLY) { printk(KERN_ERR "NILFS: the device already " "has a %s mount.\n", (s->s_flags & MS_RDONLY) ? "read-only" : "read/write"); err = -EBUSY; goto failed_super; } } if (!busy) { /* * Try remount to setup mount states if the current * tree is not mounted and only snapshots use this sb. */ err = nilfs_remount(s, &flags, data); if (err) goto failed_super; } } if (sd.cno) { err = nilfs_attach_snapshot(s, sd.cno, &root_dentry); if (err) goto failed_super; } else { root_dentry = dget(s->s_root); } if (!s_new) blkdev_put(sd.bdev, mode); return root_dentry; failed_super: deactivate_locked_super(s); failed: if (!s_new) blkdev_put(sd.bdev, mode); return ERR_PTR(err); } struct file_system_type nilfs_fs_type = { .owner = THIS_MODULE, .name = "nilfs2", .mount = nilfs_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; static void nilfs_inode_init_once(void *obj) { struct nilfs_inode_info *ii = obj; INIT_LIST_HEAD(&ii->i_dirty); #ifdef CONFIG_NILFS_XATTR init_rwsem(&ii->xattr_sem); #endif address_space_init_once(&ii->i_btnode_cache); ii->i_bmap = &ii->i_bmap_data; inode_init_once(&ii->vfs_inode); } static void nilfs_segbuf_init_once(void *obj) { memset(obj, 0, sizeof(struct nilfs_segment_buffer)); } static void nilfs_destroy_cachep(void) { if (nilfs_inode_cachep) kmem_cache_destroy(nilfs_inode_cachep); if (nilfs_transaction_cachep) kmem_cache_destroy(nilfs_transaction_cachep); if (nilfs_segbuf_cachep) kmem_cache_destroy(nilfs_segbuf_cachep); if (nilfs_btree_path_cache) kmem_cache_destroy(nilfs_btree_path_cache); } static int __init nilfs_init_cachep(void) { nilfs_inode_cachep = kmem_cache_create("nilfs2_inode_cache", sizeof(struct nilfs_inode_info), 0, SLAB_RECLAIM_ACCOUNT, nilfs_inode_init_once); if (!nilfs_inode_cachep) goto fail; 
nilfs_transaction_cachep = kmem_cache_create("nilfs2_transaction_cache", sizeof(struct nilfs_transaction_info), 0, SLAB_RECLAIM_ACCOUNT, NULL); if (!nilfs_transaction_cachep) goto fail; nilfs_segbuf_cachep = kmem_cache_create("nilfs2_segbuf_cache", sizeof(struct nilfs_segment_buffer), 0, SLAB_RECLAIM_ACCOUNT, nilfs_segbuf_init_once); if (!nilfs_segbuf_cachep) goto fail; nilfs_btree_path_cache = kmem_cache_create("nilfs2_btree_path_cache", sizeof(struct nilfs_btree_path) * NILFS_BTREE_LEVEL_MAX, 0, 0, NULL); if (!nilfs_btree_path_cache) goto fail; return 0; fail: nilfs_destroy_cachep(); return -ENOMEM; } static int __init init_nilfs_fs(void) { int err; err = nilfs_init_cachep(); if (err) goto fail; err = register_filesystem(&nilfs_fs_type); if (err) goto free_cachep; printk(KERN_INFO "NILFS version 2 loaded\n"); return 0; free_cachep: nilfs_destroy_cachep(); fail: return err; } static void __exit exit_nilfs_fs(void) { nilfs_destroy_cachep(); unregister_filesystem(&nilfs_fs_type); } module_init(init_nilfs_fs) module_exit(exit_nilfs_fs)
gpl-2.0
scanno/android_kernel_oneplus_msm8974
drivers/misc/pm8xxx-upl.c
3502
8407
/* Copyright (c) 2010,2011 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ /* * Qualcomm PM8XXX UPL driver * */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/debugfs.h> #include <linux/slab.h> #include <linux/mfd/pm8xxx/core.h> #include <linux/mfd/pm8xxx/upl.h> /* PMIC8XXX UPL registers */ #define SSBI_REG_UPL_CTRL 0x17B #define SSBI_REG_UPL_TRUTHTABLE1 0x17C #define SSBI_REG_UPL_TRUTHTABLE2 0x17D struct pm8xxx_upl_device { struct device *dev; struct mutex upl_mutex; #if defined(CONFIG_DEBUG_FS) struct dentry *dent; #endif }; static struct pm8xxx_upl_device *upl_dev; /* APIs */ /* * pm8xxx_upl_request - request a handle to access UPL device */ struct pm8xxx_upl_device *pm8xxx_upl_request(void) { return upl_dev; } EXPORT_SYMBOL(pm8xxx_upl_request); /* * pm8xxx_upl_read_truthtable - read value currently stored in UPL truth table * * @upldev: the UPL device * @truthtable: value read from UPL truth table */ int pm8xxx_upl_read_truthtable(struct pm8xxx_upl_device *upldev, u16 *truthtable) { int rc = 0; u8 table[2]; if (upldev == NULL || IS_ERR(upldev)) return -EINVAL; mutex_lock(&upldev->upl_mutex); rc = pm8xxx_readb(upldev->dev->parent, SSBI_REG_UPL_TRUTHTABLE1, &(table[0])); if (rc) { pr_err("%s: FAIL pm8xxx_readb(0x%X)=0x%02X: rc=%d\n", __func__, SSBI_REG_UPL_TRUTHTABLE1, table[0], rc); goto upl_read_done; } rc = pm8xxx_readb(upldev->dev->parent, SSBI_REG_UPL_TRUTHTABLE2, &(table[1])); if (rc) pr_err("%s: FAIL pm8xxx_readb(0x%X)=0x%02X: rc=%d\n", __func__, 
SSBI_REG_UPL_TRUTHTABLE2, table[1], rc); upl_read_done: mutex_unlock(&upldev->upl_mutex); *truthtable = (((u16)table[1]) << 8) | table[0]; return rc; } EXPORT_SYMBOL(pm8xxx_upl_read_truthtable); /* * pm8xxx_upl_writes_truthtable - write value into UPL truth table * * @upldev: the UPL device * @truthtable: value written to UPL truth table * * Each bit in parameter "truthtable" corresponds to the UPL output for a given * set of input pin values. For example, if the input pins have the following * values: A=1, B=1, C=1, D=0, then the UPL would output the value of bit 14 * (0b1110) in parameter "truthtable". */ int pm8xxx_upl_write_truthtable(struct pm8xxx_upl_device *upldev, u16 truthtable) { int rc = 0; u8 table[2]; if (upldev == NULL || IS_ERR(upldev)) return -EINVAL; table[0] = truthtable & 0xFF; table[1] = (truthtable >> 8) & 0xFF; mutex_lock(&upldev->upl_mutex); rc = pm8xxx_writeb(upldev->dev->parent, SSBI_REG_UPL_TRUTHTABLE1, table[0]); if (rc) { pr_err("%s: FAIL pm8xxx_writeb(0x%X)=0x%04X: rc=%d\n", __func__, SSBI_REG_UPL_TRUTHTABLE1, table[0], rc); goto upl_write_done; } rc = pm8xxx_writeb(upldev->dev->parent, SSBI_REG_UPL_TRUTHTABLE2, table[1]); if (rc) pr_err("%s: FAIL pm8xxx_writeb(0x%X)=0x%04X: rc=%d\n", __func__, SSBI_REG_UPL_TRUTHTABLE2, table[1], rc); upl_write_done: mutex_unlock(&upldev->upl_mutex); return rc; } EXPORT_SYMBOL(pm8xxx_upl_write_truthtable); /* * pm8xxx_upl_config - configure UPL I/O settings and UPL enable/disable * * @upldev: the UPL device * @mask: setting mask to configure * @flags: setting flags */ int pm8xxx_upl_config(struct pm8xxx_upl_device *upldev, u32 mask, u32 flags) { int rc; u8 upl_ctrl, m, f; if (upldev == NULL || IS_ERR(upldev)) return -EINVAL; mutex_lock(&upldev->upl_mutex); rc = pm8xxx_readb(upldev->dev->parent, SSBI_REG_UPL_CTRL, &upl_ctrl); if (rc) { pr_err("%s: FAIL pm8xxx_readb(0x%X)=0x%02X: rc=%d\n", __func__, SSBI_REG_UPL_CTRL, upl_ctrl, rc); goto upl_config_done; } m = mask & 0x00ff; f = flags & 0x00ff; upl_ctrl 
&= ~m; upl_ctrl |= m & f; rc = pm8xxx_writeb(upldev->dev->parent, SSBI_REG_UPL_CTRL, upl_ctrl); if (rc) pr_err("%s: FAIL pm8xxx_writeb(0x%X)=0x%02X: rc=%d\n", __func__, SSBI_REG_UPL_CTRL, upl_ctrl, rc); upl_config_done: mutex_unlock(&upldev->upl_mutex); return rc; } EXPORT_SYMBOL(pm8xxx_upl_config); #if defined(CONFIG_DEBUG_FS) static int truthtable_set(void *data, u64 val) { int rc; rc = pm8xxx_upl_write_truthtable(data, val); if (rc) pr_err("%s: pm8xxx_upl_write_truthtable: rc=%d, " "truthtable=0x%llX\n", __func__, rc, val); return rc; } static int truthtable_get(void *data, u64 *val) { int rc; u16 truthtable; rc = pm8xxx_upl_read_truthtable(data, &truthtable); if (rc) pr_err("%s: pm8xxx_upl_read_truthtable: rc=%d, " "truthtable=0x%X\n", __func__, rc, truthtable); if (val) *val = truthtable; return rc; } DEFINE_SIMPLE_ATTRIBUTE(upl_truthtable_fops, truthtable_get, truthtable_set, "0x%04llX\n"); /* enter values as 0xMMMMFFFF where MMMM is the mask and FFFF is the flags */ static int control_set(void *data, u64 val) { u8 mask, flags; int rc; flags = val & 0xFFFF; mask = (val >> 16) & 0xFFFF; rc = pm8xxx_upl_config(data, mask, flags); if (rc) pr_err("%s: pm8xxx_upl_config: rc=%d, mask = 0x%X, " "flags = 0x%X\n", __func__, rc, mask, flags); return rc; } static int control_get(void *data, u64 *val) { struct pm8xxx_upl_device *upldev; int rc = 0; u8 ctrl; upldev = data; mutex_lock(&upldev->upl_mutex); rc = pm8xxx_readb(upldev->dev->parent, SSBI_REG_UPL_CTRL, &ctrl); if (rc) pr_err("%s: FAIL pm8xxx_readb(): rc=%d (ctrl=0x%02X)\n", __func__, rc, ctrl); mutex_unlock(&upldev->upl_mutex); *val = ctrl; return rc; } DEFINE_SIMPLE_ATTRIBUTE(upl_control_fops, control_get, control_set, "0x%02llX\n"); static int pm8xxx_upl_debug_init(struct pm8xxx_upl_device *upldev) { struct dentry *dent; struct dentry *temp; dent = debugfs_create_dir("pm8xxx-upl", NULL); if (dent == NULL || IS_ERR(dent)) { pr_err("%s: ERR debugfs_create_dir: dent=0x%X\n", __func__, (unsigned)dent); return 
-ENOMEM; } temp = debugfs_create_file("truthtable", S_IRUSR | S_IWUSR, dent, upldev, &upl_truthtable_fops); if (temp == NULL || IS_ERR(temp)) { pr_err("%s: ERR debugfs_create_file: dent=0x%X\n", __func__, (unsigned)dent); goto debug_error; } temp = debugfs_create_file("control", S_IRUSR | S_IWUSR, dent, upldev, &upl_control_fops); if (temp == NULL || IS_ERR(temp)) { pr_err("%s: ERR debugfs_create_file: dent=0x%X\n", __func__, (unsigned)dent); goto debug_error; } upldev->dent = dent; return 0; debug_error: debugfs_remove_recursive(dent); return -ENOMEM; } static int __devexit pm8xxx_upl_debug_remove(struct pm8xxx_upl_device *upldev) { debugfs_remove_recursive(upldev->dent); return 0; } #endif /* CONFIG_DEBUG_FS */ static int __devinit pm8xxx_upl_probe(struct platform_device *pdev) { struct pm8xxx_upl_device *upldev; upldev = kzalloc(sizeof *upldev, GFP_KERNEL); if (upldev == NULL) { pr_err("%s: kzalloc() failed.\n", __func__); return -ENOMEM; } mutex_init(&upldev->upl_mutex); upl_dev = upldev; upldev->dev = &pdev->dev; platform_set_drvdata(pdev, upldev); #if defined(CONFIG_DEBUG_FS) pm8xxx_upl_debug_init(upl_dev); #endif pr_notice("%s: OK\n", __func__); return 0; } static int __devexit pm8xxx_upl_remove(struct platform_device *pdev) { struct pm8xxx_upl_device *upldev = platform_get_drvdata(pdev); #if defined(CONFIG_DEBUG_FS) pm8xxx_upl_debug_remove(upldev); #endif platform_set_drvdata(pdev, NULL); kfree(upldev); pr_notice("%s: OK\n", __func__); return 0; } static struct platform_driver pm8xxx_upl_driver = { .probe = pm8xxx_upl_probe, .remove = __devexit_p(pm8xxx_upl_remove), .driver = { .name = PM8XXX_UPL_DEV_NAME, .owner = THIS_MODULE, }, }; static int __init pm8xxx_upl_init(void) { return platform_driver_register(&pm8xxx_upl_driver); } static void __exit pm8xxx_upl_exit(void) { platform_driver_unregister(&pm8xxx_upl_driver); } module_init(pm8xxx_upl_init); module_exit(pm8xxx_upl_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("PM8XXX UPL driver"); 
/* Module metadata: version tag and platform-bus alias for autoloading. */
MODULE_VERSION("1.0");
MODULE_ALIAS("platform:" PM8XXX_UPL_DEV_NAME);
gpl-2.0
VegaDevTeam/android_kernel_pantech_msm8974
arch/x86/kernel/cpu/intel_cacheinfo.c
4526
32777
/* * Routines to indentify caches on Intel CPU. * * Changes: * Venkatesh Pallipadi : Adding cache identification through cpuid(4) * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure. * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD. */ #include <linux/init.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/compiler.h> #include <linux/cpu.h> #include <linux/sched.h> #include <linux/pci.h> #include <asm/processor.h> #include <linux/smp.h> #include <asm/amd_nb.h> #include <asm/smp.h> #define LVL_1_INST 1 #define LVL_1_DATA 2 #define LVL_2 3 #define LVL_3 4 #define LVL_TRACE 5 struct _cache_table { unsigned char descriptor; char cache_type; short size; }; #define MB(x) ((x) * 1024) /* All the cache descriptor types we care about (no TLB or trace cache entries) */ static const struct _cache_table __cpuinitconst cache_table[] = { { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ { 0x09, LVL_1_INST, 32 }, /* 4-way set assoc, 64 byte line size */ { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */ { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */ { 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */ { 0x0e, LVL_1_DATA, 24 }, /* 6-way set assoc, 64 byte line size */ { 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */ { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x23, LVL_3, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x25, LVL_3, MB(2) }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x29, LVL_3, MB(4) }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */ { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */ { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x3a, LVL_2, 192 }, /* 6-way set assoc, sectored 
cache, 64 byte line size */ { 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */ { 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */ { 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x3f, LVL_2, 256 }, /* 2-way set assoc, 64 byte line size */ { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */ { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */ { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */ { 0x44, LVL_2, MB(1) }, /* 4-way set assoc, 32 byte line size */ { 0x45, LVL_2, MB(2) }, /* 4-way set assoc, 32 byte line size */ { 0x46, LVL_3, MB(4) }, /* 4-way set assoc, 64 byte line size */ { 0x47, LVL_3, MB(8) }, /* 8-way set assoc, 64 byte line size */ { 0x48, LVL_2, MB(3) }, /* 12-way set assoc, 64 byte line size */ { 0x49, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */ { 0x4a, LVL_3, MB(6) }, /* 12-way set assoc, 64 byte line size */ { 0x4b, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */ { 0x4c, LVL_3, MB(12) }, /* 12-way set assoc, 64 byte line size */ { 0x4d, LVL_3, MB(16) }, /* 16-way set assoc, 64 byte line size */ { 0x4e, LVL_2, MB(6) }, /* 24-way set assoc, 64 byte line size */ { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x68, LVL_1_DATA, 32 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x70, LVL_TRACE, 12 }, /* 8-way set assoc */ { 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */ { 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */ { 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */ { 0x78, LVL_2, MB(1) }, /* 4-way set assoc, 64 byte line size */ { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x7a, LVL_2, 
256 }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x7b, LVL_2, 512 }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x7c, LVL_2, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x7d, LVL_2, MB(2) }, /* 8-way set assoc, 64 byte line size */ { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */ { 0x80, LVL_2, 512 }, /* 8-way set assoc, 64 byte line size */ { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */ { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */ { 0x84, LVL_2, MB(1) }, /* 8-way set assoc, 32 byte line size */ { 0x85, LVL_2, MB(2) }, /* 8-way set assoc, 32 byte line size */ { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */ { 0x87, LVL_2, MB(1) }, /* 8-way set assoc, 64 byte line size */ { 0xd0, LVL_3, 512 }, /* 4-way set assoc, 64 byte line size */ { 0xd1, LVL_3, MB(1) }, /* 4-way set assoc, 64 byte line size */ { 0xd2, LVL_3, MB(2) }, /* 4-way set assoc, 64 byte line size */ { 0xd6, LVL_3, MB(1) }, /* 8-way set assoc, 64 byte line size */ { 0xd7, LVL_3, MB(2) }, /* 8-way set assoc, 64 byte line size */ { 0xd8, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */ { 0xdc, LVL_3, MB(2) }, /* 12-way set assoc, 64 byte line size */ { 0xdd, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */ { 0xde, LVL_3, MB(8) }, /* 12-way set assoc, 64 byte line size */ { 0xe2, LVL_3, MB(2) }, /* 16-way set assoc, 64 byte line size */ { 0xe3, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */ { 0xe4, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */ { 0xea, LVL_3, MB(12) }, /* 24-way set assoc, 64 byte line size */ { 0xeb, LVL_3, MB(18) }, /* 24-way set assoc, 64 byte line size */ { 0xec, LVL_3, MB(24) }, /* 24-way set assoc, 64 byte line size */ { 0x00, 0, 0} }; enum _cache_type { CACHE_TYPE_NULL = 0, CACHE_TYPE_DATA = 1, CACHE_TYPE_INST = 2, CACHE_TYPE_UNIFIED = 3 }; union _cpuid4_leaf_eax { struct { enum _cache_type type:5; unsigned int level:3; unsigned 
int is_self_initializing:1; unsigned int is_fully_associative:1; unsigned int reserved:4; unsigned int num_threads_sharing:12; unsigned int num_cores_on_die:6; } split; u32 full; }; union _cpuid4_leaf_ebx { struct { unsigned int coherency_line_size:12; unsigned int physical_line_partition:10; unsigned int ways_of_associativity:10; } split; u32 full; }; union _cpuid4_leaf_ecx { struct { unsigned int number_of_sets:32; } split; u32 full; }; struct _cpuid4_info_regs { union _cpuid4_leaf_eax eax; union _cpuid4_leaf_ebx ebx; union _cpuid4_leaf_ecx ecx; unsigned long size; struct amd_northbridge *nb; }; struct _cpuid4_info { struct _cpuid4_info_regs base; DECLARE_BITMAP(shared_cpu_map, NR_CPUS); }; unsigned short num_cache_leaves; /* AMD doesn't have CPUID4. Emulate it here to report the same information to the user. This makes some assumptions about the machine: L2 not shared, no SMT etc. that is currently true on AMD CPUs. In theory the TLBs could be reported as fake type (they are in "dummy"). 
Maybe later */ union l1_cache { struct { unsigned line_size:8; unsigned lines_per_tag:8; unsigned assoc:8; unsigned size_in_kb:8; }; unsigned val; }; union l2_cache { struct { unsigned line_size:8; unsigned lines_per_tag:4; unsigned assoc:4; unsigned size_in_kb:16; }; unsigned val; }; union l3_cache { struct { unsigned line_size:8; unsigned lines_per_tag:4; unsigned assoc:4; unsigned res:2; unsigned size_encoded:14; }; unsigned val; }; static const unsigned short __cpuinitconst assocs[] = { [1] = 1, [2] = 2, [4] = 4, [6] = 8, [8] = 16, [0xa] = 32, [0xb] = 48, [0xc] = 64, [0xd] = 96, [0xe] = 128, [0xf] = 0xffff /* fully associative - no way to show this currently */ }; static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 }; static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 }; static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, union _cpuid4_leaf_ebx *ebx, union _cpuid4_leaf_ecx *ecx) { unsigned dummy; unsigned line_size, lines_per_tag, assoc, size_in_kb; union l1_cache l1i, l1d; union l2_cache l2; union l3_cache l3; union l1_cache *l1 = &l1d; eax->full = 0; ebx->full = 0; ecx->full = 0; cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val); cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val); switch (leaf) { case 1: l1 = &l1i; case 0: if (!l1->val) return; assoc = assocs[l1->assoc]; line_size = l1->line_size; lines_per_tag = l1->lines_per_tag; size_in_kb = l1->size_in_kb; break; case 2: if (!l2.val) return; assoc = assocs[l2.assoc]; line_size = l2.line_size; lines_per_tag = l2.lines_per_tag; /* cpu_data has errata corrections for K7 applied */ size_in_kb = __this_cpu_read(cpu_info.x86_cache_size); break; case 3: if (!l3.val) return; assoc = assocs[l3.assoc]; line_size = l3.line_size; lines_per_tag = l3.lines_per_tag; size_in_kb = l3.size_encoded * 512; if (boot_cpu_has(X86_FEATURE_AMD_DCM)) { size_in_kb = size_in_kb >> 1; assoc = assoc >> 1; } break; default: return; } eax->split.is_self_initializing = 1; 
eax->split.type = types[leaf]; eax->split.level = levels[leaf]; eax->split.num_threads_sharing = 0; eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1; if (assoc == 0xffff) eax->split.is_fully_associative = 1; ebx->split.coherency_line_size = line_size - 1; ebx->split.ways_of_associativity = assoc - 1; ebx->split.physical_line_partition = lines_per_tag - 1; ecx->split.number_of_sets = (size_in_kb * 1024) / line_size / (ebx->split.ways_of_associativity + 1) - 1; } struct _cache_attr { struct attribute attr; ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int); ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count, unsigned int); }; #ifdef CONFIG_AMD_NB /* * L3 cache descriptors */ static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb) { struct amd_l3_cache *l3 = &nb->l3_cache; unsigned int sc0, sc1, sc2, sc3; u32 val = 0; pci_read_config_dword(nb->misc, 0x1C4, &val); /* calculate subcache sizes */ l3->subcaches[0] = sc0 = !(val & BIT(0)); l3->subcaches[1] = sc1 = !(val & BIT(4)); if (boot_cpu_data.x86 == 0x15) { l3->subcaches[0] = sc0 += !(val & BIT(1)); l3->subcaches[1] = sc1 += !(val & BIT(5)); } l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9)); l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13)); l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1; } static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index) { int node; /* only for L3, and not in virtualized environments */ if (index < 3) return; node = amd_get_nb_id(smp_processor_id()); this_leaf->nb = node_to_amd_nb(node); if (this_leaf->nb && !this_leaf->nb->l3_cache.indices) amd_calc_l3_indices(this_leaf->nb); } /* * check whether a slot used for disabling an L3 index is occupied. * @l3: L3 cache descriptor * @slot: slot number (0..1) * * @returns: the disabled index if used or negative value if slot free. 
*/ int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot) { unsigned int reg = 0; pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg); /* check whether this slot is activated already */ if (reg & (3UL << 30)) return reg & 0xfff; return -1; } static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf, unsigned int slot) { int index; if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) return -EINVAL; index = amd_get_l3_disable_slot(this_leaf->base.nb, slot); if (index >= 0) return sprintf(buf, "%d\n", index); return sprintf(buf, "FREE\n"); } #define SHOW_CACHE_DISABLE(slot) \ static ssize_t \ show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf, \ unsigned int cpu) \ { \ return show_cache_disable(this_leaf, buf, slot); \ } SHOW_CACHE_DISABLE(0) SHOW_CACHE_DISABLE(1) static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu, unsigned slot, unsigned long idx) { int i; idx |= BIT(30); /* * disable index in all 4 subcaches */ for (i = 0; i < 4; i++) { u32 reg = idx | (i << 20); if (!nb->l3_cache.subcaches[i]) continue; pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg); /* * We need to WBINVD on a core on the node containing the L3 * cache which indices we disable therefore a simple wbinvd() * is not sufficient. 
*/ wbinvd_on_cpu(cpu); reg |= BIT(31); pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg); } } /* * disable a L3 cache index by using a disable-slot * * @l3: L3 cache descriptor * @cpu: A CPU on the node containing the L3 cache * @slot: slot number (0..1) * @index: index to disable * * @return: 0 on success, error status on failure */ int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot, unsigned long index) { int ret = 0; /* check if @slot is already used or the index is already disabled */ ret = amd_get_l3_disable_slot(nb, slot); if (ret >= 0) return -EEXIST; if (index > nb->l3_cache.indices) return -EINVAL; /* check whether the other slot has disabled the same index already */ if (index == amd_get_l3_disable_slot(nb, !slot)) return -EEXIST; amd_l3_disable_index(nb, cpu, slot, index); return 0; } static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf, size_t count, unsigned int slot) { unsigned long val = 0; int cpu, err = 0; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) return -EINVAL; cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map)); if (strict_strtoul(buf, 10, &val) < 0) return -EINVAL; err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val); if (err) { if (err == -EEXIST) pr_warning("L3 slot %d in use/index already disabled!\n", slot); return err; } return count; } #define STORE_CACHE_DISABLE(slot) \ static ssize_t \ store_cache_disable_##slot(struct _cpuid4_info *this_leaf, \ const char *buf, size_t count, \ unsigned int cpu) \ { \ return store_cache_disable(this_leaf, buf, count, slot); \ } STORE_CACHE_DISABLE(0) STORE_CACHE_DISABLE(1) static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644, show_cache_disable_0, store_cache_disable_0); static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644, show_cache_disable_1, store_cache_disable_1); static ssize_t show_subcaches(struct 
_cpuid4_info *this_leaf, char *buf, unsigned int cpu) { if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) return -EINVAL; return sprintf(buf, "%x\n", amd_get_subcaches(cpu)); } static ssize_t store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count, unsigned int cpu) { unsigned long val; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) return -EINVAL; if (strict_strtoul(buf, 16, &val) < 0) return -EINVAL; if (amd_set_subcaches(cpu, val)) return -EINVAL; return count; } static struct _cache_attr subcaches = __ATTR(subcaches, 0644, show_subcaches, store_subcaches); #else /* CONFIG_AMD_NB */ #define amd_init_l3_cache(x, y) #endif /* CONFIG_AMD_NB */ static int __cpuinit cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf) { union _cpuid4_leaf_eax eax; union _cpuid4_leaf_ebx ebx; union _cpuid4_leaf_ecx ecx; unsigned edx; if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { amd_cpuid4(index, &eax, &ebx, &ecx); amd_init_l3_cache(this_leaf, index); } else { cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx); } if (eax.split.type == CACHE_TYPE_NULL) return -EIO; /* better error ? 
*/ this_leaf->eax = eax; this_leaf->ebx = ebx; this_leaf->ecx = ecx; this_leaf->size = (ecx.split.number_of_sets + 1) * (ebx.split.coherency_line_size + 1) * (ebx.split.physical_line_partition + 1) * (ebx.split.ways_of_associativity + 1); return 0; } static int __cpuinit find_num_cache_leaves(void) { unsigned int eax, ebx, ecx, edx; union _cpuid4_leaf_eax cache_eax; int i = -1; do { ++i; /* Do cpuid(4) loop to find out num_cache_leaves */ cpuid_count(4, i, &eax, &ebx, &ecx, &edx); cache_eax.full = eax; } while (cache_eax.split.type != CACHE_TYPE_NULL); return i; } unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) { /* Cache sizes */ unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb; #ifdef CONFIG_X86_HT unsigned int cpu = c->cpu_index; #endif if (c->cpuid_level > 3) { static int is_initialized; if (is_initialized == 0) { /* Init num_cache_leaves from boot CPU */ num_cache_leaves = find_num_cache_leaves(); is_initialized++; } /* * Whenever possible use cpuid(4), deterministic cache * parameters cpuid leaf to find the cache details */ for (i = 0; i < num_cache_leaves; i++) { struct _cpuid4_info_regs this_leaf; int retval; retval = cpuid4_cache_lookup_regs(i, &this_leaf); if (retval >= 0) { switch (this_leaf.eax.split.level) { case 1: if (this_leaf.eax.split.type == CACHE_TYPE_DATA) new_l1d = this_leaf.size/1024; else if (this_leaf.eax.split.type == CACHE_TYPE_INST) new_l1i = this_leaf.size/1024; break; case 2: new_l2 = this_leaf.size/1024; num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; index_msb = get_count_order(num_threads_sharing); l2_id = c->apicid >> index_msb; break; case 3: new_l3 = this_leaf.size/1024; num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; index_msb = get_count_order( 
num_threads_sharing); l3_id = c->apicid >> index_msb; break; default: break; } } } } /* * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for * trace cache */ if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) { /* supports eax=2 call */ int j, n; unsigned int regs[4]; unsigned char *dp = (unsigned char *)regs; int only_trace = 0; if (num_cache_leaves != 0 && c->x86 == 15) only_trace = 1; /* Number of times to iterate */ n = cpuid_eax(2) & 0xFF; for (i = 0 ; i < n ; i++) { cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]); /* If bit 31 is set, this is an unknown format */ for (j = 0 ; j < 3 ; j++) if (regs[j] & (1 << 31)) regs[j] = 0; /* Byte 0 is level count, not a descriptor */ for (j = 1 ; j < 16 ; j++) { unsigned char des = dp[j]; unsigned char k = 0; /* look up this descriptor in the table */ while (cache_table[k].descriptor != 0) { if (cache_table[k].descriptor == des) { if (only_trace && cache_table[k].cache_type != LVL_TRACE) break; switch (cache_table[k].cache_type) { case LVL_1_INST: l1i += cache_table[k].size; break; case LVL_1_DATA: l1d += cache_table[k].size; break; case LVL_2: l2 += cache_table[k].size; break; case LVL_3: l3 += cache_table[k].size; break; case LVL_TRACE: trace += cache_table[k].size; break; } break; } k++; } } } } if (new_l1d) l1d = new_l1d; if (new_l1i) l1i = new_l1i; if (new_l2) { l2 = new_l2; #ifdef CONFIG_X86_HT per_cpu(cpu_llc_id, cpu) = l2_id; #endif } if (new_l3) { l3 = new_l3; #ifdef CONFIG_X86_HT per_cpu(cpu_llc_id, cpu) = l3_id; #endif } c->x86_cache_size = l3 ? l3 : (l2 ? 
l2 : (l1i+l1d)); return l2; } #ifdef CONFIG_SYSFS /* pointer to _cpuid4_info array (for each cache leaf) */ static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info); #define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y])) #ifdef CONFIG_SMP static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index) { struct _cpuid4_info *this_leaf; int ret, i, sibling; struct cpuinfo_x86 *c = &cpu_data(cpu); ret = 0; if (index == 3) { ret = 1; for_each_cpu(i, cpu_llc_shared_mask(cpu)) { if (!per_cpu(ici_cpuid4_info, i)) continue; this_leaf = CPUID4_INFO_IDX(i, index); for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) { if (!cpu_online(sibling)) continue; set_bit(sibling, this_leaf->shared_cpu_map); } } } else if ((c->x86 == 0x15) && ((index == 1) || (index == 2))) { ret = 1; for_each_cpu(i, cpu_sibling_mask(cpu)) { if (!per_cpu(ici_cpuid4_info, i)) continue; this_leaf = CPUID4_INFO_IDX(i, index); for_each_cpu(sibling, cpu_sibling_mask(cpu)) { if (!cpu_online(sibling)) continue; set_bit(sibling, this_leaf->shared_cpu_map); } } } return ret; } static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) { struct _cpuid4_info *this_leaf, *sibling_leaf; unsigned long num_threads_sharing; int index_msb, i; struct cpuinfo_x86 *c = &cpu_data(cpu); if (c->x86_vendor == X86_VENDOR_AMD) { if (cache_shared_amd_cpu_map_setup(cpu, index)) return; } this_leaf = CPUID4_INFO_IDX(cpu, index); num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing; if (num_threads_sharing == 1) cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map)); else { index_msb = get_count_order(num_threads_sharing); for_each_online_cpu(i) { if (cpu_data(i).apicid >> index_msb == c->apicid >> index_msb) { cpumask_set_cpu(i, to_cpumask(this_leaf->shared_cpu_map)); if (i != cpu && per_cpu(ici_cpuid4_info, i)) { sibling_leaf = CPUID4_INFO_IDX(i, index); cpumask_set_cpu(cpu, to_cpumask( sibling_leaf->shared_cpu_map)); } } } } } static void __cpuinit 
cache_remove_shared_cpu_map(unsigned int cpu, int index) { struct _cpuid4_info *this_leaf, *sibling_leaf; int sibling; this_leaf = CPUID4_INFO_IDX(cpu, index); for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) { sibling_leaf = CPUID4_INFO_IDX(sibling, index); cpumask_clear_cpu(cpu, to_cpumask(sibling_leaf->shared_cpu_map)); } } #else static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) { } static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) { } #endif static void __cpuinit free_cache_attributes(unsigned int cpu) { int i; for (i = 0; i < num_cache_leaves; i++) cache_remove_shared_cpu_map(cpu, i); kfree(per_cpu(ici_cpuid4_info, cpu)); per_cpu(ici_cpuid4_info, cpu) = NULL; } static void __cpuinit get_cpu_leaves(void *_retval) { int j, *retval = _retval, cpu = smp_processor_id(); /* Do cpuid and store the results */ for (j = 0; j < num_cache_leaves; j++) { struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j); *retval = cpuid4_cache_lookup_regs(j, &this_leaf->base); if (unlikely(*retval < 0)) { int i; for (i = 0; i < j; i++) cache_remove_shared_cpu_map(cpu, i); break; } cache_shared_cpu_map_setup(cpu, j); } } static int __cpuinit detect_cache_attributes(unsigned int cpu) { int retval; if (num_cache_leaves == 0) return -ENOENT; per_cpu(ici_cpuid4_info, cpu) = kzalloc( sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL); if (per_cpu(ici_cpuid4_info, cpu) == NULL) return -ENOMEM; smp_call_function_single(cpu, get_cpu_leaves, &retval, true); if (retval) { kfree(per_cpu(ici_cpuid4_info, cpu)); per_cpu(ici_cpuid4_info, cpu) = NULL; } return retval; } #include <linux/kobject.h> #include <linux/sysfs.h> #include <linux/cpu.h> /* pointer to kobject for cpuX/cache */ static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject); struct _index_kobject { struct kobject kobj; unsigned int cpu; unsigned short index; }; /* pointer to array of kobjects for cpuX/cache/indexY */ static DEFINE_PER_CPU(struct 
_index_kobject *, ici_index_kobject); #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y])) #define show_one_plus(file_name, object, val) \ static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \ unsigned int cpu) \ { \ return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \ } show_one_plus(level, base.eax.split.level, 0); show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1); show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1); show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1); show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1); static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu) { return sprintf(buf, "%luK\n", this_leaf->base.size / 1024); } static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf, int type, char *buf) { ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf; int n = 0; if (len > 1) { const struct cpumask *mask; mask = to_cpumask(this_leaf->shared_cpu_map); n = type ? 
cpulist_scnprintf(buf, len-2, mask) : cpumask_scnprintf(buf, len-2, mask); buf[n++] = '\n'; buf[n] = '\0'; } return n; } static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf, unsigned int cpu) { return show_shared_cpu_map_func(leaf, 0, buf); } static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf, unsigned int cpu) { return show_shared_cpu_map_func(leaf, 1, buf); } static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu) { switch (this_leaf->base.eax.split.type) { case CACHE_TYPE_DATA: return sprintf(buf, "Data\n"); case CACHE_TYPE_INST: return sprintf(buf, "Instruction\n"); case CACHE_TYPE_UNIFIED: return sprintf(buf, "Unified\n"); default: return sprintf(buf, "Unknown\n"); } } #define to_object(k) container_of(k, struct _index_kobject, kobj) #define to_attr(a) container_of(a, struct _cache_attr, attr) #define define_one_ro(_name) \ static struct _cache_attr _name = \ __ATTR(_name, 0444, show_##_name, NULL) define_one_ro(level); define_one_ro(type); define_one_ro(coherency_line_size); define_one_ro(physical_line_partition); define_one_ro(ways_of_associativity); define_one_ro(number_of_sets); define_one_ro(size); define_one_ro(shared_cpu_map); define_one_ro(shared_cpu_list); static struct attribute *default_attrs[] = { &type.attr, &level.attr, &coherency_line_size.attr, &physical_line_partition.attr, &ways_of_associativity.attr, &number_of_sets.attr, &size.attr, &shared_cpu_map.attr, &shared_cpu_list.attr, NULL }; #ifdef CONFIG_AMD_NB static struct attribute ** __cpuinit amd_l3_attrs(void) { static struct attribute **attrs; int n; if (attrs) return attrs; n = sizeof (default_attrs) / sizeof (struct attribute *); if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) n += 2; if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) n += 1; attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL); if (attrs == NULL) return attrs = default_attrs; for (n = 0; default_attrs[n]; n++) attrs[n] = 
default_attrs[n]; if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) { attrs[n++] = &cache_disable_0.attr; attrs[n++] = &cache_disable_1.attr; } if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) attrs[n++] = &subcaches.attr; return attrs; } #endif static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) { struct _cache_attr *fattr = to_attr(attr); struct _index_kobject *this_leaf = to_object(kobj); ssize_t ret; ret = fattr->show ? fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), buf, this_leaf->cpu) : 0; return ret; } static ssize_t store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct _cache_attr *fattr = to_attr(attr); struct _index_kobject *this_leaf = to_object(kobj); ssize_t ret; ret = fattr->store ? fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), buf, count, this_leaf->cpu) : 0; return ret; } static const struct sysfs_ops sysfs_ops = { .show = show, .store = store, }; static struct kobj_type ktype_cache = { .sysfs_ops = &sysfs_ops, .default_attrs = default_attrs, }; static struct kobj_type ktype_percpu_entry = { .sysfs_ops = &sysfs_ops, }; static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu) { kfree(per_cpu(ici_cache_kobject, cpu)); kfree(per_cpu(ici_index_kobject, cpu)); per_cpu(ici_cache_kobject, cpu) = NULL; per_cpu(ici_index_kobject, cpu) = NULL; free_cache_attributes(cpu); } static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu) { int err; if (num_cache_leaves == 0) return -ENOENT; err = detect_cache_attributes(cpu); if (err) return err; /* Allocate all required memory */ per_cpu(ici_cache_kobject, cpu) = kzalloc(sizeof(struct kobject), GFP_KERNEL); if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL)) goto err_out; per_cpu(ici_index_kobject, cpu) = kzalloc( sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL); if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL)) goto err_out; return 0; err_out: cpuid4_cache_sysfs_exit(cpu); return -ENOMEM; 
} static DECLARE_BITMAP(cache_dev_map, NR_CPUS); /* Add/Remove cache interface for CPU device */ static int __cpuinit cache_add_dev(struct device *dev) { unsigned int cpu = dev->id; unsigned long i, j; struct _index_kobject *this_object; struct _cpuid4_info *this_leaf; int retval; retval = cpuid4_cache_sysfs_init(cpu); if (unlikely(retval < 0)) return retval; retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu), &ktype_percpu_entry, &dev->kobj, "%s", "cache"); if (retval < 0) { cpuid4_cache_sysfs_exit(cpu); return retval; } for (i = 0; i < num_cache_leaves; i++) { this_object = INDEX_KOBJECT_PTR(cpu, i); this_object->cpu = cpu; this_object->index = i; this_leaf = CPUID4_INFO_IDX(cpu, i); ktype_cache.default_attrs = default_attrs; #ifdef CONFIG_AMD_NB if (this_leaf->base.nb) ktype_cache.default_attrs = amd_l3_attrs(); #endif retval = kobject_init_and_add(&(this_object->kobj), &ktype_cache, per_cpu(ici_cache_kobject, cpu), "index%1lu", i); if (unlikely(retval)) { for (j = 0; j < i; j++) kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj)); kobject_put(per_cpu(ici_cache_kobject, cpu)); cpuid4_cache_sysfs_exit(cpu); return retval; } kobject_uevent(&(this_object->kobj), KOBJ_ADD); } cpumask_set_cpu(cpu, to_cpumask(cache_dev_map)); kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD); return 0; } static void __cpuinit cache_remove_dev(struct device *dev) { unsigned int cpu = dev->id; unsigned long i; if (per_cpu(ici_cpuid4_info, cpu) == NULL) return; if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map))) return; cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map)); for (i = 0; i < num_cache_leaves; i++) kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj)); kobject_put(per_cpu(ici_cache_kobject, cpu)); cpuid4_cache_sysfs_exit(cpu); } static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; struct device *dev; dev = get_cpu_device(cpu); switch (action) { case CPU_ONLINE: case 
CPU_ONLINE_FROZEN: cache_add_dev(dev); break; case CPU_DEAD: case CPU_DEAD_FROZEN: cache_remove_dev(dev); break; } return NOTIFY_OK; } static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = { .notifier_call = cacheinfo_cpu_callback, }; static int __cpuinit cache_sysfs_init(void) { int i; if (num_cache_leaves == 0) return 0; for_each_online_cpu(i) { int err; struct device *dev = get_cpu_device(i); err = cache_add_dev(dev); if (err) return err; } register_hotcpu_notifier(&cacheinfo_cpu_notifier); return 0; } device_initcall(cache_sysfs_init); #endif
gpl-2.0
AKToronto/Bubba-Zombie
drivers/net/wireless/ath/carl9170/fw.c
4782
11285
/* * Atheros CARL9170 driver * * firmware parser * * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, see * http://www.gnu.org/licenses/. */ #include <linux/kernel.h> #include <linux/firmware.h> #include <linux/crc32.h> #include <linux/module.h> #include "carl9170.h" #include "fwcmd.h" #include "version.h" #define MAKE_STR(symbol) #symbol #define TO_STR(symbol) MAKE_STR(symbol) #define CARL9170FW_API_VER_STR TO_STR(CARL9170FW_API_MAX_VER) MODULE_VERSION(CARL9170FW_API_VER_STR ":" CARL9170FW_VERSION_GIT); static const u8 otus_magic[4] = { OTUS_MAGIC }; static const void *carl9170_fw_find_desc(struct ar9170 *ar, const u8 descid[4], const unsigned int len, const u8 compatible_revision) { const struct carl9170fw_desc_head *iter; carl9170fw_for_each_hdr(iter, ar->fw.desc) { if (carl9170fw_desc_cmp(iter, descid, len, compatible_revision)) return (void *)iter; } /* needed to find the LAST desc */ if (carl9170fw_desc_cmp(iter, descid, len, compatible_revision)) return (void *)iter; return NULL; } static int carl9170_fw_verify_descs(struct ar9170 *ar, const struct carl9170fw_desc_head *head, unsigned int max_len) { const struct carl9170fw_desc_head *pos; unsigned long pos_addr, end_addr; unsigned int pos_length; if (max_len < sizeof(*pos)) return -ENODATA; max_len = min_t(unsigned int, CARL9170FW_DESC_MAX_LENGTH, max_len); pos = head; pos_addr = 
(unsigned long) pos; end_addr = pos_addr + max_len; while (pos_addr < end_addr) { if (pos_addr + sizeof(*head) > end_addr) return -E2BIG; pos_length = le16_to_cpu(pos->length); if (pos_length < sizeof(*head)) return -EBADMSG; if (pos_length > max_len) return -EOVERFLOW; if (pos_addr + pos_length > end_addr) return -EMSGSIZE; if (carl9170fw_desc_cmp(pos, LAST_MAGIC, CARL9170FW_LAST_DESC_SIZE, CARL9170FW_LAST_DESC_CUR_VER)) return 0; pos_addr += pos_length; pos = (void *)pos_addr; max_len -= pos_length; } return -EINVAL; } static void carl9170_fw_info(struct ar9170 *ar) { const struct carl9170fw_motd_desc *motd_desc; unsigned int str_ver_len; u32 fw_date; dev_info(&ar->udev->dev, "driver API: %s 2%03d-%02d-%02d [%d-%d]\n", CARL9170FW_VERSION_GIT, CARL9170FW_VERSION_YEAR, CARL9170FW_VERSION_MONTH, CARL9170FW_VERSION_DAY, CARL9170FW_API_MIN_VER, CARL9170FW_API_MAX_VER); motd_desc = carl9170_fw_find_desc(ar, MOTD_MAGIC, sizeof(*motd_desc), CARL9170FW_MOTD_DESC_CUR_VER); if (motd_desc) { str_ver_len = strnlen(motd_desc->release, CARL9170FW_MOTD_RELEASE_LEN); fw_date = le32_to_cpu(motd_desc->fw_year_month_day); dev_info(&ar->udev->dev, "firmware API: %.*s 2%03d-%02d-%02d\n", str_ver_len, motd_desc->release, CARL9170FW_GET_YEAR(fw_date), CARL9170FW_GET_MONTH(fw_date), CARL9170FW_GET_DAY(fw_date)); strlcpy(ar->hw->wiphy->fw_version, motd_desc->release, sizeof(ar->hw->wiphy->fw_version)); } } static bool valid_dma_addr(const u32 address) { if (address >= AR9170_SRAM_OFFSET && address < (AR9170_SRAM_OFFSET + AR9170_SRAM_SIZE)) return true; return false; } static bool valid_cpu_addr(const u32 address) { if (valid_dma_addr(address) || (address >= AR9170_PRAM_OFFSET && address < (AR9170_PRAM_OFFSET + AR9170_PRAM_SIZE))) return true; return false; } static int carl9170_fw_checksum(struct ar9170 *ar, const __u8 *data, size_t len) { const struct carl9170fw_otus_desc *otus_desc; const struct carl9170fw_last_desc *last_desc; const struct carl9170fw_chk_desc *chk_desc; unsigned long 
fin, diff; unsigned int dsc_len; u32 crc32; last_desc = carl9170_fw_find_desc(ar, LAST_MAGIC, sizeof(*last_desc), CARL9170FW_LAST_DESC_CUR_VER); if (!last_desc) return -EINVAL; otus_desc = carl9170_fw_find_desc(ar, OTUS_MAGIC, sizeof(*otus_desc), CARL9170FW_OTUS_DESC_CUR_VER); if (!otus_desc) { dev_err(&ar->udev->dev, "failed to find compatible firmware " "descriptor.\n"); return -ENODATA; } chk_desc = carl9170_fw_find_desc(ar, CHK_MAGIC, sizeof(*chk_desc), CARL9170FW_CHK_DESC_CUR_VER); if (!chk_desc) { dev_warn(&ar->udev->dev, "Unprotected firmware image.\n"); return 0; } dsc_len = min_t(unsigned int, len, (unsigned long)chk_desc - (unsigned long)otus_desc); fin = (unsigned long) last_desc + sizeof(*last_desc); diff = fin - (unsigned long) otus_desc; if (diff < len) len -= diff; if (len < 256) return -EIO; crc32 = crc32_le(~0, data, len); if (cpu_to_le32(crc32) != chk_desc->fw_crc32) { dev_err(&ar->udev->dev, "fw checksum test failed.\n"); return -ENOEXEC; } crc32 = crc32_le(crc32, (void *)otus_desc, dsc_len); if (cpu_to_le32(crc32) != chk_desc->hdr_crc32) { dev_err(&ar->udev->dev, "descriptor check failed.\n"); return -EINVAL; } return 0; } static int carl9170_fw_tx_sequence(struct ar9170 *ar) { const struct carl9170fw_txsq_desc *txsq_desc; txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC, sizeof(*txsq_desc), CARL9170FW_TXSQ_DESC_CUR_VER); if (txsq_desc) { ar->fw.tx_seq_table = le32_to_cpu(txsq_desc->seq_table_addr); if (!valid_cpu_addr(ar->fw.tx_seq_table)) return -EINVAL; } else { ar->fw.tx_seq_table = 0; } return 0; } static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) { const struct carl9170fw_otus_desc *otus_desc; int err; u16 if_comb_types; err = carl9170_fw_checksum(ar, data, len); if (err) return err; otus_desc = carl9170_fw_find_desc(ar, OTUS_MAGIC, sizeof(*otus_desc), CARL9170FW_OTUS_DESC_CUR_VER); if (!otus_desc) { return -ENODATA; } #define SUPP(feat) \ (carl9170fw_supports(otus_desc->feature_set, feat)) if 
(!SUPP(CARL9170FW_DUMMY_FEATURE)) { dev_err(&ar->udev->dev, "invalid firmware descriptor " "format detected.\n"); return -EINVAL; } ar->fw.api_version = otus_desc->api_ver; if (ar->fw.api_version < CARL9170FW_API_MIN_VER || ar->fw.api_version > CARL9170FW_API_MAX_VER) { dev_err(&ar->udev->dev, "unsupported firmware api version.\n"); return -EINVAL; } if (!SUPP(CARL9170FW_COMMAND_PHY) || SUPP(CARL9170FW_UNUSABLE) || !SUPP(CARL9170FW_HANDLE_BACK_REQ)) { dev_err(&ar->udev->dev, "firmware does support " "mandatory features.\n"); return -ECANCELED; } if (ilog2(le32_to_cpu(otus_desc->feature_set)) >= __CARL9170FW_FEATURE_NUM) { dev_warn(&ar->udev->dev, "driver does not support all " "firmware features.\n"); } if (!SUPP(CARL9170FW_COMMAND_CAM)) { dev_info(&ar->udev->dev, "crypto offloading is disabled " "by firmware.\n"); ar->disable_offload = true; } if (SUPP(CARL9170FW_PSM) && SUPP(CARL9170FW_FIXED_5GHZ_PSM)) ar->hw->flags |= IEEE80211_HW_SUPPORTS_PS; if (!SUPP(CARL9170FW_USB_INIT_FIRMWARE)) { dev_err(&ar->udev->dev, "firmware does not provide " "mandatory interfaces.\n"); return -EINVAL; } if (SUPP(CARL9170FW_MINIBOOT)) ar->fw.offset = le16_to_cpu(otus_desc->miniboot_size); else ar->fw.offset = 0; if (SUPP(CARL9170FW_USB_DOWN_STREAM)) { ar->hw->extra_tx_headroom += sizeof(struct ar9170_stream); ar->fw.tx_stream = true; } if (SUPP(CARL9170FW_USB_UP_STREAM)) ar->fw.rx_stream = true; if (SUPP(CARL9170FW_RX_FILTER)) { ar->fw.rx_filter = true; ar->rx_filter_caps = FIF_FCSFAIL | FIF_PLCPFAIL | FIF_CONTROL | FIF_PSPOLL | FIF_OTHER_BSS | FIF_PROMISC_IN_BSS; } if (SUPP(CARL9170FW_HW_COUNTERS)) ar->fw.hw_counters = true; if (SUPP(CARL9170FW_WOL)) device_set_wakeup_enable(&ar->udev->dev, true); if_comb_types = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_P2P_CLIENT); ar->fw.vif_num = otus_desc->vif_num; ar->fw.cmd_bufs = otus_desc->cmd_bufs; ar->fw.address = le32_to_cpu(otus_desc->fw_address); ar->fw.rx_size = le16_to_cpu(otus_desc->rx_max_frame_len); ar->fw.mem_blocks = 
min_t(unsigned int, otus_desc->tx_descs, 0xfe); atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks); ar->fw.mem_block_size = le16_to_cpu(otus_desc->tx_frag_len); if (ar->fw.vif_num >= AR9170_MAX_VIRTUAL_MAC || !ar->fw.vif_num || ar->fw.mem_blocks < 16 || !ar->fw.cmd_bufs || ar->fw.mem_block_size < 64 || ar->fw.mem_block_size > 512 || ar->fw.rx_size > 32768 || ar->fw.rx_size < 4096 || !valid_cpu_addr(ar->fw.address)) { dev_err(&ar->udev->dev, "firmware shows obvious signs of " "malicious tampering.\n"); return -EINVAL; } ar->fw.beacon_addr = le32_to_cpu(otus_desc->bcn_addr); ar->fw.beacon_max_len = le16_to_cpu(otus_desc->bcn_len); if (valid_dma_addr(ar->fw.beacon_addr) && ar->fw.beacon_max_len >= AR9170_MAC_BCN_LENGTH_MAX) { ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); if (SUPP(CARL9170FW_WLANTX_CAB)) { if_comb_types |= BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_GO); } } ar->if_comb_limits[0].max = ar->fw.vif_num; ar->if_comb_limits[0].types = if_comb_types; ar->if_combs[0].num_different_channels = 1; ar->if_combs[0].max_interfaces = ar->fw.vif_num; ar->if_combs[0].limits = ar->if_comb_limits; ar->if_combs[0].n_limits = ARRAY_SIZE(ar->if_comb_limits); ar->hw->wiphy->iface_combinations = ar->if_combs; ar->hw->wiphy->n_iface_combinations = ARRAY_SIZE(ar->if_combs); ar->hw->wiphy->interface_modes |= if_comb_types; #undef SUPPORTED return carl9170_fw_tx_sequence(ar); } static struct carl9170fw_desc_head * carl9170_find_fw_desc(struct ar9170 *ar, const __u8 *fw_data, const size_t len) { int scan = 0, found = 0; if (!carl9170fw_size_check(len)) { dev_err(&ar->udev->dev, "firmware size is out of bound.\n"); return NULL; } while (scan < len - sizeof(struct carl9170fw_desc_head)) { if (fw_data[scan++] == otus_magic[found]) found++; else found = 0; if (scan >= len) break; if (found == sizeof(otus_magic)) break; } if (found != sizeof(otus_magic)) return NULL; return (void *)&fw_data[scan - found]; } int carl9170_parse_firmware(struct ar9170 *ar) { const 
struct carl9170fw_desc_head *fw_desc = NULL; const struct firmware *fw = ar->fw.fw; unsigned long header_offset = 0; int err; if (WARN_ON(!fw)) return -EINVAL; fw_desc = carl9170_find_fw_desc(ar, fw->data, fw->size); if (!fw_desc) { dev_err(&ar->udev->dev, "unsupported firmware.\n"); return -ENODATA; } header_offset = (unsigned long)fw_desc - (unsigned long)fw->data; err = carl9170_fw_verify_descs(ar, fw_desc, fw->size - header_offset); if (err) { dev_err(&ar->udev->dev, "damaged firmware (%d).\n", err); return err; } ar->fw.desc = fw_desc; carl9170_fw_info(ar); err = carl9170_fw(ar, fw->data, fw->size); if (err) { dev_err(&ar->udev->dev, "failed to parse firmware (%d).\n", err); return err; } return 0; }
gpl-2.0
TeamWin/kernel_samsung_lt02ltetmo
drivers/mtd/cmdlinepart.c
5038
10139
/* * Read flash partition table from command line * * Copyright © 2002 SYSGO Real-Time Solutions GmbH * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * The format for the command line is as follows: * * mtdparts=<mtddef>[;<mtddef] * <mtddef> := <mtd-id>:<partdef>[,<partdef>] * where <mtd-id> is the name from the "cat /proc/mtd" command * <partdef> := <size>[@offset][<name>][ro][lk] * <mtd-id> := unique name used in mapping driver/device (mtd->name) * <size> := standard linux memsize OR "-" to denote all remaining space * <name> := '(' NAME ')' * * Examples: * * 1 NOR Flash, with 1 single writable partition: * edb7312-nor:- * * 1 NOR Flash with 2 partitions, 1 NAND with one * edb7312-nor:256k(ARMboot)ro,-(root);edb7312-nand:-(home) */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/bootmem.h> #include <linux/module.h> /* error message prefix */ #define ERRP "mtd: " /* debug macro */ #if 0 #define dbg(x) do { printk("DEBUG-CMDLINE-PART: "); printk x; } while(0) #else #define dbg(x) #endif /* special size referring to all the remaining space in a partition */ #define SIZE_REMAINING UINT_MAX #define OFFSET_CONTINUOUS UINT_MAX struct cmdline_mtd_partition { struct cmdline_mtd_partition 
*next; char *mtd_id; int num_parts; struct mtd_partition *parts; }; /* mtdpart_setup() parses into here */ static struct cmdline_mtd_partition *partitions; /* the command line passed to mtdpart_setupd() */ static char *cmdline; static int cmdline_parsed = 0; /* * Parse one partition definition for an MTD. Since there can be many * comma separated partition definitions, this function calls itself * recursively until no more partition definitions are found. Nice side * effect: the memory to keep the mtd_partition structs and the names * is allocated upon the last definition being found. At that point the * syntax has been verified ok. */ static struct mtd_partition * newpart(char *s, char **retptr, int *num_parts, int this_part, unsigned char **extra_mem_ptr, int extra_mem_size) { struct mtd_partition *parts; unsigned long size; unsigned long offset = OFFSET_CONTINUOUS; char *name; int name_len; unsigned char *extra_mem; char delim; unsigned int mask_flags; /* fetch the partition size */ if (*s == '-') { /* assign all remaining space to this partition */ size = SIZE_REMAINING; s++; } else { size = memparse(s, &s); if (size < PAGE_SIZE) { printk(KERN_ERR ERRP "partition size too small (%lx)\n", size); return NULL; } } /* fetch partition name and flags */ mask_flags = 0; /* this is going to be a regular partition */ delim = 0; /* check for offset */ if (*s == '@') { s++; offset = memparse(s, &s); } /* now look for name */ if (*s == '(') { delim = ')'; } if (delim) { char *p; name = ++s; p = strchr(name, delim); if (!p) { printk(KERN_ERR ERRP "no closing %c found in partition name\n", delim); return NULL; } name_len = p - name; s = p + 1; } else { name = NULL; name_len = 13; /* Partition_000 */ } /* record name length for memory allocation later */ extra_mem_size += name_len + 1; /* test for options */ if (strncmp(s, "ro", 2) == 0) { mask_flags |= MTD_WRITEABLE; s += 2; } /* if lk is found do NOT unlock the MTD partition*/ if (strncmp(s, "lk", 2) == 0) { mask_flags |= 
MTD_POWERUP_LOCK; s += 2; } /* test if more partitions are following */ if (*s == ',') { if (size == SIZE_REMAINING) { printk(KERN_ERR ERRP "no partitions allowed after a fill-up partition\n"); return NULL; } /* more partitions follow, parse them */ parts = newpart(s + 1, &s, num_parts, this_part + 1, &extra_mem, extra_mem_size); if (!parts) return NULL; } else { /* this is the last partition: allocate space for all */ int alloc_size; *num_parts = this_part + 1; alloc_size = *num_parts * sizeof(struct mtd_partition) + extra_mem_size; parts = kzalloc(alloc_size, GFP_KERNEL); if (!parts) return NULL; extra_mem = (unsigned char *)(parts + *num_parts); } /* enter this partition (offset will be calculated later if it is zero at this point) */ parts[this_part].size = size; parts[this_part].offset = offset; parts[this_part].mask_flags = mask_flags; if (name) { strlcpy(extra_mem, name, name_len + 1); } else { sprintf(extra_mem, "Partition_%03d", this_part); } parts[this_part].name = extra_mem; extra_mem += name_len + 1; dbg(("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n", this_part, parts[this_part].name, parts[this_part].offset, parts[this_part].size, parts[this_part].mask_flags)); /* return (updated) pointer to extra_mem memory */ if (extra_mem_ptr) *extra_mem_ptr = extra_mem; /* return (updated) pointer command line string */ *retptr = s; /* return partition table */ return parts; } /* * Parse the command line. */ static int mtdpart_setup_real(char *s) { cmdline_parsed = 1; for( ; s != NULL; ) { struct cmdline_mtd_partition *this_mtd; struct mtd_partition *parts; int mtd_id_len; int num_parts; char *p, *mtd_id; mtd_id = s; /* fetch <mtd-id> */ if (!(p = strchr(s, ':'))) { printk(KERN_ERR ERRP "no mtd-id\n"); return 0; } mtd_id_len = p - mtd_id; dbg(("parsing <%s>\n", p+1)); /* * parse one mtd. have it reserve memory for the * struct cmdline_mtd_partition and the mtd-id string. 
*/ parts = newpart(p + 1, /* cmdline */ &s, /* out: updated cmdline ptr */ &num_parts, /* out: number of parts */ 0, /* first partition */ (unsigned char**)&this_mtd, /* out: extra mem */ mtd_id_len + 1 + sizeof(*this_mtd) + sizeof(void*)-1 /*alignment*/); if(!parts) { /* * An error occurred. We're either: * a) out of memory, or * b) in the middle of the partition spec * Either way, this mtd is hosed and we're * unlikely to succeed in parsing any more */ return 0; } /* align this_mtd */ this_mtd = (struct cmdline_mtd_partition *) ALIGN((unsigned long)this_mtd, sizeof(void*)); /* enter results */ this_mtd->parts = parts; this_mtd->num_parts = num_parts; this_mtd->mtd_id = (char*)(this_mtd + 1); strlcpy(this_mtd->mtd_id, mtd_id, mtd_id_len + 1); /* link into chain */ this_mtd->next = partitions; partitions = this_mtd; dbg(("mtdid=<%s> num_parts=<%d>\n", this_mtd->mtd_id, this_mtd->num_parts)); /* EOS - we're done */ if (*s == 0) break; /* does another spec follow? */ if (*s != ';') { printk(KERN_ERR ERRP "bad character after partition (%c)\n", *s); return 0; } s++; } return 1; } /* * Main function to be called from the MTD mapping driver/device to * obtain the partitioning information. At this point the command line * arguments will actually be parsed and turned to struct mtd_partition * information. It returns partitions for the requested mtd device, or * the first one in the chain if a NULL mtd_id is passed in. 
*/ static int parse_cmdline_partitions(struct mtd_info *master, struct mtd_partition **pparts, struct mtd_part_parser_data *data) { unsigned long offset; int i; struct cmdline_mtd_partition *part; const char *mtd_id = master->name; /* parse command line */ if (!cmdline_parsed) mtdpart_setup_real(cmdline); for(part = partitions; part; part = part->next) { if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id))) { for(i = 0, offset = 0; i < part->num_parts; i++) { if (part->parts[i].offset == OFFSET_CONTINUOUS) part->parts[i].offset = offset; else offset = part->parts[i].offset; if (part->parts[i].size == SIZE_REMAINING) part->parts[i].size = master->size - offset; if (offset + part->parts[i].size > master->size) { printk(KERN_WARNING ERRP "%s: partitioning exceeds flash size, truncating\n", part->mtd_id); part->parts[i].size = master->size - offset; part->num_parts = i; } offset += part->parts[i].size; } *pparts = kmemdup(part->parts, sizeof(*part->parts) * part->num_parts, GFP_KERNEL); if (!*pparts) return -ENOMEM; return part->num_parts; } } return 0; } /* * This is the handler for our kernel parameter, called from * main.c::checksetup(). Note that we can not yet kmalloc() anything, * so we only save the commandline for later processing. * * This function needs to be visible for bootloaders. */ static int mtdpart_setup(char *s) { cmdline = s; return 1; } __setup("mtdparts=", mtdpart_setup); static struct mtd_part_parser cmdline_parser = { .owner = THIS_MODULE, .parse_fn = parse_cmdline_partitions, .name = "cmdlinepart", }; static int __init cmdline_parser_init(void) { return register_mtd_parser(&cmdline_parser); } module_init(cmdline_parser_init); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>"); MODULE_DESCRIPTION("Command line configuration of MTD partitions");
gpl-2.0
Stuxnet-Kernel/stuxnet_cancro
drivers/leds/leds-sunfire.c
5550
6137
/* leds-sunfire.c: SUNW,Ultra-Enterprise LED driver. * * Copyright (C) 2008 David S. Miller <davem@davemloft.net> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/leds.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <asm/fhc.h> #include <asm/upa.h> #define DRIVER_NAME "leds-sunfire" #define PFX DRIVER_NAME ": " MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); MODULE_DESCRIPTION("Sun Fire LED driver"); MODULE_LICENSE("GPL"); struct sunfire_led { struct led_classdev led_cdev; void __iomem *reg; }; #define to_sunfire_led(d) container_of(d, struct sunfire_led, led_cdev) static void __clockboard_set(struct led_classdev *led_cdev, enum led_brightness led_val, u8 bit) { struct sunfire_led *p = to_sunfire_led(led_cdev); u8 reg = upa_readb(p->reg); switch (bit) { case CLOCK_CTRL_LLED: if (led_val) reg &= ~bit; else reg |= bit; break; default: if (led_val) reg |= bit; else reg &= ~bit; break; } upa_writeb(reg, p->reg); } static void clockboard_left_set(struct led_classdev *led_cdev, enum led_brightness led_val) { __clockboard_set(led_cdev, led_val, CLOCK_CTRL_LLED); } static void clockboard_middle_set(struct led_classdev *led_cdev, enum led_brightness led_val) { __clockboard_set(led_cdev, led_val, CLOCK_CTRL_MLED); } static void clockboard_right_set(struct led_classdev *led_cdev, enum led_brightness led_val) { __clockboard_set(led_cdev, led_val, CLOCK_CTRL_RLED); } static void __fhc_set(struct led_classdev *led_cdev, enum led_brightness led_val, u32 bit) { struct sunfire_led *p = to_sunfire_led(led_cdev); u32 reg = upa_readl(p->reg); switch (bit) { case FHC_CONTROL_LLED: if (led_val) reg &= ~bit; else reg |= bit; break; default: if (led_val) reg |= bit; else reg &= ~bit; break; } upa_writel(reg, p->reg); } static void fhc_left_set(struct led_classdev *led_cdev, enum led_brightness led_val) { __fhc_set(led_cdev, led_val, FHC_CONTROL_LLED); } static void fhc_middle_set(struct 
led_classdev *led_cdev, enum led_brightness led_val) { __fhc_set(led_cdev, led_val, FHC_CONTROL_MLED); } static void fhc_right_set(struct led_classdev *led_cdev, enum led_brightness led_val) { __fhc_set(led_cdev, led_val, FHC_CONTROL_RLED); } typedef void (*set_handler)(struct led_classdev *, enum led_brightness); struct led_type { const char *name; set_handler handler; const char *default_trigger; }; #define NUM_LEDS_PER_BOARD 3 struct sunfire_drvdata { struct sunfire_led leds[NUM_LEDS_PER_BOARD]; }; static int __devinit sunfire_led_generic_probe(struct platform_device *pdev, struct led_type *types) { struct sunfire_drvdata *p; int i, err; if (pdev->num_resources != 1) { printk(KERN_ERR PFX "Wrong number of resources %d, should be 1\n", pdev->num_resources); err = -EINVAL; goto out; } p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) { printk(KERN_ERR PFX "Could not allocate struct sunfire_drvdata\n"); err = -ENOMEM; goto out; } for (i = 0; i < NUM_LEDS_PER_BOARD; i++) { struct led_classdev *lp = &p->leds[i].led_cdev; p->leds[i].reg = (void __iomem *) pdev->resource[0].start; lp->name = types[i].name; lp->brightness = LED_FULL; lp->brightness_set = types[i].handler; lp->default_trigger = types[i].default_trigger; err = led_classdev_register(&pdev->dev, lp); if (err) { printk(KERN_ERR PFX "Could not register %s LED\n", lp->name); goto out_unregister_led_cdevs; } } dev_set_drvdata(&pdev->dev, p); return 0; out_unregister_led_cdevs: for (i--; i >= 0; i--) led_classdev_unregister(&p->leds[i].led_cdev); kfree(p); out: return err; } static int __devexit sunfire_led_generic_remove(struct platform_device *pdev) { struct sunfire_drvdata *p = dev_get_drvdata(&pdev->dev); int i; for (i = 0; i < NUM_LEDS_PER_BOARD; i++) led_classdev_unregister(&p->leds[i].led_cdev); kfree(p); return 0; } static struct led_type clockboard_led_types[NUM_LEDS_PER_BOARD] = { { .name = "clockboard-left", .handler = clockboard_left_set, }, { .name = "clockboard-middle", .handler = clockboard_middle_set, 
}, { .name = "clockboard-right", .handler = clockboard_right_set, .default_trigger= "heartbeat", }, }; static int __devinit sunfire_clockboard_led_probe(struct platform_device *pdev) { return sunfire_led_generic_probe(pdev, clockboard_led_types); } static struct led_type fhc_led_types[NUM_LEDS_PER_BOARD] = { { .name = "fhc-left", .handler = fhc_left_set, }, { .name = "fhc-middle", .handler = fhc_middle_set, }, { .name = "fhc-right", .handler = fhc_right_set, .default_trigger= "heartbeat", }, }; static int __devinit sunfire_fhc_led_probe(struct platform_device *pdev) { return sunfire_led_generic_probe(pdev, fhc_led_types); } MODULE_ALIAS("platform:sunfire-clockboard-leds"); MODULE_ALIAS("platform:sunfire-fhc-leds"); static struct platform_driver sunfire_clockboard_led_driver = { .probe = sunfire_clockboard_led_probe, .remove = __devexit_p(sunfire_led_generic_remove), .driver = { .name = "sunfire-clockboard-leds", .owner = THIS_MODULE, }, }; static struct platform_driver sunfire_fhc_led_driver = { .probe = sunfire_fhc_led_probe, .remove = __devexit_p(sunfire_led_generic_remove), .driver = { .name = "sunfire-fhc-leds", .owner = THIS_MODULE, }, }; static int __init sunfire_leds_init(void) { int err = platform_driver_register(&sunfire_clockboard_led_driver); if (err) { printk(KERN_ERR PFX "Could not register clock board LED driver\n"); return err; } err = platform_driver_register(&sunfire_fhc_led_driver); if (err) { printk(KERN_ERR PFX "Could not register FHC LED driver\n"); platform_driver_unregister(&sunfire_clockboard_led_driver); } return err; } static void __exit sunfire_leds_exit(void) { platform_driver_unregister(&sunfire_clockboard_led_driver); platform_driver_unregister(&sunfire_fhc_led_driver); } module_init(sunfire_leds_init); module_exit(sunfire_leds_exit);
gpl-2.0
binkybear/android_kernel_google_msm
arch/alpha/mm/extable.c
13230
2387
/* * linux/arch/alpha/mm/extable.c */ #include <linux/module.h> #include <linux/sort.h> #include <asm/uaccess.h> static inline unsigned long ex_to_addr(const struct exception_table_entry *x) { return (unsigned long)&x->insn + x->insn; } static void swap_ex(void *a, void *b, int size) { struct exception_table_entry *ex_a = a, *ex_b = b; unsigned long addr_a = ex_to_addr(ex_a), addr_b = ex_to_addr(ex_b); unsigned int t = ex_a->fixup.unit; ex_a->fixup.unit = ex_b->fixup.unit; ex_b->fixup.unit = t; ex_a->insn = (int)(addr_b - (unsigned long)&ex_a->insn); ex_b->insn = (int)(addr_a - (unsigned long)&ex_b->insn); } /* * The exception table needs to be sorted so that the binary * search that we use to find entries in it works properly. * This is used both for the kernel exception table and for * the exception tables of modules that get loaded. */ static int cmp_ex(const void *a, const void *b) { const struct exception_table_entry *x = a, *y = b; /* avoid overflow */ if (ex_to_addr(x) > ex_to_addr(y)) return 1; if (ex_to_addr(x) < ex_to_addr(y)) return -1; return 0; } void sort_extable(struct exception_table_entry *start, struct exception_table_entry *finish) { sort(start, finish - start, sizeof(struct exception_table_entry), cmp_ex, swap_ex); } #ifdef CONFIG_MODULES /* * Any entry referring to the module init will be at the beginning or * the end. 
*/ void trim_init_extable(struct module *m) { /*trim the beginning*/ while (m->num_exentries && within_module_init(ex_to_addr(&m->extable[0]), m)) { m->extable++; m->num_exentries--; } /*trim the end*/ while (m->num_exentries && within_module_init(ex_to_addr(&m->extable[m->num_exentries-1]), m)) m->num_exentries--; } #endif /* CONFIG_MODULES */ const struct exception_table_entry * search_extable(const struct exception_table_entry *first, const struct exception_table_entry *last, unsigned long value) { while (first <= last) { const struct exception_table_entry *mid; unsigned long mid_value; mid = (last - first) / 2 + first; mid_value = ex_to_addr(mid); if (mid_value == value) return mid; else if (mid_value < value) first = mid+1; else last = mid-1; } return NULL; }
gpl-2.0
Gandi/ktrill
net/bridge/br_netlink.c
175
23740
/* * Bridge netlink control interface * * Authors: * Stephen Hemminger <shemminger@osdl.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/etherdevice.h> #include <net/rtnetlink.h> #include <net/net_namespace.h> #include <net/sock.h> #include <uapi/linux/if_bridge.h> #include "br_private.h" #include "br_private_stp.h" static int br_get_num_vlan_infos(const struct net_port_vlans *pv, u32 filter_mask) { u16 vid_range_start = 0, vid_range_end = 0; u16 vid_range_flags = 0; u16 pvid, vid, flags; int num_vlans = 0; if (filter_mask & RTEXT_FILTER_BRVLAN) return pv->num_vlans; if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) return 0; /* Count number of vlan info's */ pvid = br_get_pvid(pv); for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) { flags = 0; if (vid == pvid) flags |= BRIDGE_VLAN_INFO_PVID; if (test_bit(vid, pv->untagged_bitmap)) flags |= BRIDGE_VLAN_INFO_UNTAGGED; if (vid_range_start == 0) { goto initvars; } else if ((vid - vid_range_end) == 1 && flags == vid_range_flags) { vid_range_end = vid; continue; } else { if ((vid_range_end - vid_range_start) > 0) num_vlans += 2; else num_vlans += 1; } initvars: vid_range_start = vid; vid_range_end = vid; vid_range_flags = flags; } if (vid_range_start != 0) { if ((vid_range_end - vid_range_start) > 0) num_vlans += 2; else num_vlans += 1; } return num_vlans; } static size_t br_get_link_af_size_filtered(const struct net_device *dev, u32 filter_mask) { struct net_port_vlans *pv; int num_vlan_infos; rcu_read_lock(); if (br_port_exists(dev)) pv = nbp_get_vlan_info(br_port_get_rcu(dev)); else if (dev->priv_flags & IFF_EBRIDGE) pv = br_get_vlan_info((struct net_bridge *)netdev_priv(dev)); else pv = NULL; if (pv) num_vlan_infos = br_get_num_vlan_infos(pv, 
filter_mask); else num_vlan_infos = 0; rcu_read_unlock(); if (!num_vlan_infos) return 0; /* Each VLAN is returned in bridge_vlan_info along with flags */ return num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info)); } static inline size_t br_port_info_size(void) { return nla_total_size(1) /* IFLA_BRPORT_STATE */ + nla_total_size(2) /* IFLA_BRPORT_PRIORITY */ + nla_total_size(4) /* IFLA_BRPORT_COST */ + nla_total_size(1) /* IFLA_BRPORT_MODE */ + nla_total_size(1) /* IFLA_BRPORT_GUARD */ + nla_total_size(1) /* IFLA_BRPORT_PROTECT */ + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */ + nla_total_size(1) /* IFLA_BRPORT_LEARNING */ + nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */ + nla_total_size(1) /* IFLA_BRPORT_PROXYARP */ + nla_total_size(1) /* IFLA_BRPORT_PROXYARP_WIFI */ + 0; } static inline size_t br_nlmsg_size(struct net_device *dev, u32 filter_mask) { return NLMSG_ALIGN(sizeof(struct ifinfomsg)) + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ + nla_total_size(4) /* IFLA_MASTER */ + nla_total_size(4) /* IFLA_MTU */ + nla_total_size(4) /* IFLA_LINK */ + nla_total_size(1) /* IFLA_OPERSTATE */ + nla_total_size(br_port_info_size()) /* IFLA_PROTINFO */ + nla_total_size(br_get_link_af_size_filtered(dev, filter_mask)); /* IFLA_AF_SPEC */ } static int br_port_fill_attrs(struct sk_buff *skb, const struct net_bridge_port *p) { u8 mode = !!(p->flags & BR_HAIRPIN_MODE); if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) || nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) || nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) || nla_put_u8(skb, IFLA_BRPORT_MODE, mode) || nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) || nla_put_u8(skb, IFLA_BRPORT_PROTECT, !!(p->flags & BR_ROOT_BLOCK)) || nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, !!(p->flags & BR_MULTICAST_FAST_LEAVE)) || nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) || nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & 
BR_FLOOD)) || nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) || nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI, !!(p->flags & BR_PROXYARP_WIFI))) return -EMSGSIZE; return 0; } static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start, u16 vid_end, u16 flags) { struct bridge_vlan_info vinfo; if ((vid_end - vid_start) > 0) { /* add range to skb */ vinfo.vid = vid_start; vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_BEGIN; if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO, sizeof(vinfo), &vinfo)) goto nla_put_failure; vinfo.vid = vid_end; vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END; if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO, sizeof(vinfo), &vinfo)) goto nla_put_failure; } else { vinfo.vid = vid_start; vinfo.flags = flags; if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO, sizeof(vinfo), &vinfo)) goto nla_put_failure; } return 0; nla_put_failure: return -EMSGSIZE; } static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb, const struct net_port_vlans *pv) { u16 vid_range_start = 0, vid_range_end = 0; u16 vid_range_flags = 0; u16 pvid, vid, flags; int err = 0; /* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan * and mark vlan info with begin and end flags * if vlaninfo represents a range */ pvid = br_get_pvid(pv); for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) { flags = 0; if (vid == pvid) flags |= BRIDGE_VLAN_INFO_PVID; if (test_bit(vid, pv->untagged_bitmap)) flags |= BRIDGE_VLAN_INFO_UNTAGGED; if (vid_range_start == 0) { goto initvars; } else if ((vid - vid_range_end) == 1 && flags == vid_range_flags) { vid_range_end = vid; continue; } else { err = br_fill_ifvlaninfo_range(skb, vid_range_start, vid_range_end, vid_range_flags); if (err) return err; } initvars: vid_range_start = vid; vid_range_end = vid; vid_range_flags = flags; } if (vid_range_start != 0) { /* Call it once more to send any left over vlans */ err = br_fill_ifvlaninfo_range(skb, vid_range_start, vid_range_end, vid_range_flags); if (err) return err; } return 0; } static int 
br_fill_ifvlaninfo(struct sk_buff *skb, const struct net_port_vlans *pv) { struct bridge_vlan_info vinfo; u16 pvid, vid; pvid = br_get_pvid(pv); for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) { vinfo.vid = vid; vinfo.flags = 0; if (vid == pvid) vinfo.flags |= BRIDGE_VLAN_INFO_PVID; if (test_bit(vid, pv->untagged_bitmap)) vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED; if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO, sizeof(vinfo), &vinfo)) goto nla_put_failure; } return 0; nla_put_failure: return -EMSGSIZE; } /* * Create one netlink message for one interface * Contains port and master info as well as carrier and bridge state. */ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *port, u32 pid, u32 seq, int event, unsigned int flags, u32 filter_mask, const struct net_device *dev) { const struct net_bridge *br; struct ifinfomsg *hdr; struct nlmsghdr *nlh; u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; if (port) br = port->br; else br = netdev_priv(dev); br_debug(br, "br_fill_info event %d port %s master %s\n", event, dev->name, br->dev->name); nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags); if (nlh == NULL) return -EMSGSIZE; hdr = nlmsg_data(nlh); hdr->ifi_family = AF_BRIDGE; hdr->__ifi_pad = 0; hdr->ifi_type = dev->type; hdr->ifi_index = dev->ifindex; hdr->ifi_flags = dev_get_flags(dev); hdr->ifi_change = 0; if (nla_put_string(skb, IFLA_IFNAME, dev->name) || nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) || nla_put_u32(skb, IFLA_MTU, dev->mtu) || nla_put_u8(skb, IFLA_OPERSTATE, operstate) || (dev->addr_len && nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || (dev->ifindex != dev_get_iflink(dev) && nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev)))) goto nla_put_failure; if (event == RTM_NEWLINK && port) { struct nlattr *nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED); if (nest == NULL || br_port_fill_attrs(skb, port) < 0) goto nla_put_failure; nla_nest_end(skb, nest); } /* Check if the VID 
information is requested */ if ((filter_mask & RTEXT_FILTER_BRVLAN) || (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) { const struct net_port_vlans *pv; struct nlattr *af; int err; if (port) pv = nbp_get_vlan_info(port); else pv = br_get_vlan_info(br); if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) goto done; af = nla_nest_start(skb, IFLA_AF_SPEC); if (!af) goto nla_put_failure; if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) err = br_fill_ifvlaninfo_compressed(skb, pv); else err = br_fill_ifvlaninfo(skb, pv); if (err) goto nla_put_failure; nla_nest_end(skb, af); } done: nlmsg_end(skb, nlh); return 0; nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } /* * Notify listeners of a change in port information */ void br_ifinfo_notify(int event, struct net_bridge_port *port) { struct net *net; struct sk_buff *skb; int err = -ENOBUFS; u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED; if (!port) return; net = dev_net(port->dev); br_debug(port->br, "port %u(%s) event %d\n", (unsigned int)port->port_no, port->dev->name, event); skb = nlmsg_new(br_nlmsg_size(port->dev, filter), GFP_ATOMIC); if (skb == NULL) goto errout; err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, port->dev); if (err < 0) { /* -EMSGSIZE implies BUG in br_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); return; errout: rtnl_set_sk_err(net, RTNLGRP_LINK, err); } /* * Dump information about all ports, in response to GETLINK */ int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, u32 filter_mask, int nlflags) { struct net_bridge_port *port = br_port_get_rtnl(dev); if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) && !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) return 0; return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags, filter_mask, dev); } static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p, int cmd, struct bridge_vlan_info *vinfo) { int err 
= 0; switch (cmd) { case RTM_SETLINK: if (p) { err = nbp_vlan_add(p, vinfo->vid, vinfo->flags); if (err) break; if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER) err = br_vlan_add(p->br, vinfo->vid, vinfo->flags); } else { err = br_vlan_add(br, vinfo->vid, vinfo->flags); } break; case RTM_DELLINK: if (p) { nbp_vlan_delete(p, vinfo->vid); if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER) br_vlan_delete(p->br, vinfo->vid); } else { br_vlan_delete(br, vinfo->vid); } break; } return err; } static int br_afspec(struct net_bridge *br, struct net_bridge_port *p, struct nlattr *af_spec, int cmd) { struct bridge_vlan_info *vinfo_start = NULL; struct bridge_vlan_info *vinfo = NULL; struct nlattr *attr; int err = 0; int rem; nla_for_each_nested(attr, af_spec, rem) { if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO) continue; if (nla_len(attr) != sizeof(struct bridge_vlan_info)) return -EINVAL; vinfo = nla_data(attr); if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK) return -EINVAL; if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) { if (vinfo_start) return -EINVAL; vinfo_start = vinfo; continue; } if (vinfo_start) { struct bridge_vlan_info tmp_vinfo; int v; if (!(vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END)) return -EINVAL; if (vinfo->vid <= vinfo_start->vid) return -EINVAL; memcpy(&tmp_vinfo, vinfo_start, sizeof(struct bridge_vlan_info)); for (v = vinfo_start->vid; v <= vinfo->vid; v++) { tmp_vinfo.vid = v; err = br_vlan_info(br, p, cmd, &tmp_vinfo); if (err) break; } vinfo_start = NULL; } else { err = br_vlan_info(br, p, cmd, vinfo); } if (err) break; } return err; } static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = { [IFLA_BRPORT_STATE] = { .type = NLA_U8 }, [IFLA_BRPORT_COST] = { .type = NLA_U32 }, [IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 }, [IFLA_BRPORT_MODE] = { .type = NLA_U8 }, [IFLA_BRPORT_GUARD] = { .type = NLA_U8 }, [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 }, [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 }, [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 }, 
[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 }, [IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 }, [IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 }, }; /* Change the state of the port and notify spanning tree */ static int br_set_port_state(struct net_bridge_port *p, u8 state) { if (state > BR_STATE_BLOCKING) return -EINVAL; /* if kernel STP is running, don't allow changes */ if (p->br->stp_enabled == BR_KERNEL_STP) return -EBUSY; /* if device is not up, change is not allowed * if link is not present, only allowable state is disabled */ if (!netif_running(p->dev) || (!netif_oper_up(p->dev) && state != BR_STATE_DISABLED)) return -ENETDOWN; br_set_state(p, state); br_log_state(p); br_port_state_selection(p->br); return 0; } /* Set/clear or port flags based on attribute */ static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[], int attrtype, unsigned long mask) { if (tb[attrtype]) { u8 flag = nla_get_u8(tb[attrtype]); if (flag) p->flags |= mask; else p->flags &= ~mask; } } /* Process bridge protocol info on port */ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[]) { int err; unsigned long old_flags = p->flags; br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE); br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD); br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE); br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK); br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING); br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD); br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP); br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI); if (tb[IFLA_BRPORT_COST]) { err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST])); if (err) return err; } if (tb[IFLA_BRPORT_PRIORITY]) { err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY])); if (err) return err; } if (tb[IFLA_BRPORT_STATE]) { err = br_set_port_state(p, 
nla_get_u8(tb[IFLA_BRPORT_STATE])); if (err) return err; } br_port_flags_change(p, old_flags ^ p->flags); return 0; } /* Change state and parameters on port. */ int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags) { struct nlattr *protinfo; struct nlattr *afspec; struct net_bridge_port *p; struct nlattr *tb[IFLA_BRPORT_MAX + 1]; int err = 0; protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO); afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); if (!protinfo && !afspec) return 0; p = br_port_get_rtnl(dev); /* We want to accept dev as bridge itself if the AF_SPEC * is set to see if someone is setting vlan info on the bridge */ if (!p && !afspec) return -EINVAL; if (p && protinfo) { if (protinfo->nla_type & NLA_F_NESTED) { err = nla_parse_nested(tb, IFLA_BRPORT_MAX, protinfo, br_port_policy); if (err) return err; spin_lock_bh(&p->br->lock); err = br_setport(p, tb); spin_unlock_bh(&p->br->lock); } else { /* Binary compatibility with old RSTP */ if (nla_len(protinfo) < sizeof(u8)) return -EINVAL; spin_lock_bh(&p->br->lock); err = br_set_port_state(p, nla_get_u8(protinfo)); spin_unlock_bh(&p->br->lock); } if (err) goto out; } if (afspec) { err = br_afspec((struct net_bridge *)netdev_priv(dev), p, afspec, RTM_SETLINK); } if (err == 0) br_ifinfo_notify(RTM_NEWLINK, p); out: return err; } /* Delete port information */ int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags) { struct nlattr *afspec; struct net_bridge_port *p; int err = 0; afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); if (!afspec) return 0; p = br_port_get_rtnl(dev); /* We want to accept dev as bridge itself as well */ if (!p && !(dev->priv_flags & IFF_EBRIDGE)) return -EINVAL; err = br_afspec((struct net_bridge *)netdev_priv(dev), p, afspec, RTM_DELLINK); if (err == 0) /* Send RTM_NEWLINK because userspace * expects RTM_NEWLINK for vlan dels */ br_ifinfo_notify(RTM_NEWLINK, p); return err; } static int 
br_validate(struct nlattr *tb[], struct nlattr *data[]) { if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) return -EINVAL; if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) return -EADDRNOTAVAIL; } if (!data) return 0; #ifdef CONFIG_BRIDGE_VLAN_FILTERING if (data[IFLA_BR_VLAN_PROTOCOL]) { switch (nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL])) { case htons(ETH_P_8021Q): case htons(ETH_P_8021AD): break; default: return -EPROTONOSUPPORT; } } #endif return 0; } static int br_dev_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct net_bridge *br = netdev_priv(dev); if (tb[IFLA_ADDRESS]) { spin_lock_bh(&br->lock); br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS])); spin_unlock_bh(&br->lock); } return register_netdevice(dev); } static int br_port_slave_changelink(struct net_device *brdev, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct net_bridge *br = netdev_priv(brdev); int ret; if (!data) return 0; spin_lock_bh(&br->lock); ret = br_setport(br_port_get_rtnl(dev), data); spin_unlock_bh(&br->lock); return ret; } static int br_port_fill_slave_info(struct sk_buff *skb, const struct net_device *brdev, const struct net_device *dev) { return br_port_fill_attrs(skb, br_port_get_rtnl(dev)); } static size_t br_port_get_slave_size(const struct net_device *brdev, const struct net_device *dev) { return br_port_info_size(); } static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = { [IFLA_BR_FORWARD_DELAY] = { .type = NLA_U32 }, [IFLA_BR_HELLO_TIME] = { .type = NLA_U32 }, [IFLA_BR_MAX_AGE] = { .type = NLA_U32 }, [IFLA_BR_AGEING_TIME] = { .type = NLA_U32 }, [IFLA_BR_STP_STATE] = { .type = NLA_U32 }, [IFLA_BR_PRIORITY] = { .type = NLA_U16 }, [IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 }, [IFLA_BR_VLAN_PROTOCOL] = { .type = NLA_U16 }, }; static int br_changelink(struct net_device *brdev, struct nlattr *tb[], struct nlattr *data[]) { struct net_bridge *br = netdev_priv(brdev); 
int err; if (!data) return 0; if (data[IFLA_BR_FORWARD_DELAY]) { err = br_set_forward_delay(br, nla_get_u32(data[IFLA_BR_FORWARD_DELAY])); if (err) return err; } if (data[IFLA_BR_HELLO_TIME]) { err = br_set_hello_time(br, nla_get_u32(data[IFLA_BR_HELLO_TIME])); if (err) return err; } if (data[IFLA_BR_MAX_AGE]) { err = br_set_max_age(br, nla_get_u32(data[IFLA_BR_MAX_AGE])); if (err) return err; } if (data[IFLA_BR_AGEING_TIME]) { u32 ageing_time = nla_get_u32(data[IFLA_BR_AGEING_TIME]); br->ageing_time = clock_t_to_jiffies(ageing_time); } if (data[IFLA_BR_STP_STATE]) { u32 stp_enabled = nla_get_u32(data[IFLA_BR_STP_STATE]); br_stp_set_enabled(br, stp_enabled); } if (data[IFLA_BR_PRIORITY]) { u32 priority = nla_get_u16(data[IFLA_BR_PRIORITY]); br_stp_set_bridge_priority(br, priority); } if (data[IFLA_BR_VLAN_FILTERING]) { u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]); err = __br_vlan_filter_toggle(br, vlan_filter); if (err) return err; } #ifdef CONFIG_BRIDGE_VLAN_FILTERING if (data[IFLA_BR_VLAN_PROTOCOL]) { __be16 vlan_proto = nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL]); err = __br_vlan_set_proto(br, vlan_proto); if (err) return err; } #endif return 0; } static size_t br_get_size(const struct net_device *brdev) { return nla_total_size(sizeof(u32)) + /* IFLA_BR_FORWARD_DELAY */ nla_total_size(sizeof(u32)) + /* IFLA_BR_HELLO_TIME */ nla_total_size(sizeof(u32)) + /* IFLA_BR_MAX_AGE */ nla_total_size(sizeof(u32)) + /* IFLA_BR_AGEING_TIME */ nla_total_size(sizeof(u32)) + /* IFLA_BR_STP_STATE */ nla_total_size(sizeof(u16)) + /* IFLA_BR_PRIORITY */ nla_total_size(sizeof(u8)) + /* IFLA_BR_VLAN_FILTERING */ #ifdef CONFIG_BRIDGE_VLAN_FILTERING nla_total_size(sizeof(__be16)) + /* IFLA_BR_VLAN_PROTOCOL */ #endif 0; } static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev) { struct net_bridge *br = netdev_priv(brdev); u32 forward_delay = jiffies_to_clock_t(br->forward_delay); u32 hello_time = jiffies_to_clock_t(br->hello_time); u32 age_time = 
jiffies_to_clock_t(br->max_age); u32 ageing_time = jiffies_to_clock_t(br->ageing_time); u32 stp_enabled = br->stp_enabled; u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1]; u8 vlan_enabled = br_vlan_enabled(br); if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) || nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) || nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) || nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) || nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) || nla_put_u16(skb, IFLA_BR_PRIORITY, priority) || nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled)) return -EMSGSIZE; #ifdef CONFIG_BRIDGE_VLAN_FILTERING if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto)) return -EMSGSIZE; #endif return 0; } static size_t br_get_link_af_size(const struct net_device *dev) { struct net_port_vlans *pv; if (br_port_exists(dev)) pv = nbp_get_vlan_info(br_port_get_rtnl(dev)); else if (dev->priv_flags & IFF_EBRIDGE) pv = br_get_vlan_info((struct net_bridge *)netdev_priv(dev)); else return 0; if (!pv) return 0; /* Each VLAN is returned in bridge_vlan_info along with flags */ return pv->num_vlans * nla_total_size(sizeof(struct bridge_vlan_info)); } static struct rtnl_af_ops br_af_ops __read_mostly = { .family = AF_BRIDGE, .get_link_af_size = br_get_link_af_size, }; struct rtnl_link_ops br_link_ops __read_mostly = { .kind = "bridge", .priv_size = sizeof(struct net_bridge), .setup = br_dev_setup, .maxtype = IFLA_BR_MAX, .policy = br_policy, .validate = br_validate, .newlink = br_dev_newlink, .changelink = br_changelink, .dellink = br_dev_delete, .get_size = br_get_size, .fill_info = br_fill_info, .slave_maxtype = IFLA_BRPORT_MAX, .slave_policy = br_port_policy, .slave_changelink = br_port_slave_changelink, .get_slave_size = br_port_get_slave_size, .fill_slave_info = br_port_fill_slave_info, }; int __init br_netlink_init(void) { int err; br_mdb_init(); rtnl_af_register(&br_af_ops); err = rtnl_link_register(&br_link_ops); if (err) goto 
out_af; return 0; out_af: rtnl_af_unregister(&br_af_ops); br_mdb_uninit(); return err; } void br_netlink_fini(void) { br_mdb_uninit(); rtnl_af_unregister(&br_af_ops); rtnl_link_unregister(&br_link_ops); }
gpl-2.0
volk3/CS736
drivers/pinctrl/berlin/berlin-bg2cd.c
431
7698
/* * Marvell Berlin BG2CD pinctrl driver. * * Copyright (C) 2014 Marvell Technology Group Ltd. * * Antoine Ténart <antoine.tenart@free-electrons.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/module.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include "berlin.h" static const struct berlin_desc_group berlin2cd_soc_pinctrl_groups[] = { /* G */ BERLIN_PINCTRL_GROUP("G0", 0x00, 0x1, 0x00, BERLIN_PINCTRL_FUNCTION(0x0, "jtag"), BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), BERLIN_PINCTRL_FUNCTION(0x2, "led"), BERLIN_PINCTRL_FUNCTION(0x3, "pwm")), BERLIN_PINCTRL_GROUP("G1", 0x00, 0x2, 0x01, BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), BERLIN_PINCTRL_FUNCTION(0x6, "usb0_dbg"), BERLIN_PINCTRL_FUNCTION(0x7, "usb1_dbg")), BERLIN_PINCTRL_GROUP("G2", 0x00, 0x2, 0x02, BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), BERLIN_PINCTRL_FUNCTION(0x2, "fe"), BERLIN_PINCTRL_FUNCTION(0x3, "pll"), BERLIN_PINCTRL_FUNCTION(0x6, "usb0_dbg"), BERLIN_PINCTRL_FUNCTION(0x7, "usb1_dbg")), BERLIN_PINCTRL_GROUP("G3", 0x00, 0x2, 0x04, BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), BERLIN_PINCTRL_FUNCTION(0x2, "twsi2"), BERLIN_PINCTRL_FUNCTION(0x3, "pll"), BERLIN_PINCTRL_FUNCTION(0x4, "fe"), BERLIN_PINCTRL_FUNCTION(0x6, "usb0_dbg"), BERLIN_PINCTRL_FUNCTION(0x7, "usb1_dbg")), BERLIN_PINCTRL_GROUP("G4", 0x00, 0x2, 0x06, BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), BERLIN_PINCTRL_FUNCTION(0x2, "twsi3"), BERLIN_PINCTRL_FUNCTION(0x3, "pll"), BERLIN_PINCTRL_FUNCTION(0x4, "pwm"), BERLIN_PINCTRL_FUNCTION(0x6, "usb0_dbg"), BERLIN_PINCTRL_FUNCTION(0x7, "usb1_dbg")), BERLIN_PINCTRL_GROUP("G5", 0x00, 0x3, 0x08, BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), 
BERLIN_PINCTRL_FUNCTION(0x2, "twsi3"), BERLIN_PINCTRL_FUNCTION(0x3, "arc"), BERLIN_PINCTRL_FUNCTION(0x4, "pwm"), BERLIN_PINCTRL_FUNCTION(0x6, "usb0_dbg"), BERLIN_PINCTRL_FUNCTION(0x7, "usb1_dbg")), BERLIN_PINCTRL_GROUP("G6", 0x00, 0x2, 0x0b, BERLIN_PINCTRL_FUNCTION(0x0, "uart0"), /* RX/TX */ BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), BERLIN_PINCTRL_GROUP("G7", 0x00, 0x3, 0x0d, BERLIN_PINCTRL_FUNCTION(0x0, "eddc"), BERLIN_PINCTRL_FUNCTION(0x1, "twsi1"), BERLIN_PINCTRL_FUNCTION(0x2, "gpio")), BERLIN_PINCTRL_GROUP("G8", 0x00, 0x3, 0x10, BERLIN_PINCTRL_FUNCTION(0x0, "ss0"), BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), BERLIN_PINCTRL_GROUP("G9", 0x00, 0x3, 0x13, BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), BERLIN_PINCTRL_FUNCTION(0x1, "spi1"), BERLIN_PINCTRL_FUNCTION(0x2, "twsi0")), BERLIN_PINCTRL_GROUP("G10", 0x00, 0x2, 0x16, BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), BERLIN_PINCTRL_GROUP("G11", 0x00, 0x2, 0x18, BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), BERLIN_PINCTRL_GROUP("G12", 0x00, 0x3, 0x1a, BERLIN_PINCTRL_FUNCTION(0x0, "usb1"), BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), BERLIN_PINCTRL_GROUP("G13", 0x04, 0x3, 0x00, BERLIN_PINCTRL_FUNCTION(0x0, "nand"), BERLIN_PINCTRL_FUNCTION(0x1, "usb0_dbg"), BERLIN_PINCTRL_FUNCTION(0x2, "usb1_dbg")), BERLIN_PINCTRL_GROUP("G14", 0x04, 0x1, 0x03, BERLIN_PINCTRL_FUNCTION(0x0, "nand"), BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), BERLIN_PINCTRL_GROUP("G15", 0x04, 0x2, 0x04, BERLIN_PINCTRL_FUNCTION(0x0, "jtag"), BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), BERLIN_PINCTRL_GROUP("G16", 0x04, 0x3, 0x06, BERLIN_PINCTRL_FUNCTION_UNKNOWN), BERLIN_PINCTRL_GROUP("G17", 0x04, 0x3, 0x09, BERLIN_PINCTRL_FUNCTION_UNKNOWN), BERLIN_PINCTRL_GROUP("G18", 0x04, 0x1, 0x0c, BERLIN_PINCTRL_FUNCTION_UNKNOWN), BERLIN_PINCTRL_GROUP("G19", 0x04, 0x1, 0x0d, BERLIN_PINCTRL_FUNCTION_UNKNOWN), BERLIN_PINCTRL_GROUP("G20", 0x04, 0x1, 0x0e, BERLIN_PINCTRL_FUNCTION_UNKNOWN), BERLIN_PINCTRL_GROUP("G21", 0x04, 0x3, 0x0f, 
BERLIN_PINCTRL_FUNCTION_UNKNOWN), BERLIN_PINCTRL_GROUP("G22", 0x04, 0x3, 0x12, BERLIN_PINCTRL_FUNCTION_UNKNOWN), BERLIN_PINCTRL_GROUP("G23", 0x04, 0x3, 0x15, BERLIN_PINCTRL_FUNCTION_UNKNOWN), BERLIN_PINCTRL_GROUP("G24", 0x04, 0x2, 0x18, BERLIN_PINCTRL_FUNCTION_UNKNOWN), BERLIN_PINCTRL_GROUP("G25", 0x04, 0x2, 0x1a, BERLIN_PINCTRL_FUNCTION_UNKNOWN), BERLIN_PINCTRL_GROUP("G26", 0x04, 0x1, 0x1c, BERLIN_PINCTRL_FUNCTION_UNKNOWN), BERLIN_PINCTRL_GROUP("G27", 0x04, 0x1, 0x1d, BERLIN_PINCTRL_FUNCTION_UNKNOWN), BERLIN_PINCTRL_GROUP("G28", 0x04, 0x2, 0x1e, BERLIN_PINCTRL_FUNCTION_UNKNOWN), }; static const struct berlin_desc_group berlin2cd_sysmgr_pinctrl_groups[] = { /* GSM */ BERLIN_PINCTRL_GROUP("GSM0", 0x40, 0x2, 0x00, BERLIN_PINCTRL_FUNCTION_UNKNOWN), BERLIN_PINCTRL_GROUP("GSM1", 0x40, 0x2, 0x02, BERLIN_PINCTRL_FUNCTION_UNKNOWN), BERLIN_PINCTRL_GROUP("GSM2", 0x40, 0x2, 0x04, BERLIN_PINCTRL_FUNCTION_UNKNOWN), BERLIN_PINCTRL_GROUP("GSM3", 0x40, 0x2, 0x06, BERLIN_PINCTRL_FUNCTION_UNKNOWN), BERLIN_PINCTRL_GROUP("GSM4", 0x40, 0x2, 0x08, BERLIN_PINCTRL_FUNCTION_UNKNOWN), BERLIN_PINCTRL_GROUP("GSM5", 0x40, 0x2, 0x0a, BERLIN_PINCTRL_FUNCTION_UNKNOWN), BERLIN_PINCTRL_GROUP("GSM6", 0x40, 0x2, 0x0c, BERLIN_PINCTRL_FUNCTION_UNKNOWN), BERLIN_PINCTRL_GROUP("GSM7", 0x40, 0x1, 0x0e, BERLIN_PINCTRL_FUNCTION_UNKNOWN), BERLIN_PINCTRL_GROUP("GSM8", 0x40, 0x1, 0x0f, BERLIN_PINCTRL_FUNCTION_UNKNOWN), BERLIN_PINCTRL_GROUP("GSM9", 0x40, 0x1, 0x10, BERLIN_PINCTRL_FUNCTION_UNKNOWN), BERLIN_PINCTRL_GROUP("GSM10", 0x40, 0x1, 0x11, BERLIN_PINCTRL_FUNCTION_UNKNOWN), BERLIN_PINCTRL_GROUP("GSM11", 0x40, 0x1, 0x12, BERLIN_PINCTRL_FUNCTION_UNKNOWN), }; static const struct berlin_pinctrl_desc berlin2cd_soc_pinctrl_data = { .groups = berlin2cd_soc_pinctrl_groups, .ngroups = ARRAY_SIZE(berlin2cd_soc_pinctrl_groups), }; static const struct berlin_pinctrl_desc berlin2cd_sysmgr_pinctrl_data = { .groups = berlin2cd_sysmgr_pinctrl_groups, .ngroups = ARRAY_SIZE(berlin2cd_sysmgr_pinctrl_groups), }; static const 
struct of_device_id berlin2cd_pinctrl_match[] = { { .compatible = "marvell,berlin2cd-chip-ctrl", .data = &berlin2cd_soc_pinctrl_data }, { .compatible = "marvell,berlin2cd-system-ctrl", .data = &berlin2cd_sysmgr_pinctrl_data }, {} }; MODULE_DEVICE_TABLE(of, berlin2cd_pinctrl_match); static int berlin2cd_pinctrl_probe(struct platform_device *pdev) { const struct of_device_id *match = of_match_device(berlin2cd_pinctrl_match, &pdev->dev); struct regmap_config *rmconfig; struct regmap *regmap; struct resource *res; void __iomem *base; rmconfig = devm_kzalloc(&pdev->dev, sizeof(*rmconfig), GFP_KERNEL); if (!rmconfig) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) return PTR_ERR(base); rmconfig->reg_bits = 32, rmconfig->val_bits = 32, rmconfig->reg_stride = 4, rmconfig->max_register = resource_size(res); regmap = devm_regmap_init_mmio(&pdev->dev, base, rmconfig); if (IS_ERR(regmap)) return PTR_ERR(regmap); return berlin_pinctrl_probe(pdev, match->data); } static struct platform_driver berlin2cd_pinctrl_driver = { .probe = berlin2cd_pinctrl_probe, .driver = { .name = "berlin-bg2cd-pinctrl", .of_match_table = berlin2cd_pinctrl_match, }, }; module_platform_driver(berlin2cd_pinctrl_driver); MODULE_AUTHOR("Antoine Ténart <antoine.tenart@free-electrons.com>"); MODULE_DESCRIPTION("Marvell Berlin BG2CD pinctrl driver"); MODULE_LICENSE("GPL");
gpl-2.0
G5Devs/android_kernel_lge_msm8996
drivers/scsi/fcoe/fcoe_transport.c
943
27175
/* * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * Maintained at www.Open-FCoE.org */ #include <linux/types.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/netdevice.h> #include <linux/errno.h> #include <linux/crc32.h> #include <scsi/libfcoe.h> #include "libfcoe.h" MODULE_AUTHOR("Open-FCoE.org"); MODULE_DESCRIPTION("FIP discovery protocol and FCoE transport for FCoE HBAs"); MODULE_LICENSE("GPL v2"); static int fcoe_transport_create(const char *, struct kernel_param *); static int fcoe_transport_destroy(const char *, struct kernel_param *); static int fcoe_transport_show(char *buffer, const struct kernel_param *kp); static struct fcoe_transport *fcoe_transport_lookup(struct net_device *device); static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *device); static int fcoe_transport_enable(const char *, struct kernel_param *); static int fcoe_transport_disable(const char *, struct kernel_param *); static int libfcoe_device_notification(struct notifier_block *notifier, ulong event, void *ptr); static LIST_HEAD(fcoe_transports); static DEFINE_MUTEX(ft_mutex); static LIST_HEAD(fcoe_netdevs); static DEFINE_MUTEX(fn_mutex); unsigned int libfcoe_debug_logging; module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR); 
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); module_param_call(show, NULL, fcoe_transport_show, NULL, S_IRUSR); __MODULE_PARM_TYPE(show, "string"); MODULE_PARM_DESC(show, " Show attached FCoE transports"); module_param_call(create, fcoe_transport_create, NULL, (void *)FIP_MODE_FABRIC, S_IWUSR); __MODULE_PARM_TYPE(create, "string"); MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface"); module_param_call(create_vn2vn, fcoe_transport_create, NULL, (void *)FIP_MODE_VN2VN, S_IWUSR); __MODULE_PARM_TYPE(create_vn2vn, "string"); MODULE_PARM_DESC(create_vn2vn, " Creates a VN_node to VN_node FCoE instance " "on an Ethernet interface"); module_param_call(destroy, fcoe_transport_destroy, NULL, NULL, S_IWUSR); __MODULE_PARM_TYPE(destroy, "string"); MODULE_PARM_DESC(destroy, " Destroys fcoe instance on a ethernet interface"); module_param_call(enable, fcoe_transport_enable, NULL, NULL, S_IWUSR); __MODULE_PARM_TYPE(enable, "string"); MODULE_PARM_DESC(enable, " Enables fcoe on a ethernet interface."); module_param_call(disable, fcoe_transport_disable, NULL, NULL, S_IWUSR); __MODULE_PARM_TYPE(disable, "string"); MODULE_PARM_DESC(disable, " Disables fcoe on a ethernet interface."); /* notification function for packets from net device */ static struct notifier_block libfcoe_notifier = { .notifier_call = libfcoe_device_notification, }; /** * fcoe_link_speed_update() - Update the supported and actual link speeds * @lport: The local port to update speeds for * * Returns: 0 if the ethtool query was successful * -1 if the ethtool query failed */ int fcoe_link_speed_update(struct fc_lport *lport) { struct net_device *netdev = fcoe_get_netdev(lport); struct ethtool_cmd ecmd; if (!__ethtool_get_settings(netdev, &ecmd)) { lport->link_supported_speeds &= ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT | FC_PORTSPEED_20GBIT | FC_PORTSPEED_40GBIT); if (ecmd.supported & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | SUPPORTED_1000baseKX_Full)) 
lport->link_supported_speeds |= FC_PORTSPEED_1GBIT; if (ecmd.supported & (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full | SUPPORTED_10000baseKR_Full | SUPPORTED_10000baseR_FEC)) lport->link_supported_speeds |= FC_PORTSPEED_10GBIT; if (ecmd.supported & (SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full)) lport->link_supported_speeds |= FC_PORTSPEED_20GBIT; if (ecmd.supported & (SUPPORTED_40000baseKR4_Full | SUPPORTED_40000baseCR4_Full | SUPPORTED_40000baseSR4_Full | SUPPORTED_40000baseLR4_Full)) lport->link_supported_speeds |= FC_PORTSPEED_40GBIT; switch (ethtool_cmd_speed(&ecmd)) { case SPEED_1000: lport->link_speed = FC_PORTSPEED_1GBIT; break; case SPEED_10000: lport->link_speed = FC_PORTSPEED_10GBIT; break; case 20000: lport->link_speed = FC_PORTSPEED_20GBIT; break; case 40000: lport->link_speed = FC_PORTSPEED_40GBIT; break; default: lport->link_speed = FC_PORTSPEED_UNKNOWN; break; } return 0; } return -1; } EXPORT_SYMBOL_GPL(fcoe_link_speed_update); /** * __fcoe_get_lesb() - Get the Link Error Status Block (LESB) for a given lport * @lport: The local port to update speeds for * @fc_lesb: Pointer to the LESB to be filled up * @netdev: Pointer to the netdev that is associated with the lport * * Note, the Link Error Status Block (LESB) for FCoE is defined in FC-BB-6 * Clause 7.11 in v1.04. 
*/ void __fcoe_get_lesb(struct fc_lport *lport, struct fc_els_lesb *fc_lesb, struct net_device *netdev) { unsigned int cpu; u32 lfc, vlfc, mdac; struct fc_stats *stats; struct fcoe_fc_els_lesb *lesb; struct rtnl_link_stats64 temp; lfc = 0; vlfc = 0; mdac = 0; lesb = (struct fcoe_fc_els_lesb *)fc_lesb; memset(lesb, 0, sizeof(*lesb)); for_each_possible_cpu(cpu) { stats = per_cpu_ptr(lport->stats, cpu); lfc += stats->LinkFailureCount; vlfc += stats->VLinkFailureCount; mdac += stats->MissDiscAdvCount; } lesb->lesb_link_fail = htonl(lfc); lesb->lesb_vlink_fail = htonl(vlfc); lesb->lesb_miss_fka = htonl(mdac); lesb->lesb_fcs_error = htonl(dev_get_stats(netdev, &temp)->rx_crc_errors); } EXPORT_SYMBOL_GPL(__fcoe_get_lesb); /** * fcoe_get_lesb() - Fill the FCoE Link Error Status Block * @lport: the local port * @fc_lesb: the link error status block */ void fcoe_get_lesb(struct fc_lport *lport, struct fc_els_lesb *fc_lesb) { struct net_device *netdev = fcoe_get_netdev(lport); __fcoe_get_lesb(lport, fc_lesb, netdev); } EXPORT_SYMBOL_GPL(fcoe_get_lesb); /** * fcoe_ctlr_get_lesb() - Get the Link Error Status Block (LESB) for a given * fcoe controller device * @ctlr_dev: The given fcoe controller device * */ void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev) { struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev); struct net_device *netdev = fcoe_get_netdev(fip->lp); struct fc_els_lesb *fc_lesb; fc_lesb = (struct fc_els_lesb *)(&ctlr_dev->lesb); __fcoe_get_lesb(fip->lp, fc_lesb, netdev); } EXPORT_SYMBOL_GPL(fcoe_ctlr_get_lesb); void fcoe_wwn_to_str(u64 wwn, char *buf, int len) { u8 wwpn[8]; u64_to_wwn(wwn, wwpn); snprintf(buf, len, "%02x%02x%02x%02x%02x%02x%02x%02x", wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5], wwpn[6], wwpn[7]); } EXPORT_SYMBOL_GPL(fcoe_wwn_to_str); /** * fcoe_validate_vport_create() - Validate a vport before creating it * @vport: NPIV port to be created * * This routine is meant to add validation for a vport before creating it * via 
fcoe_vport_create(). * Current validations are: * - WWPN supplied is unique for given lport */ int fcoe_validate_vport_create(struct fc_vport *vport) { struct Scsi_Host *shost = vport_to_shost(vport); struct fc_lport *n_port = shost_priv(shost); struct fc_lport *vn_port; int rc = 0; char buf[32]; mutex_lock(&n_port->lp_mutex); fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); /* Check if the wwpn is not same as that of the lport */ if (!memcmp(&n_port->wwpn, &vport->port_name, sizeof(u64))) { LIBFCOE_TRANSPORT_DBG("vport WWPN 0x%s is same as that of the " "base port WWPN\n", buf); rc = -EINVAL; goto out; } /* Check if there is any existing vport with same wwpn */ list_for_each_entry(vn_port, &n_port->vports, list) { if (!memcmp(&vn_port->wwpn, &vport->port_name, sizeof(u64))) { LIBFCOE_TRANSPORT_DBG("vport with given WWPN 0x%s " "already exists\n", buf); rc = -EINVAL; break; } } out: mutex_unlock(&n_port->lp_mutex); return rc; } EXPORT_SYMBOL_GPL(fcoe_validate_vport_create); /** * fcoe_get_wwn() - Get the world wide name from LLD if it supports it * @netdev: the associated net device * @wwn: the output WWN * @type: the type of WWN (WWPN or WWNN) * * Returns: 0 for success */ int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) { const struct net_device_ops *ops = netdev->netdev_ops; if (ops->ndo_fcoe_get_wwn) return ops->ndo_fcoe_get_wwn(netdev, wwn, type); return -EINVAL; } EXPORT_SYMBOL_GPL(fcoe_get_wwn); /** * fcoe_fc_crc() - Calculates the CRC for a given frame * @fp: The frame to be checksumed * * This uses crc32() routine to calculate the CRC for a frame * * Return: The 32 bit CRC value */ u32 fcoe_fc_crc(struct fc_frame *fp) { struct sk_buff *skb = fp_skb(fp); struct skb_frag_struct *frag; unsigned char *data; unsigned long off, len, clen; u32 crc; unsigned i; crc = crc32(~0, skb->data, skb_headlen(skb)); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; off = frag->page_offset; len = skb_frag_size(frag); 
while (len > 0) { clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK)); data = kmap_atomic( skb_frag_page(frag) + (off >> PAGE_SHIFT)); crc = crc32(crc, data + (off & ~PAGE_MASK), clen); kunmap_atomic(data); off += clen; len -= clen; } } return crc; } EXPORT_SYMBOL_GPL(fcoe_fc_crc); /** * fcoe_start_io() - Start FCoE I/O * @skb: The packet to be transmitted * * This routine is called from the net device to start transmitting * FCoE packets. * * Returns: 0 for success */ int fcoe_start_io(struct sk_buff *skb) { struct sk_buff *nskb; int rc; nskb = skb_clone(skb, GFP_ATOMIC); if (!nskb) return -ENOMEM; rc = dev_queue_xmit(nskb); if (rc != 0) return rc; kfree_skb(skb); return 0; } EXPORT_SYMBOL_GPL(fcoe_start_io); /** * fcoe_clean_pending_queue() - Dequeue a skb and free it * @lport: The local port to dequeue a skb on */ void fcoe_clean_pending_queue(struct fc_lport *lport) { struct fcoe_port *port = lport_priv(lport); struct sk_buff *skb; spin_lock_bh(&port->fcoe_pending_queue.lock); while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) { spin_unlock_bh(&port->fcoe_pending_queue.lock); kfree_skb(skb); spin_lock_bh(&port->fcoe_pending_queue.lock); } spin_unlock_bh(&port->fcoe_pending_queue.lock); } EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue); /** * fcoe_check_wait_queue() - Attempt to clear the transmit backlog * @lport: The local port whose backlog is to be cleared * * This empties the wait_queue, dequeues the head of the wait_queue queue * and calls fcoe_start_io() for each packet. If all skb have been * transmitted it returns the qlen. If an error occurs it restores * wait_queue (to try again later) and returns -1. * * The wait_queue is used when the skb transmit fails. The failed skb * will go in the wait_queue which will be emptied by the timer function or * by the next skb transmit. 
*/ void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb) { struct fcoe_port *port = lport_priv(lport); int rc; spin_lock_bh(&port->fcoe_pending_queue.lock); if (skb) __skb_queue_tail(&port->fcoe_pending_queue, skb); if (port->fcoe_pending_queue_active) goto out; port->fcoe_pending_queue_active = 1; while (port->fcoe_pending_queue.qlen) { /* keep qlen > 0 until fcoe_start_io succeeds */ port->fcoe_pending_queue.qlen++; skb = __skb_dequeue(&port->fcoe_pending_queue); spin_unlock_bh(&port->fcoe_pending_queue.lock); rc = fcoe_start_io(skb); spin_lock_bh(&port->fcoe_pending_queue.lock); if (rc) { __skb_queue_head(&port->fcoe_pending_queue, skb); /* undo temporary increment above */ port->fcoe_pending_queue.qlen--; break; } /* undo temporary increment above */ port->fcoe_pending_queue.qlen--; } if (port->fcoe_pending_queue.qlen < port->min_queue_depth) lport->qfull = 0; if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer)) mod_timer(&port->timer, jiffies + 2); port->fcoe_pending_queue_active = 0; out: if (port->fcoe_pending_queue.qlen > port->max_queue_depth) lport->qfull = 1; spin_unlock_bh(&port->fcoe_pending_queue.lock); } EXPORT_SYMBOL_GPL(fcoe_check_wait_queue); /** * fcoe_queue_timer() - The fcoe queue timer * @lport: The local port * * Calls fcoe_check_wait_queue on timeout */ void fcoe_queue_timer(ulong lport) { fcoe_check_wait_queue((struct fc_lport *)lport, NULL); } EXPORT_SYMBOL_GPL(fcoe_queue_timer); /** * fcoe_get_paged_crc_eof() - Allocate a page to be used for the trailer CRC * @skb: The packet to be transmitted * @tlen: The total length of the trailer * @fps: The fcoe context * * This routine allocates a page for frame trailers. The page is re-used if * there is enough room left on it for the current trailer. If there isn't * enough buffer left a new page is allocated for the trailer. 
Reference to
 * the page from this function as well as the skbs using the page fragments
 * ensure that the page is freed at the appropriate time.
 *
 * Returns: 0 for success
 */
int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen,
			   struct fcoe_percpu_s *fps)
{
	struct page *pg = fps->crc_eof_page;

	/* Lazily allocate the shared trailer page on first use. */
	if (!pg) {
		pg = alloc_page(GFP_ATOMIC);
		if (!pg)
			return -ENOMEM;
		fps->crc_eof_page = pg;
		fps->crc_eof_offset = 0;
	}

	/* The skb fragment takes its own reference on the page. */
	get_page(pg);
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, pg,
			   fps->crc_eof_offset, tlen);
	skb->len += tlen;
	skb->data_len += tlen;
	skb->truesize += tlen;

	/* Retire the page once no room is left for another trailer. */
	fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
	if (fps->crc_eof_offset >= PAGE_SIZE) {
		fps->crc_eof_page = NULL;
		fps->crc_eof_offset = 0;
		put_page(pg);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fcoe_get_paged_crc_eof);

/**
 * fcoe_transport_lookup - find an fcoe transport that matches a netdev
 * @netdev: The netdev to look for from all attached transports
 *
 * Returns : ptr to the fcoe transport that supports this netdev or NULL
 * if not found.
* * The ft_mutex should be held when this is called */ static struct fcoe_transport *fcoe_transport_lookup(struct net_device *netdev) { struct fcoe_transport *ft = NULL; list_for_each_entry(ft, &fcoe_transports, list) if (ft->match && ft->match(netdev)) return ft; return NULL; } /** * fcoe_transport_attach - Attaches an FCoE transport * @ft: The fcoe transport to be attached * * Returns : 0 for success */ int fcoe_transport_attach(struct fcoe_transport *ft) { int rc = 0; mutex_lock(&ft_mutex); if (ft->attached) { LIBFCOE_TRANSPORT_DBG("transport %s already attached\n", ft->name); rc = -EEXIST; goto out_attach; } /* Add default transport to the tail */ if (strcmp(ft->name, FCOE_TRANSPORT_DEFAULT)) list_add(&ft->list, &fcoe_transports); else list_add_tail(&ft->list, &fcoe_transports); ft->attached = true; LIBFCOE_TRANSPORT_DBG("attaching transport %s\n", ft->name); out_attach: mutex_unlock(&ft_mutex); return rc; } EXPORT_SYMBOL(fcoe_transport_attach); /** * fcoe_transport_detach - Detaches an FCoE transport * @ft: The fcoe transport to be attached * * Returns : 0 for success */ int fcoe_transport_detach(struct fcoe_transport *ft) { int rc = 0; struct fcoe_netdev_mapping *nm = NULL, *tmp; mutex_lock(&ft_mutex); if (!ft->attached) { LIBFCOE_TRANSPORT_DBG("transport %s already detached\n", ft->name); rc = -ENODEV; goto out_attach; } /* remove netdev mapping for this transport as it is going away */ mutex_lock(&fn_mutex); list_for_each_entry_safe(nm, tmp, &fcoe_netdevs, list) { if (nm->ft == ft) { LIBFCOE_TRANSPORT_DBG("transport %s going away, " "remove its netdev mapping for %s\n", ft->name, nm->netdev->name); list_del(&nm->list); kfree(nm); } } mutex_unlock(&fn_mutex); list_del(&ft->list); ft->attached = false; LIBFCOE_TRANSPORT_DBG("detaching transport %s\n", ft->name); out_attach: mutex_unlock(&ft_mutex); return rc; } EXPORT_SYMBOL(fcoe_transport_detach); static int fcoe_transport_show(char *buffer, const struct kernel_param *kp) { int i, j; struct fcoe_transport 
*ft = NULL; i = j = sprintf(buffer, "Attached FCoE transports:"); mutex_lock(&ft_mutex); list_for_each_entry(ft, &fcoe_transports, list) { if (i >= PAGE_SIZE - IFNAMSIZ) break; i += snprintf(&buffer[i], IFNAMSIZ, "%s ", ft->name); } mutex_unlock(&ft_mutex); if (i == j) i += snprintf(&buffer[i], IFNAMSIZ, "none"); return i; } static int __init fcoe_transport_init(void) { register_netdevice_notifier(&libfcoe_notifier); return 0; } static int fcoe_transport_exit(void) { struct fcoe_transport *ft; unregister_netdevice_notifier(&libfcoe_notifier); mutex_lock(&ft_mutex); list_for_each_entry(ft, &fcoe_transports, list) printk(KERN_ERR "FCoE transport %s is still attached!\n", ft->name); mutex_unlock(&ft_mutex); return 0; } static int fcoe_add_netdev_mapping(struct net_device *netdev, struct fcoe_transport *ft) { struct fcoe_netdev_mapping *nm; nm = kmalloc(sizeof(*nm), GFP_KERNEL); if (!nm) { printk(KERN_ERR "Unable to allocate netdev_mapping"); return -ENOMEM; } nm->netdev = netdev; nm->ft = ft; mutex_lock(&fn_mutex); list_add(&nm->list, &fcoe_netdevs); mutex_unlock(&fn_mutex); return 0; } static void fcoe_del_netdev_mapping(struct net_device *netdev) { struct fcoe_netdev_mapping *nm = NULL, *tmp; mutex_lock(&fn_mutex); list_for_each_entry_safe(nm, tmp, &fcoe_netdevs, list) { if (nm->netdev == netdev) { list_del(&nm->list); kfree(nm); mutex_unlock(&fn_mutex); return; } } mutex_unlock(&fn_mutex); } /** * fcoe_netdev_map_lookup - find the fcoe transport that matches the netdev on which * it was created * * Returns : ptr to the fcoe transport that supports this netdev or NULL * if not found. 
* * The ft_mutex should be held when this is called */ static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *netdev) { struct fcoe_transport *ft = NULL; struct fcoe_netdev_mapping *nm; mutex_lock(&fn_mutex); list_for_each_entry(nm, &fcoe_netdevs, list) { if (netdev == nm->netdev) { ft = nm->ft; mutex_unlock(&fn_mutex); return ft; } } mutex_unlock(&fn_mutex); return NULL; } /** * fcoe_if_to_netdev() - Parse a name buffer to get a net device * @buffer: The name of the net device * * Returns: NULL or a ptr to net_device */ static struct net_device *fcoe_if_to_netdev(const char *buffer) { char *cp; char ifname[IFNAMSIZ + 2]; if (buffer) { strlcpy(ifname, buffer, IFNAMSIZ); cp = ifname + strlen(ifname); while (--cp >= ifname && *cp == '\n') *cp = '\0'; return dev_get_by_name(&init_net, ifname); } return NULL; } /** * libfcoe_device_notification() - Handler for net device events * @notifier: The context of the notification * @event: The type of event * @ptr: The net device that the event was on * * This function is called by the Ethernet driver in case of link change event. 
* * Returns: 0 for success */ static int libfcoe_device_notification(struct notifier_block *notifier, ulong event, void *ptr) { struct net_device *netdev = netdev_notifier_info_to_dev(ptr); switch (event) { case NETDEV_UNREGISTER: LIBFCOE_TRANSPORT_DBG("NETDEV_UNREGISTER %s\n", netdev->name); fcoe_del_netdev_mapping(netdev); break; } return NOTIFY_OK; } ssize_t fcoe_ctlr_create_store(struct bus_type *bus, const char *buf, size_t count) { struct net_device *netdev = NULL; struct fcoe_transport *ft = NULL; int rc = 0; int err; mutex_lock(&ft_mutex); netdev = fcoe_if_to_netdev(buf); if (!netdev) { LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buf); rc = -ENODEV; goto out_nodev; } ft = fcoe_netdev_map_lookup(netdev); if (ft) { LIBFCOE_TRANSPORT_DBG("transport %s already has existing " "FCoE instance on %s.\n", ft->name, netdev->name); rc = -EEXIST; goto out_putdev; } ft = fcoe_transport_lookup(netdev); if (!ft) { LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n", netdev->name); rc = -ENODEV; goto out_putdev; } /* pass to transport create */ err = ft->alloc ? 
ft->alloc(netdev) : -ENODEV; if (err) { fcoe_del_netdev_mapping(netdev); rc = -ENOMEM; goto out_putdev; } err = fcoe_add_netdev_mapping(netdev, ft); if (err) { LIBFCOE_TRANSPORT_DBG("failed to add new netdev mapping " "for FCoE transport %s for %s.\n", ft->name, netdev->name); rc = -ENODEV; goto out_putdev; } LIBFCOE_TRANSPORT_DBG("transport %s succeeded to create fcoe on %s.\n", ft->name, netdev->name); out_putdev: dev_put(netdev); out_nodev: mutex_unlock(&ft_mutex); if (rc) return rc; return count; } ssize_t fcoe_ctlr_destroy_store(struct bus_type *bus, const char *buf, size_t count) { int rc = -ENODEV; struct net_device *netdev = NULL; struct fcoe_transport *ft = NULL; mutex_lock(&ft_mutex); netdev = fcoe_if_to_netdev(buf); if (!netdev) { LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buf); goto out_nodev; } ft = fcoe_netdev_map_lookup(netdev); if (!ft) { LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n", netdev->name); goto out_putdev; } /* pass to transport destroy */ rc = ft->destroy(netdev); if (rc) goto out_putdev; fcoe_del_netdev_mapping(netdev); LIBFCOE_TRANSPORT_DBG("transport %s %s to destroy fcoe on %s.\n", ft->name, (rc) ? "failed" : "succeeded", netdev->name); rc = count; /* required for successful return */ out_putdev: dev_put(netdev); out_nodev: mutex_unlock(&ft_mutex); return rc; } EXPORT_SYMBOL(fcoe_ctlr_destroy_store); /** * fcoe_transport_create() - Create a fcoe interface * @buffer: The name of the Ethernet interface to create on * @kp: The associated kernel param * * Called from sysfs. This holds the ft_mutex while calling the * registered fcoe transport's create function. 
* * Returns: 0 for success */ static int fcoe_transport_create(const char *buffer, struct kernel_param *kp) { int rc = -ENODEV; struct net_device *netdev = NULL; struct fcoe_transport *ft = NULL; enum fip_state fip_mode = (enum fip_state)(long)kp->arg; mutex_lock(&ft_mutex); netdev = fcoe_if_to_netdev(buffer); if (!netdev) { LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buffer); goto out_nodev; } ft = fcoe_netdev_map_lookup(netdev); if (ft) { LIBFCOE_TRANSPORT_DBG("transport %s already has existing " "FCoE instance on %s.\n", ft->name, netdev->name); rc = -EEXIST; goto out_putdev; } ft = fcoe_transport_lookup(netdev); if (!ft) { LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n", netdev->name); goto out_putdev; } rc = fcoe_add_netdev_mapping(netdev, ft); if (rc) { LIBFCOE_TRANSPORT_DBG("failed to add new netdev mapping " "for FCoE transport %s for %s.\n", ft->name, netdev->name); goto out_putdev; } /* pass to transport create */ rc = ft->create ? ft->create(netdev, fip_mode) : -ENODEV; if (rc) fcoe_del_netdev_mapping(netdev); LIBFCOE_TRANSPORT_DBG("transport %s %s to create fcoe on %s.\n", ft->name, (rc) ? "failed" : "succeeded", netdev->name); out_putdev: dev_put(netdev); out_nodev: mutex_unlock(&ft_mutex); return rc; } /** * fcoe_transport_destroy() - Destroy a FCoE interface * @buffer: The name of the Ethernet interface to be destroyed * @kp: The associated kernel parameter * * Called from sysfs. This holds the ft_mutex while calling the * registered fcoe transport's destroy function. 
* * Returns: 0 for success */ static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp) { int rc = -ENODEV; struct net_device *netdev = NULL; struct fcoe_transport *ft = NULL; mutex_lock(&ft_mutex); netdev = fcoe_if_to_netdev(buffer); if (!netdev) { LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buffer); goto out_nodev; } ft = fcoe_netdev_map_lookup(netdev); if (!ft) { LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n", netdev->name); goto out_putdev; } /* pass to transport destroy */ rc = ft->destroy ? ft->destroy(netdev) : -ENODEV; fcoe_del_netdev_mapping(netdev); LIBFCOE_TRANSPORT_DBG("transport %s %s to destroy fcoe on %s.\n", ft->name, (rc) ? "failed" : "succeeded", netdev->name); out_putdev: dev_put(netdev); out_nodev: mutex_unlock(&ft_mutex); return rc; } /** * fcoe_transport_disable() - Disables a FCoE interface * @buffer: The name of the Ethernet interface to be disabled * @kp: The associated kernel parameter * * Called from sysfs. * * Returns: 0 for success */ static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp) { int rc = -ENODEV; struct net_device *netdev = NULL; struct fcoe_transport *ft = NULL; mutex_lock(&ft_mutex); netdev = fcoe_if_to_netdev(buffer); if (!netdev) goto out_nodev; ft = fcoe_netdev_map_lookup(netdev); if (!ft) goto out_putdev; rc = ft->disable ? ft->disable(netdev) : -ENODEV; out_putdev: dev_put(netdev); out_nodev: mutex_unlock(&ft_mutex); return rc; } /** * fcoe_transport_enable() - Enables a FCoE interface * @buffer: The name of the Ethernet interface to be enabled * @kp: The associated kernel parameter * * Called from sysfs. 
* * Returns: 0 for success */ static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp) { int rc = -ENODEV; struct net_device *netdev = NULL; struct fcoe_transport *ft = NULL; mutex_lock(&ft_mutex); netdev = fcoe_if_to_netdev(buffer); if (!netdev) goto out_nodev; ft = fcoe_netdev_map_lookup(netdev); if (!ft) goto out_putdev; rc = ft->enable ? ft->enable(netdev) : -ENODEV; out_putdev: dev_put(netdev); out_nodev: mutex_unlock(&ft_mutex); return rc; } /** * libfcoe_init() - Initialization routine for libfcoe.ko */ static int __init libfcoe_init(void) { int rc = 0; rc = fcoe_transport_init(); if (rc) return rc; rc = fcoe_sysfs_setup(); if (rc) fcoe_transport_exit(); return rc; } module_init(libfcoe_init); /** * libfcoe_exit() - Tear down libfcoe.ko */ static void __exit libfcoe_exit(void) { fcoe_sysfs_teardown(); fcoe_transport_exit(); } module_exit(libfcoe_exit);
gpl-2.0
kykdev/lolliwiz_lentislte
crypto/crypto_user.c
1199
12591
/* * Crypto user configuration API. * * Copyright (C) 2011 secunet Security Networks AG * Copyright (C) 2011 Steffen Klassert <steffen.klassert@secunet.com> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/module.h> #include <linux/crypto.h> #include <linux/cryptouser.h> #include <linux/sched.h> #include <net/netlink.h> #include <linux/security.h> #include <net/net_namespace.h> #include <crypto/internal/aead.h> #include <crypto/internal/skcipher.h> #include "internal.h" #define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x)) static DEFINE_MUTEX(crypto_cfg_mutex); /* The crypto netlink socket */ static struct sock *crypto_nlsk; struct crypto_dump_info { struct sk_buff *in_skb; struct sk_buff *out_skb; u32 nlmsg_seq; u16 nlmsg_flags; }; static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact) { struct crypto_alg *q, *alg = NULL; down_read(&crypto_alg_sem); list_for_each_entry(q, &crypto_alg_list, cra_list) { int match = 0; if ((q->cra_flags ^ p->cru_type) & p->cru_mask) continue; if (strlen(p->cru_driver_name)) match = !strcmp(q->cra_driver_name, p->cru_driver_name); else if (!exact) match = !strcmp(q->cra_name, p->cru_name); if (match) { alg = q; break; } } up_read(&crypto_alg_sem); return alg; } static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_cipher rcipher; 
strncpy(rcipher.type, "cipher", sizeof(rcipher.type)); rcipher.blocksize = alg->cra_blocksize; rcipher.min_keysize = alg->cra_cipher.cia_min_keysize; rcipher.max_keysize = alg->cra_cipher.cia_max_keysize; if (nla_put(skb, CRYPTOCFGA_REPORT_CIPHER, sizeof(struct crypto_report_cipher), &rcipher)) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_comp rcomp; strncpy(rcomp.type, "compression", sizeof(rcomp.type)); if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, sizeof(struct crypto_report_comp), &rcomp)) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } static int crypto_report_one(struct crypto_alg *alg, struct crypto_user_alg *ualg, struct sk_buff *skb) { strncpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name)); strncpy(ualg->cru_driver_name, alg->cra_driver_name, sizeof(ualg->cru_driver_name)); strncpy(ualg->cru_module_name, module_name(alg->cra_module), sizeof(ualg->cru_module_name)); ualg->cru_type = 0; ualg->cru_mask = 0; ualg->cru_flags = alg->cra_flags; ualg->cru_refcnt = atomic_read(&alg->cra_refcnt); if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority)) goto nla_put_failure; if (alg->cra_flags & CRYPTO_ALG_LARVAL) { struct crypto_report_larval rl; strncpy(rl.type, "larval", sizeof(rl.type)); if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL, sizeof(struct crypto_report_larval), &rl)) goto nla_put_failure; goto out; } if (alg->cra_type && alg->cra_type->report) { if (alg->cra_type->report(skb, alg)) goto nla_put_failure; goto out; } switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) { case CRYPTO_ALG_TYPE_CIPHER: if (crypto_report_cipher(skb, alg)) goto nla_put_failure; break; case CRYPTO_ALG_TYPE_COMPRESS: if (crypto_report_comp(skb, alg)) goto nla_put_failure; break; } out: return 0; nla_put_failure: return -EMSGSIZE; } static int crypto_report_alg(struct crypto_alg *alg, struct crypto_dump_info 
*info) { struct sk_buff *in_skb = info->in_skb; struct sk_buff *skb = info->out_skb; struct nlmsghdr *nlh; struct crypto_user_alg *ualg; int err = 0; nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq, CRYPTO_MSG_GETALG, sizeof(*ualg), info->nlmsg_flags); if (!nlh) { err = -EMSGSIZE; goto out; } ualg = nlmsg_data(nlh); err = crypto_report_one(alg, ualg, skb); if (err) { nlmsg_cancel(skb, nlh); goto out; } nlmsg_end(skb, nlh); out: return err; } static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs) { struct crypto_user_alg *p = nlmsg_data(in_nlh); struct crypto_alg *alg; struct sk_buff *skb; struct crypto_dump_info info; int err; if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name)) return -EINVAL; if (!p->cru_driver_name[0]) return -EINVAL; alg = crypto_alg_match(p, 1); if (!alg) return -ENOENT; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb) return -ENOMEM; info.in_skb = in_skb; info.out_skb = skb; info.nlmsg_seq = in_nlh->nlmsg_seq; info.nlmsg_flags = 0; err = crypto_report_alg(alg, &info); if (err) return err; return nlmsg_unicast(crypto_nlsk, skb, NETLINK_CB(in_skb).portid); } static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb) { struct crypto_alg *alg; struct crypto_dump_info info; int err; if (cb->args[0]) goto out; cb->args[0] = 1; info.in_skb = cb->skb; info.out_skb = skb; info.nlmsg_seq = cb->nlh->nlmsg_seq; info.nlmsg_flags = NLM_F_MULTI; list_for_each_entry(alg, &crypto_alg_list, cra_list) { err = crypto_report_alg(alg, &info); if (err) goto out_err; } out: return skb->len; out_err: return err; } static int crypto_dump_report_done(struct netlink_callback *cb) { return 0; } static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs) { struct crypto_alg *alg; struct crypto_user_alg *p = nlmsg_data(nlh); struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL]; LIST_HEAD(list); if 
(!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name)) return -EINVAL; if (priority && !strlen(p->cru_driver_name)) return -EINVAL; alg = crypto_alg_match(p, 1); if (!alg) return -ENOENT; down_write(&crypto_alg_sem); crypto_remove_spawns(alg, &list, NULL); if (priority) alg->cra_priority = nla_get_u32(priority); up_write(&crypto_alg_sem); crypto_remove_final(&list); return 0; } static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs) { struct crypto_alg *alg; struct crypto_user_alg *p = nlmsg_data(nlh); if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name)) return -EINVAL; alg = crypto_alg_match(p, 1); if (!alg) return -ENOENT; /* We can not unregister core algorithms such as aes-generic. * We would loose the reference in the crypto_alg_list to this algorithm * if we try to unregister. Unregistering such an algorithm without * removing the module is not possible, so we restrict to crypto * instances that are build from templates. 
*/ if (!(alg->cra_flags & CRYPTO_ALG_INSTANCE)) return -EINVAL; if (atomic_read(&alg->cra_refcnt) != 1) return -EBUSY; return crypto_unregister_instance(alg); } static struct crypto_alg *crypto_user_skcipher_alg(const char *name, u32 type, u32 mask) { int err; struct crypto_alg *alg; type = crypto_skcipher_type(type); mask = crypto_skcipher_mask(mask); for (;;) { alg = crypto_lookup_skcipher(name, type, mask); if (!IS_ERR(alg)) return alg; err = PTR_ERR(alg); if (err != -EAGAIN) break; if (signal_pending(current)) { err = -EINTR; break; } } return ERR_PTR(err); } static struct crypto_alg *crypto_user_aead_alg(const char *name, u32 type, u32 mask) { int err; struct crypto_alg *alg; type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); type |= CRYPTO_ALG_TYPE_AEAD; mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); mask |= CRYPTO_ALG_TYPE_MASK; for (;;) { alg = crypto_lookup_aead(name, type, mask); if (!IS_ERR(alg)) return alg; err = PTR_ERR(alg); if (err != -EAGAIN) break; if (signal_pending(current)) { err = -EINTR; break; } } return ERR_PTR(err); } static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs) { int exact = 0; const char *name; struct crypto_alg *alg; struct crypto_user_alg *p = nlmsg_data(nlh); struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL]; if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name)) return -EINVAL; if (strlen(p->cru_driver_name)) exact = 1; if (priority && !exact) return -EINVAL; alg = crypto_alg_match(p, exact); if (alg) return -EEXIST; if (strlen(p->cru_driver_name)) name = p->cru_driver_name; else name = p->cru_name; switch (p->cru_type & p->cru_mask & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_AEAD: alg = crypto_user_aead_alg(name, p->cru_type, p->cru_mask); break; case CRYPTO_ALG_TYPE_GIVCIPHER: case CRYPTO_ALG_TYPE_BLKCIPHER: case CRYPTO_ALG_TYPE_ABLKCIPHER: alg = crypto_user_skcipher_alg(name, p->cru_type, p->cru_mask); break; default: alg = 
crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask); } if (IS_ERR(alg)) return PTR_ERR(alg); down_write(&crypto_alg_sem); if (priority) alg->cra_priority = nla_get_u32(priority); up_write(&crypto_alg_sem); crypto_mod_put(alg); return 0; } #define MSGSIZE(type) sizeof(struct type) static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = { [CRYPTO_MSG_NEWALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), [CRYPTO_MSG_DELALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), [CRYPTO_MSG_UPDATEALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), [CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), }; static const struct nla_policy crypto_policy[CRYPTOCFGA_MAX+1] = { [CRYPTOCFGA_PRIORITY_VAL] = { .type = NLA_U32}, }; #undef MSGSIZE static const struct crypto_link { int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **); int (*dump)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); } crypto_dispatch[CRYPTO_NR_MSGTYPES] = { [CRYPTO_MSG_NEWALG - CRYPTO_MSG_BASE] = { .doit = crypto_add_alg}, [CRYPTO_MSG_DELALG - CRYPTO_MSG_BASE] = { .doit = crypto_del_alg}, [CRYPTO_MSG_UPDATEALG - CRYPTO_MSG_BASE] = { .doit = crypto_update_alg}, [CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE] = { .doit = crypto_report, .dump = crypto_dump_report, .done = crypto_dump_report_done}, }; static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) { struct nlattr *attrs[CRYPTOCFGA_MAX+1]; const struct crypto_link *link; int type, err; type = nlh->nlmsg_type; if (type > CRYPTO_MSG_MAX) return -EINVAL; type -= CRYPTO_MSG_BASE; link = &crypto_dispatch[type]; if (!netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) && (nlh->nlmsg_flags & NLM_F_DUMP))) { struct crypto_alg *alg; u16 dump_alloc = 0; if (link->dump == NULL) return -EINVAL; list_for_each_entry(alg, &crypto_alg_list, cra_list) dump_alloc += CRYPTO_REPORT_MAXSIZE; { struct netlink_dump_control c = { .dump = link->dump, .done = 
link->done, .min_dump_alloc = dump_alloc, }; return netlink_dump_start(crypto_nlsk, skb, nlh, &c); } } err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX, crypto_policy); if (err < 0) return err; if (link->doit == NULL) return -EINVAL; return link->doit(skb, nlh, attrs); } static void crypto_netlink_rcv(struct sk_buff *skb) { mutex_lock(&crypto_cfg_mutex); netlink_rcv_skb(skb, &crypto_user_rcv_msg); mutex_unlock(&crypto_cfg_mutex); } static int __init crypto_user_init(void) { struct netlink_kernel_cfg cfg = { .input = crypto_netlink_rcv, }; crypto_nlsk = netlink_kernel_create(&init_net, NETLINK_CRYPTO, &cfg); if (!crypto_nlsk) return -ENOMEM; return 0; } static void __exit crypto_user_exit(void) { netlink_kernel_release(crypto_nlsk); } module_init(crypto_user_init); module_exit(crypto_user_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>"); MODULE_DESCRIPTION("Crypto userspace configuration API");
gpl-2.0
soderstrom-rikard/CHIP-linux
drivers/input/tablet/hanwang.c
1967
12263
/* * USB Hanwang tablet support * * Copyright (c) 2010 Xing Wei <weixing@hanwang.com.cn> * */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/usb/input.h> #define DRIVER_AUTHOR "Xing Wei <weixing@hanwang.com.cn>" #define DRIVER_DESC "USB Hanwang tablet driver" #define DRIVER_LICENSE "GPL" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE(DRIVER_LICENSE); #define USB_VENDOR_ID_HANWANG 0x0b57 #define HANWANG_TABLET_INT_CLASS 0x0003 #define HANWANG_TABLET_INT_SUB_CLASS 0x0001 #define HANWANG_TABLET_INT_PROTOCOL 0x0002 #define ART_MASTER_PKGLEN_MAX 10 /* device IDs */ #define STYLUS_DEVICE_ID 0x02 #define TOUCH_DEVICE_ID 0x03 #define CURSOR_DEVICE_ID 0x06 #define ERASER_DEVICE_ID 0x0A #define PAD_DEVICE_ID 0x0F /* match vendor and interface info */ #define HANWANG_TABLET_DEVICE(vend, cl, sc, pr) \ .match_flags = USB_DEVICE_ID_MATCH_VENDOR \ | USB_DEVICE_ID_MATCH_INT_INFO, \ .idVendor = (vend), \ .bInterfaceClass = (cl), \ .bInterfaceSubClass = (sc), \ .bInterfaceProtocol = (pr) enum hanwang_tablet_type { HANWANG_ART_MASTER_III, HANWANG_ART_MASTER_HD, HANWANG_ART_MASTER_II, }; struct hanwang { unsigned char *data; dma_addr_t data_dma; struct input_dev *dev; struct usb_device *usbdev; 
struct urb *irq; const struct hanwang_features *features; unsigned int current_tool; unsigned int current_id; char name[64]; char phys[32]; }; struct hanwang_features { unsigned short pid; char *name; enum hanwang_tablet_type type; int pkg_len; int max_x; int max_y; int max_tilt_x; int max_tilt_y; int max_pressure; }; static const struct hanwang_features features_array[] = { { 0x8528, "Hanwang Art Master III 0906", HANWANG_ART_MASTER_III, ART_MASTER_PKGLEN_MAX, 0x5757, 0x3692, 0x3f, 0x7f, 2048 }, { 0x8529, "Hanwang Art Master III 0604", HANWANG_ART_MASTER_III, ART_MASTER_PKGLEN_MAX, 0x3d84, 0x2672, 0x3f, 0x7f, 2048 }, { 0x852a, "Hanwang Art Master III 1308", HANWANG_ART_MASTER_III, ART_MASTER_PKGLEN_MAX, 0x7f00, 0x4f60, 0x3f, 0x7f, 2048 }, { 0x8401, "Hanwang Art Master HD 5012", HANWANG_ART_MASTER_HD, ART_MASTER_PKGLEN_MAX, 0x678e, 0x4150, 0x3f, 0x7f, 1024 }, { 0x8503, "Hanwang Art Master II", HANWANG_ART_MASTER_II, ART_MASTER_PKGLEN_MAX, 0x27de, 0x1cfe, 0x3f, 0x7f, 1024 }, }; static const int hw_eventtypes[] = { EV_KEY, EV_ABS, EV_MSC, }; static const int hw_absevents[] = { ABS_X, ABS_Y, ABS_TILT_X, ABS_TILT_Y, ABS_WHEEL, ABS_RX, ABS_RY, ABS_PRESSURE, ABS_MISC, }; static const int hw_btnevents[] = { BTN_STYLUS, BTN_STYLUS2, BTN_TOOL_PEN, BTN_TOOL_RUBBER, BTN_TOOL_MOUSE, BTN_TOOL_FINGER, BTN_0, BTN_1, BTN_2, BTN_3, BTN_4, BTN_5, BTN_6, BTN_7, BTN_8, }; static const int hw_mscevents[] = { MSC_SERIAL, }; static void hanwang_parse_packet(struct hanwang *hanwang) { unsigned char *data = hanwang->data; struct input_dev *input_dev = hanwang->dev; struct usb_device *dev = hanwang->usbdev; enum hanwang_tablet_type type = hanwang->features->type; int i; u16 p; if (type == HANWANG_ART_MASTER_II) { hanwang->current_tool = BTN_TOOL_PEN; hanwang->current_id = STYLUS_DEVICE_ID; } switch (data[0]) { case 0x02: /* data packet */ switch (data[1]) { case 0x80: /* tool prox out */ if (type != HANWANG_ART_MASTER_II) { hanwang->current_id = 0; input_report_key(input_dev, 
hanwang->current_tool, 0); } break; case 0x00: /* artmaster ii pen leave */ if (type == HANWANG_ART_MASTER_II) { hanwang->current_id = 0; input_report_key(input_dev, hanwang->current_tool, 0); } break; case 0xc2: /* first time tool prox in */ switch (data[3] & 0xf0) { case 0x20: /* art_master III */ case 0x30: /* art_master_HD */ hanwang->current_id = STYLUS_DEVICE_ID; hanwang->current_tool = BTN_TOOL_PEN; input_report_key(input_dev, BTN_TOOL_PEN, 1); break; case 0xa0: /* art_master III */ case 0xb0: /* art_master_HD */ hanwang->current_id = ERASER_DEVICE_ID; hanwang->current_tool = BTN_TOOL_RUBBER; input_report_key(input_dev, BTN_TOOL_RUBBER, 1); break; default: hanwang->current_id = 0; dev_dbg(&dev->dev, "unknown tablet tool %02x\n", data[0]); break; } break; default: /* tool data packet */ switch (type) { case HANWANG_ART_MASTER_III: p = (data[6] << 3) | ((data[7] & 0xc0) >> 5) | (data[1] & 0x01); break; case HANWANG_ART_MASTER_HD: case HANWANG_ART_MASTER_II: p = (data[7] >> 6) | (data[6] << 2); break; default: p = 0; break; } input_report_abs(input_dev, ABS_X, be16_to_cpup((__be16 *)&data[2])); input_report_abs(input_dev, ABS_Y, be16_to_cpup((__be16 *)&data[4])); input_report_abs(input_dev, ABS_PRESSURE, p); input_report_abs(input_dev, ABS_TILT_X, data[7] & 0x3f); input_report_abs(input_dev, ABS_TILT_Y, data[8] & 0x7f); input_report_key(input_dev, BTN_STYLUS, data[1] & 0x02); if (type != HANWANG_ART_MASTER_II) input_report_key(input_dev, BTN_STYLUS2, data[1] & 0x04); else input_report_key(input_dev, BTN_TOOL_PEN, 1); break; } input_report_abs(input_dev, ABS_MISC, hanwang->current_id); input_event(input_dev, EV_MSC, MSC_SERIAL, hanwang->features->pid); break; case 0x0c: /* roll wheel */ hanwang->current_id = PAD_DEVICE_ID; switch (type) { case HANWANG_ART_MASTER_III: input_report_key(input_dev, BTN_TOOL_FINGER, data[1] || data[2] || data[3]); input_report_abs(input_dev, ABS_WHEEL, data[1]); input_report_key(input_dev, BTN_0, data[2]); for (i = 0; i < 8; i++) 
input_report_key(input_dev, BTN_1 + i, data[3] & (1 << i)); break; case HANWANG_ART_MASTER_HD: input_report_key(input_dev, BTN_TOOL_FINGER, data[1] || data[2] || data[3] || data[4] || data[5] || data[6]); input_report_abs(input_dev, ABS_RX, ((data[1] & 0x1f) << 8) | data[2]); input_report_abs(input_dev, ABS_RY, ((data[3] & 0x1f) << 8) | data[4]); input_report_key(input_dev, BTN_0, data[5] & 0x01); for (i = 0; i < 4; i++) { input_report_key(input_dev, BTN_1 + i, data[5] & (1 << i)); input_report_key(input_dev, BTN_5 + i, data[6] & (1 << i)); } break; case HANWANG_ART_MASTER_II: dev_dbg(&dev->dev, "error packet %02x\n", data[0]); return; } input_report_abs(input_dev, ABS_MISC, hanwang->current_id); input_event(input_dev, EV_MSC, MSC_SERIAL, 0xffffffff); break; default: dev_dbg(&dev->dev, "error packet %02x\n", data[0]); break; } input_sync(input_dev); } static void hanwang_irq(struct urb *urb) { struct hanwang *hanwang = urb->context; struct usb_device *dev = hanwang->usbdev; int retval; switch (urb->status) { case 0: /* success */; hanwang_parse_packet(hanwang); break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_err(&dev->dev, "%s - urb shutting down with status: %d", __func__, urb->status); return; default: dev_err(&dev->dev, "%s - nonzero urb status received: %d", __func__, urb->status); break; } retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(&dev->dev, "%s - usb_submit_urb failed with result %d", __func__, retval); } static int hanwang_open(struct input_dev *dev) { struct hanwang *hanwang = input_get_drvdata(dev); hanwang->irq->dev = hanwang->usbdev; if (usb_submit_urb(hanwang->irq, GFP_KERNEL)) return -EIO; return 0; } static void hanwang_close(struct input_dev *dev) { struct hanwang *hanwang = input_get_drvdata(dev); usb_kill_urb(hanwang->irq); } static bool get_features(struct usb_device *dev, struct hanwang *hanwang) { int i; for (i = 0; i < ARRAY_SIZE(features_array); i++) { if 
(le16_to_cpu(dev->descriptor.idProduct) == features_array[i].pid) { hanwang->features = &features_array[i]; return true; } } return false; } static int hanwang_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(intf); struct usb_endpoint_descriptor *endpoint; struct hanwang *hanwang; struct input_dev *input_dev; int error; int i; hanwang = kzalloc(sizeof(struct hanwang), GFP_KERNEL); input_dev = input_allocate_device(); if (!hanwang || !input_dev) { error = -ENOMEM; goto fail1; } if (!get_features(dev, hanwang)) { error = -ENXIO; goto fail1; } hanwang->data = usb_alloc_coherent(dev, hanwang->features->pkg_len, GFP_KERNEL, &hanwang->data_dma); if (!hanwang->data) { error = -ENOMEM; goto fail1; } hanwang->irq = usb_alloc_urb(0, GFP_KERNEL); if (!hanwang->irq) { error = -ENOMEM; goto fail2; } hanwang->usbdev = dev; hanwang->dev = input_dev; usb_make_path(dev, hanwang->phys, sizeof(hanwang->phys)); strlcat(hanwang->phys, "/input0", sizeof(hanwang->phys)); strlcpy(hanwang->name, hanwang->features->name, sizeof(hanwang->name)); input_dev->name = hanwang->name; input_dev->phys = hanwang->phys; usb_to_input_id(dev, &input_dev->id); input_dev->dev.parent = &intf->dev; input_set_drvdata(input_dev, hanwang); input_dev->open = hanwang_open; input_dev->close = hanwang_close; for (i = 0; i < ARRAY_SIZE(hw_eventtypes); ++i) __set_bit(hw_eventtypes[i], input_dev->evbit); for (i = 0; i < ARRAY_SIZE(hw_absevents); ++i) __set_bit(hw_absevents[i], input_dev->absbit); for (i = 0; i < ARRAY_SIZE(hw_btnevents); ++i) __set_bit(hw_btnevents[i], input_dev->keybit); for (i = 0; i < ARRAY_SIZE(hw_mscevents); ++i) __set_bit(hw_mscevents[i], input_dev->mscbit); input_set_abs_params(input_dev, ABS_X, 0, hanwang->features->max_x, 4, 0); input_set_abs_params(input_dev, ABS_Y, 0, hanwang->features->max_y, 4, 0); input_set_abs_params(input_dev, ABS_TILT_X, 0, hanwang->features->max_tilt_x, 0, 0); input_set_abs_params(input_dev, 
ABS_TILT_Y, 0, hanwang->features->max_tilt_y, 0, 0); input_set_abs_params(input_dev, ABS_PRESSURE, 0, hanwang->features->max_pressure, 0, 0); endpoint = &intf->cur_altsetting->endpoint[0].desc; usb_fill_int_urb(hanwang->irq, dev, usb_rcvintpipe(dev, endpoint->bEndpointAddress), hanwang->data, hanwang->features->pkg_len, hanwang_irq, hanwang, endpoint->bInterval); hanwang->irq->transfer_dma = hanwang->data_dma; hanwang->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; error = input_register_device(hanwang->dev); if (error) goto fail3; usb_set_intfdata(intf, hanwang); return 0; fail3: usb_free_urb(hanwang->irq); fail2: usb_free_coherent(dev, hanwang->features->pkg_len, hanwang->data, hanwang->data_dma); fail1: input_free_device(input_dev); kfree(hanwang); return error; } static void hanwang_disconnect(struct usb_interface *intf) { struct hanwang *hanwang = usb_get_intfdata(intf); input_unregister_device(hanwang->dev); usb_free_urb(hanwang->irq); usb_free_coherent(interface_to_usbdev(intf), hanwang->features->pkg_len, hanwang->data, hanwang->data_dma); kfree(hanwang); usb_set_intfdata(intf, NULL); } static const struct usb_device_id hanwang_ids[] = { { HANWANG_TABLET_DEVICE(USB_VENDOR_ID_HANWANG, HANWANG_TABLET_INT_CLASS, HANWANG_TABLET_INT_SUB_CLASS, HANWANG_TABLET_INT_PROTOCOL) }, {} }; MODULE_DEVICE_TABLE(usb, hanwang_ids); static struct usb_driver hanwang_driver = { .name = "hanwang", .probe = hanwang_probe, .disconnect = hanwang_disconnect, .id_table = hanwang_ids, }; module_usb_driver(hanwang_driver);
gpl-2.0
AnguisCaptor/PwnKernel_Shamu_M
drivers/misc/vmw_vmci/vmci_guest.c
2223
21137
/* * VMware VMCI Driver * * Copyright (C) 2012 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. */ #include <linux/vmw_vmci_defs.h> #include <linux/vmw_vmci_api.h> #include <linux/moduleparam.h> #include <linux/interrupt.h> #include <linux/highmem.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/smp.h> #include <linux/io.h> #include <linux/vmalloc.h> #include "vmci_datagram.h" #include "vmci_doorbell.h" #include "vmci_context.h" #include "vmci_driver.h" #include "vmci_event.h" #define PCI_VENDOR_ID_VMWARE 0x15AD #define PCI_DEVICE_ID_VMWARE_VMCI 0x0740 #define VMCI_UTIL_NUM_RESOURCES 1 static bool vmci_disable_msi; module_param_named(disable_msi, vmci_disable_msi, bool, 0); MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)"); static bool vmci_disable_msix; module_param_named(disable_msix, vmci_disable_msix, bool, 0); MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)"); static u32 ctx_update_sub_id = VMCI_INVALID_ID; static u32 vm_context_id = VMCI_INVALID_ID; struct vmci_guest_device { struct device *dev; /* PCI device we are attached to */ void __iomem *iobase; unsigned int irq; unsigned int intr_type; bool exclusive_vectors; struct msix_entry msix_entries[VMCI_MAX_INTRS]; struct tasklet_struct datagram_tasklet; struct tasklet_struct bm_tasklet; void *data_buffer; void *notification_bitmap; }; /* vmci_dev singleton device and supporting data*/ static 
struct vmci_guest_device *vmci_dev_g; static DEFINE_SPINLOCK(vmci_dev_spinlock); static atomic_t vmci_num_guest_devices = ATOMIC_INIT(0); bool vmci_guest_code_active(void) { return atomic_read(&vmci_num_guest_devices) != 0; } u32 vmci_get_vm_context_id(void) { if (vm_context_id == VMCI_INVALID_ID) { struct vmci_datagram get_cid_msg; get_cid_msg.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_GET_CONTEXT_ID); get_cid_msg.src = VMCI_ANON_SRC_HANDLE; get_cid_msg.payload_size = 0; vm_context_id = vmci_send_datagram(&get_cid_msg); } return vm_context_id; } /* * VM to hypervisor call mechanism. We use the standard VMware naming * convention since shared code is calling this function as well. */ int vmci_send_datagram(struct vmci_datagram *dg) { unsigned long flags; int result; /* Check args. */ if (dg == NULL) return VMCI_ERROR_INVALID_ARGS; /* * Need to acquire spinlock on the device because the datagram * data may be spread over multiple pages and the monitor may * interleave device user rpc calls from multiple * VCPUs. Acquiring the spinlock precludes that * possibility. Disabling interrupts to avoid incoming * datagrams during a "rep out" and possibly landing up in * this function. */ spin_lock_irqsave(&vmci_dev_spinlock, flags); if (vmci_dev_g) { iowrite8_rep(vmci_dev_g->iobase + VMCI_DATA_OUT_ADDR, dg, VMCI_DG_SIZE(dg)); result = ioread32(vmci_dev_g->iobase + VMCI_RESULT_LOW_ADDR); } else { result = VMCI_ERROR_UNAVAILABLE; } spin_unlock_irqrestore(&vmci_dev_spinlock, flags); return result; } EXPORT_SYMBOL_GPL(vmci_send_datagram); /* * Gets called with the new context id if updated or resumed. * Context id. 
*/ static void vmci_guest_cid_update(u32 sub_id, const struct vmci_event_data *event_data, void *client_data) { const struct vmci_event_payld_ctx *ev_payload = vmci_event_data_const_payload(event_data); if (sub_id != ctx_update_sub_id) { pr_devel("Invalid subscriber (ID=0x%x)\n", sub_id); return; } if (!event_data || ev_payload->context_id == VMCI_INVALID_ID) { pr_devel("Invalid event data\n"); return; } pr_devel("Updating context from (ID=0x%x) to (ID=0x%x) on event (type=%d)\n", vm_context_id, ev_payload->context_id, event_data->event); vm_context_id = ev_payload->context_id; } /* * Verify that the host supports the hypercalls we need. If it does not, * try to find fallback hypercalls and use those instead. Returns * true if required hypercalls (or fallback hypercalls) are * supported by the host, false otherwise. */ static bool vmci_check_host_caps(struct pci_dev *pdev) { bool result; struct vmci_resource_query_msg *msg; u32 msg_size = sizeof(struct vmci_resource_query_hdr) + VMCI_UTIL_NUM_RESOURCES * sizeof(u32); struct vmci_datagram *check_msg; check_msg = kmalloc(msg_size, GFP_KERNEL); if (!check_msg) { dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__); return false; } check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_RESOURCES_QUERY); check_msg->src = VMCI_ANON_SRC_HANDLE; check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE; msg = (struct vmci_resource_query_msg *)VMCI_DG_PAYLOAD(check_msg); msg->num_resources = VMCI_UTIL_NUM_RESOURCES; msg->resources[0] = VMCI_GET_CONTEXT_ID; /* Checks that hyper calls are supported */ result = vmci_send_datagram(check_msg) == 0x01; kfree(check_msg); dev_dbg(&pdev->dev, "%s: Host capability check: %s\n", __func__, result ? "PASSED" : "FAILED"); /* We need the vector. There are no fallbacks. */ return result; } /* * Reads datagrams from the data in port and dispatches them. We * always start reading datagrams into only the first page of the * datagram buffer. 
If the datagrams don't fit into one page, we * use the maximum datagram buffer size for the remainder of the * invocation. This is a simple heuristic for not penalizing * small datagrams. * * This function assumes that it has exclusive access to the data * in port for the duration of the call. */ static void vmci_dispatch_dgs(unsigned long data) { struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data; u8 *dg_in_buffer = vmci_dev->data_buffer; struct vmci_datagram *dg; size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE; size_t current_dg_in_buffer_size = PAGE_SIZE; size_t remaining_bytes; BUILD_BUG_ON(VMCI_MAX_DG_SIZE < PAGE_SIZE); ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR, vmci_dev->data_buffer, current_dg_in_buffer_size); dg = (struct vmci_datagram *)dg_in_buffer; remaining_bytes = current_dg_in_buffer_size; while (dg->dst.resource != VMCI_INVALID_ID || remaining_bytes > PAGE_SIZE) { unsigned dg_in_size; /* * When the input buffer spans multiple pages, a datagram can * start on any page boundary in the buffer. */ if (dg->dst.resource == VMCI_INVALID_ID) { dg = (struct vmci_datagram *)roundup( (uintptr_t)dg + 1, PAGE_SIZE); remaining_bytes = (size_t)(dg_in_buffer + current_dg_in_buffer_size - (u8 *)dg); continue; } dg_in_size = VMCI_DG_SIZE_ALIGNED(dg); if (dg_in_size <= dg_in_buffer_size) { int result; /* * If the remaining bytes in the datagram * buffer doesn't contain the complete * datagram, we first make sure we have enough * room for it and then we read the reminder * of the datagram and possibly any following * datagrams. */ if (dg_in_size > remaining_bytes) { if (remaining_bytes != current_dg_in_buffer_size) { /* * We move the partial * datagram to the front and * read the reminder of the * datagram and possibly * following calls into the * following bytes. 
*/ memmove(dg_in_buffer, dg_in_buffer + current_dg_in_buffer_size - remaining_bytes, remaining_bytes); dg = (struct vmci_datagram *) dg_in_buffer; } if (current_dg_in_buffer_size != dg_in_buffer_size) current_dg_in_buffer_size = dg_in_buffer_size; ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR, vmci_dev->data_buffer + remaining_bytes, current_dg_in_buffer_size - remaining_bytes); } /* * We special case event datagrams from the * hypervisor. */ if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID && dg->dst.resource == VMCI_EVENT_HANDLER) { result = vmci_event_dispatch(dg); } else { result = vmci_datagram_invoke_guest_handler(dg); } if (result < VMCI_SUCCESS) dev_dbg(vmci_dev->dev, "Datagram with resource (ID=0x%x) failed (err=%d)\n", dg->dst.resource, result); /* On to the next datagram. */ dg = (struct vmci_datagram *)((u8 *)dg + dg_in_size); } else { size_t bytes_to_skip; /* * Datagram doesn't fit in datagram buffer of maximal * size. We drop it. */ dev_dbg(vmci_dev->dev, "Failed to receive datagram (size=%u bytes)\n", dg_in_size); bytes_to_skip = dg_in_size - remaining_bytes; if (current_dg_in_buffer_size != dg_in_buffer_size) current_dg_in_buffer_size = dg_in_buffer_size; for (;;) { ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR, vmci_dev->data_buffer, current_dg_in_buffer_size); if (bytes_to_skip <= current_dg_in_buffer_size) break; bytes_to_skip -= current_dg_in_buffer_size; } dg = (struct vmci_datagram *)(dg_in_buffer + bytes_to_skip); } remaining_bytes = (size_t) (dg_in_buffer + current_dg_in_buffer_size - (u8 *)dg); if (remaining_bytes < VMCI_DG_HEADERSIZE) { /* Get the next batch of datagrams. */ ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR, vmci_dev->data_buffer, current_dg_in_buffer_size); dg = (struct vmci_datagram *)dg_in_buffer; remaining_bytes = current_dg_in_buffer_size; } } } /* * Scans the notification bitmap for raised flags, clears them * and handles the notifications. 
*/ static void vmci_process_bitmap(unsigned long data) { struct vmci_guest_device *dev = (struct vmci_guest_device *)data; if (!dev->notification_bitmap) { dev_dbg(dev->dev, "No bitmap present in %s\n", __func__); return; } vmci_dbell_scan_notification_entries(dev->notification_bitmap); } /* * Enable MSI-X. Try exclusive vectors first, then shared vectors. */ static int vmci_enable_msix(struct pci_dev *pdev, struct vmci_guest_device *vmci_dev) { int i; int result; for (i = 0; i < VMCI_MAX_INTRS; ++i) { vmci_dev->msix_entries[i].entry = i; vmci_dev->msix_entries[i].vector = i; } result = pci_enable_msix(pdev, vmci_dev->msix_entries, VMCI_MAX_INTRS); if (result == 0) vmci_dev->exclusive_vectors = true; else if (result > 0) result = pci_enable_msix(pdev, vmci_dev->msix_entries, 1); return result; } /* * Interrupt handler for legacy or MSI interrupt, or for first MSI-X * interrupt (vector VMCI_INTR_DATAGRAM). */ static irqreturn_t vmci_interrupt(int irq, void *_dev) { struct vmci_guest_device *dev = _dev; /* * If we are using MSI-X with exclusive vectors then we simply schedule * the datagram tasklet, since we know the interrupt was meant for us. * Otherwise we must read the ICR to determine what to do. */ if (dev->intr_type == VMCI_INTR_TYPE_MSIX && dev->exclusive_vectors) { tasklet_schedule(&dev->datagram_tasklet); } else { unsigned int icr; /* Acknowledge interrupt and determine what needs doing. */ icr = ioread32(dev->iobase + VMCI_ICR_ADDR); if (icr == 0 || icr == ~0) return IRQ_NONE; if (icr & VMCI_ICR_DATAGRAM) { tasklet_schedule(&dev->datagram_tasklet); icr &= ~VMCI_ICR_DATAGRAM; } if (icr & VMCI_ICR_NOTIFICATION) { tasklet_schedule(&dev->bm_tasklet); icr &= ~VMCI_ICR_NOTIFICATION; } if (icr != 0) dev_warn(dev->dev, "Ignoring unknown interrupt cause (%d)\n", icr); } return IRQ_HANDLED; } /* * Interrupt handler for MSI-X interrupt vector VMCI_INTR_NOTIFICATION, * which is for the notification bitmap. 
Will only get called if we are * using MSI-X with exclusive vectors. */ static irqreturn_t vmci_interrupt_bm(int irq, void *_dev) { struct vmci_guest_device *dev = _dev; /* For MSI-X we can just assume it was meant for us. */ tasklet_schedule(&dev->bm_tasklet); return IRQ_HANDLED; } /* * Most of the initialization at module load time is done here. */ static int vmci_guest_probe_device(struct pci_dev *pdev, const struct pci_device_id *id) { struct vmci_guest_device *vmci_dev; void __iomem *iobase; unsigned int capabilities; unsigned long cmd; int vmci_err; int error; dev_dbg(&pdev->dev, "Probing for vmci/PCI guest device\n"); error = pcim_enable_device(pdev); if (error) { dev_err(&pdev->dev, "Failed to enable VMCI device: %d\n", error); return error; } error = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME); if (error) { dev_err(&pdev->dev, "Failed to reserve/map IO regions\n"); return error; } iobase = pcim_iomap_table(pdev)[0]; dev_info(&pdev->dev, "Found VMCI PCI device at %#lx, irq %u\n", (unsigned long)iobase, pdev->irq); vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL); if (!vmci_dev) { dev_err(&pdev->dev, "Can't allocate memory for VMCI device\n"); return -ENOMEM; } vmci_dev->dev = &pdev->dev; vmci_dev->intr_type = VMCI_INTR_TYPE_INTX; vmci_dev->exclusive_vectors = false; vmci_dev->iobase = iobase; tasklet_init(&vmci_dev->datagram_tasklet, vmci_dispatch_dgs, (unsigned long)vmci_dev); tasklet_init(&vmci_dev->bm_tasklet, vmci_process_bitmap, (unsigned long)vmci_dev); vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE); if (!vmci_dev->data_buffer) { dev_err(&pdev->dev, "Can't allocate memory for datagram buffer\n"); return -ENOMEM; } pci_set_master(pdev); /* To enable queue_pair functionality. */ /* * Verify that the VMCI Device supports the capabilities that * we need. If the device is missing capabilities that we would * like to use, check for fallback capabilities and use those * instead (so we can run a new VM on old hosts). 
Fail the load if * a required capability is missing and there is no fallback. * * Right now, we need datagrams. There are no fallbacks. */ capabilities = ioread32(vmci_dev->iobase + VMCI_CAPS_ADDR); if (!(capabilities & VMCI_CAPS_DATAGRAM)) { dev_err(&pdev->dev, "Device does not support datagrams\n"); error = -ENXIO; goto err_free_data_buffer; } /* * If the hardware supports notifications, we will use that as * well. */ if (capabilities & VMCI_CAPS_NOTIFICATIONS) { vmci_dev->notification_bitmap = vmalloc(PAGE_SIZE); if (!vmci_dev->notification_bitmap) { dev_warn(&pdev->dev, "Unable to allocate notification bitmap\n"); } else { memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE); capabilities |= VMCI_CAPS_NOTIFICATIONS; } } dev_info(&pdev->dev, "Using capabilities 0x%x\n", capabilities); /* Let the host know which capabilities we intend to use. */ iowrite32(capabilities, vmci_dev->iobase + VMCI_CAPS_ADDR); /* Set up global device so that we can start sending datagrams */ spin_lock_irq(&vmci_dev_spinlock); vmci_dev_g = vmci_dev; spin_unlock_irq(&vmci_dev_spinlock); /* * Register notification bitmap with device if that capability is * used. */ if (capabilities & VMCI_CAPS_NOTIFICATIONS) { struct page *page = vmalloc_to_page(vmci_dev->notification_bitmap); unsigned long bitmap_ppn = page_to_pfn(page); if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) { dev_warn(&pdev->dev, "VMCI device unable to register notification bitmap with PPN 0x%x\n", (u32) bitmap_ppn); goto err_remove_vmci_dev_g; } } /* Check host capabilities. */ if (!vmci_check_host_caps(pdev)) goto err_remove_bitmap; /* Enable device. */ /* * We subscribe to the VMCI_EVENT_CTX_ID_UPDATE here so we can * update the internal context id when needed. 
*/ vmci_err = vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE, vmci_guest_cid_update, NULL, &ctx_update_sub_id); if (vmci_err < VMCI_SUCCESS) dev_warn(&pdev->dev, "Failed to subscribe to event (type=%d): %d\n", VMCI_EVENT_CTX_ID_UPDATE, vmci_err); /* * Enable interrupts. Try MSI-X first, then MSI, and then fallback on * legacy interrupts. */ if (!vmci_disable_msix && !vmci_enable_msix(pdev, vmci_dev)) { vmci_dev->intr_type = VMCI_INTR_TYPE_MSIX; vmci_dev->irq = vmci_dev->msix_entries[0].vector; } else if (!vmci_disable_msi && !pci_enable_msi(pdev)) { vmci_dev->intr_type = VMCI_INTR_TYPE_MSI; vmci_dev->irq = pdev->irq; } else { vmci_dev->intr_type = VMCI_INTR_TYPE_INTX; vmci_dev->irq = pdev->irq; } /* * Request IRQ for legacy or MSI interrupts, or for first * MSI-X vector. */ error = request_irq(vmci_dev->irq, vmci_interrupt, IRQF_SHARED, KBUILD_MODNAME, vmci_dev); if (error) { dev_err(&pdev->dev, "Irq %u in use: %d\n", vmci_dev->irq, error); goto err_disable_msi; } /* * For MSI-X with exclusive vectors we need to request an * interrupt for each vector so that we get a separate * interrupt handler routine. This allows us to distinguish * between the vectors. */ if (vmci_dev->exclusive_vectors) { error = request_irq(vmci_dev->msix_entries[1].vector, vmci_interrupt_bm, 0, KBUILD_MODNAME, vmci_dev); if (error) { dev_err(&pdev->dev, "Failed to allocate irq %u: %d\n", vmci_dev->msix_entries[1].vector, error); goto err_free_irq; } } dev_dbg(&pdev->dev, "Registered device\n"); atomic_inc(&vmci_num_guest_devices); /* Enable specific interrupt bits. */ cmd = VMCI_IMR_DATAGRAM; if (capabilities & VMCI_CAPS_NOTIFICATIONS) cmd |= VMCI_IMR_NOTIFICATION; iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR); /* Enable interrupts. 
*/ iowrite32(VMCI_CONTROL_INT_ENABLE, vmci_dev->iobase + VMCI_CONTROL_ADDR); pci_set_drvdata(pdev, vmci_dev); return 0; err_free_irq: free_irq(vmci_dev->irq, &vmci_dev); tasklet_kill(&vmci_dev->datagram_tasklet); tasklet_kill(&vmci_dev->bm_tasklet); err_disable_msi: if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX) pci_disable_msix(pdev); else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI) pci_disable_msi(pdev); vmci_err = vmci_event_unsubscribe(ctx_update_sub_id); if (vmci_err < VMCI_SUCCESS) dev_warn(&pdev->dev, "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n", VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err); err_remove_bitmap: if (vmci_dev->notification_bitmap) { iowrite32(VMCI_CONTROL_RESET, vmci_dev->iobase + VMCI_CONTROL_ADDR); vfree(vmci_dev->notification_bitmap); } err_remove_vmci_dev_g: spin_lock_irq(&vmci_dev_spinlock); vmci_dev_g = NULL; spin_unlock_irq(&vmci_dev_spinlock); err_free_data_buffer: vfree(vmci_dev->data_buffer); /* The rest are managed resources and will be freed by PCI core */ return error; } static void vmci_guest_remove_device(struct pci_dev *pdev) { struct vmci_guest_device *vmci_dev = pci_get_drvdata(pdev); int vmci_err; dev_dbg(&pdev->dev, "Removing device\n"); atomic_dec(&vmci_num_guest_devices); vmci_qp_guest_endpoints_exit(); vmci_err = vmci_event_unsubscribe(ctx_update_sub_id); if (vmci_err < VMCI_SUCCESS) dev_warn(&pdev->dev, "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n", VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err); spin_lock_irq(&vmci_dev_spinlock); vmci_dev_g = NULL; spin_unlock_irq(&vmci_dev_spinlock); dev_dbg(&pdev->dev, "Resetting vmci device\n"); iowrite32(VMCI_CONTROL_RESET, vmci_dev->iobase + VMCI_CONTROL_ADDR); /* * Free IRQ and then disable MSI/MSI-X as appropriate. For * MSI-X, we might have multiple vectors, each with their own * IRQ, which we must free too. 
*/ free_irq(vmci_dev->irq, vmci_dev); if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX) { if (vmci_dev->exclusive_vectors) free_irq(vmci_dev->msix_entries[1].vector, vmci_dev); pci_disable_msix(pdev); } else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI) { pci_disable_msi(pdev); } tasklet_kill(&vmci_dev->datagram_tasklet); tasklet_kill(&vmci_dev->bm_tasklet); if (vmci_dev->notification_bitmap) { /* * The device reset above cleared the bitmap state of the * device, so we can safely free it here. */ vfree(vmci_dev->notification_bitmap); } vfree(vmci_dev->data_buffer); /* The rest are managed resources and will be freed by PCI core */ } static DEFINE_PCI_DEVICE_TABLE(vmci_ids) = { { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), }, { 0 }, }; MODULE_DEVICE_TABLE(pci, vmci_ids); static struct pci_driver vmci_guest_driver = { .name = KBUILD_MODNAME, .id_table = vmci_ids, .probe = vmci_guest_probe_device, .remove = vmci_guest_remove_device, }; int __init vmci_guest_init(void) { return pci_register_driver(&vmci_guest_driver); } void __exit vmci_guest_exit(void) { pci_unregister_driver(&vmci_guest_driver); }
gpl-2.0
drod2169/android_kernel_lge_bullhead
sound/soc/codecs/wm8750.c
2479
25297
/* * wm8750.c -- WM8750 ALSA SoC audio driver * * Copyright 2005 Openedhand Ltd. * * Author: Richard Purdie <richard@openedhand.com> * * Based on WM8753.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/i2c.h> #include <linux/regmap.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <linux/of_device.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/initval.h> #include "wm8750.h" /* * wm8750 register cache * We can't read the WM8750 register space when we * are using 2 wire for device control, so we cache them instead. */ static const struct reg_default wm8750_reg_defaults[] = { { 0, 0x0097 }, { 1, 0x0097 }, { 2, 0x0079 }, { 3, 0x0079 }, { 4, 0x0000 }, { 5, 0x0008 }, { 6, 0x0000 }, { 7, 0x000a }, { 8, 0x0000 }, { 9, 0x0000 }, { 10, 0x00ff }, { 11, 0x00ff }, { 12, 0x000f }, { 13, 0x000f }, { 14, 0x0000 }, { 15, 0x0000 }, { 16, 0x0000 }, { 17, 0x007b }, { 18, 0x0000 }, { 19, 0x0032 }, { 20, 0x0000 }, { 21, 0x00c3 }, { 22, 0x00c3 }, { 23, 0x00c0 }, { 24, 0x0000 }, { 25, 0x0000 }, { 26, 0x0000 }, { 27, 0x0000 }, { 28, 0x0000 }, { 29, 0x0000 }, { 30, 0x0000 }, { 31, 0x0000 }, { 32, 0x0000 }, { 33, 0x0000 }, { 34, 0x0050 }, { 35, 0x0050 }, { 36, 0x0050 }, { 37, 0x0050 }, { 38, 0x0050 }, { 39, 0x0050 }, { 40, 0x0079 }, { 41, 0x0079 }, { 42, 0x0079 }, }; /* codec private data */ struct wm8750_priv { unsigned int sysclk; }; #define wm8750_reset(c) snd_soc_write(c, WM8750_RESET, 0) /* * WM8750 Controls */ static const char *wm8750_bass[] = {"Linear Control", "Adaptive Boost"}; static const char *wm8750_bass_filter[] = { "130Hz @ 48kHz", "200Hz @ 48kHz" }; static const char *wm8750_treble[] = {"8kHz", "4kHz"}; static 
const char *wm8750_3d_lc[] = {"200Hz", "500Hz"}; static const char *wm8750_3d_uc[] = {"2.2kHz", "1.5kHz"}; static const char *wm8750_3d_func[] = {"Capture", "Playback"}; static const char *wm8750_alc_func[] = {"Off", "Right", "Left", "Stereo"}; static const char *wm8750_ng_type[] = {"Constant PGA Gain", "Mute ADC Output"}; static const char *wm8750_line_mux[] = {"Line 1", "Line 2", "Line 3", "PGA", "Differential"}; static const char *wm8750_pga_sel[] = {"Line 1", "Line 2", "Line 3", "Differential"}; static const char *wm8750_out3[] = {"VREF", "ROUT1 + Vol", "MonoOut", "ROUT1"}; static const char *wm8750_diff_sel[] = {"Line 1", "Line 2"}; static const char *wm8750_adcpol[] = {"Normal", "L Invert", "R Invert", "L + R Invert"}; static const char *wm8750_deemph[] = {"None", "32Khz", "44.1Khz", "48Khz"}; static const char *wm8750_mono_mux[] = {"Stereo", "Mono (Left)", "Mono (Right)", "Digital Mono"}; static const struct soc_enum wm8750_enum[] = { SOC_ENUM_SINGLE(WM8750_BASS, 7, 2, wm8750_bass), SOC_ENUM_SINGLE(WM8750_BASS, 6, 2, wm8750_bass_filter), SOC_ENUM_SINGLE(WM8750_TREBLE, 6, 2, wm8750_treble), SOC_ENUM_SINGLE(WM8750_3D, 5, 2, wm8750_3d_lc), SOC_ENUM_SINGLE(WM8750_3D, 6, 2, wm8750_3d_uc), SOC_ENUM_SINGLE(WM8750_3D, 7, 2, wm8750_3d_func), SOC_ENUM_SINGLE(WM8750_ALC1, 7, 4, wm8750_alc_func), SOC_ENUM_SINGLE(WM8750_NGATE, 1, 2, wm8750_ng_type), SOC_ENUM_SINGLE(WM8750_LOUTM1, 0, 5, wm8750_line_mux), SOC_ENUM_SINGLE(WM8750_ROUTM1, 0, 5, wm8750_line_mux), SOC_ENUM_SINGLE(WM8750_LADCIN, 6, 4, wm8750_pga_sel), /* 10 */ SOC_ENUM_SINGLE(WM8750_RADCIN, 6, 4, wm8750_pga_sel), SOC_ENUM_SINGLE(WM8750_ADCTL2, 7, 4, wm8750_out3), SOC_ENUM_SINGLE(WM8750_ADCIN, 8, 2, wm8750_diff_sel), SOC_ENUM_SINGLE(WM8750_ADCDAC, 5, 4, wm8750_adcpol), SOC_ENUM_SINGLE(WM8750_ADCDAC, 1, 4, wm8750_deemph), SOC_ENUM_SINGLE(WM8750_ADCIN, 6, 4, wm8750_mono_mux), /* 16 */ }; static const struct snd_kcontrol_new wm8750_snd_controls[] = { SOC_DOUBLE_R("Capture Volume", WM8750_LINVOL, WM8750_RINVOL, 0, 
63, 0), SOC_DOUBLE_R("Capture ZC Switch", WM8750_LINVOL, WM8750_RINVOL, 6, 1, 0), SOC_DOUBLE_R("Capture Switch", WM8750_LINVOL, WM8750_RINVOL, 7, 1, 1), SOC_DOUBLE_R("Headphone Playback ZC Switch", WM8750_LOUT1V, WM8750_ROUT1V, 7, 1, 0), SOC_DOUBLE_R("Speaker Playback ZC Switch", WM8750_LOUT2V, WM8750_ROUT2V, 7, 1, 0), SOC_ENUM("Playback De-emphasis", wm8750_enum[15]), SOC_ENUM("Capture Polarity", wm8750_enum[14]), SOC_SINGLE("Playback 6dB Attenuate", WM8750_ADCDAC, 7, 1, 0), SOC_SINGLE("Capture 6dB Attenuate", WM8750_ADCDAC, 8, 1, 0), SOC_DOUBLE_R("PCM Volume", WM8750_LDAC, WM8750_RDAC, 0, 255, 0), SOC_ENUM("Bass Boost", wm8750_enum[0]), SOC_ENUM("Bass Filter", wm8750_enum[1]), SOC_SINGLE("Bass Volume", WM8750_BASS, 0, 15, 1), SOC_SINGLE("Treble Volume", WM8750_TREBLE, 0, 15, 1), SOC_ENUM("Treble Cut-off", wm8750_enum[2]), SOC_SINGLE("3D Switch", WM8750_3D, 0, 1, 0), SOC_SINGLE("3D Volume", WM8750_3D, 1, 15, 0), SOC_ENUM("3D Lower Cut-off", wm8750_enum[3]), SOC_ENUM("3D Upper Cut-off", wm8750_enum[4]), SOC_ENUM("3D Mode", wm8750_enum[5]), SOC_SINGLE("ALC Capture Target Volume", WM8750_ALC1, 0, 7, 0), SOC_SINGLE("ALC Capture Max Volume", WM8750_ALC1, 4, 7, 0), SOC_ENUM("ALC Capture Function", wm8750_enum[6]), SOC_SINGLE("ALC Capture ZC Switch", WM8750_ALC2, 7, 1, 0), SOC_SINGLE("ALC Capture Hold Time", WM8750_ALC2, 0, 15, 0), SOC_SINGLE("ALC Capture Decay Time", WM8750_ALC3, 4, 15, 0), SOC_SINGLE("ALC Capture Attack Time", WM8750_ALC3, 0, 15, 0), SOC_SINGLE("ALC Capture NG Threshold", WM8750_NGATE, 3, 31, 0), SOC_ENUM("ALC Capture NG Type", wm8750_enum[4]), SOC_SINGLE("ALC Capture NG Switch", WM8750_NGATE, 0, 1, 0), SOC_SINGLE("Left ADC Capture Volume", WM8750_LADC, 0, 255, 0), SOC_SINGLE("Right ADC Capture Volume", WM8750_RADC, 0, 255, 0), SOC_SINGLE("ZC Timeout Switch", WM8750_ADCTL1, 0, 1, 0), SOC_SINGLE("Playback Invert Switch", WM8750_ADCTL1, 1, 1, 0), SOC_SINGLE("Right Speaker Playback Invert Switch", WM8750_ADCTL2, 4, 1, 0), /* Unimplemented */ /* ADCDAC Bit 
0 - ADCHPD */ /* ADCDAC Bit 4 - HPOR */ /* ADCTL1 Bit 2,3 - DATSEL */ /* ADCTL1 Bit 4,5 - DMONOMIX */ /* ADCTL1 Bit 6,7 - VSEL */ /* ADCTL2 Bit 2 - LRCM */ /* ADCTL2 Bit 3 - TRI */ /* ADCTL3 Bit 5 - HPFLREN */ /* ADCTL3 Bit 6 - VROI */ /* ADCTL3 Bit 7,8 - ADCLRM */ /* ADCIN Bit 4 - LDCM */ /* ADCIN Bit 5 - RDCM */ SOC_DOUBLE_R("Mic Boost", WM8750_LADCIN, WM8750_RADCIN, 4, 3, 0), SOC_DOUBLE_R("Bypass Left Playback Volume", WM8750_LOUTM1, WM8750_LOUTM2, 4, 7, 1), SOC_DOUBLE_R("Bypass Right Playback Volume", WM8750_ROUTM1, WM8750_ROUTM2, 4, 7, 1), SOC_DOUBLE_R("Bypass Mono Playback Volume", WM8750_MOUTM1, WM8750_MOUTM2, 4, 7, 1), SOC_SINGLE("Mono Playback ZC Switch", WM8750_MOUTV, 7, 1, 0), SOC_DOUBLE_R("Headphone Playback Volume", WM8750_LOUT1V, WM8750_ROUT1V, 0, 127, 0), SOC_DOUBLE_R("Speaker Playback Volume", WM8750_LOUT2V, WM8750_ROUT2V, 0, 127, 0), SOC_SINGLE("Mono Playback Volume", WM8750_MOUTV, 0, 127, 0), }; /* * DAPM Controls */ /* Left Mixer */ static const struct snd_kcontrol_new wm8750_left_mixer_controls[] = { SOC_DAPM_SINGLE("Playback Switch", WM8750_LOUTM1, 8, 1, 0), SOC_DAPM_SINGLE("Left Bypass Switch", WM8750_LOUTM1, 7, 1, 0), SOC_DAPM_SINGLE("Right Playback Switch", WM8750_LOUTM2, 8, 1, 0), SOC_DAPM_SINGLE("Right Bypass Switch", WM8750_LOUTM2, 7, 1, 0), }; /* Right Mixer */ static const struct snd_kcontrol_new wm8750_right_mixer_controls[] = { SOC_DAPM_SINGLE("Left Playback Switch", WM8750_ROUTM1, 8, 1, 0), SOC_DAPM_SINGLE("Left Bypass Switch", WM8750_ROUTM1, 7, 1, 0), SOC_DAPM_SINGLE("Playback Switch", WM8750_ROUTM2, 8, 1, 0), SOC_DAPM_SINGLE("Right Bypass Switch", WM8750_ROUTM2, 7, 1, 0), }; /* Mono Mixer */ static const struct snd_kcontrol_new wm8750_mono_mixer_controls[] = { SOC_DAPM_SINGLE("Left Playback Switch", WM8750_MOUTM1, 8, 1, 0), SOC_DAPM_SINGLE("Left Bypass Switch", WM8750_MOUTM1, 7, 1, 0), SOC_DAPM_SINGLE("Right Playback Switch", WM8750_MOUTM2, 8, 1, 0), SOC_DAPM_SINGLE("Right Bypass Switch", WM8750_MOUTM2, 7, 1, 0), }; /* Left Line 
Mux */ static const struct snd_kcontrol_new wm8750_left_line_controls = SOC_DAPM_ENUM("Route", wm8750_enum[8]); /* Right Line Mux */ static const struct snd_kcontrol_new wm8750_right_line_controls = SOC_DAPM_ENUM("Route", wm8750_enum[9]); /* Left PGA Mux */ static const struct snd_kcontrol_new wm8750_left_pga_controls = SOC_DAPM_ENUM("Route", wm8750_enum[10]); /* Right PGA Mux */ static const struct snd_kcontrol_new wm8750_right_pga_controls = SOC_DAPM_ENUM("Route", wm8750_enum[11]); /* Out 3 Mux */ static const struct snd_kcontrol_new wm8750_out3_controls = SOC_DAPM_ENUM("Route", wm8750_enum[12]); /* Differential Mux */ static const struct snd_kcontrol_new wm8750_diffmux_controls = SOC_DAPM_ENUM("Route", wm8750_enum[13]); /* Mono ADC Mux */ static const struct snd_kcontrol_new wm8750_monomux_controls = SOC_DAPM_ENUM("Route", wm8750_enum[16]); static const struct snd_soc_dapm_widget wm8750_dapm_widgets[] = { SND_SOC_DAPM_MIXER("Left Mixer", SND_SOC_NOPM, 0, 0, &wm8750_left_mixer_controls[0], ARRAY_SIZE(wm8750_left_mixer_controls)), SND_SOC_DAPM_MIXER("Right Mixer", SND_SOC_NOPM, 0, 0, &wm8750_right_mixer_controls[0], ARRAY_SIZE(wm8750_right_mixer_controls)), SND_SOC_DAPM_MIXER("Mono Mixer", WM8750_PWR2, 2, 0, &wm8750_mono_mixer_controls[0], ARRAY_SIZE(wm8750_mono_mixer_controls)), SND_SOC_DAPM_PGA("Right Out 2", WM8750_PWR2, 3, 0, NULL, 0), SND_SOC_DAPM_PGA("Left Out 2", WM8750_PWR2, 4, 0, NULL, 0), SND_SOC_DAPM_PGA("Right Out 1", WM8750_PWR2, 5, 0, NULL, 0), SND_SOC_DAPM_PGA("Left Out 1", WM8750_PWR2, 6, 0, NULL, 0), SND_SOC_DAPM_DAC("Right DAC", "Right Playback", WM8750_PWR2, 7, 0), SND_SOC_DAPM_DAC("Left DAC", "Left Playback", WM8750_PWR2, 8, 0), SND_SOC_DAPM_MICBIAS("Mic Bias", WM8750_PWR1, 1, 0), SND_SOC_DAPM_ADC("Right ADC", "Right Capture", WM8750_PWR1, 2, 0), SND_SOC_DAPM_ADC("Left ADC", "Left Capture", WM8750_PWR1, 3, 0), SND_SOC_DAPM_MUX("Left PGA Mux", WM8750_PWR1, 5, 0, &wm8750_left_pga_controls), SND_SOC_DAPM_MUX("Right PGA Mux", WM8750_PWR1, 4, 0, 
&wm8750_right_pga_controls), SND_SOC_DAPM_MUX("Left Line Mux", SND_SOC_NOPM, 0, 0, &wm8750_left_line_controls), SND_SOC_DAPM_MUX("Right Line Mux", SND_SOC_NOPM, 0, 0, &wm8750_right_line_controls), SND_SOC_DAPM_MUX("Out3 Mux", SND_SOC_NOPM, 0, 0, &wm8750_out3_controls), SND_SOC_DAPM_PGA("Out 3", WM8750_PWR2, 1, 0, NULL, 0), SND_SOC_DAPM_PGA("Mono Out 1", WM8750_PWR2, 2, 0, NULL, 0), SND_SOC_DAPM_MUX("Differential Mux", SND_SOC_NOPM, 0, 0, &wm8750_diffmux_controls), SND_SOC_DAPM_MUX("Left ADC Mux", SND_SOC_NOPM, 0, 0, &wm8750_monomux_controls), SND_SOC_DAPM_MUX("Right ADC Mux", SND_SOC_NOPM, 0, 0, &wm8750_monomux_controls), SND_SOC_DAPM_OUTPUT("LOUT1"), SND_SOC_DAPM_OUTPUT("ROUT1"), SND_SOC_DAPM_OUTPUT("LOUT2"), SND_SOC_DAPM_OUTPUT("ROUT2"), SND_SOC_DAPM_OUTPUT("MONO1"), SND_SOC_DAPM_OUTPUT("OUT3"), SND_SOC_DAPM_OUTPUT("VREF"), SND_SOC_DAPM_INPUT("LINPUT1"), SND_SOC_DAPM_INPUT("LINPUT2"), SND_SOC_DAPM_INPUT("LINPUT3"), SND_SOC_DAPM_INPUT("RINPUT1"), SND_SOC_DAPM_INPUT("RINPUT2"), SND_SOC_DAPM_INPUT("RINPUT3"), }; static const struct snd_soc_dapm_route wm8750_dapm_routes[] = { /* left mixer */ {"Left Mixer", "Playback Switch", "Left DAC"}, {"Left Mixer", "Left Bypass Switch", "Left Line Mux"}, {"Left Mixer", "Right Playback Switch", "Right DAC"}, {"Left Mixer", "Right Bypass Switch", "Right Line Mux"}, /* right mixer */ {"Right Mixer", "Left Playback Switch", "Left DAC"}, {"Right Mixer", "Left Bypass Switch", "Left Line Mux"}, {"Right Mixer", "Playback Switch", "Right DAC"}, {"Right Mixer", "Right Bypass Switch", "Right Line Mux"}, /* left out 1 */ {"Left Out 1", NULL, "Left Mixer"}, {"LOUT1", NULL, "Left Out 1"}, /* left out 2 */ {"Left Out 2", NULL, "Left Mixer"}, {"LOUT2", NULL, "Left Out 2"}, /* right out 1 */ {"Right Out 1", NULL, "Right Mixer"}, {"ROUT1", NULL, "Right Out 1"}, /* right out 2 */ {"Right Out 2", NULL, "Right Mixer"}, {"ROUT2", NULL, "Right Out 2"}, /* mono mixer */ {"Mono Mixer", "Left Playback Switch", "Left DAC"}, {"Mono Mixer", "Left Bypass 
Switch", "Left Line Mux"}, {"Mono Mixer", "Right Playback Switch", "Right DAC"}, {"Mono Mixer", "Right Bypass Switch", "Right Line Mux"}, /* mono out */ {"Mono Out 1", NULL, "Mono Mixer"}, {"MONO1", NULL, "Mono Out 1"}, /* out 3 */ {"Out3 Mux", "VREF", "VREF"}, {"Out3 Mux", "ROUT1 + Vol", "ROUT1"}, {"Out3 Mux", "ROUT1", "Right Mixer"}, {"Out3 Mux", "MonoOut", "MONO1"}, {"Out 3", NULL, "Out3 Mux"}, {"OUT3", NULL, "Out 3"}, /* Left Line Mux */ {"Left Line Mux", "Line 1", "LINPUT1"}, {"Left Line Mux", "Line 2", "LINPUT2"}, {"Left Line Mux", "Line 3", "LINPUT3"}, {"Left Line Mux", "PGA", "Left PGA Mux"}, {"Left Line Mux", "Differential", "Differential Mux"}, /* Right Line Mux */ {"Right Line Mux", "Line 1", "RINPUT1"}, {"Right Line Mux", "Line 2", "RINPUT2"}, {"Right Line Mux", "Line 3", "RINPUT3"}, {"Right Line Mux", "PGA", "Right PGA Mux"}, {"Right Line Mux", "Differential", "Differential Mux"}, /* Left PGA Mux */ {"Left PGA Mux", "Line 1", "LINPUT1"}, {"Left PGA Mux", "Line 2", "LINPUT2"}, {"Left PGA Mux", "Line 3", "LINPUT3"}, {"Left PGA Mux", "Differential", "Differential Mux"}, /* Right PGA Mux */ {"Right PGA Mux", "Line 1", "RINPUT1"}, {"Right PGA Mux", "Line 2", "RINPUT2"}, {"Right PGA Mux", "Line 3", "RINPUT3"}, {"Right PGA Mux", "Differential", "Differential Mux"}, /* Differential Mux */ {"Differential Mux", "Line 1", "LINPUT1"}, {"Differential Mux", "Line 1", "RINPUT1"}, {"Differential Mux", "Line 2", "LINPUT2"}, {"Differential Mux", "Line 2", "RINPUT2"}, /* Left ADC Mux */ {"Left ADC Mux", "Stereo", "Left PGA Mux"}, {"Left ADC Mux", "Mono (Left)", "Left PGA Mux"}, {"Left ADC Mux", "Digital Mono", "Left PGA Mux"}, /* Right ADC Mux */ {"Right ADC Mux", "Stereo", "Right PGA Mux"}, {"Right ADC Mux", "Mono (Right)", "Right PGA Mux"}, {"Right ADC Mux", "Digital Mono", "Right PGA Mux"}, /* ADC */ {"Left ADC", NULL, "Left ADC Mux"}, {"Right ADC", NULL, "Right ADC Mux"}, }; struct _coeff_div { u32 mclk; u32 rate; u16 fs; u8 sr:5; u8 usb:1; }; /* codec hifi mclk 
clock divider coefficients */ static const struct _coeff_div coeff_div[] = { /* 8k */ {12288000, 8000, 1536, 0x6, 0x0}, {11289600, 8000, 1408, 0x16, 0x0}, {18432000, 8000, 2304, 0x7, 0x0}, {16934400, 8000, 2112, 0x17, 0x0}, {12000000, 8000, 1500, 0x6, 0x1}, /* 11.025k */ {11289600, 11025, 1024, 0x18, 0x0}, {16934400, 11025, 1536, 0x19, 0x0}, {12000000, 11025, 1088, 0x19, 0x1}, /* 16k */ {12288000, 16000, 768, 0xa, 0x0}, {18432000, 16000, 1152, 0xb, 0x0}, {12000000, 16000, 750, 0xa, 0x1}, /* 22.05k */ {11289600, 22050, 512, 0x1a, 0x0}, {16934400, 22050, 768, 0x1b, 0x0}, {12000000, 22050, 544, 0x1b, 0x1}, /* 32k */ {12288000, 32000, 384, 0xc, 0x0}, {18432000, 32000, 576, 0xd, 0x0}, {12000000, 32000, 375, 0xa, 0x1}, /* 44.1k */ {11289600, 44100, 256, 0x10, 0x0}, {16934400, 44100, 384, 0x11, 0x0}, {12000000, 44100, 272, 0x11, 0x1}, /* 48k */ {12288000, 48000, 256, 0x0, 0x0}, {18432000, 48000, 384, 0x1, 0x0}, {12000000, 48000, 250, 0x0, 0x1}, /* 88.2k */ {11289600, 88200, 128, 0x1e, 0x0}, {16934400, 88200, 192, 0x1f, 0x0}, {12000000, 88200, 136, 0x1f, 0x1}, /* 96k */ {12288000, 96000, 128, 0xe, 0x0}, {18432000, 96000, 192, 0xf, 0x0}, {12000000, 96000, 125, 0xe, 0x1}, }; static inline int get_coeff(int mclk, int rate) { int i; for (i = 0; i < ARRAY_SIZE(coeff_div); i++) { if (coeff_div[i].rate == rate && coeff_div[i].mclk == mclk) return i; } printk(KERN_ERR "wm8750: could not get coeff for mclk %d @ rate %d\n", mclk, rate); return -EINVAL; } static int wm8750_set_dai_sysclk(struct snd_soc_dai *codec_dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = codec_dai->codec; struct wm8750_priv *wm8750 = snd_soc_codec_get_drvdata(codec); switch (freq) { case 11289600: case 12000000: case 12288000: case 16934400: case 18432000: wm8750->sysclk = freq; return 0; } return -EINVAL; } static int wm8750_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; u16 iface = 0; /* set master/slave audio 
interface */ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: iface = 0x0040; break; case SND_SOC_DAIFMT_CBS_CFS: break; default: return -EINVAL; } /* interface format */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: iface |= 0x0002; break; case SND_SOC_DAIFMT_RIGHT_J: break; case SND_SOC_DAIFMT_LEFT_J: iface |= 0x0001; break; case SND_SOC_DAIFMT_DSP_A: iface |= 0x0003; break; case SND_SOC_DAIFMT_DSP_B: iface |= 0x0013; break; default: return -EINVAL; } /* clock inversion */ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_IB_IF: iface |= 0x0090; break; case SND_SOC_DAIFMT_IB_NF: iface |= 0x0080; break; case SND_SOC_DAIFMT_NB_IF: iface |= 0x0010; break; default: return -EINVAL; } snd_soc_write(codec, WM8750_IFACE, iface); return 0; } static int wm8750_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; struct wm8750_priv *wm8750 = snd_soc_codec_get_drvdata(codec); u16 iface = snd_soc_read(codec, WM8750_IFACE) & 0x1f3; u16 srate = snd_soc_read(codec, WM8750_SRATE) & 0x1c0; int coeff = get_coeff(wm8750->sysclk, params_rate(params)); /* bit size */ switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: break; case SNDRV_PCM_FORMAT_S20_3LE: iface |= 0x0004; break; case SNDRV_PCM_FORMAT_S24_LE: iface |= 0x0008; break; case SNDRV_PCM_FORMAT_S32_LE: iface |= 0x000c; break; } /* set iface & srate */ snd_soc_write(codec, WM8750_IFACE, iface); if (coeff >= 0) snd_soc_write(codec, WM8750_SRATE, srate | (coeff_div[coeff].sr << 1) | coeff_div[coeff].usb); return 0; } static int wm8750_mute(struct snd_soc_dai *dai, int mute) { struct snd_soc_codec *codec = dai->codec; u16 mute_reg = snd_soc_read(codec, WM8750_ADCDAC) & 0xfff7; if (mute) snd_soc_write(codec, WM8750_ADCDAC, mute_reg | 0x8); else snd_soc_write(codec, WM8750_ADCDAC, mute_reg); return 0; } static int 
wm8750_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { u16 pwr_reg = snd_soc_read(codec, WM8750_PWR1) & 0xfe3e; switch (level) { case SND_SOC_BIAS_ON: /* set vmid to 50k and unmute dac */ snd_soc_write(codec, WM8750_PWR1, pwr_reg | 0x00c0); break; case SND_SOC_BIAS_PREPARE: break; case SND_SOC_BIAS_STANDBY: if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { snd_soc_cache_sync(codec); /* Set VMID to 5k */ snd_soc_write(codec, WM8750_PWR1, pwr_reg | 0x01c1); /* ...and ramp */ msleep(1000); } /* mute dac and set vmid to 500k, enable VREF */ snd_soc_write(codec, WM8750_PWR1, pwr_reg | 0x0141); break; case SND_SOC_BIAS_OFF: snd_soc_write(codec, WM8750_PWR1, 0x0001); break; } codec->dapm.bias_level = level; return 0; } #define WM8750_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_44100 | \ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000) #define WM8750_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ SNDRV_PCM_FMTBIT_S24_LE) static const struct snd_soc_dai_ops wm8750_dai_ops = { .hw_params = wm8750_pcm_hw_params, .digital_mute = wm8750_mute, .set_fmt = wm8750_set_dai_fmt, .set_sysclk = wm8750_set_dai_sysclk, }; static struct snd_soc_dai_driver wm8750_dai = { .name = "wm8750-hifi", .playback = { .stream_name = "Playback", .channels_min = 1, .channels_max = 2, .rates = WM8750_RATES, .formats = WM8750_FORMATS,}, .capture = { .stream_name = "Capture", .channels_min = 1, .channels_max = 2, .rates = WM8750_RATES, .formats = WM8750_FORMATS,}, .ops = &wm8750_dai_ops, }; static int wm8750_suspend(struct snd_soc_codec *codec) { wm8750_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static int wm8750_resume(struct snd_soc_codec *codec) { wm8750_set_bias_level(codec, SND_SOC_BIAS_STANDBY); return 0; } static int wm8750_probe(struct snd_soc_codec *codec) { int ret; ret = snd_soc_codec_set_cache_io(codec, 7, 9, SND_SOC_REGMAP); if (ret < 0) { 
printk(KERN_ERR "wm8750: failed to set cache I/O: %d\n", ret); return ret; } ret = wm8750_reset(codec); if (ret < 0) { printk(KERN_ERR "wm8750: failed to reset: %d\n", ret); return ret; } /* charge output caps */ wm8750_set_bias_level(codec, SND_SOC_BIAS_STANDBY); /* set the update bits */ snd_soc_update_bits(codec, WM8750_LDAC, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8750_RDAC, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8750_LOUT1V, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8750_ROUT1V, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8750_LOUT2V, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8750_ROUT2V, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8750_LINVOL, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8750_RINVOL, 0x0100, 0x0100); return ret; } static int wm8750_remove(struct snd_soc_codec *codec) { wm8750_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static struct snd_soc_codec_driver soc_codec_dev_wm8750 = { .probe = wm8750_probe, .remove = wm8750_remove, .suspend = wm8750_suspend, .resume = wm8750_resume, .set_bias_level = wm8750_set_bias_level, .controls = wm8750_snd_controls, .num_controls = ARRAY_SIZE(wm8750_snd_controls), .dapm_widgets = wm8750_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(wm8750_dapm_widgets), .dapm_routes = wm8750_dapm_routes, .num_dapm_routes = ARRAY_SIZE(wm8750_dapm_routes), }; static const struct of_device_id wm8750_of_match[] = { { .compatible = "wlf,wm8750", }, { .compatible = "wlf,wm8987", }, { } }; MODULE_DEVICE_TABLE(of, wm8750_of_match); static const struct regmap_config wm8750_regmap = { .reg_bits = 7, .val_bits = 9, .max_register = WM8750_MOUTV, .reg_defaults = wm8750_reg_defaults, .num_reg_defaults = ARRAY_SIZE(wm8750_reg_defaults), .cache_type = REGCACHE_RBTREE, }; #if defined(CONFIG_SPI_MASTER) static int wm8750_spi_probe(struct spi_device *spi) { struct wm8750_priv *wm8750; struct regmap *regmap; int ret; wm8750 = devm_kzalloc(&spi->dev, sizeof(struct wm8750_priv), GFP_KERNEL); if (wm8750 == NULL) 
return -ENOMEM; regmap = devm_regmap_init_spi(spi, &wm8750_regmap); if (IS_ERR(regmap)) return PTR_ERR(regmap); spi_set_drvdata(spi, wm8750); ret = snd_soc_register_codec(&spi->dev, &soc_codec_dev_wm8750, &wm8750_dai, 1); return ret; } static int wm8750_spi_remove(struct spi_device *spi) { snd_soc_unregister_codec(&spi->dev); return 0; } static const struct spi_device_id wm8750_spi_ids[] = { { "wm8750", 0 }, { "wm8987", 0 }, { }, }; MODULE_DEVICE_TABLE(spi, wm8750_spi_ids); static struct spi_driver wm8750_spi_driver = { .driver = { .name = "wm8750", .owner = THIS_MODULE, .of_match_table = wm8750_of_match, }, .id_table = wm8750_spi_ids, .probe = wm8750_spi_probe, .remove = wm8750_spi_remove, }; #endif /* CONFIG_SPI_MASTER */ #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) static int wm8750_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct wm8750_priv *wm8750; struct regmap *regmap; int ret; wm8750 = devm_kzalloc(&i2c->dev, sizeof(struct wm8750_priv), GFP_KERNEL); if (wm8750 == NULL) return -ENOMEM; i2c_set_clientdata(i2c, wm8750); regmap = devm_regmap_init_i2c(i2c, &wm8750_regmap); if (IS_ERR(regmap)) return PTR_ERR(regmap); ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_wm8750, &wm8750_dai, 1); return ret; } static int wm8750_i2c_remove(struct i2c_client *client) { snd_soc_unregister_codec(&client->dev); return 0; } static const struct i2c_device_id wm8750_i2c_id[] = { { "wm8750", 0 }, { "wm8987", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, wm8750_i2c_id); static struct i2c_driver wm8750_i2c_driver = { .driver = { .name = "wm8750", .owner = THIS_MODULE, .of_match_table = wm8750_of_match, }, .probe = wm8750_i2c_probe, .remove = wm8750_i2c_remove, .id_table = wm8750_i2c_id, }; #endif static int __init wm8750_modinit(void) { int ret = 0; #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) ret = i2c_add_driver(&wm8750_i2c_driver); if (ret != 0) { printk(KERN_ERR "Failed to register wm8750 I2C driver: %d\n", ret); } #endif #if 
defined(CONFIG_SPI_MASTER) ret = spi_register_driver(&wm8750_spi_driver); if (ret != 0) { printk(KERN_ERR "Failed to register wm8750 SPI driver: %d\n", ret); } #endif return ret; } module_init(wm8750_modinit); static void __exit wm8750_exit(void) { #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) i2c_del_driver(&wm8750_i2c_driver); #endif #if defined(CONFIG_SPI_MASTER) spi_unregister_driver(&wm8750_spi_driver); #endif } module_exit(wm8750_exit); MODULE_DESCRIPTION("ASoC WM8750 driver"); MODULE_AUTHOR("Liam Girdwood"); MODULE_LICENSE("GPL");
gpl-2.0
BanBxda/Sense_4.3
drivers/s390/scsi/zfcp_ccw.c
3503
7210
/* * zfcp device driver * * Registration and callback for the s390 common I/O layer. * * Copyright IBM Corporation 2002, 2010 */ #define KMSG_COMPONENT "zfcp" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include "zfcp_ext.h" #include "zfcp_reqlist.h" #define ZFCP_MODEL_PRIV 0x4 static DEFINE_SPINLOCK(zfcp_ccw_adapter_ref_lock); struct zfcp_adapter *zfcp_ccw_adapter_by_cdev(struct ccw_device *cdev) { struct zfcp_adapter *adapter; unsigned long flags; spin_lock_irqsave(&zfcp_ccw_adapter_ref_lock, flags); adapter = dev_get_drvdata(&cdev->dev); if (adapter) kref_get(&adapter->ref); spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags); return adapter; } void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter) { unsigned long flags; spin_lock_irqsave(&zfcp_ccw_adapter_ref_lock, flags); kref_put(&adapter->ref, zfcp_adapter_release); spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags); } static int zfcp_ccw_activate(struct ccw_device *cdev) { struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); if (!adapter) return 0; zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, "ccresu2"); zfcp_erp_wait(adapter); flush_work(&adapter->scan_work); zfcp_ccw_adapter_put(adapter); return 0; } static struct ccw_device_id zfcp_ccw_device_id[] = { { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) }, { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, ZFCP_MODEL_PRIV) }, {}, }; MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id); /** * zfcp_ccw_priv_sch - check if subchannel is privileged * @adapter: Adapter/Subchannel to check */ int zfcp_ccw_priv_sch(struct zfcp_adapter *adapter) { return adapter->ccw_device->id.dev_model == ZFCP_MODEL_PRIV; } /** * zfcp_ccw_probe - probe function of zfcp driver * @cdev: pointer to belonging ccw device * * This function gets called by the common i/o layer for each FCP * device found on the current system. 
This is only a stub to make cio * work: To only allocate adapter resources for devices actually used, * the allocation is deferred to the first call to ccw_set_online. */ static int zfcp_ccw_probe(struct ccw_device *cdev) { return 0; } /** * zfcp_ccw_remove - remove function of zfcp driver * @cdev: pointer to belonging ccw device * * This function gets called by the common i/o layer and removes an adapter * from the system. Task of this function is to get rid of all units and * ports that belong to this adapter. And in addition all resources of this * adapter will be freed too. */ static void zfcp_ccw_remove(struct ccw_device *cdev) { struct zfcp_adapter *adapter; struct zfcp_port *port, *p; struct zfcp_unit *unit, *u; LIST_HEAD(unit_remove_lh); LIST_HEAD(port_remove_lh); ccw_device_set_offline(cdev); adapter = zfcp_ccw_adapter_by_cdev(cdev); if (!adapter) return; write_lock_irq(&adapter->port_list_lock); list_for_each_entry_safe(port, p, &adapter->port_list, list) { write_lock(&port->unit_list_lock); list_for_each_entry_safe(unit, u, &port->unit_list, list) list_move(&unit->list, &unit_remove_lh); write_unlock(&port->unit_list_lock); list_move(&port->list, &port_remove_lh); } write_unlock_irq(&adapter->port_list_lock); zfcp_ccw_adapter_put(adapter); /* put from zfcp_ccw_adapter_by_cdev */ list_for_each_entry_safe(unit, u, &unit_remove_lh, list) zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs); list_for_each_entry_safe(port, p, &port_remove_lh, list) zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs); zfcp_adapter_unregister(adapter); } /** * zfcp_ccw_set_online - set_online function of zfcp driver * @cdev: pointer to belonging ccw device * * This function gets called by the common i/o layer and sets an * adapter into state online. The first call will allocate all * adapter resources that will be retained until the device is removed * via zfcp_ccw_remove. 
* * Setting an fcp device online means that it will be registered with * the SCSI stack, that the QDIO queues will be set up and that the * adapter will be opened. */ static int zfcp_ccw_set_online(struct ccw_device *cdev) { struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); if (!adapter) { adapter = zfcp_adapter_enqueue(cdev); if (IS_ERR(adapter)) { dev_err(&cdev->dev, "Setting up data structures for the " "FCP adapter failed\n"); return PTR_ERR(adapter); } kref_get(&adapter->ref); } /* initialize request counter */ BUG_ON(!zfcp_reqlist_isempty(adapter->req_list)); adapter->req_no = 0; zfcp_ccw_activate(cdev); zfcp_ccw_adapter_put(adapter); return 0; } /** * zfcp_ccw_set_offline - set_offline function of zfcp driver * @cdev: pointer to belonging ccw device * * This function gets called by the common i/o layer and sets an adapter * into state offline. */ static int zfcp_ccw_set_offline(struct ccw_device *cdev) { struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); if (!adapter) return 0; zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1"); zfcp_erp_wait(adapter); zfcp_ccw_adapter_put(adapter); return 0; } /** * zfcp_ccw_notify - ccw notify function * @cdev: pointer to belonging ccw device * @event: indicates if adapter was detached or attached * * This function gets called by the common i/o layer if an adapter has gone * or reappeared. 
*/ static int zfcp_ccw_notify(struct ccw_device *cdev, int event) { struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); if (!adapter) return 1; switch (event) { case CIO_GONE: dev_warn(&cdev->dev, "The FCP device has been detached\n"); zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1"); break; case CIO_NO_PATH: dev_warn(&cdev->dev, "The CHPID for the FCP device is offline\n"); zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2"); break; case CIO_OPER: dev_info(&cdev->dev, "The FCP device is operational again\n"); zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, "ccnoti4"); break; case CIO_BOXED: dev_warn(&cdev->dev, "The FCP device did not respond within " "the specified time\n"); zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5"); break; } zfcp_ccw_adapter_put(adapter); return 1; } /** * zfcp_ccw_shutdown - handle shutdown from cio * @cdev: device for adapter to shutdown. */ static void zfcp_ccw_shutdown(struct ccw_device *cdev) { struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); if (!adapter) return; zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1"); zfcp_erp_wait(adapter); zfcp_erp_thread_kill(adapter); zfcp_ccw_adapter_put(adapter); } struct ccw_driver zfcp_ccw_driver = { .driver = { .owner = THIS_MODULE, .name = "zfcp", }, .ids = zfcp_ccw_device_id, .probe = zfcp_ccw_probe, .remove = zfcp_ccw_remove, .set_online = zfcp_ccw_set_online, .set_offline = zfcp_ccw_set_offline, .notify = zfcp_ccw_notify, .shutdown = zfcp_ccw_shutdown, .freeze = zfcp_ccw_set_offline, .thaw = zfcp_ccw_activate, .restore = zfcp_ccw_activate, };
gpl-2.0
victor2002/a770k_kernel
sound/core/seq/seq_system.c
4015
5379
/*
 * ALSA sequencer System services Client
 * Copyright (c) 1998-1999 by Frank van de Pol <fvdpol@coil.demon.nl>
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <sound/core.h>
#include "seq_system.h"
#include "seq_timer.h"
#include "seq_queue.h"

/* internal client that provide system services, access to timer etc. */

/*
 * Port "Timer"
 *      - send tempo /start/stop etc. events to this port to manipulate the
 *        queue's timer. The queue address is specified in
 *        data.queue.queue.
 *      - this port supports subscription. The received timer events are
 *        broadcasted to all subscribed clients. The modified tempo
 *        value is stored on data.queue.value.
 *        The modifier client/port is not send.
 *
 * Port "Announce"
 *      - does not receive message
 *      - supports supscription. For each client or port attaching to or
 *        detaching from the system an announcement is send to the subscribed
 *        clients.
 *
 * Idea: the subscription mechanism might also work handy for distributing
 * synchronisation and timing information. In this case we would ideally have
 * a list of subscribers for each type of sync (time, tick), for each timing
 * queue.
 *
 * NOTE: the queue to be started, stopped, etc. must be specified
 *       in data.queue.addr.queue field.  queue is used only for
 *       scheduling, and no longer referred as affected queue.
 *       They are used only for timer broadcast (see above).
 *                                                      -- iwai
 */

/* client id of our system client */
static int sysclient = -1;

/* port id numbers for this client */
static int announce_port = -1;

/*
 * fill standard header data, source port & channel are filled in.
 * Returns 0 on success, -ENODEV while the announce port has not been
 * created yet (i.e. during early init or after shutdown).
 */
static int setheader(struct snd_seq_event *ev, int client, int port)
{
	if (announce_port < 0)
		return -ENODEV;

	memset(ev, 0, sizeof(struct snd_seq_event));
	ev->flags &= ~SNDRV_SEQ_EVENT_LENGTH_MASK;
	ev->flags |= SNDRV_SEQ_EVENT_LENGTH_FIXED;
	ev->source.client = sysclient;
	ev->source.port = announce_port;
	ev->dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;

	/* fill data */
	/*ev->data.addr.queue = SNDRV_SEQ_ADDRESS_UNKNOWN;*/
	ev->data.addr.client = client;
	ev->data.addr.port = port;

	return 0;
}


/* entry points for broadcasting system events */
void snd_seq_system_broadcast(int client, int port, int type)
{
	struct snd_seq_event ev;

	if (setheader(&ev, client, port) < 0)
		return;
	ev.type = type;
	snd_seq_kernel_client_dispatch(sysclient, &ev, 0, 0);
}

/* entry points for broadcasting system events */
int snd_seq_system_notify(int client, int port, struct snd_seq_event *ev)
{
	ev->flags = SNDRV_SEQ_EVENT_LENGTH_FIXED;
	ev->source.client = sysclient;
	ev->source.port = announce_port;
	ev->dest.client = client;
	ev->dest.port = port;
	return snd_seq_kernel_client_dispatch(sysclient, ev, 0, 0);
}

/* call-back handler for timer events */
static int event_input_timer(struct snd_seq_event *ev, int direct,
			     void *private_data, int atomic, int hop)
{
	return snd_seq_control_queue(ev, atomic, hop);
}

/*
 * register our internal client, the "Timer" control port and the
 * "Announce" broadcast port.
 *
 * Returns 0 on success or a negative error code.
 */
int __init snd_seq_system_client_init(void)
{
	struct snd_seq_port_callback pcallbacks;
	struct snd_seq_port_info *port;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	memset(&pcallbacks, 0, sizeof(pcallbacks));
	pcallbacks.owner = THIS_MODULE;
	pcallbacks.event_input = event_input_timer;

	/* register client */
	sysclient = snd_seq_create_kernel_client(NULL, 0, "System");
	if (sysclient < 0) {
		/* Bug fix: the original ignored this failure and went on
		 * to pass a negative client id to the port-creation
		 * ioctls below.  Bail out instead. */
		kfree(port);
		return sysclient;
	}

	/* register timer */
	strcpy(port->name, "Timer");
	port->capability = SNDRV_SEQ_PORT_CAP_WRITE; /* accept queue control */
	port->capability |= SNDRV_SEQ_PORT_CAP_READ|SNDRV_SEQ_PORT_CAP_SUBS_READ; /* for broadcast */
	port->kernel = &pcallbacks;
	port->type = 0;
	port->flags = SNDRV_SEQ_PORT_FLG_GIVEN_PORT;
	port->addr.client = sysclient;
	port->addr.port = SNDRV_SEQ_PORT_SYSTEM_TIMER;
	snd_seq_kernel_client_ctl(sysclient, SNDRV_SEQ_IOCTL_CREATE_PORT, port);

	/* register announcement port */
	strcpy(port->name, "Announce");
	port->capability = SNDRV_SEQ_PORT_CAP_READ|SNDRV_SEQ_PORT_CAP_SUBS_READ; /* for broadcast only */
	port->kernel = NULL;
	port->type = 0;
	port->flags = SNDRV_SEQ_PORT_FLG_GIVEN_PORT;
	port->addr.client = sysclient;
	port->addr.port = SNDRV_SEQ_PORT_SYSTEM_ANNOUNCE;
	snd_seq_kernel_client_ctl(sysclient, SNDRV_SEQ_IOCTL_CREATE_PORT, port);
	announce_port = port->addr.port;

	kfree(port);
	return 0;
}


/* unregister our internal client */
void __exit snd_seq_system_client_done(void)
{
	int oldsysclient = sysclient;

	if (oldsysclient >= 0) {
		sysclient = -1;
		announce_port = -1;
		snd_seq_delete_kernel_client(oldsysclient);
	}
}
gpl-2.0
VincenzoDo/my-kernel
net/netfilter/xt_NETMAP.c
4527
4795
/*
 * (C) 2000-2001 Svenning Soerensen <svenning@post5.tele.dk>
 * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_nat.h>

/*
 * IPv6 NETMAP target: statically map one subnet onto another by keeping
 * the host part of the address and replacing the network part with the
 * configured range.  The netmask is derived from min_addr/max_addr: bits
 * that agree in both form the network part.
 */
static unsigned int
netmap_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct nf_nat_range *range = par->targinfo;
	struct nf_nat_range newrange;
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	union nf_inet_addr new_addr, netmask;
	unsigned int i;

	ct = nf_ct_get(skb, &ctinfo);
	for (i = 0; i < ARRAY_SIZE(range->min_addr.ip6); i++)
		netmask.ip6[i] = ~(range->min_addr.ip6[i] ^
				   range->max_addr.ip6[i]);

	/* PREROUTING/OUTPUT remap the destination, otherwise the source */
	if (par->hooknum == NF_INET_PRE_ROUTING ||
	    par->hooknum == NF_INET_LOCAL_OUT)
		new_addr.in6 = ipv6_hdr(skb)->daddr;
	else
		new_addr.in6 = ipv6_hdr(skb)->saddr;

	for (i = 0; i < ARRAY_SIZE(new_addr.ip6); i++) {
		new_addr.ip6[i] &= ~netmask.ip6[i];
		new_addr.ip6[i] |= range->min_addr.ip6[i] &
				   netmask.ip6[i];
	}

	newrange.flags	= range->flags | NF_NAT_RANGE_MAP_IPS;
	newrange.min_addr	= new_addr;
	newrange.max_addr	= new_addr;
	newrange.min_proto	= range->min_proto;
	newrange.max_proto	= range->max_proto;

	return nf_nat_setup_info(ct, &newrange, HOOK2MANIP(par->hooknum));
}

/* reject rules that do not actually request address mapping */
static int netmap_tg6_checkentry(const struct xt_tgchk_param *par)
{
	const struct nf_nat_range *range = par->targinfo;

	if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
		return -EINVAL;
	return 0;
}

/*
 * IPv4 NETMAP target: same 1:1 subnet mapping as netmap_tg6, using the
 * legacy multi-range compat layout (only range[0] is honoured).
 */
static unsigned int
netmap_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	__be32 new_ip, netmask;
	const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
	struct nf_nat_range newrange;

	NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
		     par->hooknum == NF_INET_POST_ROUTING ||
		     par->hooknum == NF_INET_LOCAL_OUT ||
		     par->hooknum == NF_INET_LOCAL_IN);
	ct = nf_ct_get(skb, &ctinfo);

	netmask = ~(mr->range[0].min_ip ^ mr->range[0].max_ip);

	/* PREROUTING/OUTPUT remap the destination, otherwise the source */
	if (par->hooknum == NF_INET_PRE_ROUTING ||
	    par->hooknum == NF_INET_LOCAL_OUT)
		new_ip = ip_hdr(skb)->daddr & ~netmask;
	else
		new_ip = ip_hdr(skb)->saddr & ~netmask;
	new_ip |= mr->range[0].min_ip & netmask;

	memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
	memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
	newrange.flags	= mr->range[0].flags | NF_NAT_RANGE_MAP_IPS;
	newrange.min_addr.ip = new_ip;
	newrange.max_addr.ip = new_ip;
	newrange.min_proto	= mr->range[0].min;
	newrange.max_proto	= mr->range[0].max;

	/* Hand modified range to generic setup. */
	return nf_nat_setup_info(ct, &newrange, HOOK2MANIP(par->hooknum));
}

/* validate the compat layout: MAP_IPS must be set, exactly one range */
static int netmap_tg4_check(const struct xt_tgchk_param *par)
{
	const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;

	if (!(mr->range[0].flags & NF_NAT_RANGE_MAP_IPS)) {
		pr_debug("bad MAP_IPS.\n");
		return -EINVAL;
	}
	if (mr->rangesize != 1) {
		pr_debug("bad rangesize %u.\n", mr->rangesize);
		return -EINVAL;
	}
	return 0;
}

static struct xt_target netmap_tg_reg[] __read_mostly = {
	{
		.name		= "NETMAP",
		.family		= NFPROTO_IPV6,
		.revision	= 0,
		.target		= netmap_tg6,
		.targetsize	= sizeof(struct nf_nat_range),
		.table		= "nat",
		.hooks		= (1 << NF_INET_PRE_ROUTING) |
				  (1 << NF_INET_POST_ROUTING) |
				  (1 << NF_INET_LOCAL_OUT) |
				  (1 << NF_INET_LOCAL_IN),
		.checkentry	= netmap_tg6_checkentry,
		.me		= THIS_MODULE,
	},
	{
		.name		= "NETMAP",
		.family		= NFPROTO_IPV4,
		.revision	= 0,
		.target		= netmap_tg4,
		.targetsize	= sizeof(struct nf_nat_ipv4_multi_range_compat),
		.table		= "nat",
		.hooks		= (1 << NF_INET_PRE_ROUTING) |
				  (1 << NF_INET_POST_ROUTING) |
				  (1 << NF_INET_LOCAL_OUT) |
				  (1 << NF_INET_LOCAL_IN),
		.checkentry	= netmap_tg4_check,
		.me		= THIS_MODULE,
	},
};

static int __init netmap_tg_init(void)
{
	return xt_register_targets(netmap_tg_reg, ARRAY_SIZE(netmap_tg_reg));
}

static void netmap_tg_exit(void)
{
	xt_unregister_targets(netmap_tg_reg, ARRAY_SIZE(netmap_tg_reg));
}

module_init(netmap_tg_init);
module_exit(netmap_tg_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Xtables: 1:1 NAT mapping of subnets");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_ALIAS("ip6t_NETMAP");
MODULE_ALIAS("ipt_NETMAP");
gpl-2.0
philenotfound/beagleboneblack-kernel
net/netfilter/xt_NETMAP.c
4527
4795
/*
 * (C) 2000-2001 Svenning Soerensen <svenning@post5.tele.dk>
 * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_nat.h>

/*
 * IPv6 NETMAP: 1:1 subnet mapping.  Address bits where min_addr and
 * max_addr agree are treated as the network part and replaced; the
 * remaining (host) bits of the packet's address are preserved.
 */
static unsigned int
netmap_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct nf_nat_range *range = par->targinfo;
	struct nf_nat_range newrange;
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	union nf_inet_addr new_addr, netmask;
	unsigned int i;

	ct = nf_ct_get(skb, &ctinfo);
	for (i = 0; i < ARRAY_SIZE(range->min_addr.ip6); i++)
		netmask.ip6[i] = ~(range->min_addr.ip6[i] ^
				   range->max_addr.ip6[i]);

	/* destination NAT in PREROUTING/OUTPUT, source NAT elsewhere */
	if (par->hooknum == NF_INET_PRE_ROUTING ||
	    par->hooknum == NF_INET_LOCAL_OUT)
		new_addr.in6 = ipv6_hdr(skb)->daddr;
	else
		new_addr.in6 = ipv6_hdr(skb)->saddr;

	for (i = 0; i < ARRAY_SIZE(new_addr.ip6); i++) {
		new_addr.ip6[i] &= ~netmask.ip6[i];
		new_addr.ip6[i] |= range->min_addr.ip6[i] &
				   netmask.ip6[i];
	}

	newrange.flags	= range->flags | NF_NAT_RANGE_MAP_IPS;
	newrange.min_addr	= new_addr;
	newrange.max_addr	= new_addr;
	newrange.min_proto	= range->min_proto;
	newrange.max_proto	= range->max_proto;

	return nf_nat_setup_info(ct, &newrange, HOOK2MANIP(par->hooknum));
}

/* a NETMAP rule without NF_NAT_RANGE_MAP_IPS is meaningless */
static int netmap_tg6_checkentry(const struct xt_tgchk_param *par)
{
	const struct nf_nat_range *range = par->targinfo;

	if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
		return -EINVAL;
	return 0;
}

/* IPv4 NETMAP using the legacy multi-range compat target layout */
static unsigned int
netmap_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	__be32 new_ip, netmask;
	const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
	struct nf_nat_range newrange;

	NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
		     par->hooknum == NF_INET_POST_ROUTING ||
		     par->hooknum == NF_INET_LOCAL_OUT ||
		     par->hooknum == NF_INET_LOCAL_IN);
	ct = nf_ct_get(skb, &ctinfo);

	netmask = ~(mr->range[0].min_ip ^ mr->range[0].max_ip);

	/* destination NAT in PREROUTING/OUTPUT, source NAT elsewhere */
	if (par->hooknum == NF_INET_PRE_ROUTING ||
	    par->hooknum == NF_INET_LOCAL_OUT)
		new_ip = ip_hdr(skb)->daddr & ~netmask;
	else
		new_ip = ip_hdr(skb)->saddr & ~netmask;
	new_ip |= mr->range[0].min_ip & netmask;

	memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
	memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
	newrange.flags	= mr->range[0].flags | NF_NAT_RANGE_MAP_IPS;
	newrange.min_addr.ip = new_ip;
	newrange.max_addr.ip = new_ip;
	newrange.min_proto	= mr->range[0].min;
	newrange.max_proto	= mr->range[0].max;

	/* Hand modified range to generic setup. */
	return nf_nat_setup_info(ct, &newrange, HOOK2MANIP(par->hooknum));
}

/* sanity-check the compat layout: MAP_IPS set, exactly one range */
static int netmap_tg4_check(const struct xt_tgchk_param *par)
{
	const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;

	if (!(mr->range[0].flags & NF_NAT_RANGE_MAP_IPS)) {
		pr_debug("bad MAP_IPS.\n");
		return -EINVAL;
	}
	if (mr->rangesize != 1) {
		pr_debug("bad rangesize %u.\n", mr->rangesize);
		return -EINVAL;
	}
	return 0;
}

static struct xt_target netmap_tg_reg[] __read_mostly = {
	{
		.name		= "NETMAP",
		.family		= NFPROTO_IPV6,
		.revision	= 0,
		.target		= netmap_tg6,
		.targetsize	= sizeof(struct nf_nat_range),
		.table		= "nat",
		.hooks		= (1 << NF_INET_PRE_ROUTING) |
				  (1 << NF_INET_POST_ROUTING) |
				  (1 << NF_INET_LOCAL_OUT) |
				  (1 << NF_INET_LOCAL_IN),
		.checkentry	= netmap_tg6_checkentry,
		.me		= THIS_MODULE,
	},
	{
		.name		= "NETMAP",
		.family		= NFPROTO_IPV4,
		.revision	= 0,
		.target		= netmap_tg4,
		.targetsize	= sizeof(struct nf_nat_ipv4_multi_range_compat),
		.table		= "nat",
		.hooks		= (1 << NF_INET_PRE_ROUTING) |
				  (1 << NF_INET_POST_ROUTING) |
				  (1 << NF_INET_LOCAL_OUT) |
				  (1 << NF_INET_LOCAL_IN),
		.checkentry	= netmap_tg4_check,
		.me		= THIS_MODULE,
	},
};

static int __init netmap_tg_init(void)
{
	return xt_register_targets(netmap_tg_reg, ARRAY_SIZE(netmap_tg_reg));
}

static void netmap_tg_exit(void)
{
	xt_unregister_targets(netmap_tg_reg, ARRAY_SIZE(netmap_tg_reg));
}

module_init(netmap_tg_init);
module_exit(netmap_tg_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Xtables: 1:1 NAT mapping of subnets");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_ALIAS("ip6t_NETMAP");
MODULE_ALIAS("ipt_NETMAP");
gpl-2.0
garwynn/D710VMUB_FJ12_Kernel
ipc/ipcns_notifier.c
13231
2265
/* * linux/ipc/ipcns_notifier.c * Copyright (C) 2007 BULL SA. Nadia Derbey * * Notification mechanism for ipc namespaces: * The callback routine registered in the memory chain invokes the ipcns * notifier chain with the IPCNS_MEMCHANGED event. * Each callback routine registered in the ipcns namespace recomputes msgmni * for the owning namespace. */ #include <linux/msg.h> #include <linux/rcupdate.h> #include <linux/notifier.h> #include <linux/nsproxy.h> #include <linux/ipc_namespace.h> #include "util.h" static BLOCKING_NOTIFIER_HEAD(ipcns_chain); static int ipcns_callback(struct notifier_block *self, unsigned long action, void *arg) { struct ipc_namespace *ns; switch (action) { case IPCNS_MEMCHANGED: /* amount of lowmem has changed */ case IPCNS_CREATED: case IPCNS_REMOVED: /* * It's time to recompute msgmni */ ns = container_of(self, struct ipc_namespace, ipcns_nb); /* * No need to get a reference on the ns: the 1st job of * free_ipc_ns() is to unregister the callback routine. * blocking_notifier_chain_unregister takes the wr lock to do * it. * When this callback routine is called the rd lock is held by * blocking_notifier_call_chain. * So the ipc ns cannot be freed while we are here. 
*/ recompute_msgmni(ns); break; default: break; } return NOTIFY_OK; } int register_ipcns_notifier(struct ipc_namespace *ns) { int rc; memset(&ns->ipcns_nb, 0, sizeof(ns->ipcns_nb)); ns->ipcns_nb.notifier_call = ipcns_callback; ns->ipcns_nb.priority = IPCNS_CALLBACK_PRI; rc = blocking_notifier_chain_register(&ipcns_chain, &ns->ipcns_nb); if (!rc) ns->auto_msgmni = 1; return rc; } int cond_register_ipcns_notifier(struct ipc_namespace *ns) { int rc; memset(&ns->ipcns_nb, 0, sizeof(ns->ipcns_nb)); ns->ipcns_nb.notifier_call = ipcns_callback; ns->ipcns_nb.priority = IPCNS_CALLBACK_PRI; rc = blocking_notifier_chain_cond_register(&ipcns_chain, &ns->ipcns_nb); if (!rc) ns->auto_msgmni = 1; return rc; } void unregister_ipcns_notifier(struct ipc_namespace *ns) { blocking_notifier_chain_unregister(&ipcns_chain, &ns->ipcns_nb); ns->auto_msgmni = 0; } int ipcns_notify(unsigned long val) { return blocking_notifier_call_chain(&ipcns_chain, val, NULL); }
gpl-2.0
Constellation/linux-3.6.5
drivers/watchdog/wm831x_wdt.c
176
7802
/*
 * Watchdog driver for the wm831x PMICs
 *
 * Copyright (C) 2009 Wolfson Microelectronics
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
#include <linux/uaccess.h>
#include <linux/gpio.h>

#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/pdata.h>
#include <linux/mfd/wm831x/watchdog.h>

static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
		 "Watchdog cannot be stopped once started (default="
		 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");

/* per-device state; lock serializes register unlock/write/lock sequences */
struct wm831x_wdt_drvdata {
	struct watchdog_device wdt;
	struct wm831x *wm831x;
	struct mutex lock;
	int update_gpio;	/* GPIO used for hardware pings, 0 if unused */
	int update_state;	/* next level to drive on update_gpio */
};

/* We can't use the sub-second values here but they're included
 * for completeness.  */
static struct {
	unsigned int time;  /* Seconds */
	u16 val;            /* WDOG_TO value */
} wm831x_wdt_cfgs[] = {
	{  1, 2 },
	{  2, 3 },
	{  4, 4 },
	{  8, 5 },
	{ 16, 6 },
	{ 32, 7 },
	{ 33, 7 },  /* Actually 32.768s so include both, others round down */
};

/* Enable the watchdog; requires the security-key unlock around the write. */
static int wm831x_wdt_start(struct watchdog_device *wdt_dev)
{
	struct wm831x_wdt_drvdata *driver_data = watchdog_get_drvdata(wdt_dev);
	struct wm831x *wm831x = driver_data->wm831x;
	int ret;

	mutex_lock(&driver_data->lock);

	ret = wm831x_reg_unlock(wm831x);
	if (ret == 0) {
		ret = wm831x_set_bits(wm831x, WM831X_WATCHDOG,
				      WM831X_WDOG_ENA, WM831X_WDOG_ENA);
		wm831x_reg_lock(wm831x);
	} else {
		dev_err(wm831x->dev, "Failed to unlock security key: %d\n",
			ret);
	}

	mutex_unlock(&driver_data->lock);

	return ret;
}

/* Disable the watchdog (clear the enable bit under the security key). */
static int wm831x_wdt_stop(struct watchdog_device *wdt_dev)
{
	struct wm831x_wdt_drvdata *driver_data = watchdog_get_drvdata(wdt_dev);
	struct wm831x *wm831x = driver_data->wm831x;
	int ret;

	mutex_lock(&driver_data->lock);

	ret = wm831x_reg_unlock(wm831x);
	if (ret == 0) {
		ret = wm831x_set_bits(wm831x, WM831X_WATCHDOG,
				      WM831X_WDOG_ENA, 0);
		wm831x_reg_lock(wm831x);
	} else {
		dev_err(wm831x->dev, "Failed to unlock security key: %d\n",
			ret);
	}

	mutex_unlock(&driver_data->lock);

	return ret;
}

/*
 * Ping the watchdog.  Prefers the hardware update GPIO (toggled each
 * call); otherwise writes WDOG_RESET, which is only valid when the
 * chip reports software updates are supported (WDOG_RST_SRC).
 */
static int wm831x_wdt_ping(struct watchdog_device *wdt_dev)
{
	struct wm831x_wdt_drvdata *driver_data = watchdog_get_drvdata(wdt_dev);
	struct wm831x *wm831x = driver_data->wm831x;
	int ret;
	u16 reg;

	mutex_lock(&driver_data->lock);

	if (driver_data->update_gpio) {
		gpio_set_value_cansleep(driver_data->update_gpio,
					driver_data->update_state);
		driver_data->update_state = !driver_data->update_state;
		ret = 0;
		goto out;
	}

	reg = wm831x_reg_read(wm831x, WM831X_WATCHDOG);

	if (!(reg & WM831X_WDOG_RST_SRC)) {
		dev_err(wm831x->dev, "Hardware watchdog update unsupported\n");
		ret = -EINVAL;
		goto out;
	}

	reg |= WM831X_WDOG_RESET;

	ret = wm831x_reg_unlock(wm831x);
	if (ret == 0) {
		ret = wm831x_reg_write(wm831x, WM831X_WATCHDOG, reg);
		wm831x_reg_lock(wm831x);
	} else {
		dev_err(wm831x->dev, "Failed to unlock security key: %d\n",
			ret);
	}

out:
	mutex_unlock(&driver_data->lock);

	return ret;
}

/*
 * Set the timeout; only the discrete values in wm831x_wdt_cfgs are
 * supported, anything else returns -EINVAL.
 */
static int wm831x_wdt_set_timeout(struct watchdog_device *wdt_dev,
				  unsigned int timeout)
{
	struct wm831x_wdt_drvdata *driver_data = watchdog_get_drvdata(wdt_dev);
	struct wm831x *wm831x = driver_data->wm831x;
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(wm831x_wdt_cfgs); i++)
		if (wm831x_wdt_cfgs[i].time == timeout)
			break;
	if (i == ARRAY_SIZE(wm831x_wdt_cfgs))
		return -EINVAL;

	ret = wm831x_reg_unlock(wm831x);
	if (ret == 0) {
		ret = wm831x_set_bits(wm831x, WM831X_WATCHDOG,
				      WM831X_WDOG_TO_MASK,
				      wm831x_wdt_cfgs[i].val);
		wm831x_reg_lock(wm831x);
	} else {
		dev_err(wm831x->dev, "Failed to unlock security key: %d\n",
			ret);
	}

	wdt_dev->timeout = timeout;

	return ret;
}

static const struct watchdog_info wm831x_wdt_info = {
	.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
	.identity = "WM831x Watchdog",
};

static const struct watchdog_ops wm831x_wdt_ops = {
	.owner = THIS_MODULE,
	.start = wm831x_wdt_start,
	.stop = wm831x_wdt_stop,
	.ping = wm831x_wdt_ping,
	.set_timeout = wm831x_wdt_set_timeout,
};

/*
 * Probe: read current watchdog state, derive the active timeout from the
 * WDOG_TO field, apply optional platform-data configuration (including an
 * update GPIO), and register with the watchdog core.
 */
static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
{
	struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
	struct wm831x_pdata *chip_pdata;
	struct wm831x_watchdog_pdata *pdata;
	struct wm831x_wdt_drvdata *driver_data;
	struct watchdog_device *wm831x_wdt;
	int reg, ret, i;

	ret = wm831x_reg_read(wm831x, WM831X_WATCHDOG);
	if (ret < 0) {
		dev_err(wm831x->dev, "Failed to read watchdog status: %d\n",
			ret);
		goto err;
	}
	reg = ret;

	if (reg & WM831X_WDOG_DEBUG)
		dev_warn(wm831x->dev, "Watchdog is paused\n");

	driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data),
				   GFP_KERNEL);
	if (!driver_data) {
		dev_err(wm831x->dev, "Unable to alloacate watchdog device\n");
		ret = -ENOMEM;
		goto err;
	}

	mutex_init(&driver_data->lock);
	driver_data->wm831x = wm831x;

	wm831x_wdt = &driver_data->wdt;

	wm831x_wdt->info = &wm831x_wdt_info;
	wm831x_wdt->ops = &wm831x_wdt_ops;
	watchdog_set_nowayout(wm831x_wdt, nowayout);
	watchdog_set_drvdata(wm831x_wdt, driver_data);

	/* map the current WDOG_TO field back to a timeout in seconds */
	reg = wm831x_reg_read(wm831x, WM831X_WATCHDOG);
	reg &= WM831X_WDOG_TO_MASK;
	for (i = 0; i < ARRAY_SIZE(wm831x_wdt_cfgs); i++)
		if (wm831x_wdt_cfgs[i].val == reg)
			break;
	if (i == ARRAY_SIZE(wm831x_wdt_cfgs))
		dev_warn(wm831x->dev,
			 "Unknown watchdog timeout: %x\n", reg);
	else
		wm831x_wdt->timeout = wm831x_wdt_cfgs[i].time;

	/* Apply any configuration */
	if (pdev->dev.parent->platform_data) {
		chip_pdata = pdev->dev.parent->platform_data;
		pdata = chip_pdata->watchdog;
	} else {
		pdata = NULL;
	}

	if (pdata) {
		reg &= ~(WM831X_WDOG_SECACT_MASK | WM831X_WDOG_PRIMACT_MASK |
			 WM831X_WDOG_RST_SRC);
		reg |= pdata->primary << WM831X_WDOG_PRIMACT_SHIFT;
		reg |= pdata->secondary << WM831X_WDOG_SECACT_SHIFT;
		reg |= pdata->software << WM831X_WDOG_RST_SRC_SHIFT;

		if (pdata->update_gpio) {
			ret = gpio_request_one(pdata->update_gpio,
					       GPIOF_DIR_OUT | GPIOF_INIT_LOW,
					       "Watchdog update");
			if (ret < 0) {
				dev_err(wm831x->dev,
					"Failed to request update GPIO: %d\n",
					ret);
				goto err;
			}

			driver_data->update_gpio = pdata->update_gpio;

			/* Make sure the watchdog takes hardware updates */
			reg |= WM831X_WDOG_RST_SRC;
		}

		ret = wm831x_reg_unlock(wm831x);
		if (ret == 0) {
			ret = wm831x_reg_write(wm831x, WM831X_WATCHDOG, reg);
			wm831x_reg_lock(wm831x);
		} else {
			dev_err(wm831x->dev,
				"Failed to unlock security key: %d\n", ret);
			goto err_gpio;
		}
	}

	ret = watchdog_register_device(&driver_data->wdt);
	if (ret != 0) {
		dev_err(wm831x->dev, "watchdog_register_device() failed: %d\n",
			ret);
		goto err_gpio;
	}

	dev_set_drvdata(&pdev->dev, driver_data);

	return 0;

err_gpio:
	if (driver_data->update_gpio)
		gpio_free(driver_data->update_gpio);
err:
	return ret;
}

/* Unregister the watchdog and release the update GPIO, if any. */
static int __devexit wm831x_wdt_remove(struct platform_device *pdev)
{
	struct wm831x_wdt_drvdata *driver_data = dev_get_drvdata(&pdev->dev);

	watchdog_unregister_device(&driver_data->wdt);

	if (driver_data->update_gpio)
		gpio_free(driver_data->update_gpio);

	return 0;
}

static struct platform_driver wm831x_wdt_driver = {
	.probe = wm831x_wdt_probe,
	.remove = __devexit_p(wm831x_wdt_remove),
	.driver = {
		.name = "wm831x-watchdog",
	},
};

module_platform_driver(wm831x_wdt_driver);

MODULE_AUTHOR("Mark Brown");
MODULE_DESCRIPTION("WM831x Watchdog");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm831x-watchdog");
gpl-2.0
milokim/linux
drivers/cpuidle/coupled.c
176
26601
/* * coupled.c - helper functions to enter the same idle state on multiple cpus * * Copyright (c) 2011 Google, Inc. * * Author: Colin Cross <ccross@android.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include <linux/kernel.h> #include <linux/cpu.h> #include <linux/cpuidle.h> #include <linux/mutex.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/spinlock.h> #include "cpuidle.h" /** * DOC: Coupled cpuidle states * * On some ARM SMP SoCs (OMAP4460, Tegra 2, and probably more), the * cpus cannot be independently powered down, either due to * sequencing restrictions (on Tegra 2, cpu 0 must be the last to * power down), or due to HW bugs (on OMAP4460, a cpu powering up * will corrupt the gic state unless the other cpu runs a work * around). Each cpu has a power state that it can enter without * coordinating with the other cpu (usually Wait For Interrupt, or * WFI), and one or more "coupled" power states that affect blocks * shared between the cpus (L2 cache, interrupt controller, and * sometimes the whole SoC). Entering a coupled power state must * be tightly controlled on both cpus. * * This file implements a solution, where each cpu will wait in the * WFI state until all cpus are ready to enter a coupled state, at * which point the coupled state function will be called on all * cpus at approximately the same time. * * Once all cpus are ready to enter idle, they are woken by an smp * cross call. At this point, there is a chance that one of the * cpus will find work to do, and choose not to enter idle. 
A * final pass is needed to guarantee that all cpus will call the * power state enter function at the same time. During this pass, * each cpu will increment the ready counter, and continue once the * ready counter matches the number of online coupled cpus. If any * cpu exits idle, the other cpus will decrement their counter and * retry. * * requested_state stores the deepest coupled idle state each cpu * is ready for. It is assumed that the states are indexed from * shallowest (highest power, lowest exit latency) to deepest * (lowest power, highest exit latency). The requested_state * variable is not locked. It is only written from the cpu that * it stores (or by the on/offlining cpu if that cpu is offline), * and only read after all the cpus are ready for the coupled idle * state are are no longer updating it. * * Three atomic counters are used. alive_count tracks the number * of cpus in the coupled set that are currently or soon will be * online. waiting_count tracks the number of cpus that are in * the waiting loop, in the ready loop, or in the coupled idle state. * ready_count tracks the number of cpus that are in the ready loop * or in the coupled idle state. * * To use coupled cpuidle states, a cpuidle driver must: * * Set struct cpuidle_device.coupled_cpus to the mask of all * coupled cpus, usually the same as cpu_possible_mask if all cpus * are part of the same cluster. The coupled_cpus mask must be * set in the struct cpuidle_device for each cpu. * * Set struct cpuidle_device.safe_state to a state that is not a * coupled state. This is usually WFI. * * Set CPUIDLE_FLAG_COUPLED in struct cpuidle_state.flags for each * state that affects multiple cpus. * * Provide a struct cpuidle_state.enter function for each state * that affects multiple cpus. This function is guaranteed to be * called on all cpus at approximately the same time. The driver * should ensure that the cpus all abort together if any cpu tries * to abort once the function is called. 
The function should return * with interrupts still disabled. */ /** * struct cpuidle_coupled - data for set of cpus that share a coupled idle state * @coupled_cpus: mask of cpus that are part of the coupled set * @requested_state: array of requested states for cpus in the coupled set * @ready_waiting_counts: combined count of cpus in ready or waiting loops * @online_count: count of cpus that are online * @refcnt: reference count of cpuidle devices that are using this struct * @prevent: flag to prevent coupled idle while a cpu is hotplugging */ struct cpuidle_coupled { cpumask_t coupled_cpus; int requested_state[NR_CPUS]; atomic_t ready_waiting_counts; atomic_t abort_barrier; int online_count; int refcnt; int prevent; }; #define WAITING_BITS 16 #define MAX_WAITING_CPUS (1 << WAITING_BITS) #define WAITING_MASK (MAX_WAITING_CPUS - 1) #define READY_MASK (~WAITING_MASK) #define CPUIDLE_COUPLED_NOT_IDLE (-1) static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb); /* * The cpuidle_coupled_poke_pending mask is used to avoid calling * __smp_call_function_single with the per cpu call_single_data struct already * in use. This prevents a deadlock where two cpus are waiting for each others * call_single_data struct to be available */ static cpumask_t cpuidle_coupled_poke_pending; /* * The cpuidle_coupled_poked mask is used to ensure that each cpu has been poked * once to minimize entering the ready loop with a poke pending, which would * require aborting and retrying. */ static cpumask_t cpuidle_coupled_poked; /** * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus * @dev: cpuidle_device of the calling cpu * @a: atomic variable to hold the barrier * * No caller to this function will return from this function until all online * cpus in the same coupled group have called this function. Once any caller * has returned from this function, the barrier is immediately available for * reuse. 
 *
 * The atomic variable must be initialized to 0 before any cpu calls
 * this function, will be reset to 0 before any cpu returns from this function.
 *
 * Must only be called from within a coupled idle state handler
 * (state.enter when state.flags has CPUIDLE_FLAG_COUPLED set).
 *
 * Provides full smp barrier semantics before and after calling.
 */
void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
{
	/* Number of online cpus that must reach the barrier. */
	int n = dev->coupled->online_count;

	smp_mb__before_atomic();
	atomic_inc(a);

	/* Phase one: wait until every coupled cpu has incremented a. */
	while (atomic_read(a) < n)
		cpu_relax();

	/* Phase two: the last cpu through resets the barrier for reuse. */
	if (atomic_inc_return(a) == n * 2) {
		atomic_set(a, 0);
		return;
	}

	while (atomic_read(a) > n)
		cpu_relax();
}

/**
 * cpuidle_state_is_coupled - check if a state is part of a coupled set
 * @drv: struct cpuidle_driver for the platform
 * @state: index of the target state in drv->states
 *
 * Returns true if the target state is coupled with cpus besides this one
 */
bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state)
{
	return drv->states[state].flags & CPUIDLE_FLAG_COUPLED;
}

/**
 * cpuidle_coupled_state_verify - check if the coupled states are correctly set.
 * @drv: struct cpuidle_driver for the platform
 *
 * Returns 0 for valid state values, a negative error code otherwise:
 *  * -EINVAL if any coupled state(safe_state_index) is wrongly set.
 */
int cpuidle_coupled_state_verify(struct cpuidle_driver *drv)
{
	int i;

	/* Every coupled state needs a valid, non-coupled safe_state_index. */
	for (i = drv->state_count - 1; i >= 0; i--) {
		if (cpuidle_state_is_coupled(drv, i) &&
		    (drv->safe_state_index == i ||
		     drv->safe_state_index < 0 ||
		     drv->safe_state_index >= drv->state_count))
			return -EINVAL;
	}

	return 0;
}

/**
 * cpuidle_coupled_set_ready - mark a cpu as ready
 * @coupled: the struct coupled that contains the current cpu
 */
static inline void cpuidle_coupled_set_ready(struct cpuidle_coupled *coupled)
{
	/* Adding MAX_WAITING_CPUS increments the ready (high-half) count. */
	atomic_add(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
}

/**
 * cpuidle_coupled_set_not_ready - mark a cpu as not ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Decrements the ready counter, unless the ready (and thus the waiting) counter
 * is equal to the number of online cpus.  Prevents a race where one cpu
 * decrements the waiting counter and then re-increments it just before another
 * cpu has decremented its ready counter, leading to the ready counter going
 * down from the number of online cpus without going through the coupled idle
 * state.
 *
 * Returns 0 if the counter was decremented successfully, -EINVAL if the ready
 * counter was equal to the number of online cpus.
 */
static inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled)
{
	int all;
	int ret;

	/* 'all' is the counter value when every online cpu is ready+waiting */
	all = coupled->online_count | (coupled->online_count << WAITING_BITS);
	ret = atomic_add_unless(&coupled->ready_waiting_counts,
		-MAX_WAITING_CPUS, all);

	return ret ? 0 : -EINVAL;
}

/**
 * cpuidle_coupled_no_cpus_ready - check if no cpus in a coupled set are ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all of the cpus in a coupled set are out of the ready loop.
 */
static inline int cpuidle_coupled_no_cpus_ready(struct cpuidle_coupled *coupled)
{
	/* The ready count lives in the high half of ready_waiting_counts. */
	int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;

	return r == 0;
}

/**
 * cpuidle_coupled_cpus_ready - check if all cpus in a coupled set are ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all cpus coupled to this target state are in the ready loop
 */
static inline bool cpuidle_coupled_cpus_ready(struct cpuidle_coupled *coupled)
{
	int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;

	return r == coupled->online_count;
}

/**
 * cpuidle_coupled_cpus_waiting - check if all cpus in a coupled set are waiting
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all cpus coupled to this target state are in the wait loop
 */
static inline bool cpuidle_coupled_cpus_waiting(struct cpuidle_coupled *coupled)
{
	/* The waiting count lives in the low half of ready_waiting_counts. */
	int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;

	return w == coupled->online_count;
}

/**
 * cpuidle_coupled_no_cpus_waiting - check if no cpus in coupled set are waiting
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all of the cpus in a coupled set are out of the waiting loop.
 */
static inline int cpuidle_coupled_no_cpus_waiting(struct cpuidle_coupled *coupled)
{
	int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;

	return w == 0;
}

/**
 * cpuidle_coupled_get_state - determine the deepest idle state
 * @dev: struct cpuidle_device for this cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns the deepest idle state that all coupled cpus can enter
 */
static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
		struct cpuidle_coupled *coupled)
{
	int i;
	int state = INT_MAX;

	/*
	 * Read barrier ensures that read of requested_state is ordered after
	 * reads of ready_count.  Matches the write barriers
	 * cpuidle_set_state_waiting.
	 */
	smp_rmb();

	/* The shallowest request among online cpus limits the whole set. */
	for_each_cpu(i, &coupled->coupled_cpus)
		if (cpu_online(i) && coupled->requested_state[i] < state)
			state = coupled->requested_state[i];

	return state;
}

/* IPI handler for a poke: record it and clear the pending flag. */
static void cpuidle_coupled_handle_poke(void *info)
{
	int cpu = (unsigned long)info;

	cpumask_set_cpu(cpu, &cpuidle_coupled_poked);
	cpumask_clear_cpu(cpu, &cpuidle_coupled_poke_pending);
}

/**
 * cpuidle_coupled_poke - wake up a cpu that may be waiting
 * @cpu: target cpu
 *
 * Ensures that the target cpu exits it's waiting idle state (if it is in it)
 * and will see updates to waiting_count before it re-enters it's waiting idle
 * state.
 *
 * If cpuidle_coupled_poked_mask is already set for the target cpu, that cpu
 * either has or will soon have a pending IPI that will wake it out of idle,
 * or it is currently processing the IPI and is not in idle.
 */
static void cpuidle_coupled_poke(int cpu)
{
	struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);

	/* Send the IPI only if one is not already in flight for this cpu. */
	if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
		smp_call_function_single_async(cpu, csd);
}

/**
 * cpuidle_coupled_poke_others - wake up all other cpus that may be waiting
 * @dev: struct cpuidle_device for this cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Calls cpuidle_coupled_poke on all other online cpus.
 */
static void cpuidle_coupled_poke_others(int this_cpu,
		struct cpuidle_coupled *coupled)
{
	int cpu;

	for_each_cpu(cpu, &coupled->coupled_cpus)
		if (cpu != this_cpu && cpu_online(cpu))
			cpuidle_coupled_poke(cpu);
}

/**
 * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop
 * @dev: struct cpuidle_device for this cpu
 * @coupled: the struct coupled that contains the current cpu
 * @next_state: the index in drv->states of the requested state for this cpu
 *
 * Updates the requested idle state for the specified cpuidle device.
 * Returns the number of waiting cpus.
 */
static int cpuidle_coupled_set_waiting(int cpu,
		struct cpuidle_coupled *coupled, int next_state)
{
	coupled->requested_state[cpu] = next_state;

	/*
	 * The atomic_inc_return provides a write barrier to order the write
	 * to requested_state with the later write that increments ready_count.
	 */
	return atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
}

/**
 * cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop
 * @dev: struct cpuidle_device for this cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Removes the requested idle state for the specified cpuidle device.
 */
static void cpuidle_coupled_set_not_waiting(int cpu,
		struct cpuidle_coupled *coupled)
{
	/*
	 * Decrementing waiting count can race with incrementing it in
	 * cpuidle_coupled_set_waiting, but that's OK.  Worst case, some
	 * cpus will increment ready_count and then spin until they
	 * notice that this cpu has cleared it's requested_state.
	 */
	atomic_dec(&coupled->ready_waiting_counts);

	coupled->requested_state[cpu] = CPUIDLE_COUPLED_NOT_IDLE;
}

/**
 * cpuidle_coupled_set_done - mark this cpu as leaving the ready loop
 * @cpu: the current cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Marks this cpu as no longer in the ready and waiting loops.  Decrements
 * the waiting count first to prevent another cpu looping back in and seeing
 * this cpu as waiting just before it exits idle.
 */
static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
{
	cpuidle_coupled_set_not_waiting(cpu, coupled);
	atomic_sub(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
}

/**
 * cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed
 * @cpu - this cpu
 *
 * Turns on interrupts and spins until any outstanding poke interrupts have
 * been processed and the poke bit has been cleared.
 *
 * Other interrupts may also be processed while interrupts are enabled, so
 * need_resched() must be tested after this function returns to make sure
 * the interrupt didn't schedule work that should take the cpu out of idle.
 *
 * Returns 0 if no poke was pending, 1 if a poke was cleared.
 */
static int cpuidle_coupled_clear_pokes(int cpu)
{
	if (!cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
		return 0;

	/* Enable irqs so the pending poke IPI can be delivered and handled. */
	local_irq_enable();
	while (cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
		cpu_relax();
	local_irq_disable();

	return 1;
}

/* Returns true if any online cpu in the coupled set has a poke pending. */
static bool cpuidle_coupled_any_pokes_pending(struct cpuidle_coupled *coupled)
{
	cpumask_t cpus;
	int ret;

	cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
	ret = cpumask_and(&cpus, &cpuidle_coupled_poke_pending, &cpus);

	return ret;
}

/**
 * cpuidle_enter_state_coupled - attempt to enter a state with coupled cpus
 * @dev: struct cpuidle_device for the current cpu
 * @drv: struct cpuidle_driver for the platform
 * @next_state: index of the requested state in drv->states
 *
 * Coordinate with coupled cpus to enter the target state.  This is a two
 * stage process.  In the first stage, the cpus are operating independently,
 * and may call into cpuidle_enter_state_coupled at completely different times.
 * To save as much power as possible, the first cpus to call this function will
 * go to an intermediate state (the cpuidle_device's safe state), and wait for
 * all the other cpus to call this function.  Once all coupled cpus are idle,
 * the second stage will start.  Each coupled cpu will spin until all cpus have
 * guaranteed that they will call the target_state.
 *
 * This function must be called with interrupts disabled.  It may enable
 * interrupts while preparing for idle, and it will always return with
 * interrupts enabled.
 */
int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int next_state)
{
	int entered_state = -1;
	struct cpuidle_coupled *coupled = dev->coupled;
	int w;

	if (!coupled)
		return -EINVAL;

	/* While hotplug is in progress, idle in the safe state instead. */
	while (coupled->prevent) {
		cpuidle_coupled_clear_pokes(dev->cpu);
		if (need_resched()) {
			local_irq_enable();
			return entered_state;
		}
		entered_state = cpuidle_enter_state(dev, drv,
			drv->safe_state_index);
		local_irq_disable();
	}

	/* Read barrier ensures online_count is read after prevent is cleared */
	smp_rmb();

reset:
	cpumask_clear_cpu(dev->cpu, &cpuidle_coupled_poked);

	w = cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
	/*
	 * If this is the last cpu to enter the waiting state, poke
	 * all the other cpus out of their waiting state so they can
	 * enter a deeper state.  This can race with one of the cpus
	 * exiting the waiting state due to an interrupt and
	 * decrementing waiting_count, see comment below.
	 */
	if (w == coupled->online_count) {
		cpumask_set_cpu(dev->cpu, &cpuidle_coupled_poked);
		cpuidle_coupled_poke_others(dev->cpu, coupled);
	}

retry:
	/*
	 * Wait for all coupled cpus to be idle, using the deepest state
	 * allowed for a single cpu.  If this was not the poking cpu, wait
	 * for at least one poke before leaving to avoid a race where
	 * two cpus could arrive at the waiting loop at the same time,
	 * but the first of the two to arrive could skip the loop without
	 * processing the pokes from the last to arrive.
	 */
	while (!cpuidle_coupled_cpus_waiting(coupled) ||
			!cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked)) {
		if (cpuidle_coupled_clear_pokes(dev->cpu))
			continue;

		if (need_resched()) {
			cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
			goto out;
		}

		if (coupled->prevent) {
			cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
			goto out;
		}

		entered_state = cpuidle_enter_state(dev, drv,
			drv->safe_state_index);
		local_irq_disable();
	}

	cpuidle_coupled_clear_pokes(dev->cpu);
	if (need_resched()) {
		cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
		goto out;
	}

	/*
	 * Make sure final poke status for this cpu is visible before setting
	 * cpu as ready.
	 */
	smp_wmb();

	/*
	 * All coupled cpus are probably idle.  There is a small chance that
	 * one of the other cpus just became active.  Increment the ready count,
	 * and spin until all coupled cpus have incremented the counter.  Once a
	 * cpu has incremented the ready counter, it cannot abort idle and must
	 * spin until either all cpus have incremented the ready counter, or
	 * another cpu leaves idle and decrements the waiting counter.
	 */
	cpuidle_coupled_set_ready(coupled);
	while (!cpuidle_coupled_cpus_ready(coupled)) {
		/* Check if any other cpus bailed out of idle. */
		if (!cpuidle_coupled_cpus_waiting(coupled))
			if (!cpuidle_coupled_set_not_ready(coupled))
				goto retry;

		cpu_relax();
	}

	/*
	 * Make sure read of all cpus ready is done before reading pending pokes
	 */
	smp_rmb();

	/*
	 * There is a small chance that a cpu left and reentered idle after this
	 * cpu saw that all cpus were waiting.  The cpu that reentered idle will
	 * have sent this cpu a poke, which will still be pending after the
	 * ready loop.  The pending interrupt may be lost by the interrupt
	 * controller when entering the deep idle state.  It's not possible to
	 * clear a pending interrupt without turning interrupts on and handling
	 * it, and it's too late to turn on interrupts here, so reset the
	 * coupled idle state of all cpus and retry.
	 */
	if (cpuidle_coupled_any_pokes_pending(coupled)) {
		cpuidle_coupled_set_done(dev->cpu, coupled);
		/* Wait for all cpus to see the pending pokes */
		cpuidle_coupled_parallel_barrier(dev, &coupled->abort_barrier);
		goto reset;
	}

	/* all cpus have acked the coupled state */
	next_state = cpuidle_coupled_get_state(dev, coupled);

	entered_state = cpuidle_enter_state(dev, drv, next_state);

	cpuidle_coupled_set_done(dev->cpu, coupled);

out:
	/*
	 * Normal cpuidle states are expected to return with irqs enabled.
	 * That leads to an inefficiency where a cpu receiving an interrupt
	 * that brings it out of idle will process that interrupt before
	 * exiting the idle enter function and decrementing ready_count.  All
	 * other cpus will need to spin waiting for the cpu that is processing
	 * the interrupt.  If the driver returns with interrupts disabled,
	 * all other cpus will loop back into the safe idle state instead of
	 * spinning, saving power.
	 *
	 * Calling local_irq_enable here allows coupled states to return with
	 * interrupts disabled, but won't cause problems for drivers that
	 * exit with interrupts enabled.
	 */
	local_irq_enable();

	/*
	 * Wait until all coupled cpus have exited idle.  There is no risk that
	 * a cpu exits and re-enters the ready state because this cpu has
	 * already decremented its waiting_count.
	 */
	while (!cpuidle_coupled_no_cpus_ready(coupled))
		cpu_relax();

	return entered_state;
}

/* Recompute how many cpus of the coupled set are currently online. */
static void cpuidle_coupled_update_online_cpus(struct cpuidle_coupled *coupled)
{
	cpumask_t cpus;

	cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
	coupled->online_count = cpumask_weight(&cpus);
}

/**
 * cpuidle_coupled_register_device - register a coupled cpuidle device
 * @dev: struct cpuidle_device for the current cpu
 *
 * Called from cpuidle_register_device to handle coupled idle init.  Finds the
 * cpuidle_coupled struct for this set of coupled cpus, or creates one if none
 * exists yet.
*/ int cpuidle_coupled_register_device(struct cpuidle_device *dev) { int cpu; struct cpuidle_device *other_dev; struct call_single_data *csd; struct cpuidle_coupled *coupled; if (cpumask_empty(&dev->coupled_cpus)) return 0; for_each_cpu(cpu, &dev->coupled_cpus) { other_dev = per_cpu(cpuidle_devices, cpu); if (other_dev && other_dev->coupled) { coupled = other_dev->coupled; goto have_coupled; } } /* No existing coupled info found, create a new one */ coupled = kzalloc(sizeof(struct cpuidle_coupled), GFP_KERNEL); if (!coupled) return -ENOMEM; coupled->coupled_cpus = dev->coupled_cpus; have_coupled: dev->coupled = coupled; if (WARN_ON(!cpumask_equal(&dev->coupled_cpus, &coupled->coupled_cpus))) coupled->prevent++; cpuidle_coupled_update_online_cpus(coupled); coupled->refcnt++; csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu); csd->func = cpuidle_coupled_handle_poke; csd->info = (void *)(unsigned long)dev->cpu; return 0; } /** * cpuidle_coupled_unregister_device - unregister a coupled cpuidle device * @dev: struct cpuidle_device for the current cpu * * Called from cpuidle_unregister_device to tear down coupled idle. Removes the * cpu from the coupled idle set, and frees the cpuidle_coupled_info struct if * this was the last cpu in the set. */ void cpuidle_coupled_unregister_device(struct cpuidle_device *dev) { struct cpuidle_coupled *coupled = dev->coupled; if (cpumask_empty(&dev->coupled_cpus)) return; if (--coupled->refcnt) kfree(coupled); dev->coupled = NULL; } /** * cpuidle_coupled_prevent_idle - prevent cpus from entering a coupled state * @coupled: the struct coupled that contains the cpu that is changing state * * Disables coupled cpuidle on a coupled set of cpus. Used to ensure that * cpu_online_mask doesn't change while cpus are coordinating coupled idle. */ static void cpuidle_coupled_prevent_idle(struct cpuidle_coupled *coupled) { int cpu = get_cpu(); /* Force all cpus out of the waiting loop. 
 */
	coupled->prevent++;
	cpuidle_coupled_poke_others(cpu, coupled);
	put_cpu();
	/* Spin until every coupled cpu has left the waiting loop. */
	while (!cpuidle_coupled_no_cpus_waiting(coupled))
		cpu_relax();
}

/**
 * cpuidle_coupled_allow_idle - allows cpus to enter a coupled state
 * @coupled: the struct coupled that contains the cpu that is changing state
 *
 * Enables coupled cpuidle on a coupled set of cpus.  Used to ensure that
 * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
 */
static void cpuidle_coupled_allow_idle(struct cpuidle_coupled *coupled)
{
	int cpu = get_cpu();

	/*
	 * Write barrier ensures readers see the new online_count when they
	 * see prevent == 0.
	 */
	smp_wmb();
	coupled->prevent--;
	/* Force cpus out of the prevent loop. */
	cpuidle_coupled_poke_others(cpu, coupled);
	put_cpu();
}

/**
 * cpuidle_coupled_cpu_notify - notifier called during hotplug transitions
 * @nb: notifier block
 * @action: hotplug transition
 * @hcpu: target cpu number
 *
 * Called when a cpu is brought on or offline using hotplug.  Updates the
 * coupled cpu set appropriately
 */
static int cpuidle_coupled_cpu_notify(struct notifier_block *nb,
		unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	struct cpuidle_device *dev;

	/* Ignore transitions coupled idle does not care about. */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
	case CPU_DOWN_PREPARE:
	case CPU_ONLINE:
	case CPU_DEAD:
	case CPU_UP_CANCELED:
	case CPU_DOWN_FAILED:
		break;
	default:
		return NOTIFY_OK;
	}

	mutex_lock(&cpuidle_lock);

	dev = per_cpu(cpuidle_devices, cpu);
	if (!dev || !dev->coupled)
		goto out;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
	case CPU_DOWN_PREPARE:
		/* Block coupled idle for the duration of the transition. */
		cpuidle_coupled_prevent_idle(dev->coupled);
		break;
	case CPU_ONLINE:
	case CPU_DEAD:
		cpuidle_coupled_update_online_cpus(dev->coupled);
		/* Fall through */
	case CPU_UP_CANCELED:
	case CPU_DOWN_FAILED:
		cpuidle_coupled_allow_idle(dev->coupled);
		break;
	}

out:
	mutex_unlock(&cpuidle_lock);
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_coupled_cpu_notifier = {
	.notifier_call = cpuidle_coupled_cpu_notify,
};

/* Register the hotplug notifier early so coupled state stays consistent. */
static int __init cpuidle_coupled_init(void)
{
	return register_cpu_notifier(&cpuidle_coupled_cpu_notifier);
}
core_initcall(cpuidle_coupled_init);
gpl-2.0
siburu/livedump-kernel
drivers/staging/rtl8712/rtl871x_cmd.c
176
32648
/****************************************************************************** * rtl871x_cmd.c * * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved. * Linux device driver for RTL8192SU * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * Modifications for inclusion into the Linux staging tree are * Copyright(c) 2010 Larry Finger. All rights reserved. * * Contact information: * WLAN FAE <wlanfae@realtek.com> * Larry Finger <Larry.Finger@lwfinger.net> * ******************************************************************************/ #define _RTL871X_CMD_C_ #include <linux/compiler.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/kref.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/usb.h> #include <linux/usb/ch9.h> #include <linux/circ_buf.h> #include <linux/uaccess.h> #include <asm/byteorder.h> #include <linux/atomic.h> #include <linux/semaphore.h> #include <linux/rtnetlink.h> #include "osdep_service.h" #include "drv_types.h" #include "recv_osdep.h" #include "mlme_osdep.h" /* Caller and the r8712_cmd_thread can protect cmd_q by spin_lock. No irqsave is necessary. 
*/ static sint _init_cmd_priv(struct cmd_priv *pcmdpriv) { sema_init(&(pcmdpriv->cmd_queue_sema), 0); sema_init(&(pcmdpriv->terminate_cmdthread_sema), 0); _init_queue(&(pcmdpriv->cmd_queue)); /* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */ pcmdpriv->cmd_seq = 1; pcmdpriv->cmd_allocated_buf = _malloc(MAX_CMDSZ + CMDBUFF_ALIGN_SZ); if (pcmdpriv->cmd_allocated_buf == NULL) return _FAIL; pcmdpriv->cmd_buf = pcmdpriv->cmd_allocated_buf + CMDBUFF_ALIGN_SZ - ((addr_t)(pcmdpriv->cmd_allocated_buf) & (CMDBUFF_ALIGN_SZ-1)); pcmdpriv->rsp_allocated_buf = _malloc(MAX_RSPSZ + 4); if (pcmdpriv->rsp_allocated_buf == NULL) return _FAIL; pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 - ((addr_t)(pcmdpriv->rsp_allocated_buf) & 3); pcmdpriv->cmd_issued_cnt = 0; pcmdpriv->cmd_done_cnt = 0; pcmdpriv->rsp_cnt = 0; return _SUCCESS; } static sint _init_evt_priv(struct evt_priv *pevtpriv) { /* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */ pevtpriv->event_seq = 0; pevtpriv->evt_allocated_buf = _malloc(MAX_EVTSZ + 4); if (pevtpriv->evt_allocated_buf == NULL) return _FAIL; pevtpriv->evt_buf = pevtpriv->evt_allocated_buf + 4 - ((addr_t)(pevtpriv->evt_allocated_buf) & 3); pevtpriv->evt_done_cnt = 0; return _SUCCESS; } static void _free_evt_priv(struct evt_priv *pevtpriv) { kfree(pevtpriv->evt_allocated_buf); } static void _free_cmd_priv(struct cmd_priv *pcmdpriv) { if (pcmdpriv) { kfree(pcmdpriv->cmd_allocated_buf); kfree(pcmdpriv->rsp_allocated_buf); } } /* Calling Context: _enqueue_cmd can only be called between kernel thread, since only spin_lock is used. ISR/Call-Back functions can't call this sub-function. 
 */
static sint _enqueue_cmd(struct __queue *queue, struct cmd_obj *obj)
{
	unsigned long irqL;

	/* A NULL object is silently accepted as success. */
	if (obj == NULL)
		return _SUCCESS;
	spin_lock_irqsave(&queue->lock, irqL);
	list_insert_tail(&obj->list, &queue->queue);
	spin_unlock_irqrestore(&queue->lock, irqL);
	return _SUCCESS;
}

/* Pop the oldest command object off the queue, or NULL if it is empty. */
static struct cmd_obj *_dequeue_cmd(struct __queue *queue)
{
	unsigned long irqL;
	struct cmd_obj *obj;

	spin_lock_irqsave(&(queue->lock), irqL);
	if (is_list_empty(&(queue->queue)))
		obj = NULL;
	else {
		obj = LIST_CONTAINOR(get_next(&(queue->queue)),
				     struct cmd_obj, list);
		list_delete(&obj->list);
	}
	spin_unlock_irqrestore(&(queue->lock), irqL);
	return obj;
}

u32 r8712_init_cmd_priv(struct cmd_priv *pcmdpriv)
{
	return _init_cmd_priv(pcmdpriv);
}

u32 r8712_init_evt_priv(struct evt_priv *pevtpriv)
{
	return _init_evt_priv(pevtpriv);
}

void r8712_free_evt_priv(struct evt_priv *pevtpriv)
{
	_free_evt_priv(pevtpriv);
}

void r8712_free_cmd_priv(struct cmd_priv *pcmdpriv)
{
	_free_cmd_priv(pcmdpriv);
}

/* Queue a command object and wake the command thread via cmd_queue_sema. */
u32 r8712_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *obj)
{
	int res;

	/* Refuse new commands when the EEPROM autoload failed. */
	if (pcmdpriv->padapter->eeprompriv.bautoload_fail_flag == true)
		return _FAIL;
	res = _enqueue_cmd(&pcmdpriv->cmd_queue, obj);
	up(&pcmdpriv->cmd_queue_sema);
	return res;
}

/* Like r8712_enqueue_cmd, but with the queue insertion done inline. */
u32 r8712_enqueue_cmd_ex(struct cmd_priv *pcmdpriv, struct cmd_obj *obj)
{
	unsigned long irqL;
	struct __queue *queue;

	if (obj == NULL)
		return _SUCCESS;
	if (pcmdpriv->padapter->eeprompriv.bautoload_fail_flag == true)
		return _FAIL;
	queue = &pcmdpriv->cmd_queue;
	spin_lock_irqsave(&queue->lock, irqL);
	list_insert_tail(&obj->list, &queue->queue);
	spin_unlock_irqrestore(&queue->lock, irqL);
	up(&pcmdpriv->cmd_queue_sema);
	return _SUCCESS;
}

struct cmd_obj *r8712_dequeue_cmd(struct __queue *queue)
{
	return _dequeue_cmd(queue);
}

/* Release a command object together with the buffers it owns. */
void r8712_free_cmd_obj(struct cmd_obj *pcmd)
{
	/*
	 * JoinBss/CreateBss parmbufs are not per-command heap allocations
	 * (r8712_createbss_cmd points parmbuf at registrypriv.dev_network;
	 * JoinBss presumably similar - see its builder), so don't free them.
	 */
	if ((pcmd->cmdcode != _JoinBss_CMD_) &&
	    (pcmd->cmdcode != _CreateBss_CMD_))
		kfree((unsigned char *)pcmd->parmbuf);
	if (pcmd->rsp != NULL) {
		if (pcmd->rspsz != 0)
			kfree((unsigned char *)pcmd->rsp);
	}
	kfree((unsigned char *)pcmd);
}

/*
 * r8712_sitesurvey_cmd(~)
 *	### NOTE:#### (!!!!)
 *	MUST TAKE CARE THAT BEFORE CALLING THIS FUNC,
 *	YOU SHOULD HAVE LOCKED pmlmepriv->lock
 */
u8 r8712_sitesurvey_cmd(struct _adapter *padapter,
			struct ndis_802_11_ssid *pssid)
{
	struct cmd_obj *ph2c;
	struct sitesurvey_parm *psurveyPara;
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;

	ph2c = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj));
	if (ph2c == NULL)
		return _FAIL;
	psurveyPara = (struct sitesurvey_parm *)_malloc(
		       sizeof(struct sitesurvey_parm));
	if (psurveyPara == NULL) {
		kfree((unsigned char *) ph2c);
		return _FAIL;
	}
	init_h2fwcmd_w_parm_no_rsp(ph2c, psurveyPara,
				   GEN_CMD_CODE(_SiteSurvey));
	psurveyPara->bsslimit = cpu_to_le32(48);
	psurveyPara->passive_mode = cpu_to_le32(pmlmepriv->passive_mode);
	psurveyPara->ss_ssidlen = 0;
	memset(psurveyPara->ss_ssid, 0, IW_ESSID_MAX_SIZE + 1);
	if ((pssid != NULL) && (pssid->SsidLength)) {
		memcpy(psurveyPara->ss_ssid, pssid->Ssid, pssid->SsidLength);
		psurveyPara->ss_ssidlen = cpu_to_le32(pssid->SsidLength);
	}
	set_fwstate(pmlmepriv, _FW_UNDER_SURVEY);
	r8712_enqueue_cmd(pcmdpriv, ph2c);
	/* Arm the scan timeout timer so a stuck survey is recovered. */
	_set_timer(&pmlmepriv->scan_to_timer, SCANNING_TIMEOUT);
	padapter->ledpriv.LedControlHandler(padapter, LED_CTL_SITE_SURVEY);
	padapter->blnEnableRxFF0Filter = 0;
	return _SUCCESS;
}

/* Queue a _SetDataRate firmware command for the given rate set. */
u8 r8712_setdatarate_cmd(struct _adapter *padapter, u8 *rateset)
{
	struct cmd_obj *ph2c;
	struct setdatarate_parm *pbsetdataratepara;
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;

	ph2c = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj));
	if (ph2c == NULL)
		return _FAIL;
	pbsetdataratepara = (struct setdatarate_parm *)_malloc(
			     sizeof(struct setdatarate_parm));
	if (pbsetdataratepara == NULL) {
		kfree((u8 *) ph2c);
		return _FAIL;
	}
	init_h2fwcmd_w_parm_no_rsp(ph2c, pbsetdataratepara,
				   GEN_CMD_CODE(_SetDataRate));
	pbsetdataratepara->mac_id = 5;
	memcpy(pbsetdataratepara->datarates, rateset, NumRates);
	r8712_enqueue_cmd(pcmdpriv, ph2c);
	return _SUCCESS;
}

/* Queue a _SetChannelPlan firmware command. */
u8 r8712_set_chplan_cmd(struct _adapter *padapter, int chplan)
{
	struct cmd_obj *ph2c;
	struct SetChannelPlan_param *psetchplanpara;
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;

	ph2c = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj));
	if (ph2c == NULL)
		return _FAIL;
	psetchplanpara = (struct SetChannelPlan_param *)
		_malloc(sizeof(struct SetChannelPlan_param));
	if (psetchplanpara == NULL) {
		kfree((u8 *) ph2c);
		return _FAIL;
	}
	init_h2fwcmd_w_parm_no_rsp(ph2c, psetchplanpara,
				GEN_CMD_CODE(_SetChannelPlan));
	psetchplanpara->ChannelPlan = chplan;
	r8712_enqueue_cmd(pcmdpriv, ph2c);
	return _SUCCESS;
}

/* Queue a _SetBasicRate_CMD_ with the given basic rate set. */
u8 r8712_setbasicrate_cmd(struct _adapter *padapter, u8 *rateset)
{
	struct cmd_obj *ph2c;
	struct setbasicrate_parm *pssetbasicratepara;
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;

	ph2c = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj));
	if (ph2c == NULL)
		return _FAIL;
	pssetbasicratepara = (struct setbasicrate_parm *)_malloc(
			      sizeof(struct setbasicrate_parm));
	if (pssetbasicratepara == NULL) {
		kfree((u8 *) ph2c);
		return _FAIL;
	}
	init_h2fwcmd_w_parm_no_rsp(ph2c, pssetbasicratepara,
		_SetBasicRate_CMD_);
	memcpy(pssetbasicratepara->basicrates, rateset, NumRates);
	r8712_enqueue_cmd(pcmdpriv, ph2c);
	return _SUCCESS;
}

/* power tracking mechanism setting */
u8 r8712_setptm_cmd(struct _adapter *padapter, u8 type)
{
	struct cmd_obj *ph2c;
	struct writePTM_parm *pwriteptmparm;
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;

	ph2c = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj));
	if (ph2c == NULL)
		return _FAIL;
	pwriteptmparm = (struct writePTM_parm *)
		_malloc(sizeof(struct writePTM_parm));
	if (pwriteptmparm == NULL) {
		kfree((u8 *) ph2c);
		return _FAIL;
	}
	init_h2fwcmd_w_parm_no_rsp(ph2c, pwriteptmparm, GEN_CMD_CODE(_SetPT));
	pwriteptmparm->type = type;
	r8712_enqueue_cmd(pcmdpriv, ph2c);
	return _SUCCESS;
}

/* Queue a _SetDIG (dynamic initial gain) firmware command. */
u8 r8712_setfwdig_cmd(struct _adapter *padapter, u8 type)
{
	struct cmd_obj *ph2c;
	/*
	 * NOTE(review): the pointer is declared as writePTM_parm but the
	 * allocation below is sized for setdig_parm - confirm both layouts
	 * share the leading 'type' member.
	 */
	struct writePTM_parm *pwriteptmparm;
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;

	ph2c = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj));
	if (ph2c == NULL)
		return _FAIL;
	pwriteptmparm = (struct writePTM_parm *)
		_malloc(sizeof(struct setdig_parm));
	if (pwriteptmparm == NULL) {
		kfree((u8 *) ph2c);
		return _FAIL;
	}
	init_h2fwcmd_w_parm_no_rsp(ph2c, pwriteptmparm, GEN_CMD_CODE(_SetDIG));
	pwriteptmparm->type = type;
	r8712_enqueue_cmd(pcmdpriv, ph2c);
	return _SUCCESS;
}

/* Queue a _SetRA (rate adaptive) firmware command. */
u8 r8712_setfwra_cmd(struct _adapter *padapter, u8 type)
{
	struct cmd_obj *ph2c;
	/* NOTE(review): allocation sized for setra_parm; see setfwdig above. */
	struct writePTM_parm *pwriteptmparm;
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;

	ph2c = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj));
	if (ph2c == NULL)
		return _FAIL;
	pwriteptmparm = (struct writePTM_parm *)
		_malloc(sizeof(struct setra_parm));
	if (pwriteptmparm == NULL) {
		kfree((u8 *) ph2c);
		return _FAIL;
	}
	init_h2fwcmd_w_parm_no_rsp(ph2c, pwriteptmparm, GEN_CMD_CODE(_SetRA));
	pwriteptmparm->type = type;
	r8712_enqueue_cmd(pcmdpriv, ph2c);
	return _SUCCESS;
}

/* Queue a write to an RF register. */
u8 r8712_setrfreg_cmd(struct _adapter *padapter, u8 offset, u32 val)
{
	struct cmd_obj *ph2c;
	struct writeRF_parm *pwriterfparm;
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;

	ph2c = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj));
	if (ph2c == NULL)
		return _FAIL;
	pwriterfparm = (struct writeRF_parm *)_malloc(
			sizeof(struct writeRF_parm));
	if (pwriterfparm == NULL) {
		kfree((u8 *) ph2c);
		return _FAIL;
	}
	init_h2fwcmd_w_parm_no_rsp(ph2c, pwriterfparm,
				   GEN_CMD_CODE(_SetRFReg));
	pwriterfparm->offset = offset;
	pwriterfparm->value = val;
	r8712_enqueue_cmd(pcmdpriv, ph2c);
	return _SUCCESS;
}

/* Queue a read of an RF register; the result is delivered via the rsp. */
u8 r8712_getrfreg_cmd(struct _adapter *padapter, u8 offset, u8 *pval)
{
	struct cmd_obj *ph2c;
	struct readRF_parm *prdrfparm;
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;

	ph2c = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj));
	if (ph2c == NULL)
		return _FAIL;
	prdrfparm = (struct readRF_parm *)_malloc(sizeof(struct readRF_parm));
	if (prdrfparm == NULL) {
		kfree((u8 *) ph2c);
		return _FAIL;
	}
	/* Built by hand (not init_h2fwcmd_...) because a rsp is expected. */
	_init_listhead(&ph2c->list);
	ph2c->cmdcode = GEN_CMD_CODE(_GetRFReg);
	ph2c->parmbuf = (unsigned char *)prdrfparm;
	ph2c->cmdsz = sizeof(struct readRF_parm);
	/*
	 * NOTE(review): rsp points at the caller-provided pval but rspsz is
	 * sizeof(struct readRF_rsp) - the caller's buffer must be at least
	 * that large; confirm at the call sites.
	 */
	ph2c->rsp = pval;
	ph2c->rspsz = sizeof(struct readRF_rsp);
	prdrfparm->offset = offset;
	r8712_enqueue_cmd(pcmdpriv, ph2c);
	return _SUCCESS;
}

/* Completion callback for BB/RF register reads: free cmd, flag MP done. */
void r8712_getbbrfreg_cmdrsp_callback(struct _adapter *padapter,
				      struct cmd_obj *pcmd)
{
	kfree(pcmd->parmbuf);
	kfree(pcmd);
	padapter->mppriv.workparam.bcompleted = true;
}

/* Completion callback for TSSI reads: free cmd, flag MP work done. */
void r8712_readtssi_cmdrsp_callback(struct _adapter *padapter,
				    struct cmd_obj *pcmd)
{
	kfree(pcmd->parmbuf);
	kfree(pcmd);
	padapter->mppriv.workparam.bcompleted = true;
}

/* Queue a _CreateBss command built from registrypriv.dev_network. */
u8 r8712_createbss_cmd(struct _adapter *padapter)
{
	struct cmd_obj *pcmd;
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
	struct wlan_bssid_ex *pdev_network =
				 &padapter->registrypriv.dev_network;

	padapter->ledpriv.LedControlHandler(padapter, LED_CTL_START_TO_LINK);
	pcmd = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj));
	if (pcmd == NULL)
		return _FAIL;
	_init_listhead(&pcmd->list);
	pcmd->cmdcode = _CreateBss_CMD_;
	/*
	 * parmbuf points into adapter-owned registry data, which is why
	 * r8712_free_cmd_obj never kfree()s the parmbuf of _CreateBss_CMD_.
	 */
	pcmd->parmbuf = (unsigned char *)pdev_network;
	pcmd->cmdsz = r8712_get_ndis_wlan_bssid_ex_sz((
			struct ndis_wlan_bssid_ex *)
			pdev_network);
	pcmd->rsp = NULL;
	pcmd->rspsz = 0;
	/* notes: translate IELength & Length after assign to cmdsz; */
	pdev_network->Length = cpu_to_le32(pcmd->cmdsz);
	pdev_network->IELength = cpu_to_le32(pdev_network->IELength);
	pdev_network->Ssid.SsidLength = cpu_to_le32(
		pdev_network->Ssid.SsidLength);
	r8712_enqueue_cmd(pcmdpriv, pcmd);
	return _SUCCESS;
}

/* Build and queue a _JoinBss command targeting the given network. */
u8 r8712_joinbss_cmd(struct _adapter *padapter, struct wlan_network *pnetwork)
{
	u8 *auth;
	uint t_len = 0;
	struct ndis_wlan_bssid_ex *psecnetwork;
	struct cmd_obj *pcmd;
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct qos_priv *pqospriv = &pmlmepriv->qospriv;
	struct security_priv *psecuritypriv = &padapter->securitypriv;
	struct registry_priv *pregistrypriv = &padapter->registrypriv;
	enum NDIS_802_11_NETWORK_INFRASTRUCTURE ndis_network_mode = pnetwork->
network.InfrastructureMode; padapter->ledpriv.LedControlHandler(padapter, LED_CTL_START_TO_LINK); pcmd = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj)); if (pcmd == NULL) return _FAIL; t_len = sizeof(u32) + 6 * sizeof(unsigned char) + 2 + sizeof(struct ndis_802_11_ssid) + sizeof(u32) + sizeof(s32) + sizeof(enum NDIS_802_11_NETWORK_TYPE) + sizeof(struct NDIS_802_11_CONFIGURATION) + sizeof(enum NDIS_802_11_NETWORK_INFRASTRUCTURE) + sizeof(NDIS_802_11_RATES_EX) + sizeof(u32) + MAX_IE_SZ; /* for hidden ap to set fw_state here */ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE) != true) { switch (ndis_network_mode) { case Ndis802_11IBSS: pmlmepriv->fw_state |= WIFI_ADHOC_STATE; break; case Ndis802_11Infrastructure: pmlmepriv->fw_state |= WIFI_STATION_STATE; break; case Ndis802_11APMode: case Ndis802_11AutoUnknown: case Ndis802_11InfrastructureMax: break; } } psecnetwork = (struct ndis_wlan_bssid_ex *)&psecuritypriv->sec_bss; if (psecnetwork == NULL) { kfree(pcmd); return _FAIL; } memset(psecnetwork, 0, t_len); memcpy(psecnetwork, &pnetwork->network, t_len); auth = &psecuritypriv->authenticator_ie[0]; psecuritypriv->authenticator_ie[0] = (unsigned char) psecnetwork->IELength; if ((psecnetwork->IELength-12) < (256 - 1)) memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->IEs[12], psecnetwork->IELength-12); else memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->IEs[12], (256-1)); psecnetwork->IELength = 0; /* If the the driver wants to use the bssid to create the connection. * If not, we copy the connecting AP's MAC address to it so that * the driver just has the bssid information for PMKIDList searching. 
*/ if (pmlmepriv->assoc_by_bssid == false) memcpy(&pmlmepriv->assoc_bssid[0], &pnetwork->network.MacAddress[0], ETH_ALEN); psecnetwork->IELength = r8712_restruct_sec_ie(padapter, &pnetwork->network.IEs[0], &psecnetwork->IEs[0], pnetwork->network.IELength); pqospriv->qos_option = 0; if (pregistrypriv->wmm_enable) { u32 tmp_len; tmp_len = r8712_restruct_wmm_ie(padapter, &pnetwork->network.IEs[0], &psecnetwork->IEs[0], pnetwork->network.IELength, psecnetwork->IELength); if (psecnetwork->IELength != tmp_len) { psecnetwork->IELength = tmp_len; pqospriv->qos_option = 1; /* WMM IE in beacon */ } else pqospriv->qos_option = 0; /* no WMM IE in beacon */ } if (pregistrypriv->ht_enable) { /* For WEP mode, we will use the bg mode to do the connection * to avoid some IOT issues, especially for Realtek 8192u * SoftAP. */ if ((padapter->securitypriv.PrivacyAlgrthm != _WEP40_) && (padapter->securitypriv.PrivacyAlgrthm != _WEP104_)) { /* restructure_ht_ie */ r8712_restructure_ht_ie(padapter, &pnetwork->network.IEs[0], &psecnetwork->IEs[0], pnetwork->network.IELength, &psecnetwork->IELength); } } psecuritypriv->supplicant_ie[0] = (u8)psecnetwork->IELength; if (psecnetwork->IELength < 255) memcpy(&psecuritypriv->supplicant_ie[1], &psecnetwork->IEs[0], psecnetwork->IELength); else memcpy(&psecuritypriv->supplicant_ie[1], &psecnetwork->IEs[0], 255); /* get cmdsz before endian conversion */ pcmd->cmdsz = r8712_get_ndis_wlan_bssid_ex_sz(psecnetwork); #ifdef __BIG_ENDIAN /* wlan_network endian conversion */ psecnetwork->Length = cpu_to_le32(psecnetwork->Length); psecnetwork->Ssid.SsidLength = cpu_to_le32( psecnetwork->Ssid.SsidLength); psecnetwork->Privacy = cpu_to_le32(psecnetwork->Privacy); psecnetwork->Rssi = cpu_to_le32(psecnetwork->Rssi); psecnetwork->NetworkTypeInUse = cpu_to_le32( psecnetwork->NetworkTypeInUse); psecnetwork->Configuration.ATIMWindow = cpu_to_le32( psecnetwork->Configuration.ATIMWindow); psecnetwork->Configuration.BeaconPeriod = cpu_to_le32( 
psecnetwork->Configuration.BeaconPeriod); psecnetwork->Configuration.DSConfig = cpu_to_le32( psecnetwork->Configuration.DSConfig); psecnetwork->Configuration.FHConfig.DwellTime = cpu_to_le32( psecnetwork->Configuration.FHConfig.DwellTime); psecnetwork->Configuration.FHConfig.HopPattern = cpu_to_le32( psecnetwork->Configuration.FHConfig.HopPattern); psecnetwork->Configuration.FHConfig.HopSet = cpu_to_le32( psecnetwork->Configuration.FHConfig.HopSet); psecnetwork->Configuration.FHConfig.Length = cpu_to_le32( psecnetwork->Configuration.FHConfig.Length); psecnetwork->Configuration.Length = cpu_to_le32( psecnetwork->Configuration.Length); psecnetwork->InfrastructureMode = cpu_to_le32( psecnetwork->InfrastructureMode); psecnetwork->IELength = cpu_to_le32(psecnetwork->IELength); #endif _init_listhead(&pcmd->list); pcmd->cmdcode = _JoinBss_CMD_; pcmd->parmbuf = (unsigned char *)psecnetwork; pcmd->rsp = NULL; pcmd->rspsz = 0; r8712_enqueue_cmd(pcmdpriv, pcmd); return _SUCCESS; } u8 r8712_disassoc_cmd(struct _adapter *padapter) /* for sta_mode */ { struct cmd_obj *pdisconnect_cmd; struct disconnect_parm *pdisconnect; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; pdisconnect_cmd = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj)); if (pdisconnect_cmd == NULL) return _FAIL; pdisconnect = (struct disconnect_parm *)_malloc( sizeof(struct disconnect_parm)); if (pdisconnect == NULL) { kfree((u8 *)pdisconnect_cmd); return _FAIL; } init_h2fwcmd_w_parm_no_rsp(pdisconnect_cmd, pdisconnect, _DisConnect_CMD_); r8712_enqueue_cmd(pcmdpriv, pdisconnect_cmd); return _SUCCESS; } u8 r8712_setopmode_cmd(struct _adapter *padapter, enum NDIS_802_11_NETWORK_INFRASTRUCTURE networktype) { struct cmd_obj *ph2c; struct setopmode_parm *psetop; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; ph2c = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj)); if (ph2c == NULL) return _FAIL; psetop = (struct setopmode_parm *)_malloc( sizeof(struct setopmode_parm)); if (psetop == NULL) { kfree((u8 *) ph2c); 
return _FAIL; } init_h2fwcmd_w_parm_no_rsp(ph2c, psetop, _SetOpMode_CMD_); psetop->mode = (u8)networktype; r8712_enqueue_cmd(pcmdpriv, ph2c); return _SUCCESS; } u8 r8712_setstakey_cmd(struct _adapter *padapter, u8 *psta, u8 unicast_key) { struct cmd_obj *ph2c; struct set_stakey_parm *psetstakey_para; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; struct set_stakey_rsp *psetstakey_rsp = NULL; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct security_priv *psecuritypriv = &padapter->securitypriv; struct sta_info *sta = (struct sta_info *)psta; ph2c = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj)); if (ph2c == NULL) return _FAIL; psetstakey_para = (struct set_stakey_parm *)_malloc( sizeof(struct set_stakey_parm)); if (psetstakey_para == NULL) { kfree((u8 *) ph2c); return _FAIL; } psetstakey_rsp = (struct set_stakey_rsp *)_malloc( sizeof(struct set_stakey_rsp)); if (psetstakey_rsp == NULL) { kfree((u8 *) ph2c); kfree((u8 *) psetstakey_para); return _FAIL; } init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_); ph2c->rsp = (u8 *) psetstakey_rsp; ph2c->rspsz = sizeof(struct set_stakey_rsp); memcpy(psetstakey_para->addr, sta->hwaddr, ETH_ALEN); if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) psetstakey_para->algorithm = (unsigned char) psecuritypriv->PrivacyAlgrthm; else GET_ENCRY_ALGO(psecuritypriv, sta, psetstakey_para->algorithm, false); if (unicast_key == true) memcpy(&psetstakey_para->key, &sta->x_UncstKey, 16); else memcpy(&psetstakey_para->key, &psecuritypriv->XGrpKey[ psecuritypriv->XGrpKeyid - 1]. 
skey, 16); r8712_enqueue_cmd(pcmdpriv, ph2c); return _SUCCESS; } u8 r8712_setrfintfs_cmd(struct _adapter *padapter, u8 mode) { struct cmd_obj *ph2c; struct setrfintfs_parm *psetrfintfsparm; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; ph2c = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj)); if (ph2c == NULL) return _FAIL; psetrfintfsparm = (struct setrfintfs_parm *)_malloc( sizeof(struct setrfintfs_parm)); if (psetrfintfsparm == NULL) { kfree((unsigned char *) ph2c); return _FAIL; } init_h2fwcmd_w_parm_no_rsp(ph2c, psetrfintfsparm, GEN_CMD_CODE(_SetRFIntFs)); psetrfintfsparm->rfintfs = mode; r8712_enqueue_cmd(pcmdpriv, ph2c); return _SUCCESS; } u8 r8712_setrttbl_cmd(struct _adapter *padapter, struct setratable_parm *prate_table) { struct cmd_obj *ph2c; struct setratable_parm *psetrttblparm; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; ph2c = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj)); if (ph2c == NULL) return _FAIL; psetrttblparm = (struct setratable_parm *)_malloc( sizeof(struct setratable_parm)); if (psetrttblparm == NULL) { kfree((unsigned char *)ph2c); return _FAIL; } init_h2fwcmd_w_parm_no_rsp(ph2c, psetrttblparm, GEN_CMD_CODE(_SetRaTable)); memcpy(psetrttblparm, prate_table, sizeof(struct setratable_parm)); r8712_enqueue_cmd(pcmdpriv, ph2c); return _SUCCESS; } u8 r8712_gettssi_cmd(struct _adapter *padapter, u8 offset, u8 *pval) { struct cmd_priv *pcmdpriv = &padapter->cmdpriv; struct cmd_obj *ph2c; struct readTSSI_parm *prdtssiparm; ph2c = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj)); if (ph2c == NULL) return _FAIL; prdtssiparm = (struct readTSSI_parm *) _malloc(sizeof(struct readTSSI_parm)); if (prdtssiparm == NULL) { kfree((unsigned char *) ph2c); return _FAIL; } _init_listhead(&ph2c->list); ph2c->cmdcode = GEN_CMD_CODE(_ReadTSSI); ph2c->parmbuf = (unsigned char *)prdtssiparm; ph2c->cmdsz = sizeof(struct readTSSI_parm); ph2c->rsp = pval; ph2c->rspsz = sizeof(struct readTSSI_rsp); prdtssiparm->offset = offset; r8712_enqueue_cmd(pcmdpriv, 
ph2c); return _SUCCESS; } u8 r8712_setMacAddr_cmd(struct _adapter *padapter, u8 *mac_addr) { struct cmd_priv *pcmdpriv = &padapter->cmdpriv; struct cmd_obj *ph2c; struct SetMacAddr_param *psetMacAddr_para; ph2c = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj)); if (ph2c == NULL) return _FAIL; psetMacAddr_para = (struct SetMacAddr_param *)_malloc( sizeof(struct SetMacAddr_param)); if (psetMacAddr_para == NULL) { kfree((u8 *) ph2c); return _FAIL; } init_h2fwcmd_w_parm_no_rsp(ph2c, psetMacAddr_para, _SetMacAddress_CMD_); memcpy(psetMacAddr_para->MacAddr, mac_addr, ETH_ALEN); r8712_enqueue_cmd(pcmdpriv, ph2c); return _SUCCESS; } u8 r8712_setassocsta_cmd(struct _adapter *padapter, u8 *mac_addr) { struct cmd_priv *pcmdpriv = &padapter->cmdpriv; struct cmd_obj *ph2c; struct set_assocsta_parm *psetassocsta_para; struct set_stakey_rsp *psetassocsta_rsp = NULL; ph2c = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj)); if (ph2c == NULL) return _FAIL; psetassocsta_para = (struct set_assocsta_parm *) _malloc(sizeof(struct set_assocsta_parm)); if (psetassocsta_para == NULL) { kfree((u8 *) ph2c); return _FAIL; } psetassocsta_rsp = (struct set_stakey_rsp *)_malloc( sizeof(struct set_assocsta_rsp)); if (psetassocsta_rsp == NULL) { kfree((u8 *)ph2c); kfree((u8 *)psetassocsta_para); return _FAIL; } init_h2fwcmd_w_parm_no_rsp(ph2c, psetassocsta_para, _SetAssocSta_CMD_); ph2c->rsp = (u8 *) psetassocsta_rsp; ph2c->rspsz = sizeof(struct set_assocsta_rsp); memcpy(psetassocsta_para->addr, mac_addr, ETH_ALEN); r8712_enqueue_cmd(pcmdpriv, ph2c); return _SUCCESS; } u8 r8712_addbareq_cmd(struct _adapter *padapter, u8 tid) { struct cmd_priv *pcmdpriv = &padapter->cmdpriv; struct cmd_obj *ph2c; struct addBaReq_parm *paddbareq_parm; ph2c = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj)); if (ph2c == NULL) return _FAIL; paddbareq_parm = (struct addBaReq_parm *)_malloc( sizeof(struct addBaReq_parm)); if (paddbareq_parm == NULL) { kfree((unsigned char *)ph2c); return _FAIL; } paddbareq_parm->tid 
= tid; init_h2fwcmd_w_parm_no_rsp(ph2c, paddbareq_parm, GEN_CMD_CODE(_AddBAReq)); r8712_enqueue_cmd_ex(pcmdpriv, ph2c); return _SUCCESS; } u8 r8712_wdg_wk_cmd(struct _adapter *padapter) { struct cmd_obj *ph2c; struct drvint_cmd_parm *pdrvintcmd_param; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; ph2c = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj)); if (ph2c == NULL) return _FAIL; pdrvintcmd_param = (struct drvint_cmd_parm *)_malloc( sizeof(struct drvint_cmd_parm)); if (pdrvintcmd_param == NULL) { kfree((unsigned char *)ph2c); return _FAIL; } pdrvintcmd_param->i_cid = WDG_WK_CID; pdrvintcmd_param->sz = 0; pdrvintcmd_param->pbuf = NULL; init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvintcmd_param, _DRV_INT_CMD_); r8712_enqueue_cmd_ex(pcmdpriv, ph2c); return _SUCCESS; } void r8712_survey_cmd_callback(struct _adapter *padapter, struct cmd_obj *pcmd) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; if (pcmd->res != H2C_SUCCESS) clr_fwstate(pmlmepriv, _FW_UNDER_SURVEY); r8712_free_cmd_obj(pcmd); } void r8712_disassoc_cmd_callback(struct _adapter *padapter, struct cmd_obj *pcmd) { unsigned long irqL; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; if (pcmd->res != H2C_SUCCESS) { spin_lock_irqsave(&pmlmepriv->lock, irqL); set_fwstate(pmlmepriv, _FW_LINKED); spin_unlock_irqrestore(&pmlmepriv->lock, irqL); return; } r8712_free_cmd_obj(pcmd); } void r8712_joinbss_cmd_callback(struct _adapter *padapter, struct cmd_obj *pcmd) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; if ((pcmd->res != H2C_SUCCESS)) _set_timer(&pmlmepriv->assoc_timer, 1); r8712_free_cmd_obj(pcmd); } void r8712_createbss_cmd_callback(struct _adapter *padapter, struct cmd_obj *pcmd) { unsigned long irqL; u8 timer_cancelled; struct sta_info *psta = NULL; struct wlan_network *pwlan = NULL; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct ndis_wlan_bssid_ex *pnetwork = (struct ndis_wlan_bssid_ex *) pcmd->parmbuf; struct wlan_network *tgt_network = &(pmlmepriv->cur_network); if ((pcmd->res != 
H2C_SUCCESS)) _set_timer(&pmlmepriv->assoc_timer, 1); _cancel_timer(&pmlmepriv->assoc_timer, &timer_cancelled); #ifdef __BIG_ENDIAN /* endian_convert */ pnetwork->Length = le32_to_cpu(pnetwork->Length); pnetwork->Ssid.SsidLength = le32_to_cpu(pnetwork->Ssid.SsidLength); pnetwork->Privacy = le32_to_cpu(pnetwork->Privacy); pnetwork->Rssi = le32_to_cpu(pnetwork->Rssi); pnetwork->NetworkTypeInUse = le32_to_cpu(pnetwork->NetworkTypeInUse); pnetwork->Configuration.ATIMWindow = le32_to_cpu(pnetwork-> Configuration.ATIMWindow); pnetwork->Configuration.DSConfig = le32_to_cpu(pnetwork-> Configuration.DSConfig); pnetwork->Configuration.FHConfig.DwellTime = le32_to_cpu(pnetwork-> Configuration.FHConfig.DwellTime); pnetwork->Configuration.FHConfig.HopPattern = le32_to_cpu(pnetwork-> Configuration.FHConfig.HopPattern); pnetwork->Configuration.FHConfig.HopSet = le32_to_cpu(pnetwork-> Configuration.FHConfig.HopSet); pnetwork->Configuration.FHConfig.Length = le32_to_cpu(pnetwork-> Configuration.FHConfig.Length); pnetwork->Configuration.Length = le32_to_cpu(pnetwork-> Configuration.Length); pnetwork->InfrastructureMode = le32_to_cpu(pnetwork-> InfrastructureMode); pnetwork->IELength = le32_to_cpu(pnetwork->IELength); #endif spin_lock_irqsave(&pmlmepriv->lock, irqL); if ((pmlmepriv->fw_state) & WIFI_AP_STATE) { psta = r8712_get_stainfo(&padapter->stapriv, pnetwork->MacAddress); if (!psta) { psta = r8712_alloc_stainfo(&padapter->stapriv, pnetwork->MacAddress); if (psta == NULL) goto createbss_cmd_fail ; } r8712_indicate_connect(padapter); } else { pwlan = _r8712_alloc_network(pmlmepriv); if (pwlan == NULL) { pwlan = r8712_get_oldest_wlan_network( &pmlmepriv->scanned_queue); if (pwlan == NULL) goto createbss_cmd_fail; pwlan->last_scanned = jiffies; } else list_insert_tail(&(pwlan->list), &pmlmepriv->scanned_queue.queue); pnetwork->Length = r8712_get_ndis_wlan_bssid_ex_sz(pnetwork); memcpy(&(pwlan->network), pnetwork, pnetwork->Length); pwlan->fixed = true; memcpy(&tgt_network->network, 
pnetwork, (r8712_get_ndis_wlan_bssid_ex_sz(pnetwork))); if (pmlmepriv->fw_state & _FW_UNDER_LINKING) pmlmepriv->fw_state ^= _FW_UNDER_LINKING; /* we will set _FW_LINKED when there is one more sat to * join us (stassoc_event_callback) */ } createbss_cmd_fail: spin_unlock_irqrestore(&pmlmepriv->lock, irqL); r8712_free_cmd_obj(pcmd); } void r8712_setstaKey_cmdrsp_callback(struct _adapter *padapter, struct cmd_obj *pcmd) { struct sta_priv *pstapriv = &padapter->stapriv; struct set_stakey_rsp *psetstakey_rsp = (struct set_stakey_rsp *) (pcmd->rsp); struct sta_info *psta = r8712_get_stainfo(pstapriv, psetstakey_rsp->addr); if (psta == NULL) goto exit; psta->aid = psta->mac_id = psetstakey_rsp->keyid; /*CAM_ID(CAM_ENTRY)*/ exit: r8712_free_cmd_obj(pcmd); } void r8712_setassocsta_cmdrsp_callback(struct _adapter *padapter, struct cmd_obj *pcmd) { unsigned long irqL; struct sta_priv *pstapriv = &padapter->stapriv; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct set_assocsta_parm *passocsta_parm = (struct set_assocsta_parm *)(pcmd->parmbuf); struct set_assocsta_rsp *passocsta_rsp = (struct set_assocsta_rsp *) (pcmd->rsp); struct sta_info *psta = r8712_get_stainfo(pstapriv, passocsta_parm->addr); if (psta == NULL) return; psta->aid = psta->mac_id = passocsta_rsp->cam_id; spin_lock_irqsave(&pmlmepriv->lock, irqL); if ((check_fwstate(pmlmepriv, WIFI_MP_STATE)) && (check_fwstate(pmlmepriv, _FW_UNDER_LINKING))) pmlmepriv->fw_state ^= _FW_UNDER_LINKING; set_fwstate(pmlmepriv, _FW_LINKED); spin_unlock_irqrestore(&pmlmepriv->lock, irqL); r8712_free_cmd_obj(pcmd); } u8 r8712_disconnectCtrlEx_cmd(struct _adapter *adapter, u32 enableDrvCtrl, u32 tryPktCnt, u32 tryPktInterval, u32 firstStageTO) { struct cmd_obj *ph2c; struct DisconnectCtrlEx_param *param; struct cmd_priv *pcmdpriv = &adapter->cmdpriv; ph2c = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj)); if (ph2c == NULL) return _FAIL; param = (struct DisconnectCtrlEx_param *) _malloc(sizeof(struct DisconnectCtrlEx_param)); 
if (param == NULL) { kfree((unsigned char *) ph2c); return _FAIL; } memset(param, 0, sizeof(struct DisconnectCtrlEx_param)); param->EnableDrvCtrl = (unsigned char)enableDrvCtrl; param->TryPktCnt = (unsigned char)tryPktCnt; param->TryPktInterval = (unsigned char)tryPktInterval; param->FirstStageTO = (unsigned int)firstStageTO; init_h2fwcmd_w_parm_no_rsp(ph2c, param, GEN_CMD_CODE(_DisconnectCtrlEx)); r8712_enqueue_cmd(pcmdpriv, ph2c); return _SUCCESS; }
gpl-2.0
victor2002/a770k_kernel
arch/arm/mach-msm/rpc_pmapp.c
176
13129
/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * */ #include <linux/slab.h> #include <linux/err.h> #include <asm/mach-types.h> #include <mach/board.h> #include <mach/rpc_pmapp.h> #include <mach/msm_rpcrouter.h> #include <mach/vreg.h> #define PMAPP_RPC_PROG 0x30000060 #define PMAPP_RPC_VER_1_1 0x00010001 #define PMAPP_RPC_VER_1_2 0x00010002 #define PMAPP_RPC_VER_2_1 0x00020001 #define PMAPP_RPC_VER_3_1 0x00030001 #define PMAPP_RPC_VER_5_1 0x00050001 #define VBUS_SESS_VALID_CB_PROC 1 #define PM_VOTE_USB_PWR_SEL_SWITCH_APP__HSUSB (1 << 2) #define PM_USB_PWR_SEL_SWITCH_ID 0 #define PMAPP_RPC_TIMEOUT (5*HZ) #define PMAPP_DISPLAY_CLOCK_CONFIG_PROC 21 #define PMAPP_VREG_LEVEL_VOTE_PROC 23 #define PMAPP_SMPS_CLOCK_VOTE_PROC 26 #define PMAPP_CLOCK_VOTE_PROC 27 #define PMAPP_SMPS_MODE_VOTE_PROC 28 #define PMAPP_VREG_PINCNTRL_VOTE_PROC 30 /* Clock voter name max length */ #define PMAPP_CLOCK_VOTER_ID_LEN 4 struct rpc_pmapp_ids { unsigned long reg_for_vbus_valid; unsigned long vote_for_vbus_valid_switch; }; static struct rpc_pmapp_ids rpc_ids; static struct msm_rpc_client *client; static void rpc_pmapp_init_rpc_ids(unsigned long vers) { if (vers == PMAPP_RPC_VER_1_1) { rpc_ids.reg_for_vbus_valid = 5; rpc_ids.vote_for_vbus_valid_switch = 6; } else if (vers == PMAPP_RPC_VER_1_2) { rpc_ids.reg_for_vbus_valid 
= 16; rpc_ids.vote_for_vbus_valid_switch = 17; } else if (vers == PMAPP_RPC_VER_2_1) { rpc_ids.reg_for_vbus_valid = 0; /* NA */ rpc_ids.vote_for_vbus_valid_switch = 0; /* NA */ } } struct usb_pwr_sel_switch_args { uint32_t cmd; uint32_t switch_id; uint32_t app_mask; }; static int usb_pwr_sel_switch_arg_cb(struct msm_rpc_client *client, void *buf, void *data) { struct usb_pwr_sel_switch_args *args = buf; args->cmd = cpu_to_be32(*(uint32_t *)data); args->switch_id = cpu_to_be32(PM_USB_PWR_SEL_SWITCH_ID); args->app_mask = cpu_to_be32(PM_VOTE_USB_PWR_SEL_SWITCH_APP__HSUSB); return sizeof(struct usb_pwr_sel_switch_args); } static int msm_pm_app_vote_usb_pwr_sel_switch(uint32_t cmd) { return msm_rpc_client_req(client, rpc_ids.vote_for_vbus_valid_switch, usb_pwr_sel_switch_arg_cb, &cmd, NULL, NULL, -1); } struct vbus_sess_valid_args { uint32_t cb_id; }; static int vbus_sess_valid_arg_cb(struct msm_rpc_client *client, void *buf, void *data) { struct vbus_sess_valid_args *args = buf; args->cb_id = cpu_to_be32(*(uint32_t *)data); return sizeof(struct vbus_sess_valid_args); } int pmic_vote_3p3_pwr_sel_switch(int boost) { int ret; ret = msm_pm_app_vote_usb_pwr_sel_switch(boost); return ret; } EXPORT_SYMBOL(pmic_vote_3p3_pwr_sel_switch); struct vbus_sn_notification_args { uint32_t cb_id; uint32_t vbus; /* vbus = 0 if VBUS is present */ }; static int vbus_notification_cb(struct msm_rpc_client *client, void *buffer, int in_size) { struct vbus_sn_notification_args *args; struct rpc_request_hdr *req = buffer; int rc; uint32_t accept_status; void (*cb_func)(int); uint32_t cb_id; int vbus; args = (struct vbus_sn_notification_args *) (req + 1); cb_id = be32_to_cpu(args->cb_id); vbus = be32_to_cpu(args->vbus); cb_func = msm_rpc_get_cb_func(client, cb_id); if (cb_func) { cb_func(!vbus); accept_status = RPC_ACCEPTSTAT_SUCCESS; } else accept_status = RPC_ACCEPTSTAT_SYSTEM_ERR; msm_rpc_start_accepted_reply(client, be32_to_cpu(req->xid), accept_status); rc = 
msm_rpc_send_accepted_reply(client, 0); if (rc) pr_err("%s: send accepted reply failed: %d\n", __func__, rc); return rc; } static int pm_app_usb_cb_func(struct msm_rpc_client *client, void *buffer, int in_size) { int rc; struct rpc_request_hdr *req = buffer; switch (be32_to_cpu(req->procedure)) { case VBUS_SESS_VALID_CB_PROC: rc = vbus_notification_cb(client, buffer, in_size); break; default: pr_err("%s: procedure not supported %d\n", __func__, be32_to_cpu(req->procedure)); msm_rpc_start_accepted_reply(client, be32_to_cpu(req->xid), RPC_ACCEPTSTAT_PROC_UNAVAIL); rc = msm_rpc_send_accepted_reply(client, 0); if (rc) pr_err("%s: sending reply failed: %d\n", __func__, rc); break; } return rc; } int msm_pm_app_rpc_init(void (*callback)(int online)) { uint32_t cb_id, rc; if (!machine_is_qsd8x50_ffa() && !machine_is_qsd8x50a_ffa() && !machine_is_msm7x27_ffa()) return -ENOTSUPP; client = msm_rpc_register_client("pmapp_usb", PMAPP_RPC_PROG, PMAPP_RPC_VER_2_1, 1, pm_app_usb_cb_func); if (!IS_ERR(client)) { rpc_pmapp_init_rpc_ids(PMAPP_RPC_VER_2_1); goto done; } client = msm_rpc_register_client("pmapp_usb", PMAPP_RPC_PROG, PMAPP_RPC_VER_1_2, 1, pm_app_usb_cb_func); if (!IS_ERR(client)) { rpc_pmapp_init_rpc_ids(PMAPP_RPC_VER_1_2); goto done; } client = msm_rpc_register_client("pmapp_usb", PMAPP_RPC_PROG, PMAPP_RPC_VER_1_1, 1, pm_app_usb_cb_func); if (!IS_ERR(client)) rpc_pmapp_init_rpc_ids(PMAPP_RPC_VER_1_1); else return PTR_ERR(client); done: cb_id = msm_rpc_add_cb_func(client, (void *)callback); /* In case of NULL callback funtion, cb_id would be -1 */ if ((int) cb_id < -1) return cb_id; rc = msm_rpc_client_req(client, rpc_ids.reg_for_vbus_valid, vbus_sess_valid_arg_cb, &cb_id, NULL, NULL, -1); return rc; } EXPORT_SYMBOL(msm_pm_app_rpc_init); void msm_pm_app_rpc_deinit(void(*callback)(int online)) { if (client) { msm_rpc_remove_cb_func(client, (void *)callback); msm_rpc_unregister_client(client); } } EXPORT_SYMBOL(msm_pm_app_rpc_deinit); /* error bit flags defined by modem 
side */ #define PM_ERR_FLAG__PAR1_OUT_OF_RANGE (0x0001) #define PM_ERR_FLAG__PAR2_OUT_OF_RANGE (0x0002) #define PM_ERR_FLAG__PAR3_OUT_OF_RANGE (0x0004) #define PM_ERR_FLAG__PAR4_OUT_OF_RANGE (0x0008) #define PM_ERR_FLAG__PAR5_OUT_OF_RANGE (0x0010) #define PM_ERR_FLAG__ALL_PARMS_OUT_OF_RANGE (0x001F) /* all 5 previous */ #define PM_ERR_FLAG__SBI_OPT_ERR (0x0080) #define PM_ERR_FLAG__FEATURE_NOT_SUPPORTED (0x0100) #define PMAPP_BUFF_SIZE 256 struct pmapp_buf { char *start; /* buffer start addr */ char *end; /* buffer end addr */ int size; /* buffer size */ char *data; /* payload begin addr */ int len; /* payload len */ }; static DEFINE_MUTEX(pmapp_mtx); struct pmapp_ctrl { int inited; struct pmapp_buf tbuf; struct pmapp_buf rbuf; struct msm_rpc_endpoint *endpoint; }; static struct pmapp_ctrl pmapp_ctrl = { .inited = -1, }; static int pmapp_rpc_set_only(uint data0, uint data1, uint data2, uint data3, int num, int proc); static int pmapp_buf_init(void) { struct pmapp_ctrl *pm = &pmapp_ctrl; memset(&pmapp_ctrl, 0, sizeof(pmapp_ctrl)); pm->tbuf.start = kmalloc(PMAPP_BUFF_SIZE, GFP_KERNEL); if (pm->tbuf.start == NULL) { printk(KERN_ERR "%s:%u\n", __func__, __LINE__); return -ENOMEM; } pm->tbuf.data = pm->tbuf.start; pm->tbuf.size = PMAPP_BUFF_SIZE; pm->tbuf.end = pm->tbuf.start + PMAPP_BUFF_SIZE; pm->tbuf.len = 0; pm->rbuf.start = kmalloc(PMAPP_BUFF_SIZE, GFP_KERNEL); if (pm->rbuf.start == NULL) { kfree(pm->tbuf.start); printk(KERN_ERR "%s:%u\n", __func__, __LINE__); return -ENOMEM; } pm->rbuf.data = pm->rbuf.start; pm->rbuf.size = PMAPP_BUFF_SIZE; pm->rbuf.end = pm->rbuf.start + PMAPP_BUFF_SIZE; pm->rbuf.len = 0; pm->inited = 1; return 0; } static inline void pmapp_buf_reserve(struct pmapp_buf *bp, int len) { bp->data += len; } static inline void pmapp_buf_reset(struct pmapp_buf *bp) { bp->data = bp->start; bp->len = 0; } static int modem_to_linux_err(uint err) { if (err == 0) return 0; if (err & PM_ERR_FLAG__ALL_PARMS_OUT_OF_RANGE) return -EINVAL; /* 
PM_ERR_FLAG__PAR[1..5]_OUT_OF_RANGE */ if (err & PM_ERR_FLAG__SBI_OPT_ERR) return -EIO; if (err & PM_ERR_FLAG__FEATURE_NOT_SUPPORTED) return -ENOSYS; return -EPERM; } static int pmapp_put_tx_data(struct pmapp_buf *tp, uint datav) { uint *lp; if ((tp->size - tp->len) < sizeof(datav)) { printk(KERN_ERR "%s: OVERFLOW size=%d len=%d\n", __func__, tp->size, tp->len); return -1; } lp = (uint *)tp->data; *lp = cpu_to_be32(datav); tp->data += sizeof(datav); tp->len += sizeof(datav); return sizeof(datav); } static int pmapp_pull_rx_data(struct pmapp_buf *rp, uint *datap) { uint *lp; if (rp->len < sizeof(*datap)) { printk(KERN_ERR "%s: UNDERRUN len=%d\n", __func__, rp->len); return -1; } lp = (uint *)rp->data; *datap = be32_to_cpu(*lp); rp->data += sizeof(*datap); rp->len -= sizeof(*datap); return sizeof(*datap); } static int pmapp_rpc_req_reply(struct pmapp_buf *tbuf, struct pmapp_buf *rbuf, int proc) { struct pmapp_ctrl *pm = &pmapp_ctrl; int ans, len; if ((pm->endpoint == NULL) || IS_ERR(pm->endpoint)) { pm->endpoint = msm_rpc_connect_compatible(PMAPP_RPC_PROG, PMAPP_RPC_VER_5_1, 0); if (IS_ERR(pm->endpoint)) { pm->endpoint = msm_rpc_connect_compatible( PMAPP_RPC_PROG, PMAPP_RPC_VER_3_1, 0); } if (IS_ERR(pm->endpoint)) { pm->endpoint = msm_rpc_connect_compatible( PMAPP_RPC_PROG, PMAPP_RPC_VER_2_1, 0); } if (IS_ERR(pm->endpoint)) { ans = PTR_ERR(pm->endpoint); printk(KERN_ERR "%s: init rpc failed! ans = %d\n", __func__, ans); return ans; } } /* * data is point to next available space at this moment, * move it back to beginning of request header and increase * the length */ tbuf->data = tbuf->start; tbuf->len += sizeof(struct rpc_request_hdr); len = msm_rpc_call_reply(pm->endpoint, proc, tbuf->data, tbuf->len, rbuf->data, rbuf->size, PMAPP_RPC_TIMEOUT); if (len <= 0) { printk(KERN_ERR "%s: rpc failed! len = %d\n", __func__, len); pm->endpoint = NULL; /* re-connect later ? 
*/ return len; } rbuf->len = len; /* strip off rpc_reply_hdr */ rbuf->data += sizeof(struct rpc_reply_hdr); rbuf->len -= sizeof(struct rpc_reply_hdr); return rbuf->len; } static int pmapp_rpc_set_only(uint data0, uint data1, uint data2, uint data3, int num, int proc) { struct pmapp_ctrl *pm = &pmapp_ctrl; struct pmapp_buf *tp; struct pmapp_buf *rp; int stat; if (mutex_lock_interruptible(&pmapp_mtx)) return -ERESTARTSYS; if (pm->inited <= 0) { stat = pmapp_buf_init(); if (stat < 0) { mutex_unlock(&pmapp_mtx); return stat; } } tp = &pm->tbuf; rp = &pm->rbuf; pmapp_buf_reset(tp); pmapp_buf_reserve(tp, sizeof(struct rpc_request_hdr)); pmapp_buf_reset(rp); if (num > 0) pmapp_put_tx_data(tp, data0); if (num > 1) pmapp_put_tx_data(tp, data1); if (num > 2) pmapp_put_tx_data(tp, data2); if (num > 3) pmapp_put_tx_data(tp, data3); stat = pmapp_rpc_req_reply(tp, rp, proc); if (stat < 0) { mutex_unlock(&pmapp_mtx); return stat; } pmapp_pull_rx_data(rp, &stat); /* result from server */ mutex_unlock(&pmapp_mtx); return modem_to_linux_err(stat); } int pmapp_display_clock_config(uint enable) { return pmapp_rpc_set_only(enable, 0, 0, 0, 1, PMAPP_DISPLAY_CLOCK_CONFIG_PROC); } EXPORT_SYMBOL(pmapp_display_clock_config); int pmapp_clock_vote(const char *voter_id, uint clock_id, uint vote) { if (strlen(voter_id) != PMAPP_CLOCK_VOTER_ID_LEN) return -EINVAL; return pmapp_rpc_set_only(*((uint *) voter_id), clock_id, vote, 0, 3, PMAPP_CLOCK_VOTE_PROC); } EXPORT_SYMBOL(pmapp_clock_vote); int pmapp_smps_clock_vote(const char *voter_id, uint vreg_id, uint vote) { if (strlen(voter_id) != PMAPP_CLOCK_VOTER_ID_LEN) return -EINVAL; return pmapp_rpc_set_only(*((uint *) voter_id), vreg_id, vote, 0, 3, PMAPP_SMPS_CLOCK_VOTE_PROC); } EXPORT_SYMBOL(pmapp_smps_clock_vote); int pmapp_vreg_level_vote(const char *voter_id, uint vreg_id, uint level) { if (strlen(voter_id) != PMAPP_CLOCK_VOTER_ID_LEN) return -EINVAL; return pmapp_rpc_set_only(*((uint *) voter_id), vreg_id, level, 0, 3, 
PMAPP_VREG_LEVEL_VOTE_PROC); } EXPORT_SYMBOL(pmapp_vreg_level_vote); int pmapp_smps_mode_vote(const char *voter_id, uint vreg_id, uint mode) { if (strlen(voter_id) != PMAPP_CLOCK_VOTER_ID_LEN) return -EINVAL; return pmapp_rpc_set_only(*((uint *) voter_id), vreg_id, mode, 0, 3, PMAPP_SMPS_MODE_VOTE_PROC); } EXPORT_SYMBOL(pmapp_smps_mode_vote); int pmapp_vreg_pincntrl_vote(const char *voter_id, uint vreg_id, uint clock_id, uint vote) { if (strlen(voter_id) != PMAPP_CLOCK_VOTER_ID_LEN) return -EINVAL; return pmapp_rpc_set_only(*((uint *) voter_id), vreg_id, clock_id, vote, 4, PMAPP_VREG_PINCNTRL_VOTE_PROC); } EXPORT_SYMBOL(pmapp_vreg_pincntrl_vote);
gpl-2.0
explora26/kernel-hikey-linaro
drivers/scsi/bnx2i/bnx2i_iscsi.c
432
63142
/* * bnx2i_iscsi.c: QLogic NetXtreme II iSCSI driver. * * Copyright (c) 2006 - 2013 Broadcom Corporation * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. * Copyright (c) 2007, 2008 Mike Christie * Copyright (c) 2014, QLogic Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com) * Maintained by: QLogic-Storage-Upstream@qlogic.com */ #include <linux/slab.h> #include <scsi/scsi_tcq.h> #include <scsi/libiscsi.h> #include "bnx2i.h" struct scsi_transport_template *bnx2i_scsi_xport_template; struct iscsi_transport bnx2i_iscsi_transport; static struct scsi_host_template bnx2i_host_template; /* * Global endpoint resource info */ static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */ DECLARE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu); static int bnx2i_adapter_ready(struct bnx2i_hba *hba) { int retval = 0; if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) || test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) || test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state)) retval = -EPERM; return retval; } /** * bnx2i_get_write_cmd_bd_idx - identifies various BD bookmarks * @cmd: iscsi cmd struct pointer * @buf_off: absolute buffer offset * @start_bd_off: u32 pointer to return the offset within the BD * indicated by 'start_bd_idx' on which 'buf_off' falls * @start_bd_idx: index of the BD on which 'buf_off' falls * * identifies & marks various bd info for scsi command's imm data, * unsolicited data and the first solicited data seq. 
 */
static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off,
				       u32 *start_bd_off, u32 *start_bd_idx)
{
	struct iscsi_bd *bd_tbl = cmd->io_tbl.bd_tbl;
	u32 cur_offset = 0;
	u32 cur_bd_idx = 0;

	if (buf_off) {
		/* advance along the BD chain until the BD that
		 * contains absolute offset 'buf_off' */
		while (buf_off >= (cur_offset + bd_tbl->buffer_length)) {
			cur_offset += bd_tbl->buffer_length;
			cur_bd_idx++;
			bd_tbl++;
		}
	}

	*start_bd_off = buf_off - cur_offset;
	*start_bd_idx = cur_bd_idx;
}

/**
 * bnx2i_setup_write_cmd_bd_info - sets up BD various information
 * @task:	transport layer's cmd struct pointer
 *
 * identifies & marks various bd info for scsi command's immediate data,
 * unsolicited data and first solicited data seq which includes BD start
 * index & BD buf off. This function takes into account iscsi parameter such
 * as immediate data and unsolicited data is support on this connection.
 */
static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task)
{
	struct bnx2i_cmd *cmd = task->dd_data;
	u32 start_bd_offset;
	u32 start_bd_idx;
	u32 buffer_offset = 0;
	u32 cmd_len = cmd->req.total_data_transfer_length;

	/* if ImmediateData is turned off & InitialR2T is turned on,
	 * there will be no immediate or unsolicited data, just return.
	 */
	if (!iscsi_task_has_unsol_data(task) && !task->imm_count)
		return;

	/* Immediate data */
	buffer_offset += task->imm_count;
	if (task->imm_count == cmd_len)
		return;

	if (iscsi_task_has_unsol_data(task)) {
		bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
					   &start_bd_offset, &start_bd_idx);
		cmd->req.ud_buffer_offset = start_bd_offset;
		cmd->req.ud_start_bd_index = start_bd_idx;
		buffer_offset += task->unsol_r2t.data_length;
	}

	if (buffer_offset != cmd_len) {
		bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
					   &start_bd_offset, &start_bd_idx);
		/* sanity check: solicited data must start inside the
		 * first-burst window and within the mapped SG list */
		if ((start_bd_offset > task->conn->session->first_burst) ||
		    (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) {
			int i = 0;

			iscsi_conn_printk(KERN_ALERT, task->conn,
					  "bnx2i- error, buf offset 0x%x "
					  "bd_valid %d use_sg %d\n",
					  buffer_offset, cmd->io_tbl.bd_valid,
					  scsi_sg_count(cmd->scsi_cmd));
			for (i = 0; i < cmd->io_tbl.bd_valid; i++)
				iscsi_conn_printk(KERN_ALERT, task->conn,
						  "bnx2i err, bd[%d]: len %x\n",
						  i, cmd->io_tbl.bd_tbl[i].\
						  buffer_length);
		}
		cmd->req.sd_buffer_offset = start_bd_offset;
		cmd->req.sd_start_bd_index = start_bd_idx;
	}
}

/**
 * bnx2i_map_scsi_sg - maps IO buffer and prepares the BD table
 * @hba:	adapter instance
 * @cmd:	iscsi cmd struct pointer
 *
 * map SG list; returns the number of BD entries filled in.
 * NOTE(review): a scsi_dma_map() failure (negative return) is not checked
 * here - the sg loop then runs zero times and 0 is returned; verify all
 * callers treat a zero BD count as "no data mapped".
 */
static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
{
	struct scsi_cmnd *sc = cmd->scsi_cmd;
	struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int bd_count = 0;
	int sg_count;
	int sg_len;
	u64 addr;
	int i;

	BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD);

	sg_count = scsi_dma_map(sc);

	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = sg_dma_len(sg);
		addr = (u64) sg_dma_address(sg);
		bd[bd_count].buffer_addr_lo = addr & 0xffffffff;
		bd[bd_count].buffer_addr_hi = addr >> 32;
		bd[bd_count].buffer_length = sg_len;
		bd[bd_count].flags = 0;
		if (bd_count == 0)
			bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN;

		byte_count += sg_len;
		bd_count++;
	}

	if (bd_count)
		bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN;

	BUG_ON(byte_count != scsi_bufflen(sc));
	return bd_count;
}

/**
 * bnx2i_iscsi_map_sg_list - maps SG list
 * @cmd:	iscsi cmd struct pointer
 *
 * creates BD list table for the command; when nothing was mapped the
 * first BD entry is zeroed out before recording a bd_valid of 0.
 */
static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd)
{
	int bd_count;

	bd_count = bnx2i_map_scsi_sg(cmd->conn->hba, cmd);
	if (!bd_count) {
		struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;

		bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0;
		bd[0].buffer_length = bd[0].flags = 0;
	}
	cmd->io_tbl.bd_valid = bd_count;
}

/**
 * bnx2i_iscsi_unmap_sg_list - unmaps SG list
 * @cmd:	iscsi cmd struct pointer
 *
 * unmap IO buffers and invalidate the BD table
 */
void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd)
{
	struct scsi_cmnd *sc = cmd->scsi_cmd;

	if (cmd->io_tbl.bd_valid && sc) {
		scsi_dma_unmap(sc);
		cmd->io_tbl.bd_valid = 0;
	}
}

/* Zero the SQ WQE template and point it at this command's BD table.
 * op_code 0xFF is a placeholder; the real opcode is filled in later by
 * the caller (e.g. ISCSI_OP_SCSI_CMD in bnx2i_task_xmit). */
static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd)
{
	memset(&cmd->req, 0x00, sizeof(cmd->req));
	cmd->req.op_code = 0xFF;
	cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma;
	cmd->req.bd_list_addr_hi =
		(u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32);
}

/**
 * bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid'
 * @hba:	pointer to adapter instance
 * @conn:	pointer to iscsi connection
 * @iscsi_cid:	iscsi context ID, range 0 - (MAX_CONN - 1)
 *
 * update iscsi cid table entry with connection pointer.
This enables * driver to quickly get hold of connection structure pointer in * completion/interrupt thread using iscsi context ID */ static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba, struct bnx2i_conn *bnx2i_conn, u32 iscsi_cid) { if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) { iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data, "conn bind - entry #%d not free\n", iscsi_cid); return -EBUSY; } hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn; return 0; } /** * bnx2i_get_conn_from_id - maps an iscsi cid to corresponding conn ptr * @hba: pointer to adapter instance * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1) */ struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba, u16 iscsi_cid) { if (!hba->cid_que.conn_cid_tbl) { printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n"); return NULL; } else if (iscsi_cid >= hba->max_active_conns) { printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid); return NULL; } return hba->cid_que.conn_cid_tbl[iscsi_cid]; } /** * bnx2i_alloc_iscsi_cid - allocates a iscsi_cid from free pool * @hba: pointer to adapter instance */ static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba) { int idx; if (!hba->cid_que.cid_free_cnt) return -1; idx = hba->cid_que.cid_q_cons_idx; hba->cid_que.cid_q_cons_idx++; if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx) hba->cid_que.cid_q_cons_idx = 0; hba->cid_que.cid_free_cnt--; return hba->cid_que.cid_que[idx]; } /** * bnx2i_free_iscsi_cid - returns tcp port to free list * @hba: pointer to adapter instance * @iscsi_cid: iscsi context ID to free */ static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid) { int idx; if (iscsi_cid == (u16) -1) return; hba->cid_que.cid_free_cnt++; idx = hba->cid_que.cid_q_prod_idx; hba->cid_que.cid_que[idx] = iscsi_cid; hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL; hba->cid_que.cid_q_prod_idx++; if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx) hba->cid_que.cid_q_prod_idx = 0; } /** * 
bnx2i_setup_free_cid_que - sets up free iscsi cid queue * @hba: pointer to adapter instance * * allocates memory for iscsi cid queue & 'cid - conn ptr' mapping table, * and initialize table attributes */ static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba) { int mem_size; int i; mem_size = hba->max_active_conns * sizeof(u32); mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL); if (!hba->cid_que.cid_que_base) return -ENOMEM; mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *); mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL); if (!hba->cid_que.conn_cid_tbl) { kfree(hba->cid_que.cid_que_base); hba->cid_que.cid_que_base = NULL; return -ENOMEM; } hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base; hba->cid_que.cid_q_prod_idx = 0; hba->cid_que.cid_q_cons_idx = 0; hba->cid_que.cid_q_max_idx = hba->max_active_conns; hba->cid_que.cid_free_cnt = hba->max_active_conns; for (i = 0; i < hba->max_active_conns; i++) { hba->cid_que.cid_que[i] = i; hba->cid_que.conn_cid_tbl[i] = NULL; } return 0; } /** * bnx2i_release_free_cid_que - releases 'iscsi_cid' queue resources * @hba: pointer to adapter instance */ static void bnx2i_release_free_cid_que(struct bnx2i_hba *hba) { kfree(hba->cid_que.cid_que_base); hba->cid_que.cid_que_base = NULL; kfree(hba->cid_que.conn_cid_tbl); hba->cid_que.conn_cid_tbl = NULL; } /** * bnx2i_alloc_ep - allocates ep structure from global pool * @hba: pointer to adapter instance * * routine allocates a free endpoint structure from global pool and * a tcp port to be used for this connection. 
Global resource lock, * 'bnx2i_resc_lock' is held while accessing shared global data structures */ static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba) { struct iscsi_endpoint *ep; struct bnx2i_endpoint *bnx2i_ep; u32 ec_div; ep = iscsi_create_endpoint(sizeof(*bnx2i_ep)); if (!ep) { printk(KERN_ERR "bnx2i: Could not allocate ep\n"); return NULL; } bnx2i_ep = ep->dd_data; bnx2i_ep->cls_ep = ep; INIT_LIST_HEAD(&bnx2i_ep->link); bnx2i_ep->state = EP_STATE_IDLE; bnx2i_ep->ep_iscsi_cid = (u16) -1; bnx2i_ep->hba = hba; bnx2i_ep->hba_age = hba->age; ec_div = event_coal_div; while (ec_div >>= 1) bnx2i_ep->ec_shift += 1; hba->ofld_conns_active++; init_waitqueue_head(&bnx2i_ep->ofld_wait); return ep; } /** * bnx2i_free_ep - free endpoint * @ep: pointer to iscsi endpoint structure */ static void bnx2i_free_ep(struct iscsi_endpoint *ep) { struct bnx2i_endpoint *bnx2i_ep = ep->dd_data; unsigned long flags; spin_lock_irqsave(&bnx2i_resc_lock, flags); bnx2i_ep->state = EP_STATE_IDLE; bnx2i_ep->hba->ofld_conns_active--; if (bnx2i_ep->ep_iscsi_cid != (u16) -1) bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid); if (bnx2i_ep->conn) { bnx2i_ep->conn->ep = NULL; bnx2i_ep->conn = NULL; } bnx2i_ep->hba = NULL; spin_unlock_irqrestore(&bnx2i_resc_lock, flags); iscsi_destroy_endpoint(ep); } /** * bnx2i_alloc_bdt - allocates buffer descriptor (BD) table for the command * @hba: adapter instance pointer * @session: iscsi session pointer * @cmd: iscsi command structure */ static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session, struct bnx2i_cmd *cmd) { struct io_bdt *io = &cmd->io_tbl; struct iscsi_bd *bd; io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, ISCSI_MAX_BDS_PER_CMD * sizeof(*bd), &io->bd_tbl_dma, GFP_KERNEL); if (!io->bd_tbl) { iscsi_session_printk(KERN_ERR, session, "Could not " "allocate bdt.\n"); return -ENOMEM; } io->bd_valid = 0; return 0; } /** * bnx2i_destroy_cmd_pool - destroys iscsi command pool and release BD table * @hba: 
adapter instance pointer * @session: iscsi session pointer * @cmd: iscsi command structure */ static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba, struct iscsi_session *session) { int i; for (i = 0; i < session->cmds_max; i++) { struct iscsi_task *task = session->cmds[i]; struct bnx2i_cmd *cmd = task->dd_data; if (cmd->io_tbl.bd_tbl) dma_free_coherent(&hba->pcidev->dev, ISCSI_MAX_BDS_PER_CMD * sizeof(struct iscsi_bd), cmd->io_tbl.bd_tbl, cmd->io_tbl.bd_tbl_dma); } } /** * bnx2i_setup_cmd_pool - sets up iscsi command pool for the session * @hba: adapter instance pointer * @session: iscsi session pointer */ static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba, struct iscsi_session *session) { int i; for (i = 0; i < session->cmds_max; i++) { struct iscsi_task *task = session->cmds[i]; struct bnx2i_cmd *cmd = task->dd_data; task->hdr = &cmd->hdr; task->hdr_max = sizeof(struct iscsi_hdr); if (bnx2i_alloc_bdt(hba, session, cmd)) goto free_bdts; } return 0; free_bdts: bnx2i_destroy_cmd_pool(hba, session); return -ENOMEM; } /** * bnx2i_setup_mp_bdt - allocate BD table resources * @hba: pointer to adapter structure * * Allocate memory for dummy buffer and associated BD * table to be used by middle path (MP) requests */ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba) { int rc = 0; struct iscsi_bd *mp_bdt; u64 addr; hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, &hba->mp_bd_dma, GFP_KERNEL); if (!hba->mp_bd_tbl) { printk(KERN_ERR "unable to allocate Middle Path BDT\n"); rc = -1; goto out; } hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, &hba->dummy_buf_dma, GFP_KERNEL); if (!hba->dummy_buffer) { printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n"); dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, hba->mp_bd_tbl, hba->mp_bd_dma); hba->mp_bd_tbl = NULL; rc = -1; goto out; } mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl; addr = (unsigned long) hba->dummy_buf_dma; mp_bdt->buffer_addr_lo = addr & 0xffffffff; 
	mp_bdt->buffer_addr_hi = addr >> 32;
	mp_bdt->buffer_length = CNIC_PAGE_SIZE;
	mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
			ISCSI_BD_FIRST_IN_BD_CHAIN;
out:
	return rc;
}

/**
 * bnx2i_free_mp_bdt - releases middle path (MP) dummy buffer and BD table
 * @hba:	pointer to adapter instance
 *
 * free MP dummy buffer and associated BD table
 */
static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
{
	if (hba->mp_bd_tbl) {
		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
				  hba->mp_bd_tbl, hba->mp_bd_dma);
		hba->mp_bd_tbl = NULL;
	}
	if (hba->dummy_buffer) {
		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
				  hba->dummy_buffer, hba->dummy_buf_dma);
		hba->dummy_buffer = NULL;
	}
	return;
}

/**
 * bnx2i_drop_session - notifies iscsid of connection error.
 * @hba:	adapter instance pointer
 * @session:	iscsi session pointer
 *
 * This notifies iscsid that there is a error, so it can initiate
 * recovery.
 *
 * This relies on caller using the iscsi class iterator so the object
 * is refcounted and does not disappear from under us.
 */
void bnx2i_drop_session(struct iscsi_cls_session *cls_session)
{
	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
}

/**
 * bnx2i_ep_destroy_list_add - add an entry to EP destroy list
 * @hba:	pointer to adapter instance
 * @ep:		pointer to endpoint (transport identifier) structure
 *
 * EP destroy queue manager
 */
static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,
				     struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_add_tail(&ep->link, &hba->ep_destroy_list);
	write_unlock_bh(&hba->ep_rdwr_lock);
	return 0;
}

/**
 * bnx2i_ep_destroy_list_del - deletes an entry from the EP destroy list
 *
 * @hba:	pointer to adapter instance
 * @ep:		pointer to endpoint (transport identifier) structure
 *
 * EP destroy queue manager
 */
static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba,
				     struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_del_init(&ep->link);
	write_unlock_bh(&hba->ep_rdwr_lock);

	return 0;
}

/**
 * bnx2i_ep_ofld_list_add - add an entry to ep offload
pending list * @hba: pointer to adapter instance * @ep: pointer to endpoint (transport identifier) structure * * pending conn offload completion queue manager */ static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) { write_lock_bh(&hba->ep_rdwr_lock); list_add_tail(&ep->link, &hba->ep_ofld_list); write_unlock_bh(&hba->ep_rdwr_lock); return 0; } /** * bnx2i_ep_ofld_list_del - add an entry to ep offload pending list * @hba: pointer to adapter instance * @ep: pointer to endpoint (transport identifier) structure * * pending conn offload completion queue manager */ static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) { write_lock_bh(&hba->ep_rdwr_lock); list_del_init(&ep->link); write_unlock_bh(&hba->ep_rdwr_lock); return 0; } /** * bnx2i_find_ep_in_ofld_list - find iscsi_cid in pending list of endpoints * * @hba: pointer to adapter instance * @iscsi_cid: iscsi context ID to find * */ struct bnx2i_endpoint * bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid) { struct list_head *list; struct list_head *tmp; struct bnx2i_endpoint *ep; read_lock_bh(&hba->ep_rdwr_lock); list_for_each_safe(list, tmp, &hba->ep_ofld_list) { ep = (struct bnx2i_endpoint *)list; if (ep->ep_iscsi_cid == iscsi_cid) break; ep = NULL; } read_unlock_bh(&hba->ep_rdwr_lock); if (!ep) printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid); return ep; } /** * bnx2i_find_ep_in_destroy_list - find iscsi_cid in destroy list * @hba: pointer to adapter instance * @iscsi_cid: iscsi context ID to find * */ struct bnx2i_endpoint * bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid) { struct list_head *list; struct list_head *tmp; struct bnx2i_endpoint *ep; read_lock_bh(&hba->ep_rdwr_lock); list_for_each_safe(list, tmp, &hba->ep_destroy_list) { ep = (struct bnx2i_endpoint *)list; if (ep->ep_iscsi_cid == iscsi_cid) break; ep = NULL; } read_unlock_bh(&hba->ep_rdwr_lock); if (!ep) printk(KERN_ERR "l5 cid %d not found\n", 
iscsi_cid); return ep; } /** * bnx2i_ep_active_list_add - add an entry to ep active list * @hba: pointer to adapter instance * @ep: pointer to endpoint (transport identifier) structure * * current active conn queue manager */ static void bnx2i_ep_active_list_add(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) { write_lock_bh(&hba->ep_rdwr_lock); list_add_tail(&ep->link, &hba->ep_active_list); write_unlock_bh(&hba->ep_rdwr_lock); } /** * bnx2i_ep_active_list_del - deletes an entry to ep active list * @hba: pointer to adapter instance * @ep: pointer to endpoint (transport identifier) structure * * current active conn queue manager */ static void bnx2i_ep_active_list_del(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) { write_lock_bh(&hba->ep_rdwr_lock); list_del_init(&ep->link); write_unlock_bh(&hba->ep_rdwr_lock); } /** * bnx2i_setup_host_queue_size - assigns shost->can_queue param * @hba: pointer to adapter instance * @shost: scsi host pointer * * Initializes 'can_queue' parameter based on how many outstanding commands * the device can handle. 
Each device 5708/5709/57710 has different * capabilities */ static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba, struct Scsi_Host *shost) { if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type)) shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708; else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5709; else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_57710; else shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708; } /** * bnx2i_alloc_hba - allocate and init adapter instance * @cnic: cnic device pointer * * allocate & initialize adapter structure and call other * support routines to do per adapter initialization */ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic) { struct Scsi_Host *shost; struct bnx2i_hba *hba; shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0); if (!shost) return NULL; shost->dma_boundary = cnic->pcidev->dma_mask; shost->transportt = bnx2i_scsi_xport_template; shost->max_id = ISCSI_MAX_CONNS_PER_HBA; shost->max_channel = 0; shost->max_lun = 512; shost->max_cmd_len = 16; hba = iscsi_host_priv(shost); hba->shost = shost; hba->netdev = cnic->netdev; /* Get PCI related information and update hba struct members */ hba->pcidev = cnic->pcidev; pci_dev_get(hba->pcidev); hba->pci_did = hba->pcidev->device; hba->pci_vid = hba->pcidev->vendor; hba->pci_sdid = hba->pcidev->subsystem_device; hba->pci_svid = hba->pcidev->subsystem_vendor; hba->pci_func = PCI_FUNC(hba->pcidev->devfn); hba->pci_devno = PCI_SLOT(hba->pcidev->devfn); bnx2i_identify_device(hba, cnic); bnx2i_setup_host_queue_size(hba, shost); hba->reg_base = pci_resource_start(hba->pcidev, 0); if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) { hba->regview = pci_iomap(hba->pcidev, 0, BNX2_MQ_CONFIG2); if (!hba->regview) goto ioreg_map_err; } else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { hba->regview = pci_iomap(hba->pcidev, 0, 4096); if (!hba->regview) 
goto ioreg_map_err; } if (bnx2i_setup_mp_bdt(hba)) goto mp_bdt_mem_err; INIT_LIST_HEAD(&hba->ep_ofld_list); INIT_LIST_HEAD(&hba->ep_active_list); INIT_LIST_HEAD(&hba->ep_destroy_list); rwlock_init(&hba->ep_rdwr_lock); hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED; /* different values for 5708/5709/57710 */ hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA; if (bnx2i_setup_free_cid_que(hba)) goto cid_que_err; /* SQ/RQ/CQ size can be changed via sysfx interface */ if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { if (sq_size && sq_size <= BNX2I_5770X_SQ_WQES_MAX) hba->max_sqes = sq_size; else hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT; } else { /* 5706/5708/5709 */ if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX) hba->max_sqes = sq_size; else hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT; } hba->max_rqes = rq_size; hba->max_cqes = hba->max_sqes + rq_size; if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX) hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX; } else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX) hba->max_cqes = BNX2I_570X_CQ_WQES_MAX; hba->num_ccell = hba->max_sqes / 2; spin_lock_init(&hba->lock); mutex_init(&hba->net_dev_lock); init_waitqueue_head(&hba->eh_wait); if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { hba->hba_shutdown_tmo = 30 * HZ; hba->conn_teardown_tmo = 20 * HZ; hba->conn_ctx_destroy_tmo = 6 * HZ; } else { /* 5706/5708/5709 */ hba->hba_shutdown_tmo = 20 * HZ; hba->conn_teardown_tmo = 10 * HZ; hba->conn_ctx_destroy_tmo = 2 * HZ; } #ifdef CONFIG_32BIT spin_lock_init(&hba->stat_lock); #endif memset(&hba->stats, 0, sizeof(struct iscsi_stats_info)); if (iscsi_host_add(shost, &hba->pcidev->dev)) goto free_dump_mem; return hba; free_dump_mem: bnx2i_release_free_cid_que(hba); cid_que_err: bnx2i_free_mp_bdt(hba); mp_bdt_mem_err: if (hba->regview) { pci_iounmap(hba->pcidev, hba->regview); hba->regview = NULL; } ioreg_map_err: pci_dev_put(hba->pcidev); scsi_host_put(shost); return NULL; } /** * 
bnx2i_free_hba- releases hba structure and resources held by the adapter * @hba: pointer to adapter instance * * free adapter structure and call various cleanup routines. */ void bnx2i_free_hba(struct bnx2i_hba *hba) { struct Scsi_Host *shost = hba->shost; iscsi_host_remove(shost); INIT_LIST_HEAD(&hba->ep_ofld_list); INIT_LIST_HEAD(&hba->ep_active_list); INIT_LIST_HEAD(&hba->ep_destroy_list); pci_dev_put(hba->pcidev); if (hba->regview) { pci_iounmap(hba->pcidev, hba->regview); hba->regview = NULL; } bnx2i_free_mp_bdt(hba); bnx2i_release_free_cid_que(hba); iscsi_host_free(shost); } /** * bnx2i_conn_free_login_resources - free DMA resources used for login process * @hba: pointer to adapter instance * @bnx2i_conn: iscsi connection pointer * * Login related resources, mostly BDT & payload DMA memory is freed */ static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba, struct bnx2i_conn *bnx2i_conn) { if (bnx2i_conn->gen_pdu.resp_bd_tbl) { dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, bnx2i_conn->gen_pdu.resp_bd_tbl, bnx2i_conn->gen_pdu.resp_bd_dma); bnx2i_conn->gen_pdu.resp_bd_tbl = NULL; } if (bnx2i_conn->gen_pdu.req_bd_tbl) { dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, bnx2i_conn->gen_pdu.req_bd_tbl, bnx2i_conn->gen_pdu.req_bd_dma); bnx2i_conn->gen_pdu.req_bd_tbl = NULL; } if (bnx2i_conn->gen_pdu.resp_buf) { dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, bnx2i_conn->gen_pdu.resp_buf, bnx2i_conn->gen_pdu.resp_dma_addr); bnx2i_conn->gen_pdu.resp_buf = NULL; } if (bnx2i_conn->gen_pdu.req_buf) { dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, bnx2i_conn->gen_pdu.req_buf, bnx2i_conn->gen_pdu.req_dma_addr); bnx2i_conn->gen_pdu.req_buf = NULL; } } /** * bnx2i_conn_alloc_login_resources - alloc DMA resources for login/nop. * @hba: pointer to adapter instance * @bnx2i_conn: iscsi connection pointer * * Mgmt task DNA resources are allocated in this routine. 
*/ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba, struct bnx2i_conn *bnx2i_conn) { /* Allocate memory for login request/response buffers */ bnx2i_conn->gen_pdu.req_buf = dma_alloc_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, &bnx2i_conn->gen_pdu.req_dma_addr, GFP_KERNEL); if (bnx2i_conn->gen_pdu.req_buf == NULL) goto login_req_buf_failure; bnx2i_conn->gen_pdu.req_buf_size = 0; bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf; bnx2i_conn->gen_pdu.resp_buf = dma_alloc_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, &bnx2i_conn->gen_pdu.resp_dma_addr, GFP_KERNEL); if (bnx2i_conn->gen_pdu.resp_buf == NULL) goto login_resp_buf_failure; bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN; bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf; bnx2i_conn->gen_pdu.req_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL); if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL) goto login_req_bd_tbl_failure; bnx2i_conn->gen_pdu.resp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, &bnx2i_conn->gen_pdu.resp_bd_dma, GFP_KERNEL); if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL) goto login_resp_bd_tbl_failure; return 0; login_resp_bd_tbl_failure: dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, bnx2i_conn->gen_pdu.req_bd_tbl, bnx2i_conn->gen_pdu.req_bd_dma); bnx2i_conn->gen_pdu.req_bd_tbl = NULL; login_req_bd_tbl_failure: dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, bnx2i_conn->gen_pdu.resp_buf, bnx2i_conn->gen_pdu.resp_dma_addr); bnx2i_conn->gen_pdu.resp_buf = NULL; login_resp_buf_failure: dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, bnx2i_conn->gen_pdu.req_buf, bnx2i_conn->gen_pdu.req_dma_addr); bnx2i_conn->gen_pdu.req_buf = NULL; login_req_buf_failure: iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data, "login resource alloc failed!!\n"); return -ENOMEM; } /** * bnx2i_iscsi_prep_generic_pdu_bd - prepares 
BD table.
 * @bnx2i_conn:	iscsi connection pointer
 *
 * Fills in the login request/response BD tables (allocated earlier by
 * bnx2i_conn_alloc_login_resources) before shipping requests to cnic
 * for PDUs prepared by 'iscsid' daemon
 */
static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn)
{
	struct iscsi_bd *bd_tbl;

	/* request BD covers only the bytes written so far
	 * (req_wr_ptr - req_buf) */
	bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl;

	bd_tbl->buffer_addr_hi =
		(u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32);
	bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr;
	bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr -
				bnx2i_conn->gen_pdu.req_buf;
	bd_tbl->reserved0 = 0;
	bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
			ISCSI_BD_FIRST_IN_BD_CHAIN;

	/* response BD covers the full receive segment */
	bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.resp_bd_tbl;
	/* NOTE(review): unlike the req BD above, this shift result carries
	 * no explicit (u32) cast; assignment to the u32 field truncates
	 * identically, so this is only a cosmetic inconsistency */
	bd_tbl->buffer_addr_hi = (u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32;
	bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr;
	bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN;
	bd_tbl->reserved0 = 0;
	bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
			ISCSI_BD_FIRST_IN_BD_CHAIN;
}

/**
 * bnx2i_iscsi_send_generic_request - called to send mgmt tasks.
 * @task:	transport layer task pointer
 *
 * called to transmit PDUs prepared by the 'iscsid' daemon. iSCSI login,
 * Nop-out and Logout requests flow through this path.
*/ static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task) { struct bnx2i_cmd *cmd = task->dd_data; struct bnx2i_conn *bnx2i_conn = cmd->conn; int rc = 0; char *buf; int data_len; bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn); switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { case ISCSI_OP_LOGIN: bnx2i_send_iscsi_login(bnx2i_conn, task); break; case ISCSI_OP_NOOP_OUT: data_len = bnx2i_conn->gen_pdu.req_buf_size; buf = bnx2i_conn->gen_pdu.req_buf; if (data_len) rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task, buf, data_len, 1); else rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task, NULL, 0, 1); break; case ISCSI_OP_LOGOUT: rc = bnx2i_send_iscsi_logout(bnx2i_conn, task); break; case ISCSI_OP_SCSI_TMFUNC: rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task); break; case ISCSI_OP_TEXT: rc = bnx2i_send_iscsi_text(bnx2i_conn, task); break; default: iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data, "send_gen: unsupported op 0x%x\n", task->hdr->opcode); } return rc; } /********************************************************************** * SCSI-ML Interface **********************************************************************/ /** * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe * @sc: SCSI-ML command pointer * @cmd: iscsi cmd pointer */ static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd) { u32 dword; int lpcnt; u8 *srcp; u32 *dstp; u32 scsi_lun[2]; int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun); cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]); cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]); lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword); srcp = (u8 *) sc->cmnd; dstp = (u32 *) cmd->req.cdb; while (lpcnt--) { memcpy(&dword, (const void *) srcp, 4); *dstp = cpu_to_be32(dword); srcp += 4; dstp++; } if (sc->cmd_len & 0x3) { dword = (u32) srcp[0] | ((u32) srcp[1] << 8); *dstp = cpu_to_be32(dword); } } static void bnx2i_cleanup_task(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; struct bnx2i_conn 
*bnx2i_conn = conn->dd_data; struct bnx2i_hba *hba = bnx2i_conn->hba; /* * mgmt task or cmd was never sent to us to transmit. */ if (!task->sc || task->state == ISCSI_TASK_PENDING) return; /* * need to clean-up task context to claim dma buffers */ if (task->state == ISCSI_TASK_ABRT_TMF) { bnx2i_send_cmd_cleanup_req(hba, task->dd_data); spin_unlock_bh(&conn->session->back_lock); wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl, msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT)); spin_lock_bh(&conn->session->back_lock); } bnx2i_iscsi_unmap_sg_list(task->dd_data); } /** * bnx2i_mtask_xmit - transmit mtask to chip for further processing * @conn: transport layer conn structure pointer * @task: transport layer command structure pointer */ static int bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task) { struct bnx2i_conn *bnx2i_conn = conn->dd_data; struct bnx2i_hba *hba = bnx2i_conn->hba; struct bnx2i_cmd *cmd = task->dd_data; memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN); bnx2i_setup_cmd_wqe_template(cmd); bnx2i_conn->gen_pdu.req_buf_size = task->data_count; /* Tx PDU/data length count */ ADD_STATS_64(hba, tx_pdus, 1); ADD_STATS_64(hba, tx_bytes, task->data_count); if (task->data_count) { memcpy(bnx2i_conn->gen_pdu.req_buf, task->data, task->data_count); bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf + task->data_count; } cmd->conn = conn->dd_data; cmd->scsi_cmd = NULL; return bnx2i_iscsi_send_generic_request(task); } /** * bnx2i_task_xmit - transmit iscsi command to chip for further processing * @task: transport layer command structure pointer * * maps SG buffers and send request to chip/firmware in the form of SQ WQE */ static int bnx2i_task_xmit(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; struct iscsi_session *session = conn->session; struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session); struct bnx2i_hba *hba = iscsi_host_priv(shost); struct bnx2i_conn *bnx2i_conn = 
conn->dd_data; struct scsi_cmnd *sc = task->sc; struct bnx2i_cmd *cmd = task->dd_data; struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr; if (atomic_read(&bnx2i_conn->ep->num_active_cmds) + 1 > hba->max_sqes) return -ENOMEM; /* * If there is no scsi_cmnd this must be a mgmt task */ if (!sc) return bnx2i_mtask_xmit(conn, task); bnx2i_setup_cmd_wqe_template(cmd); cmd->req.op_code = ISCSI_OP_SCSI_CMD; cmd->conn = bnx2i_conn; cmd->scsi_cmd = sc; cmd->req.total_data_transfer_length = scsi_bufflen(sc); cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn); bnx2i_iscsi_map_sg_list(cmd); bnx2i_cpy_scsi_cdb(sc, cmd); cmd->req.op_attr = ISCSI_ATTR_SIMPLE; if (sc->sc_data_direction == DMA_TO_DEVICE) { cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE; cmd->req.itt = task->itt | (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT); bnx2i_setup_write_cmd_bd_info(task); } else { if (scsi_bufflen(sc)) cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ; cmd->req.itt = task->itt | (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT); } cmd->req.num_bds = cmd->io_tbl.bd_valid; if (!cmd->io_tbl.bd_valid) { cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma; cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32); cmd->req.num_bds = 1; } bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd); return 0; } /** * bnx2i_session_create - create a new iscsi session * @cmds_max: max commands supported * @qdepth: scsi queue depth to support * @initial_cmdsn: initial iscsi CMDSN to be used for this session * * Creates a new iSCSI session instance on given device. 
*/ static struct iscsi_cls_session * bnx2i_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, uint16_t qdepth, uint32_t initial_cmdsn) { struct Scsi_Host *shost; struct iscsi_cls_session *cls_session; struct bnx2i_hba *hba; struct bnx2i_endpoint *bnx2i_ep; if (!ep) { printk(KERN_ERR "bnx2i: missing ep.\n"); return NULL; } bnx2i_ep = ep->dd_data; shost = bnx2i_ep->hba->shost; hba = iscsi_host_priv(shost); if (bnx2i_adapter_ready(hba)) return NULL; /* * user can override hw limit as long as it is within * the min/max. */ if (cmds_max > hba->max_sqes) cmds_max = hba->max_sqes; else if (cmds_max < BNX2I_SQ_WQES_MIN) cmds_max = BNX2I_SQ_WQES_MIN; cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost, cmds_max, 0, sizeof(struct bnx2i_cmd), initial_cmdsn, ISCSI_MAX_TARGET); if (!cls_session) return NULL; if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data)) goto session_teardown; return cls_session; session_teardown: iscsi_session_teardown(cls_session); return NULL; } /** * bnx2i_session_destroy - destroys iscsi session * @cls_session: pointer to iscsi cls session * * Destroys previously created iSCSI session instance and releases * all resources held by it */ static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session) { struct iscsi_session *session = cls_session->dd_data; struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); struct bnx2i_hba *hba = iscsi_host_priv(shost); bnx2i_destroy_cmd_pool(hba, session); iscsi_session_teardown(cls_session); } /** * bnx2i_conn_create - create iscsi connection instance * @cls_session: pointer to iscsi cls session * @cid: iscsi cid as per rfc (not NX2's CID terminology) * * Creates a new iSCSI connection instance for a given session */ static struct iscsi_cls_conn * bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid) { struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); struct bnx2i_hba *hba = iscsi_host_priv(shost); struct bnx2i_conn *bnx2i_conn; struct 
iscsi_cls_conn *cls_conn; struct iscsi_conn *conn; cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn), cid); if (!cls_conn) return NULL; conn = cls_conn->dd_data; bnx2i_conn = conn->dd_data; bnx2i_conn->cls_conn = cls_conn; bnx2i_conn->hba = hba; atomic_set(&bnx2i_conn->work_cnt, 0); /* 'ep' ptr will be assigned in bind() call */ bnx2i_conn->ep = NULL; init_completion(&bnx2i_conn->cmd_cleanup_cmpl); if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) { iscsi_conn_printk(KERN_ALERT, conn, "conn_new: login resc alloc failed!!\n"); goto free_conn; } return cls_conn; free_conn: iscsi_conn_teardown(cls_conn); return NULL; } /** * bnx2i_conn_bind - binds iscsi sess, conn and ep objects together * @cls_session: pointer to iscsi cls session * @cls_conn: pointer to iscsi cls conn * @transport_fd: 64-bit EP handle * @is_leading: leading connection on this session? * * Binds together iSCSI session instance, iSCSI connection instance * and the TCP connection. This routine returns error code if * TCP connection does not belong on the device iSCSI sess/conn * is bound */ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn, uint64_t transport_fd, int is_leading) { struct iscsi_conn *conn = cls_conn->dd_data; struct bnx2i_conn *bnx2i_conn = conn->dd_data; struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); struct bnx2i_hba *hba = iscsi_host_priv(shost); struct bnx2i_endpoint *bnx2i_ep; struct iscsi_endpoint *ep; int ret_code; ep = iscsi_lookup_endpoint(transport_fd); if (!ep) return -EINVAL; /* * Forcefully terminate all in progress connection recovery at the * earliest, either in bind(), send_pdu(LOGIN), or conn_start() */ if (bnx2i_adapter_ready(hba)) return -EIO; bnx2i_ep = ep->dd_data; if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) || (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD)) /* Peer disconnect via' FIN or RST */ return -EINVAL; if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) return -EINVAL; if 
(bnx2i_ep->hba != hba) { /* Error - TCP connection does not belong to this device */ iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data, "conn bind, ep=0x%p (%s) does not", bnx2i_ep, bnx2i_ep->hba->netdev->name); iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data, "belong to hba (%s)\n", hba->netdev->name); return -EEXIST; } bnx2i_ep->conn = bnx2i_conn; bnx2i_conn->ep = bnx2i_ep; bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid; bnx2i_conn->fw_cid = bnx2i_ep->ep_cid; ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn, bnx2i_ep->ep_iscsi_cid); /* 5706/5708/5709 FW takes RQ as full when initiated, but for 57710 * driver needs to explicitly replenish RQ index during setup. */ if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type)) bnx2i_put_rq_buf(bnx2i_conn, 0); bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE); return ret_code; } /** * bnx2i_conn_destroy - destroy iscsi connection instance & release resources * @cls_conn: pointer to iscsi cls conn * * Destroy an iSCSI connection instance and release memory resources held by * this connection */ static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn) { struct iscsi_conn *conn = cls_conn->dd_data; struct bnx2i_conn *bnx2i_conn = conn->dd_data; struct Scsi_Host *shost; struct bnx2i_hba *hba; struct bnx2i_work *work, *tmp; unsigned cpu = 0; struct bnx2i_percpu_s *p; shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn)); hba = iscsi_host_priv(shost); bnx2i_conn_free_login_resources(hba, bnx2i_conn); if (atomic_read(&bnx2i_conn->work_cnt)) { for_each_online_cpu(cpu) { p = &per_cpu(bnx2i_percpu, cpu); spin_lock_bh(&p->p_work_lock); list_for_each_entry_safe(work, tmp, &p->work_list, list) { if (work->session == conn->session && work->bnx2i_conn == bnx2i_conn) { list_del_init(&work->list); kfree(work); if (!atomic_dec_and_test( &bnx2i_conn->work_cnt)) break; } } spin_unlock_bh(&p->p_work_lock); } } iscsi_conn_teardown(cls_conn); } /** * bnx2i_ep_get_param - return iscsi ep parameter to 
caller * @ep: pointer to iscsi endpoint * @param: parameter type identifier * @buf: buffer pointer * * returns iSCSI ep parameters */ static int bnx2i_ep_get_param(struct iscsi_endpoint *ep, enum iscsi_param param, char *buf) { struct bnx2i_endpoint *bnx2i_ep = ep->dd_data; struct bnx2i_hba *hba = bnx2i_ep->hba; int len = -ENOTCONN; if (!hba) return -ENOTCONN; switch (param) { case ISCSI_PARAM_CONN_PORT: mutex_lock(&hba->net_dev_lock); if (bnx2i_ep->cm_sk) len = sprintf(buf, "%hu\n", bnx2i_ep->cm_sk->dst_port); mutex_unlock(&hba->net_dev_lock); break; case ISCSI_PARAM_CONN_ADDRESS: mutex_lock(&hba->net_dev_lock); if (bnx2i_ep->cm_sk) len = sprintf(buf, "%pI4\n", &bnx2i_ep->cm_sk->dst_ip); mutex_unlock(&hba->net_dev_lock); break; default: return -ENOSYS; } return len; } /** * bnx2i_host_get_param - returns host (adapter) related parameters * @shost: scsi host pointer * @param: parameter type identifier * @buf: buffer pointer */ static int bnx2i_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf) { struct bnx2i_hba *hba = iscsi_host_priv(shost); int len = 0; switch (param) { case ISCSI_HOST_PARAM_HWADDRESS: len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6); break; case ISCSI_HOST_PARAM_NETDEV_NAME: len = sprintf(buf, "%s\n", hba->netdev->name); break; case ISCSI_HOST_PARAM_IPADDRESS: { struct list_head *active_list = &hba->ep_active_list; read_lock_bh(&hba->ep_rdwr_lock); if (!list_empty(&hba->ep_active_list)) { struct bnx2i_endpoint *bnx2i_ep; struct cnic_sock *csk; bnx2i_ep = list_first_entry(active_list, struct bnx2i_endpoint, link); csk = bnx2i_ep->cm_sk; if (test_bit(SK_F_IPV6, &csk->flags)) len = sprintf(buf, "%pI6\n", csk->src_ip); else len = sprintf(buf, "%pI4\n", csk->src_ip); } read_unlock_bh(&hba->ep_rdwr_lock); break; } default: return iscsi_host_get_param(shost, param, buf); } return len; } /** * bnx2i_conn_start - completes iscsi connection migration to FFP * @cls_conn: pointer to iscsi cls conn * * last call in FFP 
migration to handover iscsi conn to the driver */ static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn) { struct iscsi_conn *conn = cls_conn->dd_data; struct bnx2i_conn *bnx2i_conn = conn->dd_data; bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START; bnx2i_update_iscsi_conn(conn); /* * this should normally not sleep for a long time so it should * not disrupt the caller. */ bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies; bnx2i_conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer; bnx2i_conn->ep->ofld_timer.data = (unsigned long) bnx2i_conn->ep; add_timer(&bnx2i_conn->ep->ofld_timer); /* update iSCSI context for this conn, wait for CNIC to complete */ wait_event_interruptible(bnx2i_conn->ep->ofld_wait, bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START); if (signal_pending(current)) flush_signals(current); del_timer_sync(&bnx2i_conn->ep->ofld_timer); iscsi_conn_start(cls_conn); return 0; } /** * bnx2i_conn_get_stats - returns iSCSI stats * @cls_conn: pointer to iscsi cls conn * @stats: pointer to iscsi statistic struct */ static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats) { struct iscsi_conn *conn = cls_conn->dd_data; stats->txdata_octets = conn->txdata_octets; stats->rxdata_octets = conn->rxdata_octets; stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; stats->dataout_pdus = conn->dataout_pdus_cnt; stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; stats->datain_pdus = conn->datain_pdus_cnt; stats->r2t_pdus = conn->r2t_pdus_cnt; stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; stats->digest_err = 0; stats->timeout_err = 0; strcpy(stats->custom[0].desc, "eh_abort_cnt"); stats->custom[0].value = conn->eh_abort_cnt; stats->custom_length = 1; } /** * bnx2i_check_route - checks if target IP route belongs to one of NX2 devices * @dst_addr: target IP address * * check if route resolves to BNX2 device */ static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr) { struct sockaddr_in 
*desti = (struct sockaddr_in *) dst_addr; struct bnx2i_hba *hba; struct cnic_dev *cnic = NULL; hba = get_adapter_list_head(); if (hba && hba->cnic) cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI); if (!cnic) { printk(KERN_ALERT "bnx2i: no route," "can't connect using cnic\n"); goto no_nx2_route; } hba = bnx2i_find_hba_for_cnic(cnic); if (!hba) goto no_nx2_route; if (bnx2i_adapter_ready(hba)) { printk(KERN_ALERT "bnx2i: check route, hba not found\n"); goto no_nx2_route; } if (hba->netdev->mtu > hba->mtu_supported) { printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n", hba->netdev->name, hba->netdev->mtu); printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n", hba->mtu_supported); goto no_nx2_route; } return hba; no_nx2_route: return NULL; } /** * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources * @hba: pointer to adapter instance * @ep: endpoint (transport identifier) structure * * destroys cm_sock structure and on chip iscsi context */ static int bnx2i_tear_down_conn(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) { if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) && ep->cm_sk) hba->cnic->cm_destroy(ep->cm_sk); if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) && ep->state == EP_STATE_DISCONN_TIMEDOUT) { if (ep->conn && ep->conn->cls_conn && ep->conn->cls_conn->dd_data) { struct iscsi_conn *conn = ep->conn->cls_conn->dd_data; /* Must suspend all rx queue activity for this ep */ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); } /* CONN_DISCONNECT timeout may or may not be an issue depending * on what transcribed in TCP layer, different targets behave * differently */ printk(KERN_ALERT "bnx2i (%s): - WARN - CONN_DISCON timed out, " "please submit GRC Dump, NW/PCIe trace, " "driver msgs to developers for analysis\n", hba->netdev->name); } ep->state = EP_STATE_CLEANUP_START; init_timer(&ep->ofld_timer); ep->ofld_timer.expires = hba->conn_ctx_destroy_tmo + jiffies; ep->ofld_timer.function = 
bnx2i_ep_ofld_timer; ep->ofld_timer.data = (unsigned long) ep; add_timer(&ep->ofld_timer); bnx2i_ep_destroy_list_add(hba, ep); /* destroy iSCSI context, wait for it to complete */ if (bnx2i_send_conn_destroy(hba, ep)) ep->state = EP_STATE_CLEANUP_CMPL; wait_event_interruptible(ep->ofld_wait, (ep->state != EP_STATE_CLEANUP_START)); if (signal_pending(current)) flush_signals(current); del_timer_sync(&ep->ofld_timer); bnx2i_ep_destroy_list_del(hba, ep); if (ep->state != EP_STATE_CLEANUP_CMPL) /* should never happen */ printk(KERN_ALERT "bnx2i - conn destroy failed\n"); return 0; } /** * bnx2i_ep_connect - establish TCP connection to target portal * @shost: scsi host * @dst_addr: target IP address * @non_blocking: blocking or non-blocking call * * this routine initiates the TCP/IP connection by invoking Option-2 i/f * with l5_core and the CNIC. This is a multi-step process of resolving * route to target, create a iscsi connection context, handshaking with * CNIC module to create/initialize the socket struct and finally * sending down option-2 request to complete TCP 3-way handshake */ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, int non_blocking) { u32 iscsi_cid = BNX2I_CID_RESERVED; struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr; struct sockaddr_in6 *desti6; struct bnx2i_endpoint *bnx2i_ep; struct bnx2i_hba *hba; struct cnic_dev *cnic; struct cnic_sockaddr saddr; struct iscsi_endpoint *ep; int rc = 0; if (shost) { /* driver is given scsi host to work with */ hba = iscsi_host_priv(shost); } else /* * check if the given destination can be reached through * a iscsi capable NetXtreme2 device */ hba = bnx2i_check_route(dst_addr); if (!hba) { rc = -EINVAL; goto nohba; } mutex_lock(&hba->net_dev_lock); if (bnx2i_adapter_ready(hba) || !hba->cid_que.cid_free_cnt) { rc = -EPERM; goto check_busy; } cnic = hba->cnic; ep = bnx2i_alloc_ep(hba); if (!ep) { rc = -ENOMEM; goto check_busy; } bnx2i_ep = ep->dd_data; 
atomic_set(&bnx2i_ep->num_active_cmds, 0); iscsi_cid = bnx2i_alloc_iscsi_cid(hba); if (iscsi_cid == -1) { printk(KERN_ALERT "bnx2i (%s): alloc_ep - unable to allocate " "iscsi cid\n", hba->netdev->name); rc = -ENOMEM; bnx2i_free_ep(ep); goto check_busy; } bnx2i_ep->hba_age = hba->age; rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep); if (rc != 0) { printk(KERN_ALERT "bnx2i (%s): ep_conn - alloc QP resc error" "\n", hba->netdev->name); rc = -ENOMEM; goto qp_resc_err; } bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid; bnx2i_ep->state = EP_STATE_OFLD_START; bnx2i_ep_ofld_list_add(hba, bnx2i_ep); init_timer(&bnx2i_ep->ofld_timer); bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies; bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer; bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep; add_timer(&bnx2i_ep->ofld_timer); if (bnx2i_send_conn_ofld_req(hba, bnx2i_ep)) { if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) { printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n", hba->netdev->name, bnx2i_ep->ep_iscsi_cid); rc = -EBUSY; } else rc = -ENOSPC; printk(KERN_ALERT "bnx2i (%s): unable to send conn offld kwqe" "\n", hba->netdev->name); bnx2i_ep_ofld_list_del(hba, bnx2i_ep); goto conn_failed; } /* Wait for CNIC hardware to setup conn context and return 'cid' */ wait_event_interruptible(bnx2i_ep->ofld_wait, bnx2i_ep->state != EP_STATE_OFLD_START); if (signal_pending(current)) flush_signals(current); del_timer_sync(&bnx2i_ep->ofld_timer); bnx2i_ep_ofld_list_del(hba, bnx2i_ep); if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) { if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) { printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n", hba->netdev->name, bnx2i_ep->ep_iscsi_cid); rc = -EBUSY; } else rc = -ENOSPC; goto conn_failed; } rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid, iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep); if (rc) { rc = -EINVAL; /* Need to terminate and cleanup the connection */ goto release_ep; } bnx2i_ep->cm_sk->rcv_buf = 256 * 1024; bnx2i_ep->cm_sk->snd_buf = 256 
* 1024; clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags); memset(&saddr, 0, sizeof(saddr)); if (dst_addr->sa_family == AF_INET) { desti = (struct sockaddr_in *) dst_addr; saddr.remote.v4 = *desti; saddr.local.v4.sin_family = desti->sin_family; } else if (dst_addr->sa_family == AF_INET6) { desti6 = (struct sockaddr_in6 *) dst_addr; saddr.remote.v6 = *desti6; saddr.local.v6.sin6_family = desti6->sin6_family; } bnx2i_ep->timestamp = jiffies; bnx2i_ep->state = EP_STATE_CONNECT_START; if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { rc = -EINVAL; goto conn_failed; } else rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr); if (rc) goto release_ep; bnx2i_ep_active_list_add(hba, bnx2i_ep); if (bnx2i_map_ep_dbell_regs(bnx2i_ep)) goto del_active_ep; mutex_unlock(&hba->net_dev_lock); return ep; del_active_ep: bnx2i_ep_active_list_del(hba, bnx2i_ep); release_ep: if (bnx2i_tear_down_conn(hba, bnx2i_ep)) { mutex_unlock(&hba->net_dev_lock); return ERR_PTR(rc); } conn_failed: bnx2i_free_qp_resc(hba, bnx2i_ep); qp_resc_err: bnx2i_free_ep(ep); check_busy: mutex_unlock(&hba->net_dev_lock); nohba: return ERR_PTR(rc); } /** * bnx2i_ep_poll - polls for TCP connection establishement * @ep: TCP connection (endpoint) handle * @timeout_ms: timeout value in milli secs * * polls for TCP connect request to complete */ static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) { struct bnx2i_endpoint *bnx2i_ep; int rc = 0; bnx2i_ep = ep->dd_data; if ((bnx2i_ep->state == EP_STATE_IDLE) || (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) || (bnx2i_ep->state == EP_STATE_OFLD_FAILED)) return -1; if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL) return 1; rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait, ((bnx2i_ep->state == EP_STATE_OFLD_FAILED) || (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) || (bnx2i_ep->state == EP_STATE_CONNECT_COMPL)), msecs_to_jiffies(timeout_ms)); if (bnx2i_ep->state == EP_STATE_OFLD_FAILED) rc = -1; if (rc > 0) return 1; else if (!rc) return 0; /* 
timeout */ else return rc; } /** * bnx2i_ep_tcp_conn_active - check EP state transition * @ep: endpoint pointer * * check if underlying TCP connection is active */ static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep) { int ret; int cnic_dev_10g = 0; if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type)) cnic_dev_10g = 1; switch (bnx2i_ep->state) { case EP_STATE_CLEANUP_FAILED: case EP_STATE_OFLD_FAILED: case EP_STATE_DISCONN_TIMEDOUT: ret = 0; break; case EP_STATE_CONNECT_START: case EP_STATE_CONNECT_FAILED: case EP_STATE_CONNECT_COMPL: case EP_STATE_ULP_UPDATE_START: case EP_STATE_ULP_UPDATE_COMPL: case EP_STATE_TCP_FIN_RCVD: case EP_STATE_LOGOUT_SENT: case EP_STATE_LOGOUT_RESP_RCVD: case EP_STATE_ULP_UPDATE_FAILED: ret = 1; break; case EP_STATE_TCP_RST_RCVD: if (cnic_dev_10g) ret = 0; else ret = 1; break; default: ret = 0; } return ret; } /* * bnx2i_hw_ep_disconnect - executes TCP connection teardown process in the hw * @ep: TCP connection (bnx2i endpoint) handle * * executes TCP connection teardown process */ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep) { struct bnx2i_hba *hba = bnx2i_ep->hba; struct cnic_dev *cnic; struct iscsi_session *session = NULL; struct iscsi_conn *conn = NULL; int ret = 0; int close = 0; int close_ret = 0; if (!hba) return 0; cnic = hba->cnic; if (!cnic) return 0; if (bnx2i_ep->state == EP_STATE_IDLE || bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT) return 0; if (!bnx2i_ep_tcp_conn_active(bnx2i_ep)) goto destroy_conn; if (bnx2i_ep->conn) { conn = bnx2i_ep->conn->cls_conn->dd_data; session = conn->session; } init_timer(&bnx2i_ep->ofld_timer); bnx2i_ep->ofld_timer.expires = hba->conn_teardown_tmo + jiffies; bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer; bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep; add_timer(&bnx2i_ep->ofld_timer); if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) goto out; if (session) { spin_lock_bh(&session->frwd_lock); if (bnx2i_ep->state != 
EP_STATE_TCP_FIN_RCVD) { if (session->state == ISCSI_STATE_LOGGING_OUT) { if (bnx2i_ep->state == EP_STATE_LOGOUT_SENT) { /* Logout sent, but no resp */ printk(KERN_ALERT "bnx2i (%s): WARNING" " logout response was not " "received!\n", bnx2i_ep->hba->netdev->name); } else if (bnx2i_ep->state == EP_STATE_LOGOUT_RESP_RCVD) close = 1; } } else close = 1; spin_unlock_bh(&session->frwd_lock); } bnx2i_ep->state = EP_STATE_DISCONN_START; if (close) close_ret = cnic->cm_close(bnx2i_ep->cm_sk); else close_ret = cnic->cm_abort(bnx2i_ep->cm_sk); if (close_ret) printk(KERN_ALERT "bnx2i (%s): close/abort(%d) returned %d\n", bnx2i_ep->hba->netdev->name, close, close_ret); else /* wait for option-2 conn teardown */ wait_event_interruptible(bnx2i_ep->ofld_wait, bnx2i_ep->state != EP_STATE_DISCONN_START); if (signal_pending(current)) flush_signals(current); del_timer_sync(&bnx2i_ep->ofld_timer); destroy_conn: bnx2i_ep_active_list_del(hba, bnx2i_ep); if (bnx2i_tear_down_conn(hba, bnx2i_ep)) return -EINVAL; out: bnx2i_ep->state = EP_STATE_IDLE; return ret; } /** * bnx2i_ep_disconnect - executes TCP connection teardown process * @ep: TCP connection (iscsi endpoint) handle * * executes TCP connection teardown process */ static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep) { struct bnx2i_endpoint *bnx2i_ep; struct bnx2i_conn *bnx2i_conn = NULL; struct iscsi_conn *conn = NULL; struct bnx2i_hba *hba; bnx2i_ep = ep->dd_data; /* driver should not attempt connection cleanup until TCP_CONNECT * completes either successfully or fails. 
Timeout is 9-secs, so * wait for it to complete */ while ((bnx2i_ep->state == EP_STATE_CONNECT_START) && !time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ))) msleep(250); if (bnx2i_ep->conn) { bnx2i_conn = bnx2i_ep->conn; conn = bnx2i_conn->cls_conn->dd_data; iscsi_suspend_queue(conn); } hba = bnx2i_ep->hba; mutex_lock(&hba->net_dev_lock); if (bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT) goto out; if (bnx2i_ep->state == EP_STATE_IDLE) goto free_resc; if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) || (bnx2i_ep->hba_age != hba->age)) { bnx2i_ep_active_list_del(hba, bnx2i_ep); goto free_resc; } /* Do all chip cleanup here */ if (bnx2i_hw_ep_disconnect(bnx2i_ep)) { mutex_unlock(&hba->net_dev_lock); return; } free_resc: bnx2i_free_qp_resc(hba, bnx2i_ep); if (bnx2i_conn) bnx2i_conn->ep = NULL; bnx2i_free_ep(ep); out: mutex_unlock(&hba->net_dev_lock); wake_up_interruptible(&hba->eh_wait); } /** * bnx2i_nl_set_path - ISCSI_UEVENT_PATH_UPDATE user message handler * @buf: pointer to buffer containing iscsi path message * */ static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params) { struct bnx2i_hba *hba = iscsi_host_priv(shost); char *buf = (char *) params; u16 len = sizeof(*params); /* handled by cnic driver */ hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf, len); return 0; } static umode_t bnx2i_attr_is_visible(int param_type, int param) { switch (param_type) { case ISCSI_HOST_PARAM: switch (param) { case ISCSI_HOST_PARAM_NETDEV_NAME: case ISCSI_HOST_PARAM_HWADDRESS: case ISCSI_HOST_PARAM_IPADDRESS: return S_IRUGO; default: return 0; } case ISCSI_PARAM: switch (param) { case ISCSI_PARAM_MAX_RECV_DLENGTH: case ISCSI_PARAM_MAX_XMIT_DLENGTH: case ISCSI_PARAM_HDRDGST_EN: case ISCSI_PARAM_DATADGST_EN: case ISCSI_PARAM_CONN_ADDRESS: case ISCSI_PARAM_CONN_PORT: case ISCSI_PARAM_EXP_STATSN: case ISCSI_PARAM_PERSISTENT_ADDRESS: case ISCSI_PARAM_PERSISTENT_PORT: case ISCSI_PARAM_PING_TMO: case ISCSI_PARAM_RECV_TMO: case 
ISCSI_PARAM_INITIAL_R2T_EN: case ISCSI_PARAM_MAX_R2T: case ISCSI_PARAM_IMM_DATA_EN: case ISCSI_PARAM_FIRST_BURST: case ISCSI_PARAM_MAX_BURST: case ISCSI_PARAM_PDU_INORDER_EN: case ISCSI_PARAM_DATASEQ_INORDER_EN: case ISCSI_PARAM_ERL: case ISCSI_PARAM_TARGET_NAME: case ISCSI_PARAM_TPGT: case ISCSI_PARAM_USERNAME: case ISCSI_PARAM_PASSWORD: case ISCSI_PARAM_USERNAME_IN: case ISCSI_PARAM_PASSWORD_IN: case ISCSI_PARAM_FAST_ABORT: case ISCSI_PARAM_ABORT_TMO: case ISCSI_PARAM_LU_RESET_TMO: case ISCSI_PARAM_TGT_RESET_TMO: case ISCSI_PARAM_IFACE_NAME: case ISCSI_PARAM_INITIATOR_NAME: case ISCSI_PARAM_BOOT_ROOT: case ISCSI_PARAM_BOOT_NIC: case ISCSI_PARAM_BOOT_TARGET: return S_IRUGO; default: return 0; } } return 0; } /* * 'Scsi_Host_Template' structure and 'iscsi_tranport' structure template * used while registering with the scsi host and iSCSI transport module. */ static struct scsi_host_template bnx2i_host_template = { .module = THIS_MODULE, .name = "QLogic Offload iSCSI Initiator", .proc_name = "bnx2i", .queuecommand = iscsi_queuecommand, .eh_abort_handler = iscsi_eh_abort, .eh_device_reset_handler = iscsi_eh_device_reset, .eh_target_reset_handler = iscsi_eh_recover_target, .change_queue_depth = scsi_change_queue_depth, .target_alloc = iscsi_target_alloc, .can_queue = 2048, .max_sectors = 127, .cmd_per_lun = 128, .this_id = -1, .use_clustering = ENABLE_CLUSTERING, .sg_tablesize = ISCSI_MAX_BDS_PER_CMD, .shost_attrs = bnx2i_dev_attributes, .track_queue_depth = 1, }; struct iscsi_transport bnx2i_iscsi_transport = { .owner = THIS_MODULE, .name = "bnx2i", .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD | CAP_TEXT_NEGO, .create_session = bnx2i_session_create, .destroy_session = bnx2i_session_destroy, .create_conn = bnx2i_conn_create, .bind_conn = bnx2i_conn_bind, .destroy_conn = bnx2i_conn_destroy, .attr_is_visible = bnx2i_attr_is_visible, .set_param = iscsi_set_param, .get_conn_param = iscsi_conn_get_param, .get_session_param = 
iscsi_session_get_param, .get_host_param = bnx2i_host_get_param, .start_conn = bnx2i_conn_start, .stop_conn = iscsi_conn_stop, .send_pdu = iscsi_conn_send_pdu, .xmit_task = bnx2i_task_xmit, .get_stats = bnx2i_conn_get_stats, /* TCP connect - disconnect - option-2 interface calls */ .get_ep_param = bnx2i_ep_get_param, .ep_connect = bnx2i_ep_connect, .ep_poll = bnx2i_ep_poll, .ep_disconnect = bnx2i_ep_disconnect, .set_path = bnx2i_nl_set_path, /* Error recovery timeout call */ .session_recovery_timedout = iscsi_session_recovery_timedout, .cleanup_task = bnx2i_cleanup_task, };
gpl-2.0
Hardslog/f2fs
drivers/tty/serial/mpsc.c
432
57435
/* * Generic driver for the MPSC (UART mode) on Marvell parts (e.g., GT64240, * GT64260, MV64340, MV64360, GT96100, ... ). * * Author: Mark A. Greer <mgreer@mvista.com> * * Based on an old MPSC driver that was in the linuxppc tree. It appears to * have been created by Chris Zankel (formerly of MontaVista) but there * is no proper Copyright so I'm not sure. Apparently, parts were also * taken from PPCBoot (now U-Boot). Also based on drivers/serial/8250.c * by Russell King. * * 2004 (c) MontaVista, Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. */ /* * The MPSC interface is much like a typical network controller's interface. * That is, you set up separate rings of descriptors for transmitting and * receiving data. There is also a pool of buffers with (one buffer per * descriptor) that incoming data are dma'd into or outgoing data are dma'd * out of. * * The MPSC requires two other controllers to be able to work. The Baud Rate * Generator (BRG) provides a clock at programmable frequencies which determines * the baud rate. The Serial DMA Controller (SDMA) takes incoming data from the * MPSC and DMA's it into memory or DMA's outgoing data and passes it to the * MPSC. It is actually the SDMA interrupt that the driver uses to keep the * transmit and receive "engines" going (i.e., indicate data has been * transmitted or received). * * NOTES: * * 1) Some chips have an erratum where several regs cannot be * read. To work around that, we keep a local copy of those regs in * 'mpsc_port_info'. * * 2) Some chips have an erratum where the ctlr will hang when the SDMA ctlr * accesses system mem with coherency enabled. For that reason, the driver * assumes that coherency for that ctlr has been disabled. 
This means * that when in a cache coherent system, the driver has to manually manage * the data cache on the areas that it touches because the dma_* macro are * basically no-ops. * * 3) There is an erratum (on PPC) where you can't use the instruction to do * a DMA_TO_DEVICE/cache clean so DMA_BIDIRECTIONAL/flushes are used in places * where a DMA_TO_DEVICE/clean would have [otherwise] sufficed. * * 4) AFAICT, hardware flow control isn't supported by the controller --MAG. */ #if defined(CONFIG_SERIAL_MPSC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/console.h> #include <linux/sysrq.h> #include <linux/serial.h> #include <linux/serial_core.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/mv643xx.h> #include <linux/platform_device.h> #include <linux/gfp.h> #include <asm/io.h> #include <asm/irq.h> #define MPSC_NUM_CTLRS 2 /* * Descriptors and buffers must be cache line aligned. * Buffers lengths must be multiple of cache line size. * Number of Tx & Rx descriptors must be powers of 2. 
*/ #define MPSC_RXR_ENTRIES 32 #define MPSC_RXRE_SIZE dma_get_cache_alignment() #define MPSC_RXR_SIZE (MPSC_RXR_ENTRIES * MPSC_RXRE_SIZE) #define MPSC_RXBE_SIZE dma_get_cache_alignment() #define MPSC_RXB_SIZE (MPSC_RXR_ENTRIES * MPSC_RXBE_SIZE) #define MPSC_TXR_ENTRIES 32 #define MPSC_TXRE_SIZE dma_get_cache_alignment() #define MPSC_TXR_SIZE (MPSC_TXR_ENTRIES * MPSC_TXRE_SIZE) #define MPSC_TXBE_SIZE dma_get_cache_alignment() #define MPSC_TXB_SIZE (MPSC_TXR_ENTRIES * MPSC_TXBE_SIZE) #define MPSC_DMA_ALLOC_SIZE (MPSC_RXR_SIZE + MPSC_RXB_SIZE + MPSC_TXR_SIZE \ + MPSC_TXB_SIZE + dma_get_cache_alignment() /* for alignment */) /* Rx and Tx Ring entry descriptors -- assume entry size is <= cacheline size */ struct mpsc_rx_desc { u16 bufsize; u16 bytecnt; u32 cmdstat; u32 link; u32 buf_ptr; } __attribute((packed)); struct mpsc_tx_desc { u16 bytecnt; u16 shadow; u32 cmdstat; u32 link; u32 buf_ptr; } __attribute((packed)); /* * Some regs that have the erratum that you can't read them are are shared * between the two MPSC controllers. This struct contains those shared regs. */ struct mpsc_shared_regs { phys_addr_t mpsc_routing_base_p; phys_addr_t sdma_intr_base_p; void __iomem *mpsc_routing_base; void __iomem *sdma_intr_base; u32 MPSC_MRR_m; u32 MPSC_RCRR_m; u32 MPSC_TCRR_m; u32 SDMA_INTR_CAUSE_m; u32 SDMA_INTR_MASK_m; }; /* The main driver data structure */ struct mpsc_port_info { struct uart_port port; /* Overlay uart_port structure */ /* Internal driver state for this ctlr */ u8 ready; u8 rcv_data; tcflag_t c_iflag; /* save termios->c_iflag */ tcflag_t c_cflag; /* save termios->c_cflag */ /* Info passed in from platform */ u8 mirror_regs; /* Need to mirror regs? */ u8 cache_mgmt; /* Need manual cache mgmt? */ u8 brg_can_tune; /* BRG has baud tuning? 
*/ u32 brg_clk_src; u16 mpsc_max_idle; int default_baud; int default_bits; int default_parity; int default_flow; /* Physical addresses of various blocks of registers (from platform) */ phys_addr_t mpsc_base_p; phys_addr_t sdma_base_p; phys_addr_t brg_base_p; /* Virtual addresses of various blocks of registers (from platform) */ void __iomem *mpsc_base; void __iomem *sdma_base; void __iomem *brg_base; /* Descriptor ring and buffer allocations */ void *dma_region; dma_addr_t dma_region_p; dma_addr_t rxr; /* Rx descriptor ring */ dma_addr_t rxr_p; /* Phys addr of rxr */ u8 *rxb; /* Rx Ring I/O buf */ u8 *rxb_p; /* Phys addr of rxb */ u32 rxr_posn; /* First desc w/ Rx data */ dma_addr_t txr; /* Tx descriptor ring */ dma_addr_t txr_p; /* Phys addr of txr */ u8 *txb; /* Tx Ring I/O buf */ u8 *txb_p; /* Phys addr of txb */ int txr_head; /* Where new data goes */ int txr_tail; /* Where sent data comes off */ spinlock_t tx_lock; /* transmit lock */ /* Mirrored values of regs we can't read (if 'mirror_regs' set) */ u32 MPSC_MPCR_m; u32 MPSC_CHR_1_m; u32 MPSC_CHR_2_m; u32 MPSC_CHR_10_m; u32 BRG_BCR_m; struct mpsc_shared_regs *shared_regs; }; /* Hooks to platform-specific code */ int mpsc_platform_register_driver(void); void mpsc_platform_unregister_driver(void); /* Hooks back in to mpsc common to be called by platform-specific code */ struct mpsc_port_info *mpsc_device_probe(int index); struct mpsc_port_info *mpsc_device_remove(int index); /* Main MPSC Configuration Register Offsets */ #define MPSC_MMCRL 0x0000 #define MPSC_MMCRH 0x0004 #define MPSC_MPCR 0x0008 #define MPSC_CHR_1 0x000c #define MPSC_CHR_2 0x0010 #define MPSC_CHR_3 0x0014 #define MPSC_CHR_4 0x0018 #define MPSC_CHR_5 0x001c #define MPSC_CHR_6 0x0020 #define MPSC_CHR_7 0x0024 #define MPSC_CHR_8 0x0028 #define MPSC_CHR_9 0x002c #define MPSC_CHR_10 0x0030 #define MPSC_CHR_11 0x0034 #define MPSC_MPCR_FRZ (1 << 9) #define MPSC_MPCR_CL_5 0 #define MPSC_MPCR_CL_6 1 #define MPSC_MPCR_CL_7 2 #define MPSC_MPCR_CL_8 3 
#define	MPSC_MPCR_SBL_1	0		/* Stop-bit length selectors */
#define	MPSC_MPCR_SBL_2	1

#define	MPSC_CHR_2_TEV	(1<<1)
#define	MPSC_CHR_2_TA	(1<<7)
#define	MPSC_CHR_2_TTCS	(1<<9)
#define	MPSC_CHR_2_REV	(1<<17)
#define	MPSC_CHR_2_RA	(1<<23)	/* Rx abort (used in mpsc_stop_rx()) */
#define	MPSC_CHR_2_CRD	(1<<25)
#define	MPSC_CHR_2_EH	(1<<31)	/* Enter hunt (used in mpsc_enter_hunt()) */
#define	MPSC_CHR_2_PAR_ODD	0
#define	MPSC_CHR_2_PAR_SPACE	1
#define	MPSC_CHR_2_PAR_EVEN	2
#define	MPSC_CHR_2_PAR_MARK	3

/* MPSC Signal Routing */
#define	MPSC_MRR	0x0000
#define	MPSC_RCRR	0x0004
#define	MPSC_TCRR	0x0008

/* Serial DMA Controller Interface Registers */
#define	SDMA_SDC	0x0000
#define	SDMA_SDCM	0x0008
#define	SDMA_RX_DESC	0x0800
#define	SDMA_RX_BUF_PTR	0x0808
#define	SDMA_SCRDP	0x0810
#define	SDMA_TX_DESC	0x0c00
#define	SDMA_SCTDP	0x0c10
#define	SDMA_SFTDP	0x0c14

/* SDMA descriptor cmdstat bits */
#define	SDMA_DESC_CMDSTAT_PE	(1<<0)	/* Parity error */
#define	SDMA_DESC_CMDSTAT_CDL	(1<<1)
#define	SDMA_DESC_CMDSTAT_FR	(1<<3)	/* Framing error */
#define	SDMA_DESC_CMDSTAT_OR	(1<<6)	/* Overrun */
#define	SDMA_DESC_CMDSTAT_BR	(1<<9)	/* Break */
#define	SDMA_DESC_CMDSTAT_MI	(1<<10)
#define	SDMA_DESC_CMDSTAT_A	(1<<11)
#define	SDMA_DESC_CMDSTAT_AM	(1<<12)
#define	SDMA_DESC_CMDSTAT_CT	(1<<13)
#define	SDMA_DESC_CMDSTAT_C	(1<<14)
#define	SDMA_DESC_CMDSTAT_ES	(1<<15)
#define	SDMA_DESC_CMDSTAT_L	(1<<16)	/* Last buffer of frame */
#define	SDMA_DESC_CMDSTAT_F	(1<<17)	/* First buffer of frame */
#define	SDMA_DESC_CMDSTAT_P	(1<<18)
#define	SDMA_DESC_CMDSTAT_EI	(1<<23)	/* Enable interrupt */
#define	SDMA_DESC_CMDSTAT_O	(1<<31)	/* Owned by SDMA engine */

#define	SDMA_DESC_DFLT		(SDMA_DESC_CMDSTAT_O \
		| SDMA_DESC_CMDSTAT_EI)

#define	SDMA_SDC_RFT		(1<<0)
#define	SDMA_SDC_SFM		(1<<1)
#define	SDMA_SDC_BLMR		(1<<6)
#define	SDMA_SDC_BLMT		(1<<7)
#define	SDMA_SDC_POVR		(1<<8)
#define	SDMA_SDC_RIFB		(1<<9)

#define	SDMA_SDCM_ERD		(1<<7)	/* Enable Rx DMA */
#define	SDMA_SDCM_AR		(1<<15)	/* Abort Rx */
#define	SDMA_SDCM_STD		(1<<16)	/* Start Tx DMA */
#define	SDMA_SDCM_TXD		(1<<23)	/* Tx demand (set while Tx active) */
#define	SDMA_SDCM_AT		(1<<31)	/* Abort Tx */

#define	SDMA_0_CAUSE_RXBUF	(1<<0)
#define	SDMA_0_CAUSE_RXERR	(1<<1)
#define	SDMA_0_CAUSE_TXBUF	(1<<2)
#define	SDMA_0_CAUSE_TXEND	(1<<3)
#define	SDMA_1_CAUSE_RXBUF	(1<<8)
#define	SDMA_1_CAUSE_RXERR	(1<<9)
#define	SDMA_1_CAUSE_TXBUF	(1<<10)
#define	SDMA_1_CAUSE_TXEND	(1<<11)

#define	SDMA_CAUSE_RX_MASK	(SDMA_0_CAUSE_RXBUF | SDMA_0_CAUSE_RXERR \
		| SDMA_1_CAUSE_RXBUF | SDMA_1_CAUSE_RXERR)
#define	SDMA_CAUSE_TX_MASK	(SDMA_0_CAUSE_TXBUF | SDMA_0_CAUSE_TXEND \
		| SDMA_1_CAUSE_TXBUF | SDMA_1_CAUSE_TXEND)

/* SDMA Interrupt registers */
#define	SDMA_INTR_CAUSE		0x0000
#define	SDMA_INTR_MASK		0x0080

/* Baud Rate Generator Interface Registers */
#define	BRG_BCR			0x0000
#define	BRG_BTR			0x0004

/*
 * Define how this driver is known to the outside (we've been assigned a
 * range on the "Low-density serial ports" major).
 */
#define MPSC_MAJOR		204
#define MPSC_MINOR_START	44
#define	MPSC_DRIVER_NAME	"MPSC"
#define	MPSC_DEV_NAME	"ttyMM"
#define	MPSC_VERSION	"1.00"

static struct mpsc_port_info mpsc_ports[MPSC_NUM_CTLRS];
static struct mpsc_shared_regs mpsc_shared_regs;
static struct uart_driver mpsc_reg;

static void mpsc_start_rx(struct mpsc_port_info *pi);
static void mpsc_free_ring_mem(struct mpsc_port_info *pi);
static void mpsc_release_port(struct uart_port *port);
/*
 ******************************************************************************
 *
 * Baud Rate Generator Routines (BRG)
 *
 ******************************************************************************
 */
/*
 * Select the BRG input clock source (bits 18-21 of BRG_BCR) and clear the
 * baud-tuning bit (bit 25) when the platform says the BRG supports tuning.
 * Also zeroes the low half of BRG_BTR.  Uses the mirrored BCR value when
 * the register cannot be read (erratum).
 */
static void mpsc_brg_init(struct mpsc_port_info *pi, u32 clk_src)
{
	u32 v;

	v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
	v = (v & ~(0xf << 18)) | ((clk_src & 0xf) << 18);

	if (pi->brg_can_tune)
		v &= ~(1 << 25);

	if (pi->mirror_regs)
		pi->BRG_BCR_m = v;
	writel(v, pi->brg_base + BRG_BCR);

	writel(readl(pi->brg_base + BRG_BTR) & 0xffff0000,
		pi->brg_base + BRG_BTR);
}

/* Set the BRG enable bit (bit 16 of BRG_BCR) */
static void mpsc_brg_enable(struct mpsc_port_info *pi)
{
	u32 v;

	v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
	v |= (1 << 16);

	if (pi->mirror_regs)
		pi->BRG_BCR_m = v;
	writel(v, pi->brg_base + BRG_BCR);
}

/* Clear the BRG enable bit (bit 16 of BRG_BCR) */
static void mpsc_brg_disable(struct mpsc_port_info *pi)
{
	u32 v;

	v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
	v &= ~(1 << 16);

	if (pi->mirror_regs)
		pi->BRG_BCR_m = v;
	writel(v, pi->brg_base + BRG_BCR);
}

/*
 * To set the baud, we adjust the CDV field in the BRG_BCR reg.
 * From manual: Baud = clk / ((CDV+1)*2) ==> CDV = (clk / (baud*2)) - 1.
 * However, the input clock is divided by 16 in the MPSC b/c of how
 * 'MPSC_MMCRH' was set up so we have to divide the 'clk' used in our
 * calculation by 16 to account for that.  So the real calculation
 * that accounts for the way the mpsc is set up is:
 * CDV = (clk / (baud*2*16)) - 1 ==> CDV = (clk / (baud << 5)) - 1.
 */
static void mpsc_set_baudrate(struct mpsc_port_info *pi, u32 baud)
{
	u32 cdv = (pi->port.uartclk / (baud << 5)) - 1;
	u32 v;

	/* BRG must be disabled while CDV is reprogrammed */
	mpsc_brg_disable(pi);
	v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
	v = (v & 0xffff0000) | (cdv & 0xffff);

	if (pi->mirror_regs)
		pi->BRG_BCR_m = v;
	writel(v, pi->brg_base + BRG_BCR);
	mpsc_brg_enable(pi);
}

/*
 ******************************************************************************
 *
 * Serial DMA Routines (SDMA)
 *
 ******************************************************************************
 */
/*
 * Program the SDMA burst size field (bits 12-13 of SDMA_SDC) from a burst
 * size given in bytes.  Register values are in units of 64-bit words.
 *
 * NOTE(review): the read-modify-write mask '& (0x3 << 12)' preserves ONLY
 * the old burst field and clears every other SDC bit; it looks like it was
 * meant to be '& ~(0x3 << 12)'.  Confirm against the GT-6426x spec before
 * changing -- mpsc_sdma_init() re-writes SDC around this call.
 */
static void mpsc_sdma_burstsize(struct mpsc_port_info *pi, u32 burst_size)
{
	u32 v;

	pr_debug("mpsc_sdma_burstsize[%d]: burst_size: %d\n",
		pi->port.line, burst_size);

	burst_size >>= 3; /* Divide by 8 b/c reg values are 8-byte chunks */

	if (burst_size < 2)
		v = 0x0;	/* 1 64-bit word */
	else if (burst_size < 4)
		v = 0x1;	/* 2 64-bit words */
	else if (burst_size < 8)
		v = 0x2;	/* 4 64-bit words */
	else
		v = 0x3;	/* 8 64-bit words */

	writel((readl(pi->sdma_base + SDMA_SDC) & (0x3 << 12)) | (v << 12),
		pi->sdma_base + SDMA_SDC);
}

/* Initialize SDMA_SDC config bits and set the DMA burst size */
static void mpsc_sdma_init(struct mpsc_port_info *pi, u32 burst_size)
{
	pr_debug("mpsc_sdma_init[%d]: burst_size: %d\n", pi->port.line,
		burst_size);

	writel((readl(pi->sdma_base + SDMA_SDC) & 0x3ff) | 0x03f,
		pi->sdma_base + SDMA_SDC);
	mpsc_sdma_burstsize(pi, burst_size);
}

/*
 * Clear (mask) the given intr bits for this port in the shared SDMA intr
 * mask register.  Returns the previous 4-bit mask nibble for this port.
 * Ctlr 1's nibble sits 8 bits above ctlr 0's.
 */
static u32 mpsc_sdma_intr_mask(struct mpsc_port_info *pi, u32 mask)
{
	u32 old, v;

	pr_debug("mpsc_sdma_intr_mask[%d]: mask: 0x%x\n", pi->port.line, mask);

	old = v = (pi->mirror_regs) ? pi->shared_regs->SDMA_INTR_MASK_m :
		readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);

	mask &= 0xf;
	if (pi->port.line)
		mask <<= 8;
	v &= ~mask;

	if (pi->mirror_regs)
		pi->shared_regs->SDMA_INTR_MASK_m = v;
	writel(v, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);

	if (pi->port.line)
		old >>= 8;
	return old & 0xf;
}

/* Set (unmask) the given intr bits for this port in the shared mask reg */
static void mpsc_sdma_intr_unmask(struct mpsc_port_info *pi, u32 mask)
{
	u32 v;

	pr_debug("mpsc_sdma_intr_unmask[%d]: mask: 0x%x\n", pi->port.line,mask);

	v = (pi->mirror_regs) ? pi->shared_regs->SDMA_INTR_MASK_m :
		readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);

	mask &= 0xf;
	if (pi->port.line)
		mask <<= 8;
	v |= mask;

	if (pi->mirror_regs)
		pi->shared_regs->SDMA_INTR_MASK_m = v;
	writel(v, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
}

/* Ack this port's pending SDMA intrs (byte write to our cause byte) */
static void mpsc_sdma_intr_ack(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_sdma_intr_ack[%d]: Acknowledging IRQ\n", pi->port.line);

	if (pi->mirror_regs)
		pi->shared_regs->SDMA_INTR_CAUSE_m = 0;
	writeb(0x00, pi->shared_regs->sdma_intr_base + SDMA_INTR_CAUSE
			+ pi->port.line);
}

/* Point the SDMA current Rx descriptor pointer at rxre_p (phys addr) */
static void mpsc_sdma_set_rx_ring(struct mpsc_port_info *pi,
		struct mpsc_rx_desc *rxre_p)
{
	pr_debug("mpsc_sdma_set_rx_ring[%d]: rxre_p: 0x%x\n",
		pi->port.line, (u32)rxre_p);

	writel((u32)rxre_p, pi->sdma_base + SDMA_SCRDP);
}

/* Point both the first & current Tx descriptor pointers at txre_p */
static void mpsc_sdma_set_tx_ring(struct mpsc_port_info *pi,
		struct mpsc_tx_desc *txre_p)
{
	writel((u32)txre_p, pi->sdma_base + SDMA_SFTDP);
	writel((u32)txre_p, pi->sdma_base + SDMA_SCTDP);
}

/*
 * OR 'val' into SDMA_SDCM, or clear the whole register when val == 0.
 * Barriers bracket the write so the command is not reordered against
 * descriptor updates.
 */
static void mpsc_sdma_cmd(struct mpsc_port_info *pi, u32 val)
{
	u32 v;

	v = readl(pi->sdma_base + SDMA_SDCM);
	if (val)
		v |= val;
	else
		v = 0;
	wmb();
	writel(v, pi->sdma_base + SDMA_SDCM);
	wmb();
}

/* Non-zero while the Tx DMA engine is running (TXD demand bit) */
static uint mpsc_sdma_tx_active(struct mpsc_port_info *pi)
{
	return readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_TXD;
}

/* Kick off Tx DMA at the tail descriptor if it's owned & engine is idle */
static void mpsc_sdma_start_tx(struct mpsc_port_info *pi)
{
	struct mpsc_tx_desc *txre, *txre_p;

	/* If tx isn't running & there's a desc ready to go, start it */
	if (!mpsc_sdma_tx_active(pi)) {
		txre = (struct mpsc_tx_desc *)(pi->txr
				+ (pi->txr_tail * MPSC_TXRE_SIZE));
		dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)txre,
					(ulong)txre + MPSC_TXRE_SIZE);
#endif

		if (be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O) {
			txre_p = (struct mpsc_tx_desc *)
				(pi->txr_p + (pi->txr_tail * MPSC_TXRE_SIZE));

			mpsc_sdma_set_tx_ring(pi, txre_p);
			mpsc_sdma_cmd(pi, SDMA_SDCM_STD | SDMA_SDCM_TXD);
		}
	}
}

/* Abort all SDMA activity, clear ring pointers, mask & ack interrupts */
static void mpsc_sdma_stop(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_sdma_stop[%d]: Stopping SDMA\n", pi->port.line);

	/* Abort any SDMA transfers */
	mpsc_sdma_cmd(pi, 0);
	mpsc_sdma_cmd(pi, SDMA_SDCM_AR | SDMA_SDCM_AT);

	/* Clear the SDMA current and first TX and RX pointers */
	mpsc_sdma_set_tx_ring(pi, NULL);
	mpsc_sdma_set_rx_ring(pi, NULL);

	/* Disable interrupts */
	mpsc_sdma_intr_mask(pi, 0xf);
	mpsc_sdma_intr_ack(pi);
}

/*
 ******************************************************************************
 *
 * Multi-Protocol Serial Controller Routines (MPSC)
 *
 ******************************************************************************
 */
/*
 * Program clock routing and put the MPSC in UART mode with the default
 * baud rate; zero all channel registers except CHR_3 (max idle).
 * Register mirrors are used throughout when the hw can't be read back.
 */
static void mpsc_hw_init(struct mpsc_port_info *pi)
{
	u32 v;

	pr_debug("mpsc_hw_init[%d]: Initializing hardware\n", pi->port.line);

	/* Set up clock routing */
	if (pi->mirror_regs) {
		v = pi->shared_regs->MPSC_MRR_m;
		v &= ~0x1c7;
		pi->shared_regs->MPSC_MRR_m = v;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR);

		v = pi->shared_regs->MPSC_RCRR_m;
		v = (v & ~0xf0f) | 0x100;
		pi->shared_regs->MPSC_RCRR_m = v;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR);

		v = pi->shared_regs->MPSC_TCRR_m;
		v = (v & ~0xf0f) | 0x100;
		pi->shared_regs->MPSC_TCRR_m = v;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
	} else {
		v = readl(pi->shared_regs->mpsc_routing_base + MPSC_MRR);
		v &= ~0x1c7;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR);

		v = readl(pi->shared_regs->mpsc_routing_base + MPSC_RCRR);
		v = (v & ~0xf0f) | 0x100;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR);

		v = readl(pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
		v = (v & ~0xf0f) | 0x100;
		writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
	}

	/* Put MPSC in UART mode & enable Tx/Rx engines */
	writel(0x000004c4, pi->mpsc_base + MPSC_MMCRL);
	/* No preamble, 16x divider, low-latency, */
	writel(0x04400400, pi->mpsc_base + MPSC_MMCRH);
	mpsc_set_baudrate(pi, pi->default_baud);

	if (pi->mirror_regs) {
		pi->MPSC_CHR_1_m = 0;
		pi->MPSC_CHR_2_m = 0;
	}
	writel(0, pi->mpsc_base + MPSC_CHR_1);
	writel(0, pi->mpsc_base + MPSC_CHR_2);
	writel(pi->mpsc_max_idle, pi->mpsc_base + MPSC_CHR_3);
	writel(0, pi->mpsc_base + MPSC_CHR_4);
	writel(0, pi->mpsc_base + MPSC_CHR_5);
	writel(0, pi->mpsc_base + MPSC_CHR_6);
	writel(0, pi->mpsc_base + MPSC_CHR_7);
	writel(0, pi->mpsc_base + MPSC_CHR_8);
	writel(0, pi->mpsc_base + MPSC_CHR_9);
	writel(0, pi->mpsc_base + MPSC_CHR_10);
}

/*
 * Tell the receiver to enter hunt mode (resync on the next start bit).
 * With mirrored regs we can't poll for completion, so just delay.
 */
static void mpsc_enter_hunt(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_enter_hunt[%d]: Hunting...\n", pi->port.line);

	if (pi->mirror_regs) {
		writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_EH,
			pi->mpsc_base + MPSC_CHR_2);
		/* Erratum prevents reading CHR_2 so just delay for a while */
		udelay(100);
	} else {
		writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_EH,
			pi->mpsc_base + MPSC_CHR_2);

		while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_EH)
			udelay(10);
	}
}

/* Freeze the transmitter (set MPCR FRZ bit) */
static void mpsc_freeze(struct mpsc_port_info *pi)
{
	u32 v;

	pr_debug("mpsc_freeze[%d]: Freezing\n", pi->port.line);

	v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
		readl(pi->mpsc_base + MPSC_MPCR);
	v |= MPSC_MPCR_FRZ;

	if (pi->mirror_regs)
		pi->MPSC_MPCR_m = v;
	writel(v, pi->mpsc_base + MPSC_MPCR);
}

/* Unfreeze the transmitter (clear MPCR FRZ bit) */
static void mpsc_unfreeze(struct mpsc_port_info *pi)
{
	u32 v;

	v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
		readl(pi->mpsc_base + MPSC_MPCR);
	v &= ~MPSC_MPCR_FRZ;

	if (pi->mirror_regs)
		pi->MPSC_MPCR_m = v;
	writel(v, pi->mpsc_base + MPSC_MPCR);

	pr_debug("mpsc_unfreeze[%d]: Unfrozen\n", pi->port.line);
}

/* Set character length field (MPCR bits 12-13, MPSC_MPCR_CL_*) */
static void mpsc_set_char_length(struct mpsc_port_info *pi, u32 len)
{
	u32 v;

	pr_debug("mpsc_set_char_length[%d]: char len: %d\n", pi->port.line,len);

	v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
		readl(pi->mpsc_base + MPSC_MPCR);
	v = (v & ~(0x3 << 12)) | ((len & 0x3) << 12);

	if (pi->mirror_regs)
		pi->MPSC_MPCR_m = v;
	writel(v, pi->mpsc_base + MPSC_MPCR);
}

/* Set stop-bit length field (MPCR bit 14, MPSC_MPCR_SBL_*) */
static void mpsc_set_stop_bit_length(struct mpsc_port_info *pi, u32 len)
{
	u32 v;

	pr_debug("mpsc_set_stop_bit_length[%d]: stop bits: %d\n",
		pi->port.line, len);

	v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
		readl(pi->mpsc_base + MPSC_MPCR);

	v = (v & ~(1 << 14)) | ((len & 0x1) << 14);

	if (pi->mirror_regs)
		pi->MPSC_MPCR_m = v;
	writel(v, pi->mpsc_base + MPSC_MPCR);
}

/* Set Rx (bits 18-19) and Tx (bits 2-3) parity mode in CHR_2 */
static void mpsc_set_parity(struct mpsc_port_info *pi, u32 p)
{
	u32 v;

	pr_debug("mpsc_set_parity[%d]: parity bits: 0x%x\n", pi->port.line, p);

	v = (pi->mirror_regs) ? pi->MPSC_CHR_2_m :
		readl(pi->mpsc_base + MPSC_CHR_2);

	p &= 0x3;
	v = (v & ~0xc000c) | (p << 18) | (p << 2);

	if (pi->mirror_regs)
		pi->MPSC_CHR_2_m = v;
	writel(v, pi->mpsc_base + MPSC_CHR_2);
}

/*
 ******************************************************************************
 *
 * Driver Init Routines
 *
 ******************************************************************************
 */
/* Full hardware bring-up: BRG, SDMA (stopped), then MPSC registers */
static void mpsc_init_hw(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_init_hw[%d]: Initializing\n", pi->port.line);

	mpsc_brg_init(pi, pi->brg_clk_src);
	mpsc_brg_enable(pi);
	mpsc_sdma_init(pi, dma_get_cache_alignment());	/* burst a cacheline */
	mpsc_sdma_stop(pi);
	mpsc_hw_init(pi);
}

/*
 * Allocate the single noncoherent DMA region that holds both rings and
 * both I/O buffer areas.  No-op if already allocated.  Returns 0 or
 * -ENXIO/-ENOMEM.
 */
static int mpsc_alloc_ring_mem(struct mpsc_port_info *pi)
{
	int rc = 0;

	pr_debug("mpsc_alloc_ring_mem[%d]: Allocating ring mem\n",
		pi->port.line);

	if (!pi->dma_region) {
		if (!dma_supported(pi->port.dev, 0xffffffff)) {
			printk(KERN_ERR "MPSC: Inadequate DMA support\n");
			rc = -ENXIO;
		} else if ((pi->dma_region = dma_alloc_noncoherent(pi->port.dev,
						MPSC_DMA_ALLOC_SIZE,
						&pi->dma_region_p, GFP_KERNEL))
				== NULL) {
			printk(KERN_ERR "MPSC: Can't alloc Desc region\n");
			rc = -ENOMEM;
		}
	}

	return rc;
}

/* Free the DMA region allocated by mpsc_alloc_ring_mem() */
static void mpsc_free_ring_mem(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_free_ring_mem[%d]: Freeing ring mem\n", pi->port.line);

	if (pi->dma_region) {
		dma_free_noncoherent(pi->port.dev, MPSC_DMA_ALLOC_SIZE,
				pi->dma_region, pi->dma_region_p);
		pi->dma_region = NULL;
		pi->dma_region_p = (dma_addr_t)NULL;
	}
}

/*
 * Carve the DMA region into Rx ring / Rx bufs / Tx ring / Tx bufs and
 * initialize the descriptors.  Rx descriptors are handed to the SDMA
 * engine (owned); Tx descriptors start unowned.  Each ring's last
 * descriptor links back to the first.
 */
static void mpsc_init_rings(struct mpsc_port_info *pi)
{
	struct mpsc_rx_desc *rxre;
	struct mpsc_tx_desc *txre;
	dma_addr_t dp, dp_p;
	u8 *bp, *bp_p;
	int i;

	pr_debug("mpsc_init_rings[%d]: Initializing rings\n", pi->port.line);

	BUG_ON(pi->dma_region == NULL);

	memset(pi->dma_region, 0, MPSC_DMA_ALLOC_SIZE);

	/*
	 * Descriptors & buffers are multiples of cacheline size and must be
	 * cacheline aligned.
	 */
	dp = ALIGN((u32)pi->dma_region, dma_get_cache_alignment());
	dp_p = ALIGN((u32)pi->dma_region_p, dma_get_cache_alignment());

	/*
	 * Partition dma region into rx ring descriptor, rx buffers,
	 * tx ring descriptors, and tx buffers.
	 */
	pi->rxr = dp;
	pi->rxr_p = dp_p;
	dp += MPSC_RXR_SIZE;
	dp_p += MPSC_RXR_SIZE;

	pi->rxb = (u8 *)dp;
	pi->rxb_p = (u8 *)dp_p;
	dp += MPSC_RXB_SIZE;
	dp_p += MPSC_RXB_SIZE;

	pi->rxr_posn = 0;

	pi->txr = dp;
	pi->txr_p = dp_p;
	dp += MPSC_TXR_SIZE;
	dp_p += MPSC_TXR_SIZE;

	pi->txb = (u8 *)dp;
	pi->txb_p = (u8 *)dp_p;

	pi->txr_head = 0;
	pi->txr_tail = 0;

	/* Init rx ring descriptors */
	dp = pi->rxr;
	dp_p = pi->rxr_p;
	bp = pi->rxb;
	bp_p = pi->rxb_p;

	for (i = 0; i < MPSC_RXR_ENTRIES; i++) {
		rxre = (struct mpsc_rx_desc *)dp;

		rxre->bufsize = cpu_to_be16(MPSC_RXBE_SIZE);
		rxre->bytecnt = cpu_to_be16(0);
		rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O
				| SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F
				| SDMA_DESC_CMDSTAT_L);
		rxre->link = cpu_to_be32(dp_p + MPSC_RXRE_SIZE);
		rxre->buf_ptr = cpu_to_be32(bp_p);

		dp += MPSC_RXRE_SIZE;
		dp_p += MPSC_RXRE_SIZE;
		bp += MPSC_RXBE_SIZE;
		bp_p += MPSC_RXBE_SIZE;
	}
	rxre->link = cpu_to_be32(pi->rxr_p);	/* Wrap last back to first */

	/* Init tx ring descriptors */
	dp = pi->txr;
	dp_p = pi->txr_p;
	bp = pi->txb;
	bp_p = pi->txb_p;

	for (i = 0; i < MPSC_TXR_ENTRIES; i++) {
		txre = (struct mpsc_tx_desc *)dp;

		txre->link = cpu_to_be32(dp_p + MPSC_TXRE_SIZE);
		txre->buf_ptr = cpu_to_be32(bp_p);

		dp += MPSC_TXRE_SIZE;
		dp_p += MPSC_TXRE_SIZE;
		bp += MPSC_TXBE_SIZE;
		bp_p += MPSC_TXBE_SIZE;
	}
	txre->link = cpu_to_be32(pi->txr_p);	/* Wrap last back to first */

	dma_cache_sync(pi->port.dev, (void *)pi->dma_region,
			MPSC_DMA_ALLOC_SIZE, DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
		flush_dcache_range((ulong)pi->dma_region,
				(ulong)pi->dma_region + MPSC_DMA_ALLOC_SIZE);
#endif

	return;
}

/* Reset all ring bookkeeping (the DMA region itself is kept allocated) */
static void mpsc_uninit_rings(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_uninit_rings[%d]: Uninitializing rings\n",pi->port.line);

	BUG_ON(pi->dma_region == NULL);

	pi->rxr = 0;
	pi->rxr_p = 0;
	pi->rxb = NULL;
	pi->rxb_p = NULL;
	pi->rxr_posn = 0;

	pi->txr = 0;
	pi->txr_p = 0;
	pi->txb = NULL;
	pi->txb_p = NULL;
	pi->txr_head = 0;
	pi->txr_tail = 0;
}

/* One-time bring-up: hw init + ring alloc + ring init; idempotent */
static int mpsc_make_ready(struct mpsc_port_info *pi)
{
	int rc;

	pr_debug("mpsc_make_ready[%d]: Making cltr ready\n", pi->port.line);

	if (!pi->ready) {
		mpsc_init_hw(pi);
		if ((rc = mpsc_alloc_ring_mem(pi)))
			return rc;
		mpsc_init_rings(pi);
		pi->ready = 1;
	}

	return 0;
}

#ifdef CONFIG_CONSOLE_POLL
/* Set while the kgdb/console poll path owns the Rx ring; tells the
 * interrupt path to bail out (see mpsc_rx_intr). */
static int serial_polled;
#endif

/*
 ******************************************************************************
 *
 * Interrupt Handling Routines
 *
 ******************************************************************************
 */
/*
 * Drain completed Rx descriptors into the tty flip buffer, recycle each
 * descriptor back to the SDMA engine, and restart the Rx engine if it
 * stopped.  Called with pi->port.lock held; temporarily drops it around
 * tty_flip_buffer_push().  Returns 1 if any descriptor was processed.
 */
static int mpsc_rx_intr(struct mpsc_port_info *pi, unsigned long *flags)
{
	struct mpsc_rx_desc *rxre;
	struct tty_port *port = &pi->port.state->port;
	u32 cmdstat, bytes_in, i;
	int rc = 0;
	u8 *bp;
	char flag = TTY_NORMAL;

	pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line);

	rxre = (struct mpsc_rx_desc *)(pi->rxr + (pi->rxr_posn*MPSC_RXRE_SIZE));

	dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
			DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
		invalidate_dcache_range((ulong)rxre,
				(ulong)rxre + MPSC_RXRE_SIZE);
#endif

	/*
	 * Loop through Rx descriptors handling ones that have been completed.
	 */
	while (!((cmdstat = be32_to_cpu(rxre->cmdstat))
				& SDMA_DESC_CMDSTAT_O)) {
		bytes_in = be16_to_cpu(rxre->bytecnt);
#ifdef CONFIG_CONSOLE_POLL
		/* The console-poll path owns the ring; don't touch it here */
		if (unlikely(serial_polled)) {
			serial_polled = 0;
			return 0;
		}
#endif
		/* Following use of tty struct directly is deprecated */
		if (tty_buffer_request_room(port, bytes_in) < bytes_in) {
			if (port->low_latency) {
				spin_unlock_irqrestore(&pi->port.lock, *flags);
				tty_flip_buffer_push(port);
				spin_lock_irqsave(&pi->port.lock, *flags);
			}
			/*
			 * If this failed then we will throw away the bytes
			 * but must do so to clear interrupts.
			 */
		}

		bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_RXBE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)bp,
					(ulong)bp + MPSC_RXBE_SIZE);
#endif

		/*
		 * Other than for parity error, the manual provides little
		 * info on what data will be in a frame flagged by any of
		 * these errors.  For parity error, it is the last byte in
		 * the buffer that had the error.  As for the rest, I guess
		 * we'll assume there is no data in the buffer.
		 * If there is...it gets lost.
		 */
		if (unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR
						| SDMA_DESC_CMDSTAT_FR
						| SDMA_DESC_CMDSTAT_OR))) {

			pi->port.icount.rx++;

			if (cmdstat & SDMA_DESC_CMDSTAT_BR) {	/* Break */
				pi->port.icount.brk++;

				if (uart_handle_break(&pi->port))
					goto next_frame;
			} else if (cmdstat & SDMA_DESC_CMDSTAT_FR) {
				pi->port.icount.frame++;
			} else if (cmdstat & SDMA_DESC_CMDSTAT_OR) {
				pi->port.icount.overrun++;
			}

			cmdstat &= pi->port.read_status_mask;

			if (cmdstat & SDMA_DESC_CMDSTAT_BR)
				flag = TTY_BREAK;
			else if (cmdstat & SDMA_DESC_CMDSTAT_FR)
				flag = TTY_FRAME;
			else if (cmdstat & SDMA_DESC_CMDSTAT_OR)
				flag = TTY_OVERRUN;
			else if (cmdstat & SDMA_DESC_CMDSTAT_PE)
				flag = TTY_PARITY;
		}

		if (uart_handle_sysrq_char(&pi->port, *bp)) {
			bp++;
			bytes_in--;
#ifdef CONFIG_CONSOLE_POLL
			if (unlikely(serial_polled)) {
				serial_polled = 0;
				return 0;
			}
#endif
			goto next_frame;
		}

		if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR
						| SDMA_DESC_CMDSTAT_FR
						| SDMA_DESC_CMDSTAT_OR)))
				&& !(cmdstat & pi->port.ignore_status_mask)) {
			tty_insert_flip_char(port, *bp, flag);
		} else {
			for (i=0; i<bytes_in; i++)
				tty_insert_flip_char(port, *bp++, TTY_NORMAL);

			pi->port.icount.rx += bytes_in;
		}

next_frame:
		/* Hand the descriptor back to the SDMA engine */
		rxre->bytecnt = cpu_to_be16(0);
		wmb();
		rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O
				| SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F
				| SDMA_DESC_CMDSTAT_L);
		wmb();
		dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
				DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)rxre,
					(ulong)rxre + MPSC_RXRE_SIZE);
#endif

		/* Advance to next descriptor */
		pi->rxr_posn = (pi->rxr_posn + 1) & (MPSC_RXR_ENTRIES - 1);
		rxre = (struct mpsc_rx_desc *)
			(pi->rxr + (pi->rxr_posn * MPSC_RXRE_SIZE));
		dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)rxre,
					(ulong)rxre + MPSC_RXRE_SIZE);
#endif
		rc = 1;
	}

	/* Restart rx engine, if its stopped */
	if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
		mpsc_start_rx(pi);

	spin_unlock_irqrestore(&pi->port.lock, *flags);
	tty_flip_buffer_push(port);
	spin_lock_irqsave(&pi->port.lock, *flags);
	return rc;
}

/* Fill in the head Tx descriptor and hand it to the SDMA engine */
static void mpsc_setup_tx_desc(struct mpsc_port_info *pi, u32 count, u32 intr)
{
	struct mpsc_tx_desc *txre;

	txre = (struct mpsc_tx_desc *)(pi->txr
			+ (pi->txr_head * MPSC_TXRE_SIZE));

	txre->bytecnt = cpu_to_be16(count);
	txre->shadow = txre->bytecnt;
	wmb();			/* ensure cmdstat is last field updated */
	txre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O | SDMA_DESC_CMDSTAT_F
			| SDMA_DESC_CMDSTAT_L
			| ((intr) ? SDMA_DESC_CMDSTAT_EI : 0));
	wmb();
	dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
			DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
		flush_dcache_range((ulong)txre,
				(ulong)txre + MPSC_TXRE_SIZE);
#endif
}

/*
 * Move pending Tx data (x_char first, then the circ buffer) into ring
 * buffers, one descriptor at a time, until the ring is full or there is
 * nothing left to send.  Caller holds pi->tx_lock.
 */
static void mpsc_copy_tx_data(struct mpsc_port_info *pi)
{
	struct circ_buf *xmit = &pi->port.state->xmit;
	u8 *bp;
	u32 i;

	/* Make sure the desc ring isn't full */
	while (CIRC_CNT(pi->txr_head, pi->txr_tail, MPSC_TXR_ENTRIES)
			< (MPSC_TXR_ENTRIES - 1)) {
		if (pi->port.x_char) {
			/*
			 * Ideally, we should use the TCS field in
			 * CHR_1 to put the x_char out immediately but
			 * errata prevents us from being able to read
			 * CHR_2 to know that its safe to write to
			 * CHR_1.  Instead, just put it in-band with
			 * all the other Tx data.
			 */
			bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
			*bp = pi->port.x_char;
			pi->port.x_char = 0;
			i = 1;
		} else if (!uart_circ_empty(xmit)
				&& !uart_tx_stopped(&pi->port)) {
			i = min((u32)MPSC_TXBE_SIZE,
				(u32)uart_circ_chars_pending(xmit));
			i = min(i, (u32)CIRC_CNT_TO_END(xmit->head, xmit->tail,
						UART_XMIT_SIZE));
			bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
			memcpy(bp, &xmit->buf[xmit->tail], i);
			xmit->tail = (xmit->tail + i) & (UART_XMIT_SIZE - 1);

			if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
				uart_write_wakeup(&pi->port);
		} else { /* All tx data copied into ring bufs */
			return;
		}

		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE,
				DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)bp,
					(ulong)bp + MPSC_TXBE_SIZE);
#endif
		mpsc_setup_tx_desc(pi, i, 1);

		/* Advance to next descriptor */
		pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1);
	}
}

/*
 * Reap completed Tx descriptors (accumulate icount.tx, advance tail),
 * refill the ring, and restart the Tx engine.  Returns 1 if any
 * descriptor completed.
 */
static int mpsc_tx_intr(struct mpsc_port_info *pi)
{
	struct mpsc_tx_desc *txre;
	int rc = 0;
	unsigned long iflags;

	spin_lock_irqsave(&pi->tx_lock, iflags);

	if (!mpsc_sdma_tx_active(pi)) {
		txre = (struct mpsc_tx_desc *)(pi->txr
				+ (pi->txr_tail * MPSC_TXRE_SIZE));

		dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)txre,
					(ulong)txre + MPSC_TXRE_SIZE);
#endif

		while (!(be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O)) {
			rc = 1;
			pi->port.icount.tx += be16_to_cpu(txre->bytecnt);
			pi->txr_tail = (pi->txr_tail+1) & (MPSC_TXR_ENTRIES-1);

			/* If no more data to tx, fall out of loop */
			if (pi->txr_head == pi->txr_tail)
				break;

			txre = (struct mpsc_tx_desc *)(pi->txr
					+ (pi->txr_tail * MPSC_TXRE_SIZE));
			dma_cache_sync(pi->port.dev, (void *)txre,
					MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)txre,
						(ulong)txre + MPSC_TXRE_SIZE);
#endif
		}

		mpsc_copy_tx_data(pi);
		mpsc_sdma_start_tx(pi);	/* start next desc if ready */
	}

	spin_unlock_irqrestore(&pi->tx_lock, iflags);
	return rc;
}

/*
 * This is the driver's interrupt handler.  To avoid a race, we first clear
 * the interrupt, then handle any completed Rx/Tx descriptors.  When done
 * handling those descriptors, we restart the Rx/Tx engines if they're
 * stopped.
 */
static irqreturn_t mpsc_sdma_intr(int irq, void *dev_id)
{
	struct mpsc_port_info *pi = dev_id;
	ulong iflags;
	int rc = IRQ_NONE;

	pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Received\n",pi->port.line);

	spin_lock_irqsave(&pi->port.lock, iflags);
	mpsc_sdma_intr_ack(pi);
	if (mpsc_rx_intr(pi, &iflags))
		rc = IRQ_HANDLED;
	if (mpsc_tx_intr(pi))
		rc = IRQ_HANDLED;
	spin_unlock_irqrestore(&pi->port.lock, iflags);

	pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Handled\n", pi->port.line);
	return rc;
}

/*
 ******************************************************************************
 *
 * serial_core.c Interface routines
 *
 ******************************************************************************
 */
/* uart_ops.tx_empty: TIOCSER_TEMT when the Tx DMA engine is idle */
static uint mpsc_tx_empty(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	ulong iflags;
	uint rc;

	spin_lock_irqsave(&pi->port.lock, iflags);
	rc = mpsc_sdma_tx_active(pi) ? 0 : TIOCSER_TEMT;
	spin_unlock_irqrestore(&pi->port.lock, iflags);

	return rc;
}

static void mpsc_set_mctrl(struct uart_port *port, uint mctrl)
{
	/* Have no way to set modem control lines AFAICT */
}

/* uart_ops.get_mctrl: derive CTS/CD from CHR_10; DSR always reported */
static uint mpsc_get_mctrl(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	u32 mflags, status;

	status = (pi->mirror_regs) ? pi->MPSC_CHR_10_m :
		readl(pi->mpsc_base + MPSC_CHR_10);

	mflags = 0;
	if (status & 0x1)
		mflags |= TIOCM_CTS;
	if (status & 0x2)
		mflags |= TIOCM_CAR;

	return mflags | TIOCM_DSR;	/* No way to tell if DSR asserted */
}

/* uart_ops.stop_tx: freeze the transmitter */
static void mpsc_stop_tx(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);

	pr_debug("mpsc_stop_tx[%d]\n", port->line);

	mpsc_freeze(pi);
}

/* uart_ops.start_tx: unfreeze, refill the ring and kick the Tx engine */
static void mpsc_start_tx(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	unsigned long iflags;

	spin_lock_irqsave(&pi->tx_lock, iflags);

	mpsc_unfreeze(pi);
	mpsc_copy_tx_data(pi);
	mpsc_sdma_start_tx(pi);

	spin_unlock_irqrestore(&pi->tx_lock, iflags);

	pr_debug("mpsc_start_tx[%d]\n", port->line);
}

/* Resync the receiver and enable Rx DMA (only if receiving is enabled) */
static void mpsc_start_rx(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_start_rx[%d]: Starting...\n", pi->port.line);

	if (pi->rcv_data) {
		mpsc_enter_hunt(pi);
		mpsc_sdma_cmd(pi, SDMA_SDCM_ERD);
	}
}

/* uart_ops.stop_rx: issue Rx abort on the MPSC, then abort Rx DMA */
static void mpsc_stop_rx(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);

	pr_debug("mpsc_stop_rx[%d]: Stopping...\n", port->line);

	if (pi->mirror_regs) {
		writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_RA,
			pi->mpsc_base + MPSC_CHR_2);
		/* Erratum prevents reading CHR_2 so just delay for a while */
		udelay(100);
	} else {
		writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_RA,
			pi->mpsc_base + MPSC_CHR_2);

		while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_RA)
			udelay(10);
	}

	mpsc_sdma_cmd(pi, SDMA_SDCM_AR);
}

/* uart_ops.break_ctl: start (ctl != 0) or stop sending a break via CHR_1 */
static void mpsc_break_ctl(struct uart_port *port, int ctl)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	ulong flags;
	u32 v;

	v = ctl ? 0x00ff0000 : 0;

	spin_lock_irqsave(&pi->port.lock, flags);
	if (pi->mirror_regs)
		pi->MPSC_CHR_1_m = v;
	writel(v, pi->mpsc_base + MPSC_CHR_1);
	spin_unlock_irqrestore(&pi->port.lock, flags);
}

/*
 * uart_ops.startup: bring the ctlr up, install the (possibly shared)
 * SDMA IRQ handler, unmask intrs and point the engine at the Rx ring.
 */
static int mpsc_startup(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	u32 flag = 0;
	int rc;

	pr_debug("mpsc_startup[%d]: Starting up MPSC, irq: %d\n",
		port->line, pi->port.irq);

	if ((rc = mpsc_make_ready(pi)) == 0) {
		/* Setup IRQ handler */
		mpsc_sdma_intr_ack(pi);

		/* If irq's are shared, need to set flag */
		if (mpsc_ports[0].port.irq == mpsc_ports[1].port.irq)
			flag = IRQF_SHARED;

		if (request_irq(pi->port.irq, mpsc_sdma_intr, flag,
				"mpsc-sdma", pi))
			printk(KERN_ERR "MPSC: Can't get SDMA IRQ %d\n",
					pi->port.irq);

		mpsc_sdma_intr_unmask(pi, 0xf);
		mpsc_sdma_set_rx_ring(pi, (struct mpsc_rx_desc *)(pi->rxr_p
					+ (pi->rxr_posn * MPSC_RXRE_SIZE)));
	}

	return rc;
}

/* uart_ops.shutdown: stop DMA and release the IRQ */
static void mpsc_shutdown(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);

	pr_debug("mpsc_shutdown[%d]: Shutting down MPSC\n", port->line);

	mpsc_sdma_stop(pi);
	free_irq(pi->port.irq, pi);
}

/*
 * uart_ops.set_termios: translate termios into char length, stop bits,
 * parity and baud; build read/ignore status masks; enable or disable
 * the receiver according to CREAD.
 */
static void mpsc_set_termios(struct uart_port *port, struct ktermios *termios,
		struct ktermios *old)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	u32 baud;
	ulong flags;
	u32 chr_bits, stop_bits, par;

	pi->c_iflag = termios->c_iflag;
	pi->c_cflag = termios->c_cflag;

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		chr_bits = MPSC_MPCR_CL_5;
		break;
	case CS6:
		chr_bits = MPSC_MPCR_CL_6;
		break;
	case CS7:
		chr_bits = MPSC_MPCR_CL_7;
		break;
	case CS8:
	default:
		chr_bits = MPSC_MPCR_CL_8;
		break;
	}

	if (termios->c_cflag & CSTOPB)
		stop_bits = MPSC_MPCR_SBL_2;
	else
		stop_bits = MPSC_MPCR_SBL_1;

	par = MPSC_CHR_2_PAR_EVEN;
	if (termios->c_cflag & PARENB)
		if (termios->c_cflag & PARODD)
			par = MPSC_CHR_2_PAR_ODD;
#ifdef CMSPAR
	if (termios->c_cflag & CMSPAR) {
		if (termios->c_cflag & PARODD)
			par = MPSC_CHR_2_PAR_MARK;
		else
			par = MPSC_CHR_2_PAR_SPACE;
	}
#endif

	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk);

	spin_lock_irqsave(&pi->port.lock, flags);

	uart_update_timeout(port, termios->c_cflag, baud);

	mpsc_set_char_length(pi, chr_bits);
	mpsc_set_stop_bit_length(pi, stop_bits);
	mpsc_set_parity(pi, par);
	mpsc_set_baudrate(pi, baud);

	/* Characters/events to read */
	pi->port.read_status_mask = SDMA_DESC_CMDSTAT_OR;

	if (termios->c_iflag & INPCK)
		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_PE
			| SDMA_DESC_CMDSTAT_FR;

	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_BR;

	/* Characters/events to ignore */
	pi->port.ignore_status_mask = 0;

	if (termios->c_iflag & IGNPAR)
		pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_PE
			| SDMA_DESC_CMDSTAT_FR;

	if (termios->c_iflag & IGNBRK) {
		pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_BR;

		if (termios->c_iflag & IGNPAR)
			pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_OR;
	}

	if ((termios->c_cflag & CREAD)) {
		if (!pi->rcv_data) {
			pi->rcv_data = 1;
			mpsc_start_rx(pi);
		}
	} else if (pi->rcv_data) {
		mpsc_stop_rx(port);
		pi->rcv_data = 0;
	}

	spin_unlock_irqrestore(&pi->port.lock, flags);
}

static const char *mpsc_type(struct uart_port *port)
{
	pr_debug("mpsc_type[%d]: port type: %s\n", port->line,MPSC_DRIVER_NAME);
	return MPSC_DRIVER_NAME;
}

static int mpsc_request_port(struct uart_port *port)
{
	/* Should make chip/platform specific call */
	return 0;
}

/* uart_ops.release_port: tear down rings and free the DMA region */
static void mpsc_release_port(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);

	if (pi->ready) {
		mpsc_uninit_rings(pi);
		mpsc_free_ring_mem(pi);
		pi->ready = 0;
	}
}

static void mpsc_config_port(struct uart_port *port, int flags)
{
}

/* uart_ops.verify_port: validate user-supplied serial_struct settings */
static int mpsc_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	int rc = 0;

	pr_debug("mpsc_verify_port[%d]: Verifying port data\n", pi->port.line);

	if (ser->type != PORT_UNKNOWN && ser->type != PORT_MPSC)
		rc = -EINVAL;
	else if (pi->port.irq != ser->irq)
		rc = -EINVAL;
	else if (ser->io_type != SERIAL_IO_MEM)
		rc = -EINVAL;
	else if (pi->port.uartclk / 16 != ser->baud_base) /* Not sure */
		rc = -EINVAL;
	else if ((void *)pi->port.mapbase != ser->iomem_base)
		rc = -EINVAL;
	else if (pi->port.iobase != ser->port)
		rc = -EINVAL;
	else if (ser->hub6 != 0)
		rc = -EINVAL;

	return rc;
}

#ifdef CONFIG_CONSOLE_POLL
/* Serial polling routines for writing and reading from the uart while
 * in an interrupt or debug context.
 */

static char poll_buf[2048];	/* bytes drained from the Rx ring */
static int poll_ptr;		/* next byte to hand out of poll_buf */
static int poll_cnt;		/* bytes remaining in poll_buf */
static void mpsc_put_poll_char(struct uart_port *port,
		unsigned char c);

/*
 * Polled (kgdb/console) read: drain completed Rx descriptors into
 * poll_buf and return one byte at a time.  Spins until at least one
 * byte is available; sets serial_polled so the IRQ path backs off.
 */
static int mpsc_get_poll_char(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	struct mpsc_rx_desc *rxre;
	u32 cmdstat, bytes_in, i;
	u8 *bp;

	if (!serial_polled)
		serial_polled = 1;

	pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line);

	if (poll_cnt) {
		poll_cnt--;
		return poll_buf[poll_ptr++];
	}
	poll_ptr = 0;
	poll_cnt = 0;

	while (poll_cnt == 0) {
		rxre = (struct mpsc_rx_desc *)(pi->rxr
				+ (pi->rxr_posn*MPSC_RXRE_SIZE));
		dma_cache_sync(pi->port.dev, (void *)rxre,
				MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)rxre,
					(ulong)rxre + MPSC_RXRE_SIZE);
#endif
		/*
		 * Loop through Rx descriptors handling ones that have
		 * been completed.
		 */
		while (poll_cnt == 0 &&
				!((cmdstat = be32_to_cpu(rxre->cmdstat)) &
					SDMA_DESC_CMDSTAT_O)){
			bytes_in = be16_to_cpu(rxre->bytecnt);
			bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
			dma_cache_sync(pi->port.dev, (void *) bp,
					MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)bp,
						(ulong)bp + MPSC_RXBE_SIZE);
#endif
			if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR
					| SDMA_DESC_CMDSTAT_FR
					| SDMA_DESC_CMDSTAT_OR)))
					&& !(cmdstat
						& pi->port.ignore_status_mask)) {
				poll_buf[poll_cnt] = *bp;
				poll_cnt++;
			} else {
				for (i = 0; i < bytes_in; i++) {
					poll_buf[poll_cnt] = *bp++;
					poll_cnt++;
				}
				pi->port.icount.rx += bytes_in;
			}
			/* Hand the descriptor back to the SDMA engine */
			rxre->bytecnt = cpu_to_be16(0);
			wmb();
			rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O
					| SDMA_DESC_CMDSTAT_EI
					| SDMA_DESC_CMDSTAT_F
					| SDMA_DESC_CMDSTAT_L);
			wmb();
			dma_cache_sync(pi->port.dev, (void *)rxre,
					MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				flush_dcache_range((ulong)rxre,
						(ulong)rxre + MPSC_RXRE_SIZE);
#endif

			/* Advance to next descriptor */
			pi->rxr_posn = (pi->rxr_posn + 1)
				& (MPSC_RXR_ENTRIES - 1);
			rxre = (struct mpsc_rx_desc *)(pi->rxr
					+ (pi->rxr_posn * MPSC_RXRE_SIZE));
			dma_cache_sync(pi->port.dev, (void *)rxre,
					MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)rxre,
						(ulong)rxre + MPSC_RXRE_SIZE);
#endif
		}

		/* Restart rx engine, if its stopped */
		if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
			mpsc_start_rx(pi);
	}
	if (poll_cnt) {
		poll_cnt--;
		return poll_buf[poll_ptr++];
	}

	return 0;
}

/* Polled (kgdb/console) write of a single character */
static void mpsc_put_poll_char(struct uart_port *port,
		unsigned char c)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	u32 data;

	data = readl(pi->mpsc_base + MPSC_MPCR);
writeb(c, pi->mpsc_base + MPSC_CHR_1); mb(); data = readl(pi->mpsc_base + MPSC_CHR_2); data |= MPSC_CHR_2_TTCS; writel(data, pi->mpsc_base + MPSC_CHR_2); mb(); while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_TTCS); } #endif static struct uart_ops mpsc_pops = { .tx_empty = mpsc_tx_empty, .set_mctrl = mpsc_set_mctrl, .get_mctrl = mpsc_get_mctrl, .stop_tx = mpsc_stop_tx, .start_tx = mpsc_start_tx, .stop_rx = mpsc_stop_rx, .break_ctl = mpsc_break_ctl, .startup = mpsc_startup, .shutdown = mpsc_shutdown, .set_termios = mpsc_set_termios, .type = mpsc_type, .release_port = mpsc_release_port, .request_port = mpsc_request_port, .config_port = mpsc_config_port, .verify_port = mpsc_verify_port, #ifdef CONFIG_CONSOLE_POLL .poll_get_char = mpsc_get_poll_char, .poll_put_char = mpsc_put_poll_char, #endif }; /* ****************************************************************************** * * Console Interface Routines * ****************************************************************************** */ #ifdef CONFIG_SERIAL_MPSC_CONSOLE static void mpsc_console_write(struct console *co, const char *s, uint count) { struct mpsc_port_info *pi = &mpsc_ports[co->index]; u8 *bp, *dp, add_cr = 0; int i; unsigned long iflags; spin_lock_irqsave(&pi->tx_lock, iflags); while (pi->txr_head != pi->txr_tail) { while (mpsc_sdma_tx_active(pi)) udelay(100); mpsc_sdma_intr_ack(pi); mpsc_tx_intr(pi); } while (mpsc_sdma_tx_active(pi)) udelay(100); while (count > 0) { bp = dp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE); for (i = 0; i < MPSC_TXBE_SIZE; i++) { if (count == 0) break; if (add_cr) { *(dp++) = '\r'; add_cr = 0; } else { *(dp++) = *s; if (*(s++) == '\n') { /* add '\r' after '\n' */ add_cr = 1; count++; } } count--; } dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL); #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ flush_dcache_range((ulong)bp, (ulong)bp + MPSC_TXBE_SIZE); #endif 
mpsc_setup_tx_desc(pi, i, 0); pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1); mpsc_sdma_start_tx(pi); while (mpsc_sdma_tx_active(pi)) udelay(100); pi->txr_tail = (pi->txr_tail + 1) & (MPSC_TXR_ENTRIES - 1); } spin_unlock_irqrestore(&pi->tx_lock, iflags); } static int __init mpsc_console_setup(struct console *co, char *options) { struct mpsc_port_info *pi; int baud, bits, parity, flow; pr_debug("mpsc_console_setup[%d]: options: %s\n", co->index, options); if (co->index >= MPSC_NUM_CTLRS) co->index = 0; pi = &mpsc_ports[co->index]; baud = pi->default_baud; bits = pi->default_bits; parity = pi->default_parity; flow = pi->default_flow; if (!pi->port.ops) return -ENODEV; spin_lock_init(&pi->port.lock); /* Temporary fix--copied from 8250.c */ if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); return uart_set_options(&pi->port, co, baud, parity, bits, flow); } static struct console mpsc_console = { .name = MPSC_DEV_NAME, .write = mpsc_console_write, .device = uart_console_device, .setup = mpsc_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &mpsc_reg, }; static int __init mpsc_late_console_init(void) { pr_debug("mpsc_late_console_init: Enter\n"); if (!(mpsc_console.flags & CON_ENABLED)) register_console(&mpsc_console); return 0; } late_initcall(mpsc_late_console_init); #define MPSC_CONSOLE &mpsc_console #else #define MPSC_CONSOLE NULL #endif /* ****************************************************************************** * * Dummy Platform Driver to extract & map shared register regions * ****************************************************************************** */ static void mpsc_resource_err(char *s) { printk(KERN_WARNING "MPSC: Platform device resource error in %s\n", s); } static int mpsc_shared_map_regs(struct platform_device *pd) { struct resource *r; if ((r = platform_get_resource(pd, IORESOURCE_MEM, MPSC_ROUTING_BASE_ORDER)) && request_mem_region(r->start, MPSC_ROUTING_REG_BLOCK_SIZE, "mpsc_routing_regs")) { 
mpsc_shared_regs.mpsc_routing_base = ioremap(r->start, MPSC_ROUTING_REG_BLOCK_SIZE); mpsc_shared_regs.mpsc_routing_base_p = r->start; } else { mpsc_resource_err("MPSC routing base"); return -ENOMEM; } if ((r = platform_get_resource(pd, IORESOURCE_MEM, MPSC_SDMA_INTR_BASE_ORDER)) && request_mem_region(r->start, MPSC_SDMA_INTR_REG_BLOCK_SIZE, "sdma_intr_regs")) { mpsc_shared_regs.sdma_intr_base = ioremap(r->start, MPSC_SDMA_INTR_REG_BLOCK_SIZE); mpsc_shared_regs.sdma_intr_base_p = r->start; } else { iounmap(mpsc_shared_regs.mpsc_routing_base); release_mem_region(mpsc_shared_regs.mpsc_routing_base_p, MPSC_ROUTING_REG_BLOCK_SIZE); mpsc_resource_err("SDMA intr base"); return -ENOMEM; } return 0; } static void mpsc_shared_unmap_regs(void) { if (!mpsc_shared_regs.mpsc_routing_base) { iounmap(mpsc_shared_regs.mpsc_routing_base); release_mem_region(mpsc_shared_regs.mpsc_routing_base_p, MPSC_ROUTING_REG_BLOCK_SIZE); } if (!mpsc_shared_regs.sdma_intr_base) { iounmap(mpsc_shared_regs.sdma_intr_base); release_mem_region(mpsc_shared_regs.sdma_intr_base_p, MPSC_SDMA_INTR_REG_BLOCK_SIZE); } mpsc_shared_regs.mpsc_routing_base = NULL; mpsc_shared_regs.sdma_intr_base = NULL; mpsc_shared_regs.mpsc_routing_base_p = 0; mpsc_shared_regs.sdma_intr_base_p = 0; } static int mpsc_shared_drv_probe(struct platform_device *dev) { struct mpsc_shared_pdata *pdata; int rc = -ENODEV; if (dev->id == 0) { if (!(rc = mpsc_shared_map_regs(dev))) { pdata = (struct mpsc_shared_pdata *) dev_get_platdata(&dev->dev); mpsc_shared_regs.MPSC_MRR_m = pdata->mrr_val; mpsc_shared_regs.MPSC_RCRR_m= pdata->rcrr_val; mpsc_shared_regs.MPSC_TCRR_m= pdata->tcrr_val; mpsc_shared_regs.SDMA_INTR_CAUSE_m = pdata->intr_cause_val; mpsc_shared_regs.SDMA_INTR_MASK_m = pdata->intr_mask_val; rc = 0; } } return rc; } static int mpsc_shared_drv_remove(struct platform_device *dev) { int rc = -ENODEV; if (dev->id == 0) { mpsc_shared_unmap_regs(); mpsc_shared_regs.MPSC_MRR_m = 0; mpsc_shared_regs.MPSC_RCRR_m = 0; 
mpsc_shared_regs.MPSC_TCRR_m = 0; mpsc_shared_regs.SDMA_INTR_CAUSE_m = 0; mpsc_shared_regs.SDMA_INTR_MASK_m = 0; rc = 0; } return rc; } static struct platform_driver mpsc_shared_driver = { .probe = mpsc_shared_drv_probe, .remove = mpsc_shared_drv_remove, .driver = { .name = MPSC_SHARED_NAME, }, }; /* ****************************************************************************** * * Driver Interface Routines * ****************************************************************************** */ static struct uart_driver mpsc_reg = { .owner = THIS_MODULE, .driver_name = MPSC_DRIVER_NAME, .dev_name = MPSC_DEV_NAME, .major = MPSC_MAJOR, .minor = MPSC_MINOR_START, .nr = MPSC_NUM_CTLRS, .cons = MPSC_CONSOLE, }; static int mpsc_drv_map_regs(struct mpsc_port_info *pi, struct platform_device *pd) { struct resource *r; if ((r = platform_get_resource(pd, IORESOURCE_MEM, MPSC_BASE_ORDER)) && request_mem_region(r->start, MPSC_REG_BLOCK_SIZE, "mpsc_regs")) { pi->mpsc_base = ioremap(r->start, MPSC_REG_BLOCK_SIZE); pi->mpsc_base_p = r->start; } else { mpsc_resource_err("MPSC base"); goto err; } if ((r = platform_get_resource(pd, IORESOURCE_MEM, MPSC_SDMA_BASE_ORDER)) && request_mem_region(r->start, MPSC_SDMA_REG_BLOCK_SIZE, "sdma_regs")) { pi->sdma_base = ioremap(r->start,MPSC_SDMA_REG_BLOCK_SIZE); pi->sdma_base_p = r->start; } else { mpsc_resource_err("SDMA base"); if (pi->mpsc_base) { iounmap(pi->mpsc_base); pi->mpsc_base = NULL; } goto err; } if ((r = platform_get_resource(pd,IORESOURCE_MEM,MPSC_BRG_BASE_ORDER)) && request_mem_region(r->start, MPSC_BRG_REG_BLOCK_SIZE, "brg_regs")) { pi->brg_base = ioremap(r->start, MPSC_BRG_REG_BLOCK_SIZE); pi->brg_base_p = r->start; } else { mpsc_resource_err("BRG base"); if (pi->mpsc_base) { iounmap(pi->mpsc_base); pi->mpsc_base = NULL; } if (pi->sdma_base) { iounmap(pi->sdma_base); pi->sdma_base = NULL; } goto err; } return 0; err: return -ENOMEM; } static void mpsc_drv_unmap_regs(struct mpsc_port_info *pi) { if (!pi->mpsc_base) { 
iounmap(pi->mpsc_base); release_mem_region(pi->mpsc_base_p, MPSC_REG_BLOCK_SIZE); } if (!pi->sdma_base) { iounmap(pi->sdma_base); release_mem_region(pi->sdma_base_p, MPSC_SDMA_REG_BLOCK_SIZE); } if (!pi->brg_base) { iounmap(pi->brg_base); release_mem_region(pi->brg_base_p, MPSC_BRG_REG_BLOCK_SIZE); } pi->mpsc_base = NULL; pi->sdma_base = NULL; pi->brg_base = NULL; pi->mpsc_base_p = 0; pi->sdma_base_p = 0; pi->brg_base_p = 0; } static void mpsc_drv_get_platform_data(struct mpsc_port_info *pi, struct platform_device *pd, int num) { struct mpsc_pdata *pdata; pdata = dev_get_platdata(&pd->dev); pi->port.uartclk = pdata->brg_clk_freq; pi->port.iotype = UPIO_MEM; pi->port.line = num; pi->port.type = PORT_MPSC; pi->port.fifosize = MPSC_TXBE_SIZE; pi->port.membase = pi->mpsc_base; pi->port.mapbase = (ulong)pi->mpsc_base; pi->port.ops = &mpsc_pops; pi->mirror_regs = pdata->mirror_regs; pi->cache_mgmt = pdata->cache_mgmt; pi->brg_can_tune = pdata->brg_can_tune; pi->brg_clk_src = pdata->brg_clk_src; pi->mpsc_max_idle = pdata->max_idle; pi->default_baud = pdata->default_baud; pi->default_bits = pdata->default_bits; pi->default_parity = pdata->default_parity; pi->default_flow = pdata->default_flow; /* Initial values of mirrored regs */ pi->MPSC_CHR_1_m = pdata->chr_1_val; pi->MPSC_CHR_2_m = pdata->chr_2_val; pi->MPSC_CHR_10_m = pdata->chr_10_val; pi->MPSC_MPCR_m = pdata->mpcr_val; pi->BRG_BCR_m = pdata->bcr_val; pi->shared_regs = &mpsc_shared_regs; pi->port.irq = platform_get_irq(pd, 0); } static int mpsc_drv_probe(struct platform_device *dev) { struct mpsc_port_info *pi; int rc = -ENODEV; pr_debug("mpsc_drv_probe: Adding MPSC %d\n", dev->id); if (dev->id < MPSC_NUM_CTLRS) { pi = &mpsc_ports[dev->id]; if (!(rc = mpsc_drv_map_regs(pi, dev))) { mpsc_drv_get_platform_data(pi, dev, dev->id); pi->port.dev = &dev->dev; if (!(rc = mpsc_make_ready(pi))) { spin_lock_init(&pi->tx_lock); if (!(rc = uart_add_one_port(&mpsc_reg, &pi->port))) { rc = 0; } else { mpsc_release_port((struct 
uart_port *) pi); mpsc_drv_unmap_regs(pi); } } else { mpsc_drv_unmap_regs(pi); } } } return rc; } static int mpsc_drv_remove(struct platform_device *dev) { pr_debug("mpsc_drv_exit: Removing MPSC %d\n", dev->id); if (dev->id < MPSC_NUM_CTLRS) { uart_remove_one_port(&mpsc_reg, &mpsc_ports[dev->id].port); mpsc_release_port((struct uart_port *) &mpsc_ports[dev->id].port); mpsc_drv_unmap_regs(&mpsc_ports[dev->id]); return 0; } else { return -ENODEV; } } static struct platform_driver mpsc_driver = { .probe = mpsc_drv_probe, .remove = mpsc_drv_remove, .driver = { .name = MPSC_CTLR_NAME, }, }; static int __init mpsc_drv_init(void) { int rc; printk(KERN_INFO "Serial: MPSC driver\n"); memset(mpsc_ports, 0, sizeof(mpsc_ports)); memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs)); if (!(rc = uart_register_driver(&mpsc_reg))) { if (!(rc = platform_driver_register(&mpsc_shared_driver))) { if ((rc = platform_driver_register(&mpsc_driver))) { platform_driver_unregister(&mpsc_shared_driver); uart_unregister_driver(&mpsc_reg); } } else { uart_unregister_driver(&mpsc_reg); } } return rc; } static void __exit mpsc_drv_exit(void) { platform_driver_unregister(&mpsc_driver); platform_driver_unregister(&mpsc_shared_driver); uart_unregister_driver(&mpsc_reg); memset(mpsc_ports, 0, sizeof(mpsc_ports)); memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs)); } module_init(mpsc_drv_init); module_exit(mpsc_drv_exit); MODULE_AUTHOR("Mark A. Greer <mgreer@mvista.com>"); MODULE_DESCRIPTION("Generic Marvell MPSC serial/UART driver"); MODULE_VERSION(MPSC_VERSION); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV_MAJOR(MPSC_MAJOR); MODULE_ALIAS("platform:" MPSC_CTLR_NAME);
gpl-2.0
civato/SkyWalker
fs/udf/inode.c
944
62391
/* * inode.c * * PURPOSE * Inode handling routines for the OSTA-UDF(tm) filesystem. * * COPYRIGHT * This file is distributed under the terms of the GNU General Public * License (GPL). Copies of the GPL can be obtained from: * ftp://prep.ai.mit.edu/pub/gnu/GPL * Each contributing author retains all rights to their own work. * * (C) 1998 Dave Boynton * (C) 1998-2004 Ben Fennema * (C) 1999-2000 Stelias Computing Inc * * HISTORY * * 10/04/98 dgb Added rudimentary directory functions * 10/07/98 Fully working udf_block_map! It works! * 11/25/98 bmap altered to better support extents * 12/06/98 blf partition support in udf_iget, udf_block_map * and udf_read_inode * 12/12/98 rewrote udf_block_map to handle next extents and descs across * block boundaries (which is not actually allowed) * 12/20/98 added support for strategy 4096 * 03/07/99 rewrote udf_block_map (again) * New funcs, inode_bmap, udf_next_aext * 04/19/99 Support for writing device EA's for major/minor # */ #include "udfdecl.h" #include <linux/mm.h> #include <linux/module.h> #include <linux/pagemap.h> #include <linux/buffer_head.h> #include <linux/writeback.h> #include <linux/slab.h> #include <linux/crc-itu-t.h> #include "udf_i.h" #include "udf_sb.h" MODULE_AUTHOR("Ben Fennema"); MODULE_DESCRIPTION("Universal Disk Format Filesystem"); MODULE_LICENSE("GPL"); #define EXTENT_MERGE_SIZE 5 static mode_t udf_convert_permissions(struct fileEntry *); static int udf_update_inode(struct inode *, int); static void udf_fill_inode(struct inode *, struct buffer_head *); static int udf_sync_inode(struct inode *inode); static int udf_alloc_i_data(struct inode *inode, size_t size); static struct buffer_head *inode_getblk(struct inode *, sector_t, int *, sector_t *, int *); static int8_t udf_insert_aext(struct inode *, struct extent_position, struct kernel_lb_addr, uint32_t); static void udf_split_extents(struct inode *, int *, int, int, struct kernel_long_ad[EXTENT_MERGE_SIZE], int *); static void udf_prealloc_extents(struct 
inode *, int, int, struct kernel_long_ad[EXTENT_MERGE_SIZE], int *); static void udf_merge_extents(struct inode *, struct kernel_long_ad[EXTENT_MERGE_SIZE], int *); static void udf_update_extents(struct inode *, struct kernel_long_ad[EXTENT_MERGE_SIZE], int, int, struct extent_position *); static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int); void udf_evict_inode(struct inode *inode) { struct udf_inode_info *iinfo = UDF_I(inode); int want_delete = 0; if (!inode->i_nlink && !is_bad_inode(inode)) { want_delete = 1; udf_setsize(inode, 0); udf_update_inode(inode, IS_SYNC(inode)); } else truncate_inode_pages(&inode->i_data, 0); invalidate_inode_buffers(inode); end_writeback(inode); if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB && inode->i_size != iinfo->i_lenExtents) { printk(KERN_WARNING "UDF-fs (%s): Inode %lu (mode %o) has " "inode size %llu different from extent length %llu. " "Filesystem need not be standards compliant.\n", inode->i_sb->s_id, inode->i_ino, inode->i_mode, (unsigned long long)inode->i_size, (unsigned long long)iinfo->i_lenExtents); } kfree(iinfo->i_ext.i_data); iinfo->i_ext.i_data = NULL; if (want_delete) { udf_free_inode(inode); } } static int udf_writepage(struct page *page, struct writeback_control *wbc) { return block_write_full_page(page, udf_get_block, wbc); } static int udf_readpage(struct file *file, struct page *page) { return block_read_full_page(page, udf_get_block); } static int udf_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { int ret; ret = block_write_begin(mapping, pos, len, flags, pagep, udf_get_block); if (unlikely(ret)) { struct inode *inode = mapping->host; struct udf_inode_info *iinfo = UDF_I(inode); loff_t isize = inode->i_size; if (pos + len > isize) { truncate_pagecache(inode, pos + len, isize); if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { down_write(&iinfo->i_data_sem); 
udf_truncate_extents(inode); up_write(&iinfo->i_data_sem); } } } return ret; } static sector_t udf_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping, block, udf_get_block); } const struct address_space_operations udf_aops = { .readpage = udf_readpage, .writepage = udf_writepage, .write_begin = udf_write_begin, .write_end = generic_write_end, .bmap = udf_bmap, }; int udf_expand_file_adinicb(struct inode *inode) { struct page *page; char *kaddr; struct udf_inode_info *iinfo = UDF_I(inode); int err; struct writeback_control udf_wbc = { .sync_mode = WB_SYNC_NONE, .nr_to_write = 1, }; if (!iinfo->i_lenAlloc) { if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT; else iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG; /* from now on we have normal address_space methods */ inode->i_data.a_ops = &udf_aops; mark_inode_dirty(inode); return 0; } page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS); if (!page) return -ENOMEM; if (!PageUptodate(page)) { kaddr = kmap(page); memset(kaddr + iinfo->i_lenAlloc, 0x00, PAGE_CACHE_SIZE - iinfo->i_lenAlloc); memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, iinfo->i_lenAlloc); flush_dcache_page(page); SetPageUptodate(page); kunmap(page); } memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0x00, iinfo->i_lenAlloc); iinfo->i_lenAlloc = 0; if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT; else iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG; /* from now on we have normal address_space methods */ inode->i_data.a_ops = &udf_aops; err = inode->i_data.a_ops->writepage(page, &udf_wbc); if (err) { /* Restore everything back so that we don't lose data... 
*/ lock_page(page); kaddr = kmap(page); memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr, inode->i_size); kunmap(page); unlock_page(page); iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB; inode->i_data.a_ops = &udf_adinicb_aops; } page_cache_release(page); mark_inode_dirty(inode); return err; } struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block, int *err) { int newblock; struct buffer_head *dbh = NULL; struct kernel_lb_addr eloc; uint8_t alloctype; struct extent_position epos; struct udf_fileident_bh sfibh, dfibh; loff_t f_pos = udf_ext0_offset(inode); int size = udf_ext0_offset(inode) + inode->i_size; struct fileIdentDesc cfi, *sfi, *dfi; struct udf_inode_info *iinfo = UDF_I(inode); if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) alloctype = ICBTAG_FLAG_AD_SHORT; else alloctype = ICBTAG_FLAG_AD_LONG; if (!inode->i_size) { iinfo->i_alloc_type = alloctype; mark_inode_dirty(inode); return NULL; } /* alloc block, and copy data to it */ *block = udf_new_block(inode->i_sb, inode, iinfo->i_location.partitionReferenceNum, iinfo->i_location.logicalBlockNum, err); if (!(*block)) return NULL; newblock = udf_get_pblock(inode->i_sb, *block, iinfo->i_location.partitionReferenceNum, 0); if (!newblock) return NULL; dbh = udf_tgetblk(inode->i_sb, newblock); if (!dbh) return NULL; lock_buffer(dbh); memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize); set_buffer_uptodate(dbh); unlock_buffer(dbh); mark_buffer_dirty_inode(dbh, inode); sfibh.soffset = sfibh.eoffset = f_pos & (inode->i_sb->s_blocksize - 1); sfibh.sbh = sfibh.ebh = NULL; dfibh.soffset = dfibh.eoffset = 0; dfibh.sbh = dfibh.ebh = dbh; while (f_pos < size) { iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB; sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL); if (!sfi) { brelse(dbh); return NULL; } iinfo->i_alloc_type = alloctype; sfi->descTag.tagLocation = cpu_to_le32(*block); dfibh.soffset = dfibh.eoffset; dfibh.eoffset += (sfibh.eoffset - sfibh.soffset); dfi = 
(struct fileIdentDesc *)(dbh->b_data + dfibh.soffset); if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse, sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse))) { iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB; brelse(dbh); return NULL; } } mark_buffer_dirty_inode(dbh, inode); memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0, iinfo->i_lenAlloc); iinfo->i_lenAlloc = 0; eloc.logicalBlockNum = *block; eloc.partitionReferenceNum = iinfo->i_location.partitionReferenceNum; iinfo->i_lenExtents = inode->i_size; epos.bh = NULL; epos.block = iinfo->i_location; epos.offset = udf_file_entry_alloc_offset(inode); udf_add_aext(inode, &epos, &eloc, inode->i_size, 0); /* UniqueID stuff */ brelse(epos.bh); mark_inode_dirty(inode); return dbh; } static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create) { int err, new; struct buffer_head *bh; sector_t phys = 0; struct udf_inode_info *iinfo; if (!create) { phys = udf_block_map(inode, block); if (phys) map_bh(bh_result, inode->i_sb, phys); return 0; } err = -EIO; new = 0; bh = NULL; iinfo = UDF_I(inode); down_write(&iinfo->i_data_sem); if (block == iinfo->i_next_alloc_block + 1) { iinfo->i_next_alloc_block++; iinfo->i_next_alloc_goal++; } err = 0; bh = inode_getblk(inode, block, &err, &phys, &new); BUG_ON(bh); if (err) goto abort; BUG_ON(!phys); if (new) set_buffer_new(bh_result); map_bh(bh_result, inode->i_sb, phys); abort: up_write(&iinfo->i_data_sem); return err; } static struct buffer_head *udf_getblk(struct inode *inode, long block, int create, int *err) { struct buffer_head *bh; struct buffer_head dummy; dummy.b_state = 0; dummy.b_blocknr = -1000; *err = udf_get_block(inode, block, &dummy, create); if (!*err && buffer_mapped(&dummy)) { bh = sb_getblk(inode->i_sb, dummy.b_blocknr); if (buffer_new(&dummy)) { lock_buffer(bh); memset(bh->b_data, 0x00, inode->i_sb->s_blocksize); set_buffer_uptodate(bh); unlock_buffer(bh); mark_buffer_dirty_inode(bh, inode); } return bh; } return NULL; } /* 
Extend the file by 'blocks' blocks, return the number of extents added */ static int udf_do_extend_file(struct inode *inode, struct extent_position *last_pos, struct kernel_long_ad *last_ext, sector_t blocks) { sector_t add; int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK); struct super_block *sb = inode->i_sb; struct kernel_lb_addr prealloc_loc = {}; int prealloc_len = 0; struct udf_inode_info *iinfo; int err; /* The previous extent is fake and we should not extend by anything * - there's nothing to do... */ if (!blocks && fake) return 0; iinfo = UDF_I(inode); /* Round the last extent up to a multiple of block size */ if (last_ext->extLength & (sb->s_blocksize - 1)) { last_ext->extLength = (last_ext->extLength & UDF_EXTENT_FLAG_MASK) | (((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) + sb->s_blocksize - 1) & ~(sb->s_blocksize - 1)); iinfo->i_lenExtents = (iinfo->i_lenExtents + sb->s_blocksize - 1) & ~(sb->s_blocksize - 1); } /* Last extent are just preallocated blocks? */ if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_ALLOCATED) { /* Save the extent so that we can reattach it to the end */ prealloc_loc = last_ext->extLocation; prealloc_len = last_ext->extLength; /* Mark the extent as a hole */ last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | (last_ext->extLength & UDF_EXTENT_LENGTH_MASK); last_ext->extLocation.logicalBlockNum = 0; last_ext->extLocation.partitionReferenceNum = 0; } /* Can we merge with the previous extent? 
*/ if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_NOT_ALLOCATED) { add = ((1 << 30) - sb->s_blocksize - (last_ext->extLength & UDF_EXTENT_LENGTH_MASK)) >> sb->s_blocksize_bits; if (add > blocks) add = blocks; blocks -= add; last_ext->extLength += add << sb->s_blocksize_bits; } if (fake) { udf_add_aext(inode, last_pos, &last_ext->extLocation, last_ext->extLength, 1); count++; } else udf_write_aext(inode, last_pos, &last_ext->extLocation, last_ext->extLength, 1); /* Managed to do everything necessary? */ if (!blocks) goto out; /* All further extents will be NOT_RECORDED_NOT_ALLOCATED */ last_ext->extLocation.logicalBlockNum = 0; last_ext->extLocation.partitionReferenceNum = 0; add = (1 << (30-sb->s_blocksize_bits)) - 1; last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | (add << sb->s_blocksize_bits); /* Create enough extents to cover the whole hole */ while (blocks > add) { blocks -= add; err = udf_add_aext(inode, last_pos, &last_ext->extLocation, last_ext->extLength, 1); if (err) return err; count++; } if (blocks) { last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | (blocks << sb->s_blocksize_bits); err = udf_add_aext(inode, last_pos, &last_ext->extLocation, last_ext->extLength, 1); if (err) return err; count++; } out: /* Do we have some preallocated blocks saved? */ if (prealloc_len) { err = udf_add_aext(inode, last_pos, &prealloc_loc, prealloc_len, 1); if (err) return err; last_ext->extLocation = prealloc_loc; last_ext->extLength = prealloc_len; count++; } /* last_pos should point to the last written extent... 
*/ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) last_pos->offset -= sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) last_pos->offset -= sizeof(struct long_ad); else return -EIO; return count; } static int udf_extend_file(struct inode *inode, loff_t newsize) { struct extent_position epos; struct kernel_lb_addr eloc; uint32_t elen; int8_t etype; struct super_block *sb = inode->i_sb; sector_t first_block = newsize >> sb->s_blocksize_bits, offset; int adsize; struct udf_inode_info *iinfo = UDF_I(inode); struct kernel_long_ad extent; int err; if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) adsize = sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) adsize = sizeof(struct long_ad); else BUG(); etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset); /* File has extent covering the new size (could happen when extending * inside a block)? */ if (etype != -1) return 0; if (newsize & (sb->s_blocksize - 1)) offset++; /* Extended file just to the boundary of the last file block? */ if (offset == 0) return 0; /* Truncate is extending the file by 'offset' blocks */ if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) || (epos.bh && epos.offset == sizeof(struct allocExtDesc))) { /* File has no extents at all or has empty last * indirect extent! Create a fake extent... 
*/ extent.extLocation.logicalBlockNum = 0; extent.extLocation.partitionReferenceNum = 0; extent.extLength = EXT_NOT_RECORDED_NOT_ALLOCATED; } else { epos.offset -= adsize; etype = udf_next_aext(inode, &epos, &extent.extLocation, &extent.extLength, 0); extent.extLength |= etype << 30; } err = udf_do_extend_file(inode, &epos, &extent, offset); if (err < 0) goto out; err = 0; iinfo->i_lenExtents = newsize; out: brelse(epos.bh); return err; } static struct buffer_head *inode_getblk(struct inode *inode, sector_t block, int *err, sector_t *phys, int *new) { static sector_t last_block; struct buffer_head *result = NULL; struct kernel_long_ad laarr[EXTENT_MERGE_SIZE]; struct extent_position prev_epos, cur_epos, next_epos; int count = 0, startnum = 0, endnum = 0; uint32_t elen = 0, tmpelen; struct kernel_lb_addr eloc, tmpeloc; int c = 1; loff_t lbcount = 0, b_off = 0; uint32_t newblocknum, newblock; sector_t offset = 0; int8_t etype; struct udf_inode_info *iinfo = UDF_I(inode); int goal = 0, pgoal = iinfo->i_location.logicalBlockNum; int lastblock = 0; prev_epos.offset = udf_file_entry_alloc_offset(inode); prev_epos.block = iinfo->i_location; prev_epos.bh = NULL; cur_epos = next_epos = prev_epos; b_off = (loff_t)block << inode->i_sb->s_blocksize_bits; /* find the extent which contains the block we are looking for. 
alternate between laarr[0] and laarr[1] for locations of the current extent, and the previous extent */ do { if (prev_epos.bh != cur_epos.bh) { brelse(prev_epos.bh); get_bh(cur_epos.bh); prev_epos.bh = cur_epos.bh; } if (cur_epos.bh != next_epos.bh) { brelse(cur_epos.bh); get_bh(next_epos.bh); cur_epos.bh = next_epos.bh; } lbcount += elen; prev_epos.block = cur_epos.block; cur_epos.block = next_epos.block; prev_epos.offset = cur_epos.offset; cur_epos.offset = next_epos.offset; etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1); if (etype == -1) break; c = !c; laarr[c].extLength = (etype << 30) | elen; laarr[c].extLocation = eloc; if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) pgoal = eloc.logicalBlockNum + ((elen + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits); count++; } while (lbcount + elen <= b_off); b_off -= lbcount; offset = b_off >> inode->i_sb->s_blocksize_bits; /* * Move prev_epos and cur_epos into indirect extent if we are at * the pointer to it */ udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, 0); udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0); /* if the extent is allocated and recorded, return the block if the extent is not a multiple of the blocksize, round up */ if (etype == (EXT_RECORDED_ALLOCATED >> 30)) { if (elen & (inode->i_sb->s_blocksize - 1)) { elen = EXT_RECORDED_ALLOCATED | ((elen + inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize - 1)); udf_write_aext(inode, &cur_epos, &eloc, elen, 1); } brelse(prev_epos.bh); brelse(cur_epos.bh); brelse(next_epos.bh); newblock = udf_get_lb_pblock(inode->i_sb, &eloc, offset); *phys = newblock; return NULL; } last_block = block; /* Are we beyond EOF? 
*/ if (etype == -1) { int ret; if (count) { if (c) laarr[0] = laarr[1]; startnum = 1; } else { /* Create a fake extent when there's not one */ memset(&laarr[0].extLocation, 0x00, sizeof(struct kernel_lb_addr)); laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED; /* Will udf_do_extend_file() create real extent from a fake one? */ startnum = (offset > 0); } /* Create extents for the hole between EOF and offset */ ret = udf_do_extend_file(inode, &prev_epos, laarr, offset); if (ret < 0) { brelse(prev_epos.bh); brelse(cur_epos.bh); brelse(next_epos.bh); *err = ret; return NULL; } c = 0; offset = 0; count += ret; /* We are not covered by a preallocated extent? */ if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) != EXT_NOT_RECORDED_ALLOCATED) { /* Is there any real extent? - otherwise we overwrite * the fake one... */ if (count) c = !c; laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | inode->i_sb->s_blocksize; memset(&laarr[c].extLocation, 0x00, sizeof(struct kernel_lb_addr)); count++; endnum++; } endnum = c + 1; lastblock = 1; } else { endnum = startnum = ((count > 2) ? 2 : count); /* if the current extent is in position 0, swap it with the previous */ if (!c && count != 1) { laarr[2] = laarr[0]; laarr[0] = laarr[1]; laarr[1] = laarr[2]; c = 1; } /* if the current block is located in an extent, read the next extent */ etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0); if (etype != -1) { laarr[c + 1].extLength = (etype << 30) | elen; laarr[c + 1].extLocation = eloc; count++; startnum++; endnum++; } else lastblock = 1; } /* if the current extent is not recorded but allocated, get the * block in the extent corresponding to the requested block */ if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) newblocknum = laarr[c].extLocation.logicalBlockNum + offset; else { /* otherwise, allocate a new block */ if (iinfo->i_next_alloc_block == block) goal = iinfo->i_next_alloc_goal; if (!goal) { if (!(goal = pgoal)) /* XXX: what was intended here? 
*/ goal = iinfo->i_location.logicalBlockNum + 1; } newblocknum = udf_new_block(inode->i_sb, inode, iinfo->i_location.partitionReferenceNum, goal, err); if (!newblocknum) { brelse(prev_epos.bh); *err = -ENOSPC; return NULL; } iinfo->i_lenExtents += inode->i_sb->s_blocksize; } /* if the extent the requsted block is located in contains multiple * blocks, split the extent into at most three extents. blocks prior * to requested block, requested block, and blocks after requested * block */ udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum); #ifdef UDF_PREALLOCATE /* We preallocate blocks only for regular files. It also makes sense * for directories but there's a problem when to drop the * preallocation. We might use some delayed work for that but I feel * it's overengineering for a filesystem like UDF. */ if (S_ISREG(inode->i_mode)) udf_prealloc_extents(inode, c, lastblock, laarr, &endnum); #endif /* merge any continuous blocks in laarr */ udf_merge_extents(inode, laarr, &endnum); /* write back the new extents, inserting new extents if the new number * of extents is greater than the old number, and deleting extents if * the new number of extents is less than the old number */ udf_update_extents(inode, laarr, startnum, endnum, &prev_epos); brelse(prev_epos.bh); newblock = udf_get_pblock(inode->i_sb, newblocknum, iinfo->i_location.partitionReferenceNum, 0); if (!newblock) return NULL; *phys = newblock; *err = 0; *new = 1; iinfo->i_next_alloc_block = block; iinfo->i_next_alloc_goal = newblocknum; inode->i_ctime = current_fs_time(inode->i_sb); if (IS_SYNC(inode)) udf_sync_inode(inode); else mark_inode_dirty(inode); return result; } static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum, struct kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum) { unsigned long blocksize = inode->i_sb->s_blocksize; unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits; if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) 
|| (laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) { int curr = *c; int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) + blocksize - 1) >> blocksize_bits; int8_t etype = (laarr[curr].extLength >> 30); if (blen == 1) ; else if (!offset || blen == offset + 1) { laarr[curr + 2] = laarr[curr + 1]; laarr[curr + 1] = laarr[curr]; } else { laarr[curr + 3] = laarr[curr + 1]; laarr[curr + 2] = laarr[curr + 1] = laarr[curr]; } if (offset) { if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { udf_free_blocks(inode->i_sb, inode, &laarr[curr].extLocation, 0, offset); laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | (offset << blocksize_bits); laarr[curr].extLocation.logicalBlockNum = 0; laarr[curr].extLocation. partitionReferenceNum = 0; } else laarr[curr].extLength = (etype << 30) | (offset << blocksize_bits); curr++; (*c)++; (*endnum)++; } laarr[curr].extLocation.logicalBlockNum = newblocknum; if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) laarr[curr].extLocation.partitionReferenceNum = UDF_I(inode)->i_location.partitionReferenceNum; laarr[curr].extLength = EXT_RECORDED_ALLOCATED | blocksize; curr++; if (blen != offset + 1) { if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) laarr[curr].extLocation.logicalBlockNum += offset + 1; laarr[curr].extLength = (etype << 30) | ((blen - (offset + 1)) << blocksize_bits); curr++; (*endnum)++; } } } static void udf_prealloc_extents(struct inode *inode, int c, int lastblock, struct kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum) { int start, length = 0, currlength = 0, i; if (*endnum >= (c + 1)) { if (!lastblock) return; else start = c; } else { if ((laarr[c + 1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { start = c + 1; length = currlength = (((laarr[c + 1].extLength & UDF_EXTENT_LENGTH_MASK) + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits); } else start = c; } for (i = start + 1; i <= *endnum; i++) { if (i == *endnum) { if (lastblock) length += 
UDF_DEFAULT_PREALLOC_BLOCKS; } else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) { length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits); } else break; } if (length) { int next = laarr[start].extLocation.logicalBlockNum + (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits); int numalloc = udf_prealloc_blocks(inode->i_sb, inode, laarr[start].extLocation.partitionReferenceNum, next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length : UDF_DEFAULT_PREALLOC_BLOCKS) - currlength); if (numalloc) { if (start == (c + 1)) laarr[start].extLength += (numalloc << inode->i_sb->s_blocksize_bits); else { memmove(&laarr[c + 2], &laarr[c + 1], sizeof(struct long_ad) * (*endnum - (c + 1))); (*endnum)++; laarr[c + 1].extLocation.logicalBlockNum = next; laarr[c + 1].extLocation.partitionReferenceNum = laarr[c].extLocation. partitionReferenceNum; laarr[c + 1].extLength = EXT_NOT_RECORDED_ALLOCATED | (numalloc << inode->i_sb->s_blocksize_bits); start = c + 1; } for (i = start + 1; numalloc && i < *endnum; i++) { int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits; if (elen > numalloc) { laarr[i].extLength -= (numalloc << inode->i_sb->s_blocksize_bits); numalloc = 0; } else { numalloc -= elen; if (*endnum > (i + 1)) memmove(&laarr[i], &laarr[i + 1], sizeof(struct long_ad) * (*endnum - (i + 1))); i--; (*endnum)--; } } UDF_I(inode)->i_lenExtents += numalloc << inode->i_sb->s_blocksize_bits; } } } static void udf_merge_extents(struct inode *inode, struct kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum) { int i; unsigned long blocksize = inode->i_sb->s_blocksize; unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits; for (i = 0; i < (*endnum - 1); i++) { struct kernel_long_ad *li /*l[i]*/ = &laarr[i]; struct kernel_long_ad *lip1 /*l[i plus 1]*/ = 
&laarr[i + 1]; if (((li->extLength >> 30) == (lip1->extLength >> 30)) && (((li->extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) || ((lip1->extLocation.logicalBlockNum - li->extLocation.logicalBlockNum) == (((li->extLength & UDF_EXTENT_LENGTH_MASK) + blocksize - 1) >> blocksize_bits)))) { if (((li->extLength & UDF_EXTENT_LENGTH_MASK) + (lip1->extLength & UDF_EXTENT_LENGTH_MASK) + blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) { lip1->extLength = (lip1->extLength - (li->extLength & UDF_EXTENT_LENGTH_MASK) + UDF_EXTENT_LENGTH_MASK) & ~(blocksize - 1); li->extLength = (li->extLength & UDF_EXTENT_FLAG_MASK) + (UDF_EXTENT_LENGTH_MASK + 1) - blocksize; lip1->extLocation.logicalBlockNum = li->extLocation.logicalBlockNum + ((li->extLength & UDF_EXTENT_LENGTH_MASK) >> blocksize_bits); } else { li->extLength = lip1->extLength + (((li->extLength & UDF_EXTENT_LENGTH_MASK) + blocksize - 1) & ~(blocksize - 1)); if (*endnum > (i + 2)) memmove(&laarr[i + 1], &laarr[i + 2], sizeof(struct long_ad) * (*endnum - (i + 2))); i--; (*endnum)--; } } else if (((li->extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) && ((lip1->extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) { udf_free_blocks(inode->i_sb, inode, &li->extLocation, 0, ((li->extLength & UDF_EXTENT_LENGTH_MASK) + blocksize - 1) >> blocksize_bits); li->extLocation.logicalBlockNum = 0; li->extLocation.partitionReferenceNum = 0; if (((li->extLength & UDF_EXTENT_LENGTH_MASK) + (lip1->extLength & UDF_EXTENT_LENGTH_MASK) + blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) { lip1->extLength = (lip1->extLength - (li->extLength & UDF_EXTENT_LENGTH_MASK) + UDF_EXTENT_LENGTH_MASK) & ~(blocksize - 1); li->extLength = (li->extLength & UDF_EXTENT_FLAG_MASK) + (UDF_EXTENT_LENGTH_MASK + 1) - blocksize; } else { li->extLength = lip1->extLength + (((li->extLength & UDF_EXTENT_LENGTH_MASK) + blocksize - 1) & ~(blocksize - 1)); if (*endnum > (i + 2)) memmove(&laarr[i + 1], &laarr[i + 2], sizeof(struct long_ad) * (*endnum - (i + 2))); 
			/* Merged into li: drop lip1 and re-examine the
			 * merged entry against its new right neighbour. */
			i--;
			(*endnum)--;
		}
	} else if ((li->extLength >> 30) ==
		   (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
		/* Allocated-but-unrecorded extent that could not be merged:
		 * release its blocks and demote it to a plain hole. */
		udf_free_blocks(inode->i_sb, inode, &li->extLocation, 0,
				((li->extLength & UDF_EXTENT_LENGTH_MASK) +
				 blocksize - 1) >> blocksize_bits);
		li->extLocation.logicalBlockNum = 0;
		li->extLocation.partitionReferenceNum = 0;
		li->extLength = (li->extLength & UDF_EXTENT_LENGTH_MASK) |
				EXT_NOT_RECORDED_NOT_ALLOCATED;
	}
	}
}

/*
 * Write the in-memory extent array @laarr back to the on-disk allocation
 * descriptors at @epos.  If the new extent count (@endnum) is smaller than
 * the old one (@startnum) the surplus descriptors are deleted first; if it
 * is larger, the extra leading entries are inserted.  The remaining entries
 * are then overwritten in place.
 */
static void udf_update_extents(struct inode *inode,
			       struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
			       int startnum, int endnum,
			       struct extent_position *epos)
{
	int start = 0, i;
	struct kernel_lb_addr tmploc;
	uint32_t tmplen;

	if (startnum > endnum) {
		/* Fewer extents than before: drop the surplus descriptors. */
		for (i = 0; i < (startnum - endnum); i++)
			udf_delete_aext(inode, *epos, laarr[i].extLocation,
					laarr[i].extLength);
	} else if (startnum < endnum) {
		/* More extents than before: insert the new leading ones,
		 * advancing @epos past each inserted slot. */
		for (i = 0; i < (endnum - startnum); i++) {
			udf_insert_aext(inode, *epos, laarr[i].extLocation,
					laarr[i].extLength);
			udf_next_aext(inode, epos, &laarr[i].extLocation,
				      &laarr[i].extLength, 1);
			start++;
		}
	}

	for (i = start; i < endnum; i++) {
		/* Step to the next on-disk slot, then overwrite it. */
		udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
		udf_write_aext(inode, epos, &laarr[i].extLocation,
			       laarr[i].extLength, 1);
	}
}

/*
 * Return an up-to-date buffer for logical @block of @inode, optionally
 * creating the block when @create is set.  On failure NULL is returned and
 * *@err is set.
 */
struct buffer_head *udf_bread(struct inode *inode, int block,
			      int create, int *err)
{
	struct buffer_head *bh = NULL;

	bh = udf_getblk(inode, block, create, err);
	if (!bh)
		return NULL;

	if (buffer_uptodate(bh))
		return bh;

	/* Not up to date in the cache: issue a read and wait for it. */
	ll_rw_block(READ, 1, &bh);
	wait_on_buffer(bh);

	if (buffer_uptodate(bh))
		return bh;

	brelse(bh);
	*err = -EIO;
	return NULL;
}

/*
 * Change the size of @inode to @newsize.
 * (Head of udf_setsize(); the function body continues in the next chunk.)
 */
int udf_setsize(struct inode *inode, loff_t newsize)
{
	int err;
	struct udf_inode_info *iinfo;
	int bsize = 1 << inode->i_blkbits;

	/* Only regular files, directories and symlinks may be resized. */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	iinfo = UDF_I(inode);
	if (newsize > inode->i_size) {
		down_write(&iinfo->i_data_sem);
		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
			/* In-ICB data that would no longer fit inside the
			 * file entry must be expanded to a real extent. */
			if (bsize <
			    (udf_file_entry_alloc_offset(inode) + newsize)) {
err = udf_expand_file_adinicb(inode); if (err) { up_write(&iinfo->i_data_sem); return err; } } else iinfo->i_lenAlloc = newsize; } err = udf_extend_file(inode, newsize); if (err) { up_write(&iinfo->i_data_sem); return err; } truncate_setsize(inode, newsize); up_write(&iinfo->i_data_sem); } else { if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { down_write(&iinfo->i_data_sem); memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr + newsize, 0x00, bsize - newsize - udf_file_entry_alloc_offset(inode)); iinfo->i_lenAlloc = newsize; truncate_setsize(inode, newsize); up_write(&iinfo->i_data_sem); goto update_time; } err = block_truncate_page(inode->i_mapping, newsize, udf_get_block); if (err) return err; down_write(&iinfo->i_data_sem); truncate_setsize(inode, newsize); udf_truncate_extents(inode); up_write(&iinfo->i_data_sem); } update_time: inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb); if (IS_SYNC(inode)) udf_sync_inode(inode); else mark_inode_dirty(inode); return 0; } static void __udf_read_inode(struct inode *inode) { struct buffer_head *bh = NULL; struct fileEntry *fe; uint16_t ident; struct udf_inode_info *iinfo = UDF_I(inode); /* * Set defaults, but the inode is still incomplete! 
* Note: get_new_inode() sets the following on a new inode: * i_sb = sb * i_no = ino * i_flags = sb->s_flags * i_state = 0 * clean_inode(): zero fills and sets * i_count = 1 * i_nlink = 1 * i_op = NULL; */ bh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 0, &ident); if (!bh) { printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n", inode->i_ino); make_bad_inode(inode); return; } if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE && ident != TAG_IDENT_USE) { printk(KERN_ERR "udf: udf_read_inode(ino %ld) " "failed ident=%d\n", inode->i_ino, ident); brelse(bh); make_bad_inode(inode); return; } fe = (struct fileEntry *)bh->b_data; if (fe->icbTag.strategyType == cpu_to_le16(4096)) { struct buffer_head *ibh; ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1, &ident); if (ident == TAG_IDENT_IE && ibh) { struct buffer_head *nbh = NULL; struct kernel_lb_addr loc; struct indirectEntry *ie; ie = (struct indirectEntry *)ibh->b_data; loc = lelb_to_cpu(ie->indirectICB.extLocation); if (ie->indirectICB.extLength && (nbh = udf_read_ptagged(inode->i_sb, &loc, 0, &ident))) { if (ident == TAG_IDENT_FE || ident == TAG_IDENT_EFE) { memcpy(&iinfo->i_location, &loc, sizeof(struct kernel_lb_addr)); brelse(bh); brelse(ibh); brelse(nbh); __udf_read_inode(inode); return; } brelse(nbh); } } brelse(ibh); } else if (fe->icbTag.strategyType != cpu_to_le16(4)) { printk(KERN_ERR "udf: unsupported strategy type: %d\n", le16_to_cpu(fe->icbTag.strategyType)); brelse(bh); make_bad_inode(inode); return; } udf_fill_inode(inode, bh); brelse(bh); } static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) { struct fileEntry *fe; struct extendedFileEntry *efe; int offset; struct udf_sb_info *sbi = UDF_SB(inode->i_sb); struct udf_inode_info *iinfo = UDF_I(inode); fe = (struct fileEntry *)bh->b_data; efe = (struct extendedFileEntry *)bh->b_data; if (fe->icbTag.strategyType == cpu_to_le16(4)) iinfo->i_strat4096 = 0; else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */ 
iinfo->i_strat4096 = 1; iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK; iinfo->i_unique = 0; iinfo->i_lenEAttr = 0; iinfo->i_lenExtents = 0; iinfo->i_lenAlloc = 0; iinfo->i_next_alloc_block = 0; iinfo->i_next_alloc_goal = 0; if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) { iinfo->i_efe = 1; iinfo->i_use = 0; if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry))) { make_bad_inode(inode); return; } memcpy(iinfo->i_ext.i_data, bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry)); } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) { iinfo->i_efe = 0; iinfo->i_use = 0; if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct fileEntry))) { make_bad_inode(inode); return; } memcpy(iinfo->i_ext.i_data, bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry)); } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) { iinfo->i_efe = 0; iinfo->i_use = 1; iinfo->i_lenAlloc = le32_to_cpu( ((struct unallocSpaceEntry *)bh->b_data)-> lengthAllocDescs); if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry))) { make_bad_inode(inode); return; } memcpy(iinfo->i_ext.i_data, bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry)); return; } read_lock(&sbi->s_cred_lock); inode->i_uid = le32_to_cpu(fe->uid); if (inode->i_uid == -1 || UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) || UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET)) inode->i_uid = UDF_SB(inode->i_sb)->s_uid; inode->i_gid = le32_to_cpu(fe->gid); if (inode->i_gid == -1 || UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_IGNORE) || UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET)) inode->i_gid = UDF_SB(inode->i_sb)->s_gid; if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY && sbi->s_fmode != UDF_INVALID_MODE) inode->i_mode = sbi->s_fmode; else if 
(fe->icbTag.fileType == ICBTAG_FILE_TYPE_DIRECTORY && sbi->s_dmode != UDF_INVALID_MODE) inode->i_mode = sbi->s_dmode; else inode->i_mode = udf_convert_permissions(fe); inode->i_mode &= ~sbi->s_umask; read_unlock(&sbi->s_cred_lock); inode->i_nlink = le16_to_cpu(fe->fileLinkCount); if (!inode->i_nlink) inode->i_nlink = 1; inode->i_size = le64_to_cpu(fe->informationLength); iinfo->i_lenExtents = inode->i_size; if (iinfo->i_efe == 0) { inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) << (inode->i_sb->s_blocksize_bits - 9); if (!udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime)) inode->i_atime = sbi->s_record_time; if (!udf_disk_stamp_to_time(&inode->i_mtime, fe->modificationTime)) inode->i_mtime = sbi->s_record_time; if (!udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime)) inode->i_ctime = sbi->s_record_time; iinfo->i_unique = le64_to_cpu(fe->uniqueID); iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr); iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs); offset = sizeof(struct fileEntry) + iinfo->i_lenEAttr; } else { inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) << (inode->i_sb->s_blocksize_bits - 9); if (!udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime)) inode->i_atime = sbi->s_record_time; if (!udf_disk_stamp_to_time(&inode->i_mtime, efe->modificationTime)) inode->i_mtime = sbi->s_record_time; if (!udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime)) iinfo->i_crtime = sbi->s_record_time; if (!udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime)) inode->i_ctime = sbi->s_record_time; iinfo->i_unique = le64_to_cpu(efe->uniqueID); iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr); iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs); offset = sizeof(struct extendedFileEntry) + iinfo->i_lenEAttr; } switch (fe->icbTag.fileType) { case ICBTAG_FILE_TYPE_DIRECTORY: inode->i_op = &udf_dir_inode_operations; inode->i_fop = &udf_dir_operations; inode->i_mode |= S_IFDIR; inc_nlink(inode); break; case 
ICBTAG_FILE_TYPE_REALTIME: case ICBTAG_FILE_TYPE_REGULAR: case ICBTAG_FILE_TYPE_UNDEF: case ICBTAG_FILE_TYPE_VAT20: if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) inode->i_data.a_ops = &udf_adinicb_aops; else inode->i_data.a_ops = &udf_aops; inode->i_op = &udf_file_inode_operations; inode->i_fop = &udf_file_operations; inode->i_mode |= S_IFREG; break; case ICBTAG_FILE_TYPE_BLOCK: inode->i_mode |= S_IFBLK; break; case ICBTAG_FILE_TYPE_CHAR: inode->i_mode |= S_IFCHR; break; case ICBTAG_FILE_TYPE_FIFO: init_special_inode(inode, inode->i_mode | S_IFIFO, 0); break; case ICBTAG_FILE_TYPE_SOCKET: init_special_inode(inode, inode->i_mode | S_IFSOCK, 0); break; case ICBTAG_FILE_TYPE_SYMLINK: inode->i_data.a_ops = &udf_symlink_aops; inode->i_op = &udf_symlink_inode_operations; inode->i_mode = S_IFLNK | S_IRWXUGO; break; case ICBTAG_FILE_TYPE_MAIN: udf_debug("METADATA FILE-----\n"); break; case ICBTAG_FILE_TYPE_MIRROR: udf_debug("METADATA MIRROR FILE-----\n"); break; case ICBTAG_FILE_TYPE_BITMAP: udf_debug("METADATA BITMAP FILE-----\n"); break; default: printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown " "file type=%d\n", inode->i_ino, fe->icbTag.fileType); make_bad_inode(inode); return; } if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { struct deviceSpec *dsea = (struct deviceSpec *)udf_get_extendedattr(inode, 12, 1); if (dsea) { init_special_inode(inode, inode->i_mode, MKDEV(le32_to_cpu(dsea->majorDeviceIdent), le32_to_cpu(dsea->minorDeviceIdent))); /* Developer ID ??? 
 */
		} else
			/* Device special file without a device-spec EA:
			 * we cannot recover the device number. */
			make_bad_inode(inode);
	}
}

/*
 * Allocate @size bytes for the inode's in-core allocation-descriptor /
 * in-ICB data area (iinfo->i_ext.i_data).  Returns 0 on success or
 * -ENOMEM on allocation failure.
 */
static int udf_alloc_i_data(struct inode *inode, size_t size)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	iinfo->i_ext.i_data = kmalloc(size, GFP_KERNEL);

	if (!iinfo->i_ext.i_data) {
		printk(KERN_ERR "udf:udf_alloc_i_data (ino %ld) "
		       "no free memory\n", inode->i_ino);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Translate the on-disk UDF permission bits and ICB flags of @fe into a
 * POSIX mode.  UDF keeps the other/group/user triplets two bits further
 * apart than POSIX does, hence the >> 2 and >> 4 shifts.
 */
static mode_t udf_convert_permissions(struct fileEntry *fe)
{
	mode_t mode;
	uint32_t permissions;
	uint32_t flags;

	permissions = le32_to_cpu(fe->permissions);
	flags = le16_to_cpu(fe->icbTag.flags);

	mode =	((permissions) & S_IRWXO) |
		((permissions >> 2) & S_IRWXG) |
		((permissions >> 4) & S_IRWXU) |
		((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
		((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
		((flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);

	return mode;
}

/* Writeback entry point: write the on-disk inode, synchronously only for
 * WB_SYNC_ALL writeback. */
int udf_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

/* Synchronously write the on-disk inode. */
static int udf_sync_inode(struct inode *inode)
{
	return udf_update_inode(inode, 1);
}

/*
 * Write the in-core inode back to its on-disk (extended) file entry or
 * unallocated-space entry, optionally syncing the buffer when @do_sync.
 * (Head of udf_update_inode(); the function continues in the next chunk.)
 */
static int udf_update_inode(struct inode *inode, int do_sync)
{
	struct buffer_head *bh = NULL;
	struct fileEntry *fe;
	struct extendedFileEntry *efe;
	uint32_t udfperms;
	uint16_t icbflags;
	uint16_t crclen;
	int err = 0;
	struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
	struct udf_inode_info *iinfo = UDF_I(inode);

	bh = udf_tgetblk(inode->i_sb,
			udf_get_lb_pblock(inode->i_sb, &iinfo->i_location, 0));
	if (!bh) {
		udf_debug("getblk failure\n");
		return -ENOMEM;
	}

	/* Build the descriptor from scratch in the (locked) buffer. */
	lock_buffer(bh);
	memset(bh->b_data, 0, inode->i_sb->s_blocksize);
	fe = (struct fileEntry *)bh->b_data;
	efe = (struct extendedFileEntry *)bh->b_data;

	if (iinfo->i_use) {
		/* Unallocated-space entry: copy the descriptor payload and
		 * stamp its tag, then skip all the file-entry fields. */
		struct unallocSpaceEntry *use =
			(struct unallocSpaceEntry *)bh->b_data;

		use->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
		memcpy(bh->b_data + sizeof(struct unallocSpaceEntry),
		       iinfo->i_ext.i_data,
		       inode->i_sb->s_blocksize -
				sizeof(struct unallocSpaceEntry));
		use->descTag.tagIdent =
cpu_to_le16(TAG_IDENT_USE); use->descTag.tagLocation = cpu_to_le32(iinfo->i_location.logicalBlockNum); crclen = sizeof(struct unallocSpaceEntry) + iinfo->i_lenAlloc - sizeof(struct tag); use->descTag.descCRCLength = cpu_to_le16(crclen); use->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)use + sizeof(struct tag), crclen)); use->descTag.tagChecksum = udf_tag_checksum(&use->descTag); goto out; } if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET)) fe->uid = cpu_to_le32(-1); else fe->uid = cpu_to_le32(inode->i_uid); if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET)) fe->gid = cpu_to_le32(-1); else fe->gid = cpu_to_le32(inode->i_gid); udfperms = ((inode->i_mode & S_IRWXO)) | ((inode->i_mode & S_IRWXG) << 2) | ((inode->i_mode & S_IRWXU) << 4); udfperms |= (le32_to_cpu(fe->permissions) & (FE_PERM_O_DELETE | FE_PERM_O_CHATTR | FE_PERM_G_DELETE | FE_PERM_G_CHATTR | FE_PERM_U_DELETE | FE_PERM_U_CHATTR)); fe->permissions = cpu_to_le32(udfperms); if (S_ISDIR(inode->i_mode)) fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1); else fe->fileLinkCount = cpu_to_le16(inode->i_nlink); fe->informationLength = cpu_to_le64(inode->i_size); if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { struct regid *eid; struct deviceSpec *dsea = (struct deviceSpec *)udf_get_extendedattr(inode, 12, 1); if (!dsea) { dsea = (struct deviceSpec *) udf_add_extendedattr(inode, sizeof(struct deviceSpec) + sizeof(struct regid), 12, 0x3); dsea->attrType = cpu_to_le32(12); dsea->attrSubtype = 1; dsea->attrLength = cpu_to_le32( sizeof(struct deviceSpec) + sizeof(struct regid)); dsea->impUseLength = cpu_to_le32(sizeof(struct regid)); } eid = (struct regid *)dsea->impUse; memset(eid, 0, sizeof(struct regid)); strcpy(eid->ident, UDF_ID_DEVELOPER); eid->identSuffix[0] = UDF_OS_CLASS_UNIX; eid->identSuffix[1] = UDF_OS_ID_LINUX; dsea->majorDeviceIdent = cpu_to_le32(imajor(inode)); dsea->minorDeviceIdent = cpu_to_le32(iminor(inode)); } if (iinfo->i_efe == 0) { memcpy(bh->b_data + sizeof(struct 
fileEntry), iinfo->i_ext.i_data, inode->i_sb->s_blocksize - sizeof(struct fileEntry)); fe->logicalBlocksRecorded = cpu_to_le64( (inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >> (blocksize_bits - 9)); udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime); udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime); udf_time_to_disk_stamp(&fe->attrTime, inode->i_ctime); memset(&(fe->impIdent), 0, sizeof(struct regid)); strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER); fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; fe->uniqueID = cpu_to_le64(iinfo->i_unique); fe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr); fe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc); fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE); crclen = sizeof(struct fileEntry); } else { memcpy(bh->b_data + sizeof(struct extendedFileEntry), iinfo->i_ext.i_data, inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry)); efe->objectSize = cpu_to_le64(inode->i_size); efe->logicalBlocksRecorded = cpu_to_le64( (inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >> (blocksize_bits - 9)); if (iinfo->i_crtime.tv_sec > inode->i_atime.tv_sec || (iinfo->i_crtime.tv_sec == inode->i_atime.tv_sec && iinfo->i_crtime.tv_nsec > inode->i_atime.tv_nsec)) iinfo->i_crtime = inode->i_atime; if (iinfo->i_crtime.tv_sec > inode->i_mtime.tv_sec || (iinfo->i_crtime.tv_sec == inode->i_mtime.tv_sec && iinfo->i_crtime.tv_nsec > inode->i_mtime.tv_nsec)) iinfo->i_crtime = inode->i_mtime; if (iinfo->i_crtime.tv_sec > inode->i_ctime.tv_sec || (iinfo->i_crtime.tv_sec == inode->i_ctime.tv_sec && iinfo->i_crtime.tv_nsec > inode->i_ctime.tv_nsec)) iinfo->i_crtime = inode->i_ctime; udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime); udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime); udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime); udf_time_to_disk_stamp(&efe->attrTime, inode->i_ctime); memset(&(efe->impIdent), 0, sizeof(struct regid)); 
strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER); efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; efe->uniqueID = cpu_to_le64(iinfo->i_unique); efe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr); efe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc); efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE); crclen = sizeof(struct extendedFileEntry); } if (iinfo->i_strat4096) { fe->icbTag.strategyType = cpu_to_le16(4096); fe->icbTag.strategyParameter = cpu_to_le16(1); fe->icbTag.numEntries = cpu_to_le16(2); } else { fe->icbTag.strategyType = cpu_to_le16(4); fe->icbTag.numEntries = cpu_to_le16(1); } if (S_ISDIR(inode->i_mode)) fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY; else if (S_ISREG(inode->i_mode)) fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR; else if (S_ISLNK(inode->i_mode)) fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK; else if (S_ISBLK(inode->i_mode)) fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK; else if (S_ISCHR(inode->i_mode)) fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR; else if (S_ISFIFO(inode->i_mode)) fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO; else if (S_ISSOCK(inode->i_mode)) fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET; icbflags = iinfo->i_alloc_type | ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) | ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) | ((inode->i_mode & S_ISVTX) ? 
ICBTAG_FLAG_STICKY : 0) | (le16_to_cpu(fe->icbTag.flags) & ~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID | ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY)); fe->icbTag.flags = cpu_to_le16(icbflags); if (sbi->s_udfrev >= 0x0200) fe->descTag.descVersion = cpu_to_le16(3); else fe->descTag.descVersion = cpu_to_le16(2); fe->descTag.tagSerialNum = cpu_to_le16(sbi->s_serial_number); fe->descTag.tagLocation = cpu_to_le32( iinfo->i_location.logicalBlockNum); crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc - sizeof(struct tag); fe->descTag.descCRCLength = cpu_to_le16(crclen); fe->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)fe + sizeof(struct tag), crclen)); fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag); out: set_buffer_uptodate(bh); unlock_buffer(bh); /* write the data blocks */ mark_buffer_dirty(bh); if (do_sync) { sync_dirty_buffer(bh); if (buffer_write_io_error(bh)) { printk(KERN_WARNING "IO error syncing udf inode " "[%s:%08lx]\n", inode->i_sb->s_id, inode->i_ino); err = -EIO; } } brelse(bh); return err; } struct inode *udf_iget(struct super_block *sb, struct kernel_lb_addr *ino) { unsigned long block = udf_get_lb_pblock(sb, ino, 0); struct inode *inode = iget_locked(sb, block); if (!inode) return NULL; if (inode->i_state & I_NEW) { memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr)); __udf_read_inode(inode); unlock_new_inode(inode); } if (is_bad_inode(inode)) goto out_iput; if (ino->logicalBlockNum >= UDF_SB(sb)-> s_partmaps[ino->partitionReferenceNum].s_partition_len) { udf_debug("block=%d, partition=%d out of range\n", ino->logicalBlockNum, ino->partitionReferenceNum); make_bad_inode(inode); goto out_iput; } return inode; out_iput: iput(inode); return NULL; } int udf_add_aext(struct inode *inode, struct extent_position *epos, struct kernel_lb_addr *eloc, uint32_t elen, int inc) { int adsize; struct short_ad *sad = NULL; struct long_ad *lad = NULL; struct allocExtDesc *aed; uint8_t *ptr; struct udf_inode_info *iinfo = UDF_I(inode); if 
(!epos->bh) ptr = iinfo->i_ext.i_data + epos->offset - udf_file_entry_alloc_offset(inode) + iinfo->i_lenEAttr; else ptr = epos->bh->b_data + epos->offset; if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) adsize = sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) adsize = sizeof(struct long_ad); else return -EIO; if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize) { unsigned char *sptr, *dptr; struct buffer_head *nbh; int err, loffset; struct kernel_lb_addr obloc = epos->block; epos->block.logicalBlockNum = udf_new_block(inode->i_sb, NULL, obloc.partitionReferenceNum, obloc.logicalBlockNum, &err); if (!epos->block.logicalBlockNum) return -ENOSPC; nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb, &epos->block, 0)); if (!nbh) return -EIO; lock_buffer(nbh); memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize); set_buffer_uptodate(nbh); unlock_buffer(nbh); mark_buffer_dirty_inode(nbh, inode); aed = (struct allocExtDesc *)(nbh->b_data); if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT)) aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum); if (epos->offset + adsize > inode->i_sb->s_blocksize) { loffset = epos->offset; aed->lengthAllocDescs = cpu_to_le32(adsize); sptr = ptr - adsize; dptr = nbh->b_data + sizeof(struct allocExtDesc); memcpy(dptr, sptr, adsize); epos->offset = sizeof(struct allocExtDesc) + adsize; } else { loffset = epos->offset + adsize; aed->lengthAllocDescs = cpu_to_le32(0); sptr = ptr; epos->offset = sizeof(struct allocExtDesc); if (epos->bh) { aed = (struct allocExtDesc *)epos->bh->b_data; le32_add_cpu(&aed->lengthAllocDescs, adsize); } else { iinfo->i_lenAlloc += adsize; mark_inode_dirty(inode); } } if (UDF_SB(inode->i_sb)->s_udfrev >= 0x0200) udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1, epos->block.logicalBlockNum, sizeof(struct tag)); else udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1, epos->block.logicalBlockNum, sizeof(struct tag)); switch (iinfo->i_alloc_type) { case 
ICBTAG_FLAG_AD_SHORT: sad = (struct short_ad *)sptr; sad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS | inode->i_sb->s_blocksize); sad->extPosition = cpu_to_le32(epos->block.logicalBlockNum); break; case ICBTAG_FLAG_AD_LONG: lad = (struct long_ad *)sptr; lad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS | inode->i_sb->s_blocksize); lad->extLocation = cpu_to_lelb(epos->block); memset(lad->impUse, 0x00, sizeof(lad->impUse)); break; } if (epos->bh) { if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) udf_update_tag(epos->bh->b_data, loffset); else udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc)); mark_buffer_dirty_inode(epos->bh, inode); brelse(epos->bh); } else { mark_inode_dirty(inode); } epos->bh = nbh; } udf_write_aext(inode, epos, eloc, elen, inc); if (!epos->bh) { iinfo->i_lenAlloc += adsize; mark_inode_dirty(inode); } else { aed = (struct allocExtDesc *)epos->bh->b_data; le32_add_cpu(&aed->lengthAllocDescs, adsize); if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) udf_update_tag(epos->bh->b_data, epos->offset + (inc ? 
0 : adsize)); else udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc)); mark_buffer_dirty_inode(epos->bh, inode); } return 0; } void udf_write_aext(struct inode *inode, struct extent_position *epos, struct kernel_lb_addr *eloc, uint32_t elen, int inc) { int adsize; uint8_t *ptr; struct short_ad *sad; struct long_ad *lad; struct udf_inode_info *iinfo = UDF_I(inode); if (!epos->bh) ptr = iinfo->i_ext.i_data + epos->offset - udf_file_entry_alloc_offset(inode) + iinfo->i_lenEAttr; else ptr = epos->bh->b_data + epos->offset; switch (iinfo->i_alloc_type) { case ICBTAG_FLAG_AD_SHORT: sad = (struct short_ad *)ptr; sad->extLength = cpu_to_le32(elen); sad->extPosition = cpu_to_le32(eloc->logicalBlockNum); adsize = sizeof(struct short_ad); break; case ICBTAG_FLAG_AD_LONG: lad = (struct long_ad *)ptr; lad->extLength = cpu_to_le32(elen); lad->extLocation = cpu_to_lelb(*eloc); memset(lad->impUse, 0x00, sizeof(lad->impUse)); adsize = sizeof(struct long_ad); break; default: return; } if (epos->bh) { if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) { struct allocExtDesc *aed = (struct allocExtDesc *)epos->bh->b_data; udf_update_tag(epos->bh->b_data, le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc)); } mark_buffer_dirty_inode(epos->bh, inode); } else { mark_inode_dirty(inode); } if (inc) epos->offset += adsize; } int8_t udf_next_aext(struct inode *inode, struct extent_position *epos, struct kernel_lb_addr *eloc, uint32_t *elen, int inc) { int8_t etype; while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) == (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) { int block; epos->block = *eloc; epos->offset = sizeof(struct allocExtDesc); brelse(epos->bh); block = udf_get_lb_pblock(inode->i_sb, &epos->block, 0); epos->bh = udf_tread(inode->i_sb, block); if (!epos->bh) { udf_debug("reading block %d failed!\n", block); return -1; } } return etype; } int8_t udf_current_aext(struct inode *inode, struct extent_position *epos, 
struct kernel_lb_addr *eloc, uint32_t *elen, int inc) { int alen; int8_t etype; uint8_t *ptr; struct short_ad *sad; struct long_ad *lad; struct udf_inode_info *iinfo = UDF_I(inode); if (!epos->bh) { if (!epos->offset) epos->offset = udf_file_entry_alloc_offset(inode); ptr = iinfo->i_ext.i_data + epos->offset - udf_file_entry_alloc_offset(inode) + iinfo->i_lenEAttr; alen = udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc; } else { if (!epos->offset) epos->offset = sizeof(struct allocExtDesc); ptr = epos->bh->b_data + epos->offset; alen = sizeof(struct allocExtDesc) + le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)-> lengthAllocDescs); } switch (iinfo->i_alloc_type) { case ICBTAG_FLAG_AD_SHORT: sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc); if (!sad) return -1; etype = le32_to_cpu(sad->extLength) >> 30; eloc->logicalBlockNum = le32_to_cpu(sad->extPosition); eloc->partitionReferenceNum = iinfo->i_location.partitionReferenceNum; *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK; break; case ICBTAG_FLAG_AD_LONG: lad = udf_get_filelongad(ptr, alen, &epos->offset, inc); if (!lad) return -1; etype = le32_to_cpu(lad->extLength) >> 30; *eloc = lelb_to_cpu(lad->extLocation); *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK; break; default: udf_debug("alloc_type = %d unsupported\n", iinfo->i_alloc_type); return -1; } return etype; } static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos, struct kernel_lb_addr neloc, uint32_t nelen) { struct kernel_lb_addr oeloc; uint32_t oelen; int8_t etype; if (epos.bh) get_bh(epos.bh); while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) { udf_write_aext(inode, &epos, &neloc, nelen, 1); neloc = oeloc; nelen = (etype << 30) | oelen; } udf_add_aext(inode, &epos, &neloc, nelen, 1); brelse(epos.bh); return (nelen >> 30); } int8_t udf_delete_aext(struct inode *inode, struct extent_position epos, struct kernel_lb_addr eloc, uint32_t elen) { struct 
extent_position oepos; int adsize; int8_t etype; struct allocExtDesc *aed; struct udf_inode_info *iinfo; if (epos.bh) { get_bh(epos.bh); get_bh(epos.bh); } iinfo = UDF_I(inode); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) adsize = sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) adsize = sizeof(struct long_ad); else adsize = 0; oepos = epos; if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1) return -1; while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) { udf_write_aext(inode, &oepos, &eloc, (etype << 30) | elen, 1); if (oepos.bh != epos.bh) { oepos.block = epos.block; brelse(oepos.bh); get_bh(epos.bh); oepos.bh = epos.bh; oepos.offset = epos.offset - adsize; } } memset(&eloc, 0x00, sizeof(struct kernel_lb_addr)); elen = 0; if (epos.bh != oepos.bh) { udf_free_blocks(inode->i_sb, inode, &epos.block, 0, 1); udf_write_aext(inode, &oepos, &eloc, elen, 1); udf_write_aext(inode, &oepos, &eloc, elen, 1); if (!oepos.bh) { iinfo->i_lenAlloc -= (adsize * 2); mark_inode_dirty(inode); } else { aed = (struct allocExtDesc *)oepos.bh->b_data; le32_add_cpu(&aed->lengthAllocDescs, -(2 * adsize)); if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) udf_update_tag(oepos.bh->b_data, oepos.offset - (2 * adsize)); else udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc)); mark_buffer_dirty_inode(oepos.bh, inode); } } else { udf_write_aext(inode, &oepos, &eloc, elen, 1); if (!oepos.bh) { iinfo->i_lenAlloc -= adsize; mark_inode_dirty(inode); } else { aed = (struct allocExtDesc *)oepos.bh->b_data; le32_add_cpu(&aed->lengthAllocDescs, -adsize); if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) udf_update_tag(oepos.bh->b_data, epos.offset - adsize); else udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc)); mark_buffer_dirty_inode(oepos.bh, inode); } } brelse(epos.bh); brelse(oepos.bh); return (elen >> 30); } int8_t inode_bmap(struct 
inode *inode, sector_t block, struct extent_position *pos, struct kernel_lb_addr *eloc, uint32_t *elen, sector_t *offset) { unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits; loff_t lbcount = 0, bcount = (loff_t) block << blocksize_bits; int8_t etype; struct udf_inode_info *iinfo; iinfo = UDF_I(inode); pos->offset = 0; pos->block = iinfo->i_location; pos->bh = NULL; *elen = 0; do { etype = udf_next_aext(inode, pos, eloc, elen, 1); if (etype == -1) { *offset = (bcount - lbcount) >> blocksize_bits; iinfo->i_lenExtents = lbcount; return -1; } lbcount += *elen; } while (lbcount <= bcount); *offset = (bcount + *elen - lbcount) >> blocksize_bits; return etype; } long udf_block_map(struct inode *inode, sector_t block) { struct kernel_lb_addr eloc; uint32_t elen; sector_t offset; struct extent_position epos = {}; int ret; down_read(&UDF_I(inode)->i_data_sem); if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) ret = udf_get_lb_pblock(inode->i_sb, &eloc, offset); else ret = 0; up_read(&UDF_I(inode)->i_data_sem); brelse(epos.bh); if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV)) return udf_fixed_to_variable(ret); else return ret; }
gpl-2.0
mayli/unionfs-latest
drivers/crypto/qce/common.c
1968
11821
/* * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/err.h> #include <linux/interrupt.h> #include <linux/types.h> #include <crypto/scatterwalk.h> #include <crypto/sha.h> #include "cipher.h" #include "common.h" #include "core.h" #include "regs-v5.h" #include "sha.h" #define QCE_SECTOR_SIZE 512 static inline u32 qce_read(struct qce_device *qce, u32 offset) { return readl(qce->base + offset); } static inline void qce_write(struct qce_device *qce, u32 offset, u32 val) { writel(val, qce->base + offset); } static inline void qce_write_array(struct qce_device *qce, u32 offset, const u32 *val, unsigned int len) { int i; for (i = 0; i < len; i++) qce_write(qce, offset + i * sizeof(u32), val[i]); } static inline void qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len) { int i; for (i = 0; i < len; i++) qce_write(qce, offset + i * sizeof(u32), 0); } static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size) { u32 cfg = 0; if (IS_AES(flags)) { if (aes_key_size == AES_KEYSIZE_128) cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT; else if (aes_key_size == AES_KEYSIZE_256) cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT; } if (IS_AES(flags)) cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT; else if (IS_DES(flags) || IS_3DES(flags)) cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT; if (IS_DES(flags)) cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT; if (IS_3DES(flags)) cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT; switch (flags & QCE_MODE_MASK) { case QCE_MODE_ECB: cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT; 
break; case QCE_MODE_CBC: cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT; break; case QCE_MODE_CTR: cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT; break; case QCE_MODE_XTS: cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT; break; case QCE_MODE_CCM: cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT; cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT; break; default: return ~0; } return cfg; } static u32 qce_auth_cfg(unsigned long flags, u32 key_size) { u32 cfg = 0; if (IS_AES(flags) && (IS_CCM(flags) || IS_CMAC(flags))) cfg |= AUTH_ALG_AES << AUTH_ALG_SHIFT; else cfg |= AUTH_ALG_SHA << AUTH_ALG_SHIFT; if (IS_CCM(flags) || IS_CMAC(flags)) { if (key_size == AES_KEYSIZE_128) cfg |= AUTH_KEY_SZ_AES128 << AUTH_KEY_SIZE_SHIFT; else if (key_size == AES_KEYSIZE_256) cfg |= AUTH_KEY_SZ_AES256 << AUTH_KEY_SIZE_SHIFT; } if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) cfg |= AUTH_SIZE_SHA1 << AUTH_SIZE_SHIFT; else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) cfg |= AUTH_SIZE_SHA256 << AUTH_SIZE_SHIFT; else if (IS_CMAC(flags)) cfg |= AUTH_SIZE_ENUM_16_BYTES << AUTH_SIZE_SHIFT; if (IS_SHA1(flags) || IS_SHA256(flags)) cfg |= AUTH_MODE_HASH << AUTH_MODE_SHIFT; else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags) || IS_CBC(flags) || IS_CTR(flags)) cfg |= AUTH_MODE_HMAC << AUTH_MODE_SHIFT; else if (IS_AES(flags) && IS_CCM(flags)) cfg |= AUTH_MODE_CCM << AUTH_MODE_SHIFT; else if (IS_AES(flags) && IS_CMAC(flags)) cfg |= AUTH_MODE_CMAC << AUTH_MODE_SHIFT; if (IS_SHA(flags) || IS_SHA_HMAC(flags)) cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT; if (IS_CCM(flags)) cfg |= QCE_MAX_NONCE_WORDS << AUTH_NONCE_NUM_WORDS_SHIFT; if (IS_CBC(flags) || IS_CTR(flags) || IS_CCM(flags) || IS_CMAC(flags)) cfg |= BIT(AUTH_LAST_SHIFT) | BIT(AUTH_FIRST_SHIFT); return cfg; } static u32 qce_config_reg(struct qce_device *qce, int little) { u32 beats = (qce->burst_size >> 3) - 1; u32 pipe_pair = qce->pipe_pair_id; u32 config; config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK; config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) | 
BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT); config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK; config &= ~HIGH_SPD_EN_N_SHIFT; if (little) config |= BIT(LITTLE_ENDIAN_MODE_SHIFT); return config; } void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len) { __be32 *d = dst; const u8 *s = src; unsigned int n; n = len / sizeof(u32); for (; n > 0; n--) { *d = cpu_to_be32p((const __u32 *) s); s += sizeof(__u32); d++; } } static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize) { u8 swap[QCE_AES_IV_LENGTH]; u32 i, j; if (ivsize > QCE_AES_IV_LENGTH) return; memset(swap, 0, QCE_AES_IV_LENGTH); for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1; i < QCE_AES_IV_LENGTH; i++, j--) swap[i] = src[j]; qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH); } static void qce_xtskey(struct qce_device *qce, const u8 *enckey, unsigned int enckeylen, unsigned int cryptlen) { u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0}; unsigned int xtsklen = enckeylen / (2 * sizeof(u32)); unsigned int xtsdusize; qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2, enckeylen / 2); qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen); /* xts du size 512B */ xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen); qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize); } static void qce_setup_config(struct qce_device *qce) { u32 config; /* get big endianness */ config = qce_config_reg(qce, 0); /* clear status */ qce_write(qce, REG_STATUS, 0); qce_write(qce, REG_CONFIG, config); } static inline void qce_crypto_go(struct qce_device *qce) { qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT)); } static int qce_setup_regs_ahash(struct crypto_async_request *async_req, u32 totallen, u32 offset) { struct ahash_request *req = ahash_request_cast(async_req); struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm); struct qce_sha_reqctx *rctx = ahash_request_ctx(req); struct qce_alg_template *tmpl = 
to_ahash_tmpl(async_req->tfm); struct qce_device *qce = tmpl->qce; unsigned int digestsize = crypto_ahash_digestsize(ahash); unsigned int blocksize = crypto_tfm_alg_blocksize(async_req->tfm); __be32 auth[SHA256_DIGEST_SIZE / sizeof(__be32)] = {0}; __be32 mackey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0}; u32 auth_cfg = 0, config; unsigned int iv_words; /* if not the last, the size has to be on the block boundary */ if (!rctx->last_blk && req->nbytes % blocksize) return -EINVAL; qce_setup_config(qce); if (IS_CMAC(rctx->flags)) { qce_write(qce, REG_AUTH_SEG_CFG, 0); qce_write(qce, REG_ENCR_SEG_CFG, 0); qce_write(qce, REG_ENCR_SEG_SIZE, 0); qce_clear_array(qce, REG_AUTH_IV0, 16); qce_clear_array(qce, REG_AUTH_KEY0, 16); qce_clear_array(qce, REG_AUTH_BYTECNT0, 4); auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen); } if (IS_SHA_HMAC(rctx->flags) || IS_CMAC(rctx->flags)) { u32 authkey_words = rctx->authklen / sizeof(u32); qce_cpu_to_be32p_array(mackey, rctx->authkey, rctx->authklen); qce_write_array(qce, REG_AUTH_KEY0, (u32 *)mackey, authkey_words); } if (IS_CMAC(rctx->flags)) goto go_proc; if (rctx->first_blk) memcpy(auth, rctx->digest, digestsize); else qce_cpu_to_be32p_array(auth, rctx->digest, digestsize); iv_words = (IS_SHA1(rctx->flags) || IS_SHA1_HMAC(rctx->flags)) ? 
5 : 8; qce_write_array(qce, REG_AUTH_IV0, (u32 *)auth, iv_words); if (rctx->first_blk) qce_clear_array(qce, REG_AUTH_BYTECNT0, 4); else qce_write_array(qce, REG_AUTH_BYTECNT0, (u32 *)rctx->byte_count, 2); auth_cfg = qce_auth_cfg(rctx->flags, 0); if (rctx->last_blk) auth_cfg |= BIT(AUTH_LAST_SHIFT); else auth_cfg &= ~BIT(AUTH_LAST_SHIFT); if (rctx->first_blk) auth_cfg |= BIT(AUTH_FIRST_SHIFT); else auth_cfg &= ~BIT(AUTH_FIRST_SHIFT); go_proc: qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg); qce_write(qce, REG_AUTH_SEG_SIZE, req->nbytes); qce_write(qce, REG_AUTH_SEG_START, 0); qce_write(qce, REG_ENCR_SEG_CFG, 0); qce_write(qce, REG_SEG_SIZE, req->nbytes); /* get little endianness */ config = qce_config_reg(qce, 1); qce_write(qce, REG_CONFIG, config); qce_crypto_go(qce); return 0; } static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req, u32 totallen, u32 offset) { struct ablkcipher_request *req = ablkcipher_request_cast(async_req); struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm); struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm); struct qce_device *qce = tmpl->qce; __be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0}; __be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0}; unsigned int enckey_words, enciv_words; unsigned int keylen; u32 encr_cfg = 0, auth_cfg = 0, config; unsigned int ivsize = rctx->ivsize; unsigned long flags = rctx->flags; qce_setup_config(qce); if (IS_XTS(flags)) keylen = ctx->enc_keylen / 2; else keylen = ctx->enc_keylen; qce_cpu_to_be32p_array(enckey, ctx->enc_key, keylen); enckey_words = keylen / sizeof(u32); qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg); encr_cfg = qce_encr_cfg(flags, keylen); if (IS_DES(flags)) { enciv_words = 2; enckey_words = 2; } else if (IS_3DES(flags)) { enciv_words = 2; enckey_words = 6; } else if (IS_AES(flags)) { if (IS_XTS(flags)) qce_xtskey(qce, ctx->enc_key, ctx->enc_keylen, rctx->cryptlen); enciv_words = 4; } 
else { return -EINVAL; } qce_write_array(qce, REG_ENCR_KEY0, (u32 *)enckey, enckey_words); if (!IS_ECB(flags)) { if (IS_XTS(flags)) qce_xts_swapiv(enciv, rctx->iv, ivsize); else qce_cpu_to_be32p_array(enciv, rctx->iv, ivsize); qce_write_array(qce, REG_CNTR0_IV0, (u32 *)enciv, enciv_words); } if (IS_ENCRYPT(flags)) encr_cfg |= BIT(ENCODE_SHIFT); qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg); qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen); qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff); if (IS_CTR(flags)) { qce_write(qce, REG_CNTR_MASK, ~0); qce_write(qce, REG_CNTR_MASK0, ~0); qce_write(qce, REG_CNTR_MASK1, ~0); qce_write(qce, REG_CNTR_MASK2, ~0); } qce_write(qce, REG_SEG_SIZE, totallen); /* get little endianness */ config = qce_config_reg(qce, 1); qce_write(qce, REG_CONFIG, config); qce_crypto_go(qce); return 0; } int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen, u32 offset) { switch (type) { case CRYPTO_ALG_TYPE_ABLKCIPHER: return qce_setup_regs_ablkcipher(async_req, totallen, offset); case CRYPTO_ALG_TYPE_AHASH: return qce_setup_regs_ahash(async_req, totallen, offset); default: return -EINVAL; } } #define STATUS_ERRORS \ (BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) | BIT(HSD_ERR_SHIFT)) int qce_check_status(struct qce_device *qce, u32 *status) { int ret = 0; *status = qce_read(qce, REG_STATUS); /* * Don't use result dump status. The operation may not be complete. * Instead, use the status we just read from device. In case, we need to * use result_status from result dump the result_status needs to be byte * swapped, since we set the device to little endian. 
*/ if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT))) ret = -ENXIO; return ret; } void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step) { u32 val; val = qce_read(qce, REG_VERSION); *major = (val & CORE_MAJOR_REV_MASK) >> CORE_MAJOR_REV_SHIFT; *minor = (val & CORE_MINOR_REV_MASK) >> CORE_MINOR_REV_SHIFT; *step = (val & CORE_STEP_REV_MASK) >> CORE_STEP_REV_SHIFT; }
gpl-2.0
wang701/nexus_9_flounder_kernel_src
arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
2224
15877
/* * LocalPlus Bus FIFO driver for the Freescale MPC52xx. * * Copyright (C) 2009 Secret Lab Technologies Ltd. * * This file is released under the GPLv2 * * Todo: * - Add support for multiple requests to be queued. */ #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/spinlock.h> #include <linux/module.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/mpc52xx.h> #include <asm/time.h> #include <linux/fsl/bestcomm/bestcomm.h> #include <linux/fsl/bestcomm/bestcomm_priv.h> #include <linux/fsl/bestcomm/gen_bd.h> MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>"); MODULE_DESCRIPTION("MPC5200 LocalPlus FIFO device driver"); MODULE_LICENSE("GPL"); #define LPBFIFO_REG_PACKET_SIZE (0x00) #define LPBFIFO_REG_START_ADDRESS (0x04) #define LPBFIFO_REG_CONTROL (0x08) #define LPBFIFO_REG_ENABLE (0x0C) #define LPBFIFO_REG_BYTES_DONE_STATUS (0x14) #define LPBFIFO_REG_FIFO_DATA (0x40) #define LPBFIFO_REG_FIFO_STATUS (0x44) #define LPBFIFO_REG_FIFO_CONTROL (0x48) #define LPBFIFO_REG_FIFO_ALARM (0x4C) struct mpc52xx_lpbfifo { struct device *dev; phys_addr_t regs_phys; void __iomem *regs; int irq; spinlock_t lock; struct bcom_task *bcom_tx_task; struct bcom_task *bcom_rx_task; struct bcom_task *bcom_cur_task; /* Current state data */ struct mpc52xx_lpbfifo_request *req; int dma_irqs_enabled; }; /* The MPC5200 has only one fifo, so only need one instance structure */ static struct mpc52xx_lpbfifo lpbfifo; /** * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transferred */ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) { size_t transfer_size = req->size - req->pos; struct bcom_bd *bd; void __iomem *reg; u32 *data; int i; int bit_fields; int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA); int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE; int poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA; /* Set and clear the reset bits; is good practice in User Manual */ 
out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000); /* set master enable bit */ out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000001); if (!dma) { /* While the FIFO can be setup for transfer sizes as large as * 16M-1, the FIFO itself is only 512 bytes deep and it does * not generate interrupts for FIFO full events (only transfer * complete will raise an IRQ). Therefore when not using * Bestcomm to drive the FIFO it needs to either be polled, or * transfers need to constrained to the size of the fifo. * * This driver restricts the size of the transfer */ if (transfer_size > 512) transfer_size = 512; /* Load the FIFO with data */ if (write) { reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA; data = req->data + req->pos; for (i = 0; i < transfer_size; i += 4) out_be32(reg, *data++); } /* Unmask both error and completion irqs */ out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000301); } else { /* Choose the correct direction * * Configure the watermarks so DMA will always complete correctly. * It may be worth experimenting with the ALARM value to see if * there is a performance impacit. 
However, if it is wrong there * is a risk of DMA not transferring the last chunk of data */ if (write) { out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1e4); out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 7); lpbfifo.bcom_cur_task = lpbfifo.bcom_tx_task; } else { out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1ff); out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 0); lpbfifo.bcom_cur_task = lpbfifo.bcom_rx_task; if (poll_dma) { if (lpbfifo.dma_irqs_enabled) { disable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task)); lpbfifo.dma_irqs_enabled = 0; } } else { if (!lpbfifo.dma_irqs_enabled) { enable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task)); lpbfifo.dma_irqs_enabled = 1; } } } bd = bcom_prepare_next_buffer(lpbfifo.bcom_cur_task); bd->status = transfer_size; if (!write) { /* * In the DMA read case, the DMA doesn't complete, * possibly due to incorrect watermarks in the ALARM * and CONTROL regs. For now instead of trying to * determine the right watermarks that will make this * work, just increase the number of bytes the FIFO is * expecting. * * When submitting another operation, the FIFO will get * reset, so the condition of the FIFO waiting for a * non-existent 4 bytes will get cleared. */ transfer_size += 4; /* BLECH! 
*/ } bd->data[0] = req->data_phys + req->pos; bcom_submit_next_buffer(lpbfifo.bcom_cur_task, NULL); /* error irq & master enabled bit */ bit_fields = 0x00000201; /* Unmask irqs */ if (write && (!poll_dma)) bit_fields |= 0x00000100; /* completion irq too */ out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, bit_fields); } /* Set transfer size, width, chip select and READ mode */ out_be32(lpbfifo.regs + LPBFIFO_REG_START_ADDRESS, req->offset + req->pos); out_be32(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, transfer_size); bit_fields = req->cs << 24 | 0x000008; if (!write) bit_fields |= 0x010000; /* read mode */ out_be32(lpbfifo.regs + LPBFIFO_REG_CONTROL, bit_fields); /* Kick it off */ if (!lpbfifo.req->defer_xfer_start) out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01); if (dma) bcom_enable(lpbfifo.bcom_cur_task); } /** * mpc52xx_lpbfifo_irq - IRQ handler for LPB FIFO * * On transmit, the dma completion irq triggers before the fifo completion * triggers. Handle the dma completion here instead of the LPB FIFO Bestcomm * task completion irq because everything is not really done until the LPB FIFO * completion irq triggers. * * In other words: * For DMA, on receive, the "Fat Lady" is the bestcom completion irq. on * transmit, the fifo completion irq is the "Fat Lady". The opera (or in this * case the DMA/FIFO operation) is not finished until the "Fat Lady" sings. * * Reasons for entering this routine: * 1) PIO mode rx and tx completion irq * 2) DMA interrupt mode tx completion irq * 3) DMA polled mode tx * * Exit conditions: * 1) Transfer aborted * 2) FIFO complete without DMA; more data to do * 3) FIFO complete without DMA; all data transferred * 4) FIFO complete using DMA * * Condition 1 can occur regardless of whether or not DMA is used. * It requires executing the callback to report the error and exiting * immediately. 
* * Condition 2 requires programming the FIFO with the next block of data * * Condition 3 requires executing the callback to report completion * * Condition 4 means the same as 3, except that we also retrieve the bcom * buffer so DMA doesn't get clogged up. * * To make things trickier, the spinlock must be dropped before * executing the callback, otherwise we could end up with a deadlock * or nested spinlock condition. The out path is non-trivial, so * extra fiddling is done to make sure all paths lead to the same * outbound code. */ static irqreturn_t mpc52xx_lpbfifo_irq(int irq, void *dev_id) { struct mpc52xx_lpbfifo_request *req; u32 status = in_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS); void __iomem *reg; u32 *data; int count, i; int do_callback = 0; u32 ts; unsigned long flags; int dma, write, poll_dma; spin_lock_irqsave(&lpbfifo.lock, flags); ts = get_tbl(); req = lpbfifo.req; if (!req) { spin_unlock_irqrestore(&lpbfifo.lock, flags); pr_err("bogus LPBFIFO IRQ\n"); return IRQ_HANDLED; } dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA); write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE; poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA; if (dma && !write) { spin_unlock_irqrestore(&lpbfifo.lock, flags); pr_err("bogus LPBFIFO IRQ (dma and not writting)\n"); return IRQ_HANDLED; } if ((status & 0x01) == 0) { goto out; } /* check abort bit */ if (status & 0x10) { out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000); do_callback = 1; goto out; } /* Read result from hardware */ count = in_be32(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS); count &= 0x00ffffff; if (!dma && !write) { /* copy the data out of the FIFO */ reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA; data = req->data + req->pos; for (i = 0; i < count; i += 4) *data++ = in_be32(reg); } /* Update transfer position and count */ req->pos += count; /* Decide what to do next */ if (req->size - req->pos) mpc52xx_lpbfifo_kick(req); /* more work to do */ else do_callback = 1; out: /* Clear the IRQ */ 
out_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS, 0x01); if (dma && (status & 0x11)) { /* * Count the DMA as complete only when the FIFO completion * status or abort bits are set. * * (status & 0x01) should always be the case except sometimes * when using polled DMA. * * (status & 0x10) {transfer aborted}: This case needs more * testing. */ bcom_retrieve_buffer(lpbfifo.bcom_cur_task, &status, NULL); } req->last_byte = ((u8 *)req->data)[req->size - 1]; /* When the do_callback flag is set; it means the transfer is finished * so set the FIFO as idle */ if (do_callback) lpbfifo.req = NULL; if (irq != 0) /* don't increment on polled case */ req->irq_count++; req->irq_ticks += get_tbl() - ts; spin_unlock_irqrestore(&lpbfifo.lock, flags); /* Spinlock is released; it is now safe to call the callback */ if (do_callback && req->callback) req->callback(req); return IRQ_HANDLED; } /** * mpc52xx_lpbfifo_bcom_irq - IRQ handler for LPB FIFO Bestcomm task * * Only used when receiving data. */ static irqreturn_t mpc52xx_lpbfifo_bcom_irq(int irq, void *dev_id) { struct mpc52xx_lpbfifo_request *req; unsigned long flags; u32 status; u32 ts; spin_lock_irqsave(&lpbfifo.lock, flags); ts = get_tbl(); req = lpbfifo.req; if (!req || (req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA)) { spin_unlock_irqrestore(&lpbfifo.lock, flags); return IRQ_HANDLED; } if (irq != 0) /* don't increment on polled case */ req->irq_count++; if (!bcom_buffer_done(lpbfifo.bcom_cur_task)) { spin_unlock_irqrestore(&lpbfifo.lock, flags); req->buffer_not_done_cnt++; if ((req->buffer_not_done_cnt % 1000) == 0) pr_err("transfer stalled\n"); return IRQ_HANDLED; } bcom_retrieve_buffer(lpbfifo.bcom_cur_task, &status, NULL); req->last_byte = ((u8 *)req->data)[req->size - 1]; req->pos = status & 0x00ffffff; /* Mark the FIFO as idle */ lpbfifo.req = NULL; /* Release the lock before calling out to the callback. 
*/ req->irq_ticks += get_tbl() - ts; spin_unlock_irqrestore(&lpbfifo.lock, flags); if (req->callback) req->callback(req); return IRQ_HANDLED; } /** * mpc52xx_lpbfifo_bcom_poll - Poll for DMA completion */ void mpc52xx_lpbfifo_poll(void) { struct mpc52xx_lpbfifo_request *req = lpbfifo.req; int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA); int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE; /* * For more information, see comments on the "Fat Lady" */ if (dma && write) mpc52xx_lpbfifo_irq(0, NULL); else mpc52xx_lpbfifo_bcom_irq(0, NULL); } EXPORT_SYMBOL(mpc52xx_lpbfifo_poll); /** * mpc52xx_lpbfifo_submit - Submit an LPB FIFO transfer request. * @req: Pointer to request structure */ int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req) { unsigned long flags; if (!lpbfifo.regs) return -ENODEV; spin_lock_irqsave(&lpbfifo.lock, flags); /* If the req pointer is already set, then a transfer is in progress */ if (lpbfifo.req) { spin_unlock_irqrestore(&lpbfifo.lock, flags); return -EBUSY; } /* Setup the transfer */ lpbfifo.req = req; req->irq_count = 0; req->irq_ticks = 0; req->buffer_not_done_cnt = 0; req->pos = 0; mpc52xx_lpbfifo_kick(req); spin_unlock_irqrestore(&lpbfifo.lock, flags); return 0; } EXPORT_SYMBOL(mpc52xx_lpbfifo_submit); int mpc52xx_lpbfifo_start_xfer(struct mpc52xx_lpbfifo_request *req) { unsigned long flags; if (!lpbfifo.regs) return -ENODEV; spin_lock_irqsave(&lpbfifo.lock, flags); /* * If the req pointer is already set and a transfer was * started on submit, then this transfer is in progress */ if (lpbfifo.req && !lpbfifo.req->defer_xfer_start) { spin_unlock_irqrestore(&lpbfifo.lock, flags); return -EBUSY; } /* * If the req was previously submitted but not * started, start it now */ if (lpbfifo.req && lpbfifo.req == req && lpbfifo.req->defer_xfer_start) { out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01); } spin_unlock_irqrestore(&lpbfifo.lock, flags); return 0; } EXPORT_SYMBOL(mpc52xx_lpbfifo_start_xfer); void 
mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req) { unsigned long flags; spin_lock_irqsave(&lpbfifo.lock, flags); if (lpbfifo.req == req) { /* Put it into reset and clear the state */ bcom_gen_bd_rx_reset(lpbfifo.bcom_rx_task); bcom_gen_bd_tx_reset(lpbfifo.bcom_tx_task); out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000); lpbfifo.req = NULL; } spin_unlock_irqrestore(&lpbfifo.lock, flags); } EXPORT_SYMBOL(mpc52xx_lpbfifo_abort); static int mpc52xx_lpbfifo_probe(struct platform_device *op) { struct resource res; int rc = -ENOMEM; if (lpbfifo.dev != NULL) return -ENOSPC; lpbfifo.irq = irq_of_parse_and_map(op->dev.of_node, 0); if (!lpbfifo.irq) return -ENODEV; if (of_address_to_resource(op->dev.of_node, 0, &res)) return -ENODEV; lpbfifo.regs_phys = res.start; lpbfifo.regs = of_iomap(op->dev.of_node, 0); if (!lpbfifo.regs) return -ENOMEM; spin_lock_init(&lpbfifo.lock); /* Put FIFO into reset */ out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000); /* Register the interrupt handler */ rc = request_irq(lpbfifo.irq, mpc52xx_lpbfifo_irq, 0, "mpc52xx-lpbfifo", &lpbfifo); if (rc) goto err_irq; /* Request the Bestcomm receive (fifo --> memory) task and IRQ */ lpbfifo.bcom_rx_task = bcom_gen_bd_rx_init(2, res.start + LPBFIFO_REG_FIFO_DATA, BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC, 16*1024*1024); if (!lpbfifo.bcom_rx_task) goto err_bcom_rx; rc = request_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), mpc52xx_lpbfifo_bcom_irq, 0, "mpc52xx-lpbfifo-rx", &lpbfifo); if (rc) goto err_bcom_rx_irq; lpbfifo.dma_irqs_enabled = 1; /* Request the Bestcomm transmit (memory --> fifo) task and IRQ */ lpbfifo.bcom_tx_task = bcom_gen_bd_tx_init(2, res.start + LPBFIFO_REG_FIFO_DATA, BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC); if (!lpbfifo.bcom_tx_task) goto err_bcom_tx; lpbfifo.dev = &op->dev; return 0; err_bcom_tx: free_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), &lpbfifo); err_bcom_rx_irq: bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task); err_bcom_rx: err_irq: iounmap(lpbfifo.regs); 
lpbfifo.regs = NULL; dev_err(&op->dev, "mpc52xx_lpbfifo_probe() failed\n"); return -ENODEV; } static int mpc52xx_lpbfifo_remove(struct platform_device *op) { if (lpbfifo.dev != &op->dev) return 0; /* Put FIFO in reset */ out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000); /* Release the bestcomm transmit task */ free_irq(bcom_get_task_irq(lpbfifo.bcom_tx_task), &lpbfifo); bcom_gen_bd_tx_release(lpbfifo.bcom_tx_task); /* Release the bestcomm receive task */ free_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), &lpbfifo); bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task); free_irq(lpbfifo.irq, &lpbfifo); iounmap(lpbfifo.regs); lpbfifo.regs = NULL; lpbfifo.dev = NULL; return 0; } static struct of_device_id mpc52xx_lpbfifo_match[] = { { .compatible = "fsl,mpc5200-lpbfifo", }, {}, }; static struct platform_driver mpc52xx_lpbfifo_driver = { .driver = { .name = "mpc52xx-lpbfifo", .owner = THIS_MODULE, .of_match_table = mpc52xx_lpbfifo_match, }, .probe = mpc52xx_lpbfifo_probe, .remove = mpc52xx_lpbfifo_remove, }; module_platform_driver(mpc52xx_lpbfifo_driver);
gpl-2.0
SomeshThakur/Xeon-Kernel
drivers/video/omap2/dss/overlay.c
2480
4829
/* * linux/drivers/video/omap2/dss/overlay.c * * Copyright (C) 2009 Nokia Corporation * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> * * Some code and ideas taken from drivers/video/omap/ driver * by Imre Deak. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #define DSS_SUBSYS_NAME "OVERLAY" #include <linux/kernel.h> #include <linux/module.h> #include <linux/err.h> #include <linux/sysfs.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/slab.h> #include <video/omapdss.h> #include "dss.h" #include "dss_features.h" static int num_overlays; static struct omap_overlay *overlays; int omap_dss_get_num_overlays(void) { return num_overlays; } EXPORT_SYMBOL(omap_dss_get_num_overlays); struct omap_overlay *omap_dss_get_overlay(int num) { if (num >= num_overlays) return NULL; return &overlays[num]; } EXPORT_SYMBOL(omap_dss_get_overlay); void dss_init_overlays(struct platform_device *pdev) { int i, r; num_overlays = dss_feat_get_num_ovls(); overlays = kzalloc(sizeof(struct omap_overlay) * num_overlays, GFP_KERNEL); BUG_ON(overlays == NULL); for (i = 0; i < num_overlays; ++i) { struct omap_overlay *ovl = &overlays[i]; switch (i) { case 0: ovl->name = "gfx"; ovl->id = OMAP_DSS_GFX; break; case 1: ovl->name = "vid1"; ovl->id = OMAP_DSS_VIDEO1; break; case 2: ovl->name = "vid2"; ovl->id = OMAP_DSS_VIDEO2; break; case 3: ovl->name = "vid3"; ovl->id = OMAP_DSS_VIDEO3; break; } ovl->caps = dss_feat_get_overlay_caps(ovl->id); 
ovl->supported_modes = dss_feat_get_supported_color_modes(ovl->id); r = dss_overlay_kobj_init(ovl, pdev); if (r) DSSERR("failed to create sysfs file\n"); } } void dss_uninit_overlays(struct platform_device *pdev) { int i; for (i = 0; i < num_overlays; ++i) { struct omap_overlay *ovl = &overlays[i]; dss_overlay_kobj_uninit(ovl); } kfree(overlays); overlays = NULL; num_overlays = 0; } int dss_ovl_simple_check(struct omap_overlay *ovl, const struct omap_overlay_info *info) { if (info->paddr == 0) { DSSERR("check_overlay: paddr cannot be 0\n"); return -EINVAL; } if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) { if (info->out_width != 0 && info->width != info->out_width) { DSSERR("check_overlay: overlay %d doesn't support " "scaling\n", ovl->id); return -EINVAL; } if (info->out_height != 0 && info->height != info->out_height) { DSSERR("check_overlay: overlay %d doesn't support " "scaling\n", ovl->id); return -EINVAL; } } if ((ovl->supported_modes & info->color_mode) == 0) { DSSERR("check_overlay: overlay %d doesn't support mode %d\n", ovl->id, info->color_mode); return -EINVAL; } if (info->zorder >= omap_dss_get_num_overlays()) { DSSERR("check_overlay: zorder %d too high\n", info->zorder); return -EINVAL; } if (dss_feat_rotation_type_supported(info->rotation_type) == 0) { DSSERR("check_overlay: rotation type %d not supported\n", info->rotation_type); return -EINVAL; } return 0; } int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info, const struct omap_video_timings *mgr_timings) { u16 outw, outh; u16 dw, dh; dw = mgr_timings->x_res; dh = mgr_timings->y_res; if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) { outw = info->width; outh = info->height; } else { if (info->out_width == 0) outw = info->width; else outw = info->out_width; if (info->out_height == 0) outh = info->height; else outh = info->out_height; } if (dw < info->pos_x + outw) { DSSERR("overlay %d horizontally not inside the display area " "(%d + %d >= %d)\n", ovl->id, info->pos_x, outw, dw); 
return -EINVAL; } if (dh < info->pos_y + outh) { DSSERR("overlay %d vertically not inside the display area " "(%d + %d >= %d)\n", ovl->id, info->pos_y, outh, dh); return -EINVAL; } return 0; } /* * Checks if replication logic should be used. Only use when overlay is in * RGB12U or RGB16 mode, and video port width interface is 18bpp or 24bpp */ bool dss_ovl_use_replication(struct dss_lcd_mgr_config config, enum omap_color_mode mode) { if (mode != OMAP_DSS_COLOR_RGB12U && mode != OMAP_DSS_COLOR_RGB16) return false; return config.video_port_width > 16; }
gpl-2.0
CyanogenMod/android_kernel_moto_shamu
drivers/clk/versatile/clk-impd1.c
2480
2314
/* * Clock driver for the ARM Integrator/IM-PD1 board * Copyright (C) 2012 Linus Walleij * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/clk-provider.h> #include <linux/clk.h> #include <linux/clkdev.h> #include <linux/err.h> #include <linux/io.h> #include <linux/platform_data/clk-integrator.h> #include <mach/impd1.h> #include "clk-icst.h" struct impd1_clk { struct clk *vcoclk; struct clk *uartclk; struct clk_lookup *clks[3]; }; static struct impd1_clk impd1_clks[4]; /* * There are two VCO's on the IM-PD1 but only one is used by the * kernel, that is why we are only implementing the control of * IMPD1_OSC1 here. */ static const struct icst_params impd1_vco_params = { .ref = 24000000, /* 24 MHz */ .vco_max = ICST525_VCO_MAX_3V, .vco_min = ICST525_VCO_MIN, .vd_min = 12, .vd_max = 519, .rd_min = 3, .rd_max = 120, .s2div = icst525_s2div, .idx2s = icst525_idx2s, }; static const struct clk_icst_desc impd1_icst1_desc = { .params = &impd1_vco_params, .vco_offset = IMPD1_OSC1, .lock_offset = IMPD1_LOCK, }; /** * integrator_impd1_clk_init() - set up the integrator clock tree * @base: base address of the logic module (LM) * @id: the ID of this LM */ void integrator_impd1_clk_init(void __iomem *base, unsigned int id) { struct impd1_clk *imc; struct clk *clk; int i; if (id > 3) { pr_crit("no more than 4 LMs can be attached\n"); return; } imc = &impd1_clks[id]; clk = icst_clk_register(NULL, &impd1_icst1_desc, base); imc->vcoclk = clk; imc->clks[0] = clkdev_alloc(clk, NULL, "lm%x:01000", id); /* UART reference clock */ clk = clk_register_fixed_rate(NULL, "uartclk", NULL, CLK_IS_ROOT, 14745600); imc->uartclk = clk; imc->clks[1] = clkdev_alloc(clk, NULL, "lm%x:00100", id); imc->clks[2] = clkdev_alloc(clk, NULL, "lm%x:00200", id); for (i = 0; i < ARRAY_SIZE(imc->clks); i++) clkdev_add(imc->clks[i]); } void 
integrator_impd1_clk_exit(unsigned int id) { int i; struct impd1_clk *imc; if (id > 3) return; imc = &impd1_clks[id]; for (i = 0; i < ARRAY_SIZE(imc->clks); i++) clkdev_drop(imc->clks[i]); clk_unregister(imc->uartclk); clk_unregister(imc->vcoclk); }
gpl-2.0
denghl/linux3.x
net/bridge/br_stp.c
2480
13317
/* * Spanning tree protocol; generic parts * Linux ethernet bridge * * Authors: * Lennert Buytenhek <buytenh@gnu.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/rculist.h> #include "br_private.h" #include "br_private_stp.h" /* since time values in bpdu are in jiffies and then scaled (1/256) * before sending, make sure that is at least one STP tick. */ #define MESSAGE_AGE_INCR ((HZ / 256) + 1) static const char *const br_port_state_names[] = { [BR_STATE_DISABLED] = "disabled", [BR_STATE_LISTENING] = "listening", [BR_STATE_LEARNING] = "learning", [BR_STATE_FORWARDING] = "forwarding", [BR_STATE_BLOCKING] = "blocking", }; void br_log_state(const struct net_bridge_port *p) { br_info(p->br, "port %u(%s) entered %s state\n", (unsigned int) p->port_no, p->dev->name, br_port_state_names[p->state]); } /* called under bridge lock */ struct net_bridge_port *br_get_port(struct net_bridge *br, u16 port_no) { struct net_bridge_port *p; list_for_each_entry_rcu(p, &br->port_list, list) { if (p->port_no == port_no) return p; } return NULL; } /* called under bridge lock */ static int br_should_become_root_port(const struct net_bridge_port *p, u16 root_port) { struct net_bridge *br; struct net_bridge_port *rp; int t; br = p->br; if (p->state == BR_STATE_DISABLED || br_is_designated_port(p)) return 0; if (memcmp(&br->bridge_id, &p->designated_root, 8) <= 0) return 0; if (!root_port) return 1; rp = br_get_port(br, root_port); t = memcmp(&p->designated_root, &rp->designated_root, 8); if (t < 0) return 1; else if (t > 0) return 0; if (p->designated_cost + p->path_cost < rp->designated_cost + rp->path_cost) return 1; else if (p->designated_cost + p->path_cost > rp->designated_cost + rp->path_cost) return 0; t = memcmp(&p->designated_bridge, 
&rp->designated_bridge, 8); if (t < 0) return 1; else if (t > 0) return 0; if (p->designated_port < rp->designated_port) return 1; else if (p->designated_port > rp->designated_port) return 0; if (p->port_id < rp->port_id) return 1; return 0; } static void br_root_port_block(const struct net_bridge *br, struct net_bridge_port *p) { br_notice(br, "port %u(%s) tried to become root port (blocked)", (unsigned int) p->port_no, p->dev->name); p->state = BR_STATE_LISTENING; br_log_state(p); br_ifinfo_notify(RTM_NEWLINK, p); if (br->forward_delay > 0) mod_timer(&p->forward_delay_timer, jiffies + br->forward_delay); } /* called under bridge lock */ static void br_root_selection(struct net_bridge *br) { struct net_bridge_port *p; u16 root_port = 0; list_for_each_entry(p, &br->port_list, list) { if (!br_should_become_root_port(p, root_port)) continue; if (p->flags & BR_ROOT_BLOCK) br_root_port_block(br, p); else root_port = p->port_no; } br->root_port = root_port; if (!root_port) { br->designated_root = br->bridge_id; br->root_path_cost = 0; } else { p = br_get_port(br, root_port); br->designated_root = p->designated_root; br->root_path_cost = p->designated_cost + p->path_cost; } } /* called under bridge lock */ void br_become_root_bridge(struct net_bridge *br) { br->max_age = br->bridge_max_age; br->hello_time = br->bridge_hello_time; br->forward_delay = br->bridge_forward_delay; br_topology_change_detection(br); del_timer(&br->tcn_timer); if (br->dev->flags & IFF_UP) { br_config_bpdu_generation(br); mod_timer(&br->hello_timer, jiffies + br->hello_time); } } /* called under bridge lock */ void br_transmit_config(struct net_bridge_port *p) { struct br_config_bpdu bpdu; struct net_bridge *br; if (timer_pending(&p->hold_timer)) { p->config_pending = 1; return; } br = p->br; bpdu.topology_change = br->topology_change; bpdu.topology_change_ack = p->topology_change_ack; bpdu.root = br->designated_root; bpdu.root_path_cost = br->root_path_cost; bpdu.bridge_id = br->bridge_id; 
bpdu.port_id = p->port_id; if (br_is_root_bridge(br)) bpdu.message_age = 0; else { struct net_bridge_port *root = br_get_port(br, br->root_port); bpdu.message_age = (jiffies - root->designated_age) + MESSAGE_AGE_INCR; } bpdu.max_age = br->max_age; bpdu.hello_time = br->hello_time; bpdu.forward_delay = br->forward_delay; if (bpdu.message_age < br->max_age) { br_send_config_bpdu(p, &bpdu); p->topology_change_ack = 0; p->config_pending = 0; mod_timer(&p->hold_timer, round_jiffies(jiffies + BR_HOLD_TIME)); } } /* called under bridge lock */ static void br_record_config_information(struct net_bridge_port *p, const struct br_config_bpdu *bpdu) { p->designated_root = bpdu->root; p->designated_cost = bpdu->root_path_cost; p->designated_bridge = bpdu->bridge_id; p->designated_port = bpdu->port_id; p->designated_age = jiffies - bpdu->message_age; mod_timer(&p->message_age_timer, jiffies + (bpdu->max_age - bpdu->message_age)); } /* called under bridge lock */ static void br_record_config_timeout_values(struct net_bridge *br, const struct br_config_bpdu *bpdu) { br->max_age = bpdu->max_age; br->hello_time = bpdu->hello_time; br->forward_delay = bpdu->forward_delay; br->topology_change = bpdu->topology_change; } /* called under bridge lock */ void br_transmit_tcn(struct net_bridge *br) { struct net_bridge_port *p; p = br_get_port(br, br->root_port); if (p) br_send_tcn_bpdu(p); else br_notice(br, "root port %u not found for topology notice\n", br->root_port); } /* called under bridge lock */ static int br_should_become_designated_port(const struct net_bridge_port *p) { struct net_bridge *br; int t; br = p->br; if (br_is_designated_port(p)) return 1; if (memcmp(&p->designated_root, &br->designated_root, 8)) return 1; if (br->root_path_cost < p->designated_cost) return 1; else if (br->root_path_cost > p->designated_cost) return 0; t = memcmp(&br->bridge_id, &p->designated_bridge, 8); if (t < 0) return 1; else if (t > 0) return 0; if (p->port_id < p->designated_port) return 1; 
return 0; } /* called under bridge lock */ static void br_designated_port_selection(struct net_bridge *br) { struct net_bridge_port *p; list_for_each_entry(p, &br->port_list, list) { if (p->state != BR_STATE_DISABLED && br_should_become_designated_port(p)) br_become_designated_port(p); } } /* called under bridge lock */ static int br_supersedes_port_info(const struct net_bridge_port *p, const struct br_config_bpdu *bpdu) { int t; t = memcmp(&bpdu->root, &p->designated_root, 8); if (t < 0) return 1; else if (t > 0) return 0; if (bpdu->root_path_cost < p->designated_cost) return 1; else if (bpdu->root_path_cost > p->designated_cost) return 0; t = memcmp(&bpdu->bridge_id, &p->designated_bridge, 8); if (t < 0) return 1; else if (t > 0) return 0; if (memcmp(&bpdu->bridge_id, &p->br->bridge_id, 8)) return 1; if (bpdu->port_id <= p->designated_port) return 1; return 0; } /* called under bridge lock */ static void br_topology_change_acknowledged(struct net_bridge *br) { br->topology_change_detected = 0; del_timer(&br->tcn_timer); } /* called under bridge lock */ void br_topology_change_detection(struct net_bridge *br) { int isroot = br_is_root_bridge(br); if (br->stp_enabled != BR_KERNEL_STP) return; br_info(br, "topology change detected, %s\n", isroot ? 
"propagating" : "sending tcn bpdu"); if (isroot) { br->topology_change = 1; mod_timer(&br->topology_change_timer, jiffies + br->bridge_forward_delay + br->bridge_max_age); } else if (!br->topology_change_detected) { br_transmit_tcn(br); mod_timer(&br->tcn_timer, jiffies + br->bridge_hello_time); } br->topology_change_detected = 1; } /* called under bridge lock */ void br_config_bpdu_generation(struct net_bridge *br) { struct net_bridge_port *p; list_for_each_entry(p, &br->port_list, list) { if (p->state != BR_STATE_DISABLED && br_is_designated_port(p)) br_transmit_config(p); } } /* called under bridge lock */ static void br_reply(struct net_bridge_port *p) { br_transmit_config(p); } /* called under bridge lock */ void br_configuration_update(struct net_bridge *br) { br_root_selection(br); br_designated_port_selection(br); } /* called under bridge lock */ void br_become_designated_port(struct net_bridge_port *p) { struct net_bridge *br; br = p->br; p->designated_root = br->designated_root; p->designated_cost = br->root_path_cost; p->designated_bridge = br->bridge_id; p->designated_port = p->port_id; } /* called under bridge lock */ static void br_make_blocking(struct net_bridge_port *p) { if (p->state != BR_STATE_DISABLED && p->state != BR_STATE_BLOCKING) { if (p->state == BR_STATE_FORWARDING || p->state == BR_STATE_LEARNING) br_topology_change_detection(p->br); p->state = BR_STATE_BLOCKING; br_log_state(p); br_ifinfo_notify(RTM_NEWLINK, p); del_timer(&p->forward_delay_timer); } } /* called under bridge lock */ static void br_make_forwarding(struct net_bridge_port *p) { struct net_bridge *br = p->br; if (p->state != BR_STATE_BLOCKING) return; if (br->stp_enabled == BR_NO_STP || br->forward_delay == 0) { p->state = BR_STATE_FORWARDING; br_topology_change_detection(br); del_timer(&p->forward_delay_timer); } else if (br->stp_enabled == BR_KERNEL_STP) p->state = BR_STATE_LISTENING; else p->state = BR_STATE_LEARNING; br_multicast_enable_port(p); br_log_state(p); 
br_ifinfo_notify(RTM_NEWLINK, p); if (br->forward_delay != 0) mod_timer(&p->forward_delay_timer, jiffies + br->forward_delay); } /* called under bridge lock */ void br_port_state_selection(struct net_bridge *br) { struct net_bridge_port *p; unsigned int liveports = 0; list_for_each_entry(p, &br->port_list, list) { if (p->state == BR_STATE_DISABLED) continue; /* Don't change port states if userspace is handling STP */ if (br->stp_enabled != BR_USER_STP) { if (p->port_no == br->root_port) { p->config_pending = 0; p->topology_change_ack = 0; br_make_forwarding(p); } else if (br_is_designated_port(p)) { del_timer(&p->message_age_timer); br_make_forwarding(p); } else { p->config_pending = 0; p->topology_change_ack = 0; br_make_blocking(p); } } if (p->state == BR_STATE_FORWARDING) ++liveports; } if (liveports == 0) netif_carrier_off(br->dev); else netif_carrier_on(br->dev); } /* called under bridge lock */ static void br_topology_change_acknowledge(struct net_bridge_port *p) { p->topology_change_ack = 1; br_transmit_config(p); } /* called under bridge lock */ void br_received_config_bpdu(struct net_bridge_port *p, const struct br_config_bpdu *bpdu) { struct net_bridge *br; int was_root; br = p->br; was_root = br_is_root_bridge(br); if (br_supersedes_port_info(p, bpdu)) { br_record_config_information(p, bpdu); br_configuration_update(br); br_port_state_selection(br); if (!br_is_root_bridge(br) && was_root) { del_timer(&br->hello_timer); if (br->topology_change_detected) { del_timer(&br->topology_change_timer); br_transmit_tcn(br); mod_timer(&br->tcn_timer, jiffies + br->bridge_hello_time); } } if (p->port_no == br->root_port) { br_record_config_timeout_values(br, bpdu); br_config_bpdu_generation(br); if (bpdu->topology_change_ack) br_topology_change_acknowledged(br); } } else if (br_is_designated_port(p)) { br_reply(p); } } /* called under bridge lock */ void br_received_tcn_bpdu(struct net_bridge_port *p) { if (br_is_designated_port(p)) { br_info(p->br, "port %u(%s) 
received tcn bpdu\n", (unsigned int) p->port_no, p->dev->name); br_topology_change_detection(p->br); br_topology_change_acknowledge(p); } } /* Change bridge STP parameter */ int br_set_hello_time(struct net_bridge *br, unsigned long val) { unsigned long t = clock_t_to_jiffies(val); if (t < BR_MIN_HELLO_TIME || t > BR_MAX_HELLO_TIME) return -ERANGE; spin_lock_bh(&br->lock); br->bridge_hello_time = t; if (br_is_root_bridge(br)) br->hello_time = br->bridge_hello_time; spin_unlock_bh(&br->lock); return 0; } int br_set_max_age(struct net_bridge *br, unsigned long val) { unsigned long t = clock_t_to_jiffies(val); if (t < BR_MIN_MAX_AGE || t > BR_MAX_MAX_AGE) return -ERANGE; spin_lock_bh(&br->lock); br->bridge_max_age = t; if (br_is_root_bridge(br)) br->max_age = br->bridge_max_age; spin_unlock_bh(&br->lock); return 0; } void __br_set_forward_delay(struct net_bridge *br, unsigned long t) { br->bridge_forward_delay = t; if (br_is_root_bridge(br)) br->forward_delay = br->bridge_forward_delay; } int br_set_forward_delay(struct net_bridge *br, unsigned long val) { unsigned long t = clock_t_to_jiffies(val); int err = -ERANGE; spin_lock_bh(&br->lock); if (br->stp_enabled != BR_NO_STP && (t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY)) goto unlock; __br_set_forward_delay(br, t); err = 0; unlock: spin_unlock_bh(&br->lock); return err; }
gpl-2.0
estiko/android_kernel_lenovo_a706_xtremeuv
drivers/gpu/drm/radeon/radeon_display.c
3248
49104
/* * Copyright 2007-8 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Dave Airlie * Alex Deucher */ #include "drmP.h" #include "radeon_drm.h" #include "radeon.h" #include "atom.h" #include <asm/div64.h> #include "drm_crtc_helper.h" #include "drm_edid.h" static void avivo_crtc_load_lut(struct drm_crtc *crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; int i; DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id); WREG32(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0); WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0); WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0); WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0); WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff); WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff); WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff); WREG32(AVIVO_DC_LUT_RW_SELECT, radeon_crtc->crtc_id); WREG32(AVIVO_DC_LUT_RW_MODE, 0); WREG32(AVIVO_DC_LUT_WRITE_EN_MASK, 0x0000003f); WREG8(AVIVO_DC_LUT_RW_INDEX, 0); for (i = 0; i < 256; i++) { WREG32(AVIVO_DC_LUT_30_COLOR, (radeon_crtc->lut_r[i] << 20) | (radeon_crtc->lut_g[i] << 10) | (radeon_crtc->lut_b[i] << 0)); } WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id); } static void dce4_crtc_load_lut(struct drm_crtc *crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; int i; DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id); WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0); WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0); WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0); WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0); WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff); 
WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff); WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff); WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0); WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007); WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0); for (i = 0; i < 256; i++) { WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset, (radeon_crtc->lut_r[i] << 20) | (radeon_crtc->lut_g[i] << 10) | (radeon_crtc->lut_b[i] << 0)); } } static void dce5_crtc_load_lut(struct drm_crtc *crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; int i; DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id); WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset, (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) | NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS))); WREG32(NI_PRESCALE_GRPH_CONTROL + radeon_crtc->crtc_offset, NI_GRPH_PRESCALE_BYPASS); WREG32(NI_PRESCALE_OVL_CONTROL + radeon_crtc->crtc_offset, NI_OVL_PRESCALE_BYPASS); WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset, (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) | NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT))); WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0); WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0); WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0); WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0); WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff); WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff); WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff); WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0); WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007); WREG32(EVERGREEN_DC_LUT_RW_INDEX + 
radeon_crtc->crtc_offset, 0); for (i = 0; i < 256; i++) { WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset, (radeon_crtc->lut_r[i] << 20) | (radeon_crtc->lut_g[i] << 10) | (radeon_crtc->lut_b[i] << 0)); } WREG32(NI_DEGAMMA_CONTROL + radeon_crtc->crtc_offset, (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) | NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) | NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) | NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS))); WREG32(NI_GAMUT_REMAP_CONTROL + radeon_crtc->crtc_offset, (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) | NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS))); WREG32(NI_REGAMMA_CONTROL + radeon_crtc->crtc_offset, (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) | NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS))); WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset, (NI_OUTPUT_CSC_GRPH_MODE(NI_OUTPUT_CSC_BYPASS) | NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS))); /* XXX match this to the depth of the crtc fmt block, move to modeset? */ WREG32(0x6940 + radeon_crtc->crtc_offset, 0); } static void legacy_crtc_load_lut(struct drm_crtc *crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; int i; uint32_t dac2_cntl; dac2_cntl = RREG32(RADEON_DAC_CNTL2); if (radeon_crtc->crtc_id == 0) dac2_cntl &= (uint32_t)~RADEON_DAC2_PALETTE_ACC_CTL; else dac2_cntl |= RADEON_DAC2_PALETTE_ACC_CTL; WREG32(RADEON_DAC_CNTL2, dac2_cntl); WREG8(RADEON_PALETTE_INDEX, 0); for (i = 0; i < 256; i++) { WREG32(RADEON_PALETTE_30_DATA, (radeon_crtc->lut_r[i] << 20) | (radeon_crtc->lut_g[i] << 10) | (radeon_crtc->lut_b[i] << 0)); } } void radeon_crtc_load_lut(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; if (!crtc->enabled) return; if (ASIC_IS_DCE5(rdev)) dce5_crtc_load_lut(crtc); else if (ASIC_IS_DCE4(rdev)) dce4_crtc_load_lut(crtc); else if (ASIC_IS_AVIVO(rdev)) avivo_crtc_load_lut(crtc); else legacy_crtc_load_lut(crtc); } /** 
Sets the color ramps on behalf of fbcon */ void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); radeon_crtc->lut_r[regno] = red >> 6; radeon_crtc->lut_g[regno] = green >> 6; radeon_crtc->lut_b[regno] = blue >> 6; } /** Gets the color ramps on behalf of fbcon */ void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, int regno) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); *red = radeon_crtc->lut_r[regno] << 6; *green = radeon_crtc->lut_g[regno] << 6; *blue = radeon_crtc->lut_b[regno] << 6; } static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, uint32_t start, uint32_t size) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); int end = (start + size > 256) ? 256 : start + size, i; /* userspace palettes are always correct as is */ for (i = start; i < end; i++) { radeon_crtc->lut_r[i] = red[i] >> 6; radeon_crtc->lut_g[i] = green[i] >> 6; radeon_crtc->lut_b[i] = blue[i] >> 6; } radeon_crtc_load_lut(crtc); } static void radeon_crtc_destroy(struct drm_crtc *crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); drm_crtc_cleanup(crtc); kfree(radeon_crtc); } /* * Handle unpin events outside the interrupt handler proper. 
*/ static void radeon_unpin_work_func(struct work_struct *__work) { struct radeon_unpin_work *work = container_of(__work, struct radeon_unpin_work, work); int r; /* unpin of the old buffer */ r = radeon_bo_reserve(work->old_rbo, false); if (likely(r == 0)) { r = radeon_bo_unpin(work->old_rbo); if (unlikely(r != 0)) { DRM_ERROR("failed to unpin buffer after flip\n"); } radeon_bo_unreserve(work->old_rbo); } else DRM_ERROR("failed to reserve buffer after flip\n"); drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); kfree(work); } void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; struct radeon_unpin_work *work; struct drm_pending_vblank_event *e; struct timeval now; unsigned long flags; u32 update_pending; int vpos, hpos; spin_lock_irqsave(&rdev->ddev->event_lock, flags); work = radeon_crtc->unpin_work; if (work == NULL || (work->fence && !radeon_fence_signaled(work->fence))) { spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); return; } /* New pageflip, or just completion of a previous one? */ if (!radeon_crtc->deferred_flip_completion) { /* do the flip (mmio) */ update_pending = radeon_page_flip(rdev, crtc_id, work->new_crtc_base); } else { /* This is just a completion of a flip queued in crtc * at last invocation. Make sure we go directly to * completion routine. */ update_pending = 0; radeon_crtc->deferred_flip_completion = 0; } /* Has the pageflip already completed in crtc, or is it certain * to complete in this vblank? */ if (update_pending && (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, &vpos, &hpos)) && ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) || (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) { /* crtc didn't flip in this target vblank interval, * but flip is pending in crtc. 
Based on the current * scanout position we know that the current frame is * (nearly) complete and the flip will (likely) * complete before the start of the next frame. */ update_pending = 0; } if (update_pending) { /* crtc didn't flip in this target vblank interval, * but flip is pending in crtc. It will complete it * in next vblank interval, so complete the flip at * next vblank irq. */ radeon_crtc->deferred_flip_completion = 1; spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); return; } /* Pageflip (will be) certainly completed in this vblank. Clean up. */ radeon_crtc->unpin_work = NULL; /* wakeup userspace */ if (work->event) { e = work->event; e->event.sequence = drm_vblank_count_and_time(rdev->ddev, crtc_id, &now); e->event.tv_sec = now.tv_sec; e->event.tv_usec = now.tv_usec; list_add_tail(&e->base.link, &e->base.file_priv->event_list); wake_up_interruptible(&e->base.file_priv->event_wait); } spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id); radeon_fence_unref(&work->fence); radeon_post_page_flip(work->rdev, work->crtc_id); schedule_work(&work->work); } static int radeon_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event) { struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct radeon_framebuffer *old_radeon_fb; struct radeon_framebuffer *new_radeon_fb; struct drm_gem_object *obj; struct radeon_bo *rbo; struct radeon_unpin_work *work; unsigned long flags; u32 tiling_flags, pitch_pixels; u64 base; int r; work = kzalloc(sizeof *work, GFP_KERNEL); if (work == NULL) return -ENOMEM; work->event = event; work->rdev = rdev; work->crtc_id = radeon_crtc->crtc_id; old_radeon_fb = to_radeon_framebuffer(crtc->fb); new_radeon_fb = to_radeon_framebuffer(fb); /* schedule unpin of the old buffer */ obj = old_radeon_fb->obj; /* take a reference to the old object */ 
drm_gem_object_reference(obj); rbo = gem_to_radeon_bo(obj); work->old_rbo = rbo; obj = new_radeon_fb->obj; rbo = gem_to_radeon_bo(obj); if (rbo->tbo.sync_obj) work->fence = radeon_fence_ref(rbo->tbo.sync_obj); INIT_WORK(&work->work, radeon_unpin_work_func); /* We borrow the event spin lock for protecting unpin_work */ spin_lock_irqsave(&dev->event_lock, flags); if (radeon_crtc->unpin_work) { DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); r = -EBUSY; goto unlock_free; } radeon_crtc->unpin_work = work; radeon_crtc->deferred_flip_completion = 0; spin_unlock_irqrestore(&dev->event_lock, flags); /* pin the new buffer */ DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n", work->old_rbo, rbo); r = radeon_bo_reserve(rbo, false); if (unlikely(r != 0)) { DRM_ERROR("failed to reserve new rbo buffer before flip\n"); goto pflip_cleanup; } /* Only 27 bit offset for legacy CRTC */ r = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM, ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base); if (unlikely(r != 0)) { radeon_bo_unreserve(rbo); r = -EINVAL; DRM_ERROR("failed to pin new rbo buffer before flip\n"); goto pflip_cleanup; } radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); radeon_bo_unreserve(rbo); if (!ASIC_IS_AVIVO(rdev)) { /* crtc offset is from display base addr not FB location */ base -= radeon_crtc->legacy_display_base_addr; pitch_pixels = fb->pitches[0] / (fb->bits_per_pixel / 8); if (tiling_flags & RADEON_TILING_MACRO) { if (ASIC_IS_R300(rdev)) { base &= ~0x7ff; } else { int byteshift = fb->bits_per_pixel >> 4; int tile_addr = (((crtc->y >> 3) * pitch_pixels + crtc->x) >> (8 - byteshift)) << 11; base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8); } } else { int offset = crtc->y * pitch_pixels + crtc->x; switch (fb->bits_per_pixel) { case 8: default: offset *= 1; break; case 15: case 16: offset *= 2; break; case 24: offset *= 3; break; case 32: offset *= 4; break; } base += offset; } base &= ~7; } spin_lock_irqsave(&dev->event_lock, 
flags); work->new_crtc_base = base; spin_unlock_irqrestore(&dev->event_lock, flags); /* update crtc fb */ crtc->fb = fb; r = drm_vblank_get(dev, radeon_crtc->crtc_id); if (r) { DRM_ERROR("failed to get vblank before flip\n"); goto pflip_cleanup1; } /* set the proper interrupt */ radeon_pre_page_flip(rdev, radeon_crtc->crtc_id); return 0; pflip_cleanup1: if (unlikely(radeon_bo_reserve(rbo, false) != 0)) { DRM_ERROR("failed to reserve new rbo in error path\n"); goto pflip_cleanup; } if (unlikely(radeon_bo_unpin(rbo) != 0)) { DRM_ERROR("failed to unpin new rbo in error path\n"); } radeon_bo_unreserve(rbo); pflip_cleanup: spin_lock_irqsave(&dev->event_lock, flags); radeon_crtc->unpin_work = NULL; unlock_free: spin_unlock_irqrestore(&dev->event_lock, flags); drm_gem_object_unreference_unlocked(old_radeon_fb->obj); radeon_fence_unref(&work->fence); kfree(work); return r; } static const struct drm_crtc_funcs radeon_crtc_funcs = { .cursor_set = radeon_crtc_cursor_set, .cursor_move = radeon_crtc_cursor_move, .gamma_set = radeon_crtc_gamma_set, .set_config = drm_crtc_helper_set_config, .destroy = radeon_crtc_destroy, .page_flip = radeon_crtc_page_flip, }; static void radeon_crtc_init(struct drm_device *dev, int index) { struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc; int i; radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); if (radeon_crtc == NULL) return; drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs); drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256); radeon_crtc->crtc_id = index; rdev->mode_info.crtcs[index] = radeon_crtc; #if 0 radeon_crtc->mode_set.crtc = &radeon_crtc->base; radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1); radeon_crtc->mode_set.num_connectors = 0; #endif for (i = 0; i < 256; i++) { radeon_crtc->lut_r[i] = i << 2; radeon_crtc->lut_g[i] = i << 2; radeon_crtc->lut_b[i] = i << 2; } if (rdev->is_atom_bios && 
(ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)) radeon_atombios_init_crtc(dev, radeon_crtc); else radeon_legacy_init_crtc(dev, radeon_crtc); } static const char *encoder_names[37] = { "NONE", "INTERNAL_LVDS", "INTERNAL_TMDS1", "INTERNAL_TMDS2", "INTERNAL_DAC1", "INTERNAL_DAC2", "INTERNAL_SDVOA", "INTERNAL_SDVOB", "SI170B", "CH7303", "CH7301", "INTERNAL_DVO1", "EXTERNAL_SDVOA", "EXTERNAL_SDVOB", "TITFP513", "INTERNAL_LVTM1", "VT1623", "HDMI_SI1930", "HDMI_INTERNAL", "INTERNAL_KLDSCP_TMDS1", "INTERNAL_KLDSCP_DVO1", "INTERNAL_KLDSCP_DAC1", "INTERNAL_KLDSCP_DAC2", "SI178", "MVPU_FPGA", "INTERNAL_DDI", "VT1625", "HDMI_SI1932", "DP_AN9801", "DP_DP501", "INTERNAL_UNIPHY", "INTERNAL_KLDSCP_LVTMA", "INTERNAL_UNIPHY1", "INTERNAL_UNIPHY2", "NUTMEG", "TRAVIS", "INTERNAL_VCE" }; static const char *connector_names[15] = { "Unknown", "VGA", "DVI-I", "DVI-D", "DVI-A", "Composite", "S-video", "LVDS", "Component", "DIN", "DisplayPort", "HDMI-A", "HDMI-B", "TV", "eDP", }; static const char *hpd_names[6] = { "HPD1", "HPD2", "HPD3", "HPD4", "HPD5", "HPD6", }; static void radeon_print_display_setup(struct drm_device *dev) { struct drm_connector *connector; struct radeon_connector *radeon_connector; struct drm_encoder *encoder; struct radeon_encoder *radeon_encoder; uint32_t devices; int i = 0; DRM_INFO("Radeon Display Connectors\n"); list_for_each_entry(connector, &dev->mode_config.connector_list, head) { radeon_connector = to_radeon_connector(connector); DRM_INFO("Connector %d:\n", i); DRM_INFO(" %s\n", connector_names[connector->connector_type]); if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]); if (radeon_connector->ddc_bus) { DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", radeon_connector->ddc_bus->rec.mask_clk_reg, radeon_connector->ddc_bus->rec.mask_data_reg, radeon_connector->ddc_bus->rec.a_clk_reg, radeon_connector->ddc_bus->rec.a_data_reg, radeon_connector->ddc_bus->rec.en_clk_reg, 
radeon_connector->ddc_bus->rec.en_data_reg, radeon_connector->ddc_bus->rec.y_clk_reg, radeon_connector->ddc_bus->rec.y_data_reg); if (radeon_connector->router.ddc_valid) DRM_INFO(" DDC Router 0x%x/0x%x\n", radeon_connector->router.ddc_mux_control_pin, radeon_connector->router.ddc_mux_state); if (radeon_connector->router.cd_valid) DRM_INFO(" Clock/Data Router 0x%x/0x%x\n", radeon_connector->router.cd_mux_control_pin, radeon_connector->router.cd_mux_state); } else { if (connector->connector_type == DRM_MODE_CONNECTOR_VGA || connector->connector_type == DRM_MODE_CONNECTOR_DVII || connector->connector_type == DRM_MODE_CONNECTOR_DVID || connector->connector_type == DRM_MODE_CONNECTOR_DVIA || connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n"); } DRM_INFO(" Encoders:\n"); list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { radeon_encoder = to_radeon_encoder(encoder); devices = radeon_encoder->devices & radeon_connector->devices; if (devices) { if (devices & ATOM_DEVICE_CRT1_SUPPORT) DRM_INFO(" CRT1: %s\n", encoder_names[radeon_encoder->encoder_id]); if (devices & ATOM_DEVICE_CRT2_SUPPORT) DRM_INFO(" CRT2: %s\n", encoder_names[radeon_encoder->encoder_id]); if (devices & ATOM_DEVICE_LCD1_SUPPORT) DRM_INFO(" LCD1: %s\n", encoder_names[radeon_encoder->encoder_id]); if (devices & ATOM_DEVICE_DFP1_SUPPORT) DRM_INFO(" DFP1: %s\n", encoder_names[radeon_encoder->encoder_id]); if (devices & ATOM_DEVICE_DFP2_SUPPORT) DRM_INFO(" DFP2: %s\n", encoder_names[radeon_encoder->encoder_id]); if (devices & ATOM_DEVICE_DFP3_SUPPORT) DRM_INFO(" DFP3: %s\n", encoder_names[radeon_encoder->encoder_id]); if (devices & ATOM_DEVICE_DFP4_SUPPORT) DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]); if (devices & ATOM_DEVICE_DFP5_SUPPORT) DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]); if 
(devices & ATOM_DEVICE_DFP6_SUPPORT) DRM_INFO(" DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]); if (devices & ATOM_DEVICE_TV1_SUPPORT) DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]); if (devices & ATOM_DEVICE_CV_SUPPORT) DRM_INFO(" CV: %s\n", encoder_names[radeon_encoder->encoder_id]); } } i++; } } static bool radeon_setup_enc_conn(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; bool ret = false; if (rdev->bios) { if (rdev->is_atom_bios) { ret = radeon_get_atom_connector_info_from_supported_devices_table(dev); if (ret == false) ret = radeon_get_atom_connector_info_from_object_table(dev); } else { ret = radeon_get_legacy_connector_info_from_bios(dev); if (ret == false) ret = radeon_get_legacy_connector_info_from_table(dev); } } else { if (!ASIC_IS_AVIVO(rdev)) ret = radeon_get_legacy_connector_info_from_table(dev); } if (ret) { radeon_setup_encoder_clones(dev); radeon_print_display_setup(dev); } return ret; } int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) { struct drm_device *dev = radeon_connector->base.dev; struct radeon_device *rdev = dev->dev_private; int ret = 0; /* on hw with routers, select right port */ if (radeon_connector->router.ddc_valid) radeon_router_select_ddc_port(radeon_connector); if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) || (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) != ENCODER_OBJECT_ID_NONE)) { struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus) radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter); else if (radeon_connector->ddc_bus && !radeon_connector->edid) radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); } 
else { if (radeon_connector->ddc_bus && !radeon_connector->edid) radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); } if (!radeon_connector->edid) { if (rdev->is_atom_bios) { /* some laptops provide a hardcoded edid in rom for LCDs */ if (((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_LVDS) || (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP))) radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev); } else /* some servers provide a hardcoded edid in rom for KVMs */ radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev); } if (radeon_connector->edid) { drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid); return ret; } drm_mode_connector_update_edid_property(&radeon_connector->base, NULL); return 0; } /* avivo */ static void avivo_get_fb_div(struct radeon_pll *pll, u32 target_clock, u32 post_div, u32 ref_div, u32 *fb_div, u32 *frac_fb_div) { u32 tmp = post_div * ref_div; tmp *= target_clock; *fb_div = tmp / pll->reference_freq; *frac_fb_div = tmp % pll->reference_freq; if (*fb_div > pll->max_feedback_div) *fb_div = pll->max_feedback_div; else if (*fb_div < pll->min_feedback_div) *fb_div = pll->min_feedback_div; } static u32 avivo_get_post_div(struct radeon_pll *pll, u32 target_clock) { u32 vco, post_div, tmp; if (pll->flags & RADEON_PLL_USE_POST_DIV) return pll->post_div; if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) { if (pll->flags & RADEON_PLL_IS_LCD) vco = pll->lcd_pll_out_min; else vco = pll->pll_out_min; } else { if (pll->flags & RADEON_PLL_IS_LCD) vco = pll->lcd_pll_out_max; else vco = pll->pll_out_max; } post_div = vco / target_clock; tmp = vco % target_clock; if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) { if (tmp) post_div++; } else { if (!tmp) post_div--; } if (post_div > pll->max_post_div) post_div = pll->max_post_div; else if (post_div < 
pll->min_post_div) post_div = pll->min_post_div; return post_div; } #define MAX_TOLERANCE 10 void radeon_compute_pll_avivo(struct radeon_pll *pll, u32 freq, u32 *dot_clock_p, u32 *fb_div_p, u32 *frac_fb_div_p, u32 *ref_div_p, u32 *post_div_p) { u32 target_clock = freq / 10; u32 post_div = avivo_get_post_div(pll, target_clock); u32 ref_div = pll->min_ref_div; u32 fb_div = 0, frac_fb_div = 0, tmp; if (pll->flags & RADEON_PLL_USE_REF_DIV) ref_div = pll->reference_div; if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { avivo_get_fb_div(pll, target_clock, post_div, ref_div, &fb_div, &frac_fb_div); frac_fb_div = (100 * frac_fb_div) / pll->reference_freq; if (frac_fb_div >= 5) { frac_fb_div -= 5; frac_fb_div = frac_fb_div / 10; frac_fb_div++; } if (frac_fb_div >= 10) { fb_div++; frac_fb_div = 0; } } else { while (ref_div <= pll->max_ref_div) { avivo_get_fb_div(pll, target_clock, post_div, ref_div, &fb_div, &frac_fb_div); if (frac_fb_div >= (pll->reference_freq / 2)) fb_div++; frac_fb_div = 0; tmp = (pll->reference_freq * fb_div) / (post_div * ref_div); tmp = (tmp * 10000) / target_clock; if (tmp > (10000 + MAX_TOLERANCE)) ref_div++; else if (tmp >= (10000 - MAX_TOLERANCE)) break; else ref_div++; } } *dot_clock_p = ((pll->reference_freq * fb_div * 10) + (pll->reference_freq * frac_fb_div)) / (ref_div * post_div * 10); *fb_div_p = fb_div; *frac_fb_div_p = frac_fb_div; *ref_div_p = ref_div; *post_div_p = post_div; DRM_DEBUG_KMS("%d, pll dividers - fb: %d.%d ref: %d, post %d\n", *dot_clock_p, fb_div, frac_fb_div, ref_div, post_div); } /* pre-avivo */ static inline uint32_t radeon_div(uint64_t n, uint32_t d) { uint64_t mod; n += d / 2; mod = do_div(n, d); return n; } void radeon_compute_pll_legacy(struct radeon_pll *pll, uint64_t freq, uint32_t *dot_clock_p, uint32_t *fb_div_p, uint32_t *frac_fb_div_p, uint32_t *ref_div_p, uint32_t *post_div_p) { uint32_t min_ref_div = pll->min_ref_div; uint32_t max_ref_div = pll->max_ref_div; uint32_t min_post_div = pll->min_post_div; uint32_t 
max_post_div = pll->max_post_div; uint32_t min_fractional_feed_div = 0; uint32_t max_fractional_feed_div = 0; uint32_t best_vco = pll->best_vco; uint32_t best_post_div = 1; uint32_t best_ref_div = 1; uint32_t best_feedback_div = 1; uint32_t best_frac_feedback_div = 0; uint32_t best_freq = -1; uint32_t best_error = 0xffffffff; uint32_t best_vco_diff = 1; uint32_t post_div; u32 pll_out_min, pll_out_max; DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div); freq = freq * 1000; if (pll->flags & RADEON_PLL_IS_LCD) { pll_out_min = pll->lcd_pll_out_min; pll_out_max = pll->lcd_pll_out_max; } else { pll_out_min = pll->pll_out_min; pll_out_max = pll->pll_out_max; } if (pll_out_min > 64800) pll_out_min = 64800; if (pll->flags & RADEON_PLL_USE_REF_DIV) min_ref_div = max_ref_div = pll->reference_div; else { while (min_ref_div < max_ref_div-1) { uint32_t mid = (min_ref_div + max_ref_div) / 2; uint32_t pll_in = pll->reference_freq / mid; if (pll_in < pll->pll_in_min) max_ref_div = mid; else if (pll_in > pll->pll_in_max) min_ref_div = mid; else break; } } if (pll->flags & RADEON_PLL_USE_POST_DIV) min_post_div = max_post_div = pll->post_div; if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { min_fractional_feed_div = pll->min_frac_feedback_div; max_fractional_feed_div = pll->max_frac_feedback_div; } for (post_div = max_post_div; post_div >= min_post_div; --post_div) { uint32_t ref_div; if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) continue; /* legacy radeons only have a few post_divs */ if (pll->flags & RADEON_PLL_LEGACY) { if ((post_div == 5) || (post_div == 7) || (post_div == 9) || (post_div == 10) || (post_div == 11) || (post_div == 13) || (post_div == 14) || (post_div == 15)) continue; } for (ref_div = min_ref_div; ref_div <= max_ref_div; ++ref_div) { uint32_t feedback_div, current_freq = 0, error, vco_diff; uint32_t pll_in = pll->reference_freq / ref_div; uint32_t min_feed_div = pll->min_feedback_div; uint32_t max_feed_div = 
pll->max_feedback_div + 1; if (pll_in < pll->pll_in_min || pll_in > pll->pll_in_max) continue; while (min_feed_div < max_feed_div) { uint32_t vco; uint32_t min_frac_feed_div = min_fractional_feed_div; uint32_t max_frac_feed_div = max_fractional_feed_div + 1; uint32_t frac_feedback_div; uint64_t tmp; feedback_div = (min_feed_div + max_feed_div) / 2; tmp = (uint64_t)pll->reference_freq * feedback_div; vco = radeon_div(tmp, ref_div); if (vco < pll_out_min) { min_feed_div = feedback_div + 1; continue; } else if (vco > pll_out_max) { max_feed_div = feedback_div; continue; } while (min_frac_feed_div < max_frac_feed_div) { frac_feedback_div = (min_frac_feed_div + max_frac_feed_div) / 2; tmp = (uint64_t)pll->reference_freq * 10000 * feedback_div; tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div; current_freq = radeon_div(tmp, ref_div * post_div); if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) { if (freq < current_freq) error = 0xffffffff; else error = freq - current_freq; } else error = abs(current_freq - freq); vco_diff = abs(vco - best_vco); if ((best_vco == 0 && error < best_error) || (best_vco != 0 && ((best_error > 100 && error < best_error - 100) || (abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) { best_post_div = post_div; best_ref_div = ref_div; best_feedback_div = feedback_div; best_frac_feedback_div = frac_feedback_div; best_freq = current_freq; best_error = error; best_vco_diff = vco_diff; } else if (current_freq == freq) { if (best_freq == -1) { best_post_div = post_div; best_ref_div = ref_div; best_feedback_div = feedback_div; best_frac_feedback_div = frac_feedback_div; best_freq = current_freq; best_error = error; best_vco_diff = vco_diff; } else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) || ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) || ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) || ((pll->flags & 
RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) || ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) || ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) { best_post_div = post_div; best_ref_div = ref_div; best_feedback_div = feedback_div; best_frac_feedback_div = frac_feedback_div; best_freq = current_freq; best_error = error; best_vco_diff = vco_diff; } } if (current_freq < freq) min_frac_feed_div = frac_feedback_div + 1; else max_frac_feed_div = frac_feedback_div; } if (current_freq < freq) min_feed_div = feedback_div + 1; else max_feed_div = feedback_div; } } } *dot_clock_p = best_freq / 10000; *fb_div_p = best_feedback_div; *frac_fb_div_p = best_frac_feedback_div; *ref_div_p = best_ref_div; *post_div_p = best_post_div; DRM_DEBUG_KMS("%lld %d, pll dividers - fb: %d.%d ref: %d, post %d\n", (long long)freq, best_freq / 1000, best_feedback_div, best_frac_feedback_div, best_ref_div, best_post_div); } static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); if (radeon_fb->obj) { drm_gem_object_unreference_unlocked(radeon_fb->obj); } drm_framebuffer_cleanup(fb); kfree(radeon_fb); } static int radeon_user_framebuffer_create_handle(struct drm_framebuffer *fb, struct drm_file *file_priv, unsigned int *handle) { struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); return drm_gem_handle_create(file_priv, radeon_fb->obj, handle); } static const struct drm_framebuffer_funcs radeon_fb_funcs = { .destroy = radeon_user_framebuffer_destroy, .create_handle = radeon_user_framebuffer_create_handle, }; int radeon_framebuffer_init(struct drm_device *dev, struct radeon_framebuffer *rfb, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { int ret; rfb->obj = obj; ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs); if (ret) { rfb->obj = NULL; return ret; } 
drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd); return 0; } static struct drm_framebuffer * radeon_user_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd) { struct drm_gem_object *obj; struct radeon_framebuffer *radeon_fb; int ret; obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); if (obj == NULL) { dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, " "can't create framebuffer\n", mode_cmd->handles[0]); return ERR_PTR(-ENOENT); } radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); if (radeon_fb == NULL) return ERR_PTR(-ENOMEM); ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj); if (ret) { kfree(radeon_fb); drm_gem_object_unreference_unlocked(obj); return NULL; } return &radeon_fb->base; } static void radeon_output_poll_changed(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; radeon_fb_output_poll_changed(rdev); } static const struct drm_mode_config_funcs radeon_mode_funcs = { .fb_create = radeon_user_framebuffer_create, .output_poll_changed = radeon_output_poll_changed }; static struct drm_prop_enum_list radeon_tmds_pll_enum_list[] = { { 0, "driver" }, { 1, "bios" }, }; static struct drm_prop_enum_list radeon_tv_std_enum_list[] = { { TV_STD_NTSC, "ntsc" }, { TV_STD_PAL, "pal" }, { TV_STD_PAL_M, "pal-m" }, { TV_STD_PAL_60, "pal-60" }, { TV_STD_NTSC_J, "ntsc-j" }, { TV_STD_SCART_PAL, "scart-pal" }, { TV_STD_PAL_CN, "pal-cn" }, { TV_STD_SECAM, "secam" }, }; static struct drm_prop_enum_list radeon_underscan_enum_list[] = { { UNDERSCAN_OFF, "off" }, { UNDERSCAN_ON, "on" }, { UNDERSCAN_AUTO, "auto" }, }; static int radeon_modeset_create_props(struct radeon_device *rdev) { int sz; if (rdev->is_atom_bios) { rdev->mode_info.coherent_mode_property = drm_property_create_range(rdev->ddev, 0 , "coherent", 0, 1); if (!rdev->mode_info.coherent_mode_property) return -ENOMEM; } if (!ASIC_IS_AVIVO(rdev)) { sz = ARRAY_SIZE(radeon_tmds_pll_enum_list); 
rdev->mode_info.tmds_pll_property = drm_property_create_enum(rdev->ddev, 0, "tmds_pll", radeon_tmds_pll_enum_list, sz); } rdev->mode_info.load_detect_property = drm_property_create_range(rdev->ddev, 0, "load detection", 0, 1); if (!rdev->mode_info.load_detect_property) return -ENOMEM; drm_mode_create_scaling_mode_property(rdev->ddev); sz = ARRAY_SIZE(radeon_tv_std_enum_list); rdev->mode_info.tv_std_property = drm_property_create_enum(rdev->ddev, 0, "tv standard", radeon_tv_std_enum_list, sz); sz = ARRAY_SIZE(radeon_underscan_enum_list); rdev->mode_info.underscan_property = drm_property_create_enum(rdev->ddev, 0, "underscan", radeon_underscan_enum_list, sz); rdev->mode_info.underscan_hborder_property = drm_property_create_range(rdev->ddev, 0, "underscan hborder", 0, 128); if (!rdev->mode_info.underscan_hborder_property) return -ENOMEM; rdev->mode_info.underscan_vborder_property = drm_property_create_range(rdev->ddev, 0, "underscan vborder", 0, 128); if (!rdev->mode_info.underscan_vborder_property) return -ENOMEM; return 0; } void radeon_update_display_priority(struct radeon_device *rdev) { /* adjustment options for the display watermarks */ if ((radeon_disp_priority == 0) || (radeon_disp_priority > 2)) { /* set display priority to high for r3xx, rv515 chips * this avoids flickering due to underflow to the * display controllers during heavy acceleration. * Don't force high on rs4xx igp chips as it seems to * affect the sound card. See kernel bug 15982. 
*/ if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) && !(rdev->flags & RADEON_IS_IGP)) rdev->disp_priority = 2; else rdev->disp_priority = 0; } else rdev->disp_priority = radeon_disp_priority; } int radeon_modeset_init(struct radeon_device *rdev) { int i; int ret; drm_mode_config_init(rdev->ddev); rdev->mode_info.mode_config_initialized = true; rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs; if (ASIC_IS_DCE5(rdev)) { rdev->ddev->mode_config.max_width = 16384; rdev->ddev->mode_config.max_height = 16384; } else if (ASIC_IS_AVIVO(rdev)) { rdev->ddev->mode_config.max_width = 8192; rdev->ddev->mode_config.max_height = 8192; } else { rdev->ddev->mode_config.max_width = 4096; rdev->ddev->mode_config.max_height = 4096; } rdev->ddev->mode_config.preferred_depth = 24; rdev->ddev->mode_config.prefer_shadow = 1; rdev->ddev->mode_config.fb_base = rdev->mc.aper_base; ret = radeon_modeset_create_props(rdev); if (ret) { return ret; } /* init i2c buses */ radeon_i2c_init(rdev); /* check combios for a valid hardcoded EDID - Sun servers */ if (!rdev->is_atom_bios) { /* check for hardcoded EDID in BIOS */ radeon_combios_check_hardcoded_edid(rdev); } /* allocate crtcs */ for (i = 0; i < rdev->num_crtc; i++) { radeon_crtc_init(rdev->ddev, i); } /* okay we should have all the bios connectors */ ret = radeon_setup_enc_conn(rdev->ddev); if (!ret) { return ret; } /* init dig PHYs, disp eng pll */ if (rdev->is_atom_bios) { radeon_atom_encoder_init(rdev); radeon_atom_disp_eng_pll_init(rdev); } /* initialize hpd */ radeon_hpd_init(rdev); /* Initialize power management */ radeon_pm_init(rdev); radeon_fbdev_init(rdev); drm_kms_helper_poll_init(rdev->ddev); return 0; } void radeon_modeset_fini(struct radeon_device *rdev) { radeon_fbdev_fini(rdev); kfree(rdev->mode_info.bios_hardcoded_edid); radeon_pm_fini(rdev); if (rdev->mode_info.mode_config_initialized) { drm_kms_helper_poll_fini(rdev->ddev); radeon_hpd_fini(rdev); drm_mode_config_cleanup(rdev->ddev); 
rdev->mode_info.mode_config_initialized = false; } /* free i2c buses */ radeon_i2c_fini(rdev); } static bool is_hdtv_mode(struct drm_display_mode *mode) { /* try and guess if this is a tv or a monitor */ if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */ (mode->vdisplay == 576) || /* 576p */ (mode->vdisplay == 720) || /* 720p */ (mode->vdisplay == 1080)) /* 1080p */ return true; else return false; } bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; struct drm_encoder *encoder; struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct radeon_encoder *radeon_encoder; struct drm_connector *connector; struct radeon_connector *radeon_connector; bool first = true; u32 src_v = 1, dst_v = 1; u32 src_h = 1, dst_h = 1; radeon_crtc->h_border = 0; radeon_crtc->v_border = 0; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc != crtc) continue; radeon_encoder = to_radeon_encoder(encoder); connector = radeon_get_connector_for_encoder(encoder); radeon_connector = to_radeon_connector(connector); if (first) { /* set scaling */ if (radeon_encoder->rmx_type == RMX_OFF) radeon_crtc->rmx_type = RMX_OFF; else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay || mode->vdisplay < radeon_encoder->native_mode.vdisplay) radeon_crtc->rmx_type = radeon_encoder->rmx_type; else radeon_crtc->rmx_type = RMX_OFF; /* copy native mode */ memcpy(&radeon_crtc->native_mode, &radeon_encoder->native_mode, sizeof(struct drm_display_mode)); src_v = crtc->mode.vdisplay; dst_v = radeon_crtc->native_mode.vdisplay; src_h = crtc->mode.hdisplay; dst_h = radeon_crtc->native_mode.hdisplay; /* fix up for overscan on hdmi */ if (ASIC_IS_AVIVO(rdev) && (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) && ((radeon_encoder->underscan_type == UNDERSCAN_ON) || ((radeon_encoder->underscan_type == 
UNDERSCAN_AUTO) && drm_detect_hdmi_monitor(radeon_connector->edid) && is_hdtv_mode(mode)))) { if (radeon_encoder->underscan_hborder != 0) radeon_crtc->h_border = radeon_encoder->underscan_hborder; else radeon_crtc->h_border = (mode->hdisplay >> 5) + 16; if (radeon_encoder->underscan_vborder != 0) radeon_crtc->v_border = radeon_encoder->underscan_vborder; else radeon_crtc->v_border = (mode->vdisplay >> 5) + 16; radeon_crtc->rmx_type = RMX_FULL; src_v = crtc->mode.vdisplay; dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2); src_h = crtc->mode.hdisplay; dst_h = crtc->mode.hdisplay - (radeon_crtc->h_border * 2); } first = false; } else { if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) { /* WARNING: Right now this can't happen but * in the future we need to check that scaling * are consistent across different encoder * (ie all encoder can work with the same * scaling). */ DRM_ERROR("Scaling not consistent across encoder.\n"); return false; } } } if (radeon_crtc->rmx_type != RMX_OFF) { fixed20_12 a, b; a.full = dfixed_const(src_v); b.full = dfixed_const(dst_v); radeon_crtc->vsc.full = dfixed_div(a, b); a.full = dfixed_const(src_h); b.full = dfixed_const(dst_h); radeon_crtc->hsc.full = dfixed_div(a, b); } else { radeon_crtc->vsc.full = dfixed_const(1); radeon_crtc->hsc.full = dfixed_const(1); } return true; } /* * Retrieve current video scanout position of crtc on a given gpu. * * \param dev Device to query. * \param crtc Crtc to query. * \param *vpos Location where vertical scanout position should be stored. * \param *hpos Location where horizontal scanout position should go. * * Returns vpos as a positive number while in active scanout area. * Returns vpos as a negative number inside vblank, counting the number * of scanlines to go until end of vblank, e.g., -1 means "one scanline * until start of active scanout / end of vblank." * * \return Flags, or'ed together as follows: * * DRM_SCANOUTPOS_VALID = Query successful. 
* DRM_SCANOUTPOS_INVBL = Inside vblank. * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of * this flag means that returned position may be offset by a constant but * unknown small number of scanlines wrt. real scanout position. * */ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos) { u32 stat_crtc = 0, vbl = 0, position = 0; int vbl_start, vbl_end, vtotal, ret = 0; bool in_vbl = true; struct radeon_device *rdev = dev->dev_private; if (ASIC_IS_DCE4(rdev)) { if (crtc == 0) { vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + EVERGREEN_CRTC0_REGISTER_OFFSET); position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + EVERGREEN_CRTC0_REGISTER_OFFSET); ret |= DRM_SCANOUTPOS_VALID; } if (crtc == 1) { vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + EVERGREEN_CRTC1_REGISTER_OFFSET); position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + EVERGREEN_CRTC1_REGISTER_OFFSET); ret |= DRM_SCANOUTPOS_VALID; } if (crtc == 2) { vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + EVERGREEN_CRTC2_REGISTER_OFFSET); position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + EVERGREEN_CRTC2_REGISTER_OFFSET); ret |= DRM_SCANOUTPOS_VALID; } if (crtc == 3) { vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + EVERGREEN_CRTC3_REGISTER_OFFSET); position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + EVERGREEN_CRTC3_REGISTER_OFFSET); ret |= DRM_SCANOUTPOS_VALID; } if (crtc == 4) { vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + EVERGREEN_CRTC4_REGISTER_OFFSET); position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + EVERGREEN_CRTC4_REGISTER_OFFSET); ret |= DRM_SCANOUTPOS_VALID; } if (crtc == 5) { vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + EVERGREEN_CRTC5_REGISTER_OFFSET); position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + EVERGREEN_CRTC5_REGISTER_OFFSET); ret |= DRM_SCANOUTPOS_VALID; } } else if (ASIC_IS_AVIVO(rdev)) { if (crtc == 0) { vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END); position = RREG32(AVIVO_D1CRTC_STATUS_POSITION); ret |= DRM_SCANOUTPOS_VALID; } if (crtc == 1) { 
vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END); position = RREG32(AVIVO_D2CRTC_STATUS_POSITION); ret |= DRM_SCANOUTPOS_VALID; } } else { /* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */ if (crtc == 0) { /* Assume vbl_end == 0, get vbl_start from * upper 16 bits. */ vbl = (RREG32(RADEON_CRTC_V_TOTAL_DISP) & RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT; /* Only retrieve vpos from upper 16 bits, set hpos == 0. */ position = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; stat_crtc = RREG32(RADEON_CRTC_STATUS); if (!(stat_crtc & 1)) in_vbl = false; ret |= DRM_SCANOUTPOS_VALID; } if (crtc == 1) { vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) & RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT; position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; stat_crtc = RREG32(RADEON_CRTC2_STATUS); if (!(stat_crtc & 1)) in_vbl = false; ret |= DRM_SCANOUTPOS_VALID; } } /* Decode into vertical and horizontal scanout position. */ *vpos = position & 0x1fff; *hpos = (position >> 16) & 0x1fff; /* Valid vblank area boundaries from gpu retrieved? */ if (vbl > 0) { /* Yes: Decode. */ ret |= DRM_SCANOUTPOS_ACCURATE; vbl_start = vbl & 0x1fff; vbl_end = (vbl >> 16) & 0x1fff; } else { /* No: Fake something reasonable which gives at least ok results. */ vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay; vbl_end = 0; } /* Test scanout position against vblank region. */ if ((*vpos < vbl_start) && (*vpos >= vbl_end)) in_vbl = false; /* Check if inside vblank area and apply corrective offsets: * vpos will then be >=0 in video scanout area, but negative * within vblank area, counting down the number of lines until * start of scanout. */ /* Inside "upper part" of vblank area? Apply corrective offset if so: */ if (in_vbl && (*vpos >= vbl_start)) { vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal; *vpos = *vpos - vtotal; } /* Correct for shifted end of vbl at vbl_end. */ *vpos = *vpos - vbl_end; /* In vblank? 
*/ if (in_vbl) ret |= DRM_SCANOUTPOS_INVBL; return ret; }
gpl-2.0
droidroidz/USCC_R970_kernel_KK
arch/arm/mach-msm/smd_nmea.c
3504
4341
/* Copyright (c) 2008-2009, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ /* * SMD NMEA Driver -- Provides GPS NMEA device to SMD port interface. * */ #include <linux/module.h> #include <linux/fs.h> #include <linux/cdev.h> #include <linux/device.h> #include <linux/wait.h> #include <linux/sched.h> #include <linux/miscdevice.h> #include <linux/workqueue.h> #include <linux/uaccess.h> #include <mach/msm_smd.h> #define MAX_BUF_SIZE 200 static DEFINE_MUTEX(nmea_ch_lock); static DEFINE_MUTEX(nmea_rx_buf_lock); static DECLARE_WAIT_QUEUE_HEAD(nmea_wait_queue); struct nmea_device_t { struct miscdevice misc; struct smd_channel *ch; unsigned char rx_buf[MAX_BUF_SIZE]; unsigned int bytes_read; }; struct nmea_device_t *nmea_devp; static void nmea_work_func(struct work_struct *ws) { int sz; for (;;) { sz = smd_cur_packet_size(nmea_devp->ch); if (sz == 0) break; if (sz > smd_read_avail(nmea_devp->ch)) break; if (sz > MAX_BUF_SIZE) { smd_read(nmea_devp->ch, 0, sz); continue; } mutex_lock(&nmea_rx_buf_lock); if (smd_read(nmea_devp->ch, nmea_devp->rx_buf, sz) != sz) { mutex_unlock(&nmea_rx_buf_lock); printk(KERN_ERR "nmea: not enough data?!\n"); continue; } nmea_devp->bytes_read = sz; mutex_unlock(&nmea_rx_buf_lock); wake_up_interruptible(&nmea_wait_queue); } } struct workqueue_struct *nmea_wq; static DECLARE_WORK(nmea_work, nmea_work_func); static void nmea_notify(void *priv, unsigned event) { switch (event) { case SMD_EVENT_DATA: { int sz; sz = smd_cur_packet_size(nmea_devp->ch); if ((sz > 0) && (sz <= smd_read_avail(nmea_devp->ch))) 
queue_work(nmea_wq, &nmea_work); break; } case SMD_EVENT_OPEN: printk(KERN_INFO "nmea: smd opened\n"); break; case SMD_EVENT_CLOSE: printk(KERN_INFO "nmea: smd closed\n"); break; } } static ssize_t nmea_read(struct file *fp, char __user *buf, size_t count, loff_t *pos) { int r; int bytes_read; r = wait_event_interruptible(nmea_wait_queue, nmea_devp->bytes_read); if (r < 0) { /* qualify error message */ if (r != -ERESTARTSYS) { /* we get this anytime a signal comes in */ printk(KERN_ERR "ERROR:%s:%i:%s: " "wait_event_interruptible ret %i\n", __FILE__, __LINE__, __func__, r ); } return r; } mutex_lock(&nmea_rx_buf_lock); bytes_read = nmea_devp->bytes_read; nmea_devp->bytes_read = 0; r = copy_to_user(buf, nmea_devp->rx_buf, bytes_read); mutex_unlock(&nmea_rx_buf_lock); if (r > 0) { printk(KERN_ERR "ERROR:%s:%i:%s: " "copy_to_user could not copy %i bytes.\n", __FILE__, __LINE__, __func__, r); return r; } return bytes_read; } static int nmea_open(struct inode *ip, struct file *fp) { int r = 0; mutex_lock(&nmea_ch_lock); if (nmea_devp->ch == 0) r = smd_open("GPSNMEA", &nmea_devp->ch, nmea_devp, nmea_notify); mutex_unlock(&nmea_ch_lock); return r; } static int nmea_release(struct inode *ip, struct file *fp) { int r = 0; mutex_lock(&nmea_ch_lock); if (nmea_devp->ch != 0) { r = smd_close(nmea_devp->ch); nmea_devp->ch = 0; } mutex_unlock(&nmea_ch_lock); return r; } static const struct file_operations nmea_fops = { .owner = THIS_MODULE, .read = nmea_read, .open = nmea_open, .release = nmea_release, }; static struct nmea_device_t nmea_device = { .misc = { .minor = MISC_DYNAMIC_MINOR, .name = "nmea", .fops = &nmea_fops, } }; static void __exit nmea_exit(void) { destroy_workqueue(nmea_wq); misc_deregister(&nmea_device.misc); } static int __init nmea_init(void) { int ret; nmea_device.bytes_read = 0; nmea_devp = &nmea_device; nmea_wq = create_singlethread_workqueue("nmea"); if (nmea_wq == 0) return -ENOMEM; ret = misc_register(&nmea_device.misc); return ret; } 
module_init(nmea_init); module_exit(nmea_exit); MODULE_DESCRIPTION("MSM Shared Memory NMEA Driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
lollipop-og/hellsgod-kernel
drivers/media/video/adp1653.c
4784
12811
/* * drivers/media/video/adp1653.c * * Copyright (C) 2008--2011 Nokia Corporation * * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com> * * Contributors: * Sakari Ailus <sakari.ailus@maxwell.research.nokia.com> * Tuukka Toivonen <tuukkat76@gmail.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * * TODO: * - fault interrupt handling * - hardware strobe * - power doesn't need to be ON if all lights are off * */ #include <linux/delay.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/version.h> #include <media/adp1653.h> #include <media/v4l2-device.h> #define TIMEOUT_MAX 820000 #define TIMEOUT_STEP 54600 #define TIMEOUT_MIN (TIMEOUT_MAX - ADP1653_REG_CONFIG_TMR_SET_MAX \ * TIMEOUT_STEP) #define TIMEOUT_US_TO_CODE(t) ((TIMEOUT_MAX + (TIMEOUT_STEP / 2) - (t)) \ / TIMEOUT_STEP) #define TIMEOUT_CODE_TO_US(c) (TIMEOUT_MAX - (c) * TIMEOUT_STEP) /* Write values into ADP1653 registers. 
*/ static int adp1653_update_hw(struct adp1653_flash *flash) { struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev); u8 out_sel; u8 config = 0; int rval; out_sel = ADP1653_INDICATOR_INTENSITY_uA_TO_REG( flash->indicator_intensity->val) << ADP1653_REG_OUT_SEL_ILED_SHIFT; switch (flash->led_mode->val) { case V4L2_FLASH_LED_MODE_NONE: break; case V4L2_FLASH_LED_MODE_FLASH: /* Flash mode, light on with strobe, duration from timer */ config = ADP1653_REG_CONFIG_TMR_CFG; config |= TIMEOUT_US_TO_CODE(flash->flash_timeout->val) << ADP1653_REG_CONFIG_TMR_SET_SHIFT; break; case V4L2_FLASH_LED_MODE_TORCH: /* Torch mode, light immediately on, duration indefinite */ out_sel |= ADP1653_FLASH_INTENSITY_mA_TO_REG( flash->torch_intensity->val) << ADP1653_REG_OUT_SEL_HPLED_SHIFT; break; } rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, out_sel); if (rval < 0) return rval; rval = i2c_smbus_write_byte_data(client, ADP1653_REG_CONFIG, config); if (rval < 0) return rval; return 0; } static int adp1653_get_fault(struct adp1653_flash *flash) { struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev); int fault; int rval; fault = i2c_smbus_read_byte_data(client, ADP1653_REG_FAULT); if (IS_ERR_VALUE(fault)) return fault; flash->fault |= fault; if (!flash->fault) return 0; /* Clear faults. 
*/ rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, 0); if (IS_ERR_VALUE(rval)) return rval; flash->led_mode->val = V4L2_FLASH_LED_MODE_NONE; rval = adp1653_update_hw(flash); if (IS_ERR_VALUE(rval)) return rval; return flash->fault; } static int adp1653_strobe(struct adp1653_flash *flash, int enable) { struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev); u8 out_sel = ADP1653_INDICATOR_INTENSITY_uA_TO_REG( flash->indicator_intensity->val) << ADP1653_REG_OUT_SEL_ILED_SHIFT; int rval; if (flash->led_mode->val != V4L2_FLASH_LED_MODE_FLASH) return -EBUSY; if (!enable) return i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, out_sel); out_sel |= ADP1653_FLASH_INTENSITY_mA_TO_REG( flash->flash_intensity->val) << ADP1653_REG_OUT_SEL_HPLED_SHIFT; rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, out_sel); if (rval) return rval; /* Software strobe using i2c */ rval = i2c_smbus_write_byte_data(client, ADP1653_REG_SW_STROBE, ADP1653_REG_SW_STROBE_SW_STROBE); if (rval) return rval; return i2c_smbus_write_byte_data(client, ADP1653_REG_SW_STROBE, 0); } /* -------------------------------------------------------------------------- * V4L2 controls */ static int adp1653_get_ctrl(struct v4l2_ctrl *ctrl) { struct adp1653_flash *flash = container_of(ctrl->handler, struct adp1653_flash, ctrls); int rval; rval = adp1653_get_fault(flash); if (IS_ERR_VALUE(rval)) return rval; ctrl->cur.val = 0; if (flash->fault & ADP1653_REG_FAULT_FLT_SCP) ctrl->cur.val |= V4L2_FLASH_FAULT_SHORT_CIRCUIT; if (flash->fault & ADP1653_REG_FAULT_FLT_OT) ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_TEMPERATURE; if (flash->fault & ADP1653_REG_FAULT_FLT_TMR) ctrl->cur.val |= V4L2_FLASH_FAULT_TIMEOUT; if (flash->fault & ADP1653_REG_FAULT_FLT_OV) ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_VOLTAGE; flash->fault = 0; return 0; } static int adp1653_set_ctrl(struct v4l2_ctrl *ctrl) { struct adp1653_flash *flash = container_of(ctrl->handler, struct adp1653_flash, ctrls); int rval; rval = 
adp1653_get_fault(flash); if (IS_ERR_VALUE(rval)) return rval; if ((rval & (ADP1653_REG_FAULT_FLT_SCP | ADP1653_REG_FAULT_FLT_OT | ADP1653_REG_FAULT_FLT_OV)) && (ctrl->id == V4L2_CID_FLASH_STROBE || ctrl->id == V4L2_CID_FLASH_TORCH_INTENSITY || ctrl->id == V4L2_CID_FLASH_LED_MODE)) return -EBUSY; switch (ctrl->id) { case V4L2_CID_FLASH_STROBE: return adp1653_strobe(flash, 1); case V4L2_CID_FLASH_STROBE_STOP: return adp1653_strobe(flash, 0); } return adp1653_update_hw(flash); } static const struct v4l2_ctrl_ops adp1653_ctrl_ops = { .g_volatile_ctrl = adp1653_get_ctrl, .s_ctrl = adp1653_set_ctrl, }; static int adp1653_init_controls(struct adp1653_flash *flash) { struct v4l2_ctrl *fault; v4l2_ctrl_handler_init(&flash->ctrls, 9); flash->led_mode = v4l2_ctrl_new_std_menu(&flash->ctrls, &adp1653_ctrl_ops, V4L2_CID_FLASH_LED_MODE, V4L2_FLASH_LED_MODE_TORCH, ~0x7, 0); v4l2_ctrl_new_std_menu(&flash->ctrls, &adp1653_ctrl_ops, V4L2_CID_FLASH_STROBE_SOURCE, V4L2_FLASH_STROBE_SOURCE_SOFTWARE, ~0x1, 0); v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops, V4L2_CID_FLASH_STROBE, 0, 0, 0, 0); v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops, V4L2_CID_FLASH_STROBE_STOP, 0, 0, 0, 0); flash->flash_timeout = v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops, V4L2_CID_FLASH_TIMEOUT, TIMEOUT_MIN, flash->platform_data->max_flash_timeout, TIMEOUT_STEP, flash->platform_data->max_flash_timeout); flash->flash_intensity = v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops, V4L2_CID_FLASH_INTENSITY, ADP1653_FLASH_INTENSITY_MIN, flash->platform_data->max_flash_intensity, 1, flash->platform_data->max_flash_intensity); flash->torch_intensity = v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops, V4L2_CID_FLASH_TORCH_INTENSITY, ADP1653_TORCH_INTENSITY_MIN, flash->platform_data->max_torch_intensity, ADP1653_FLASH_INTENSITY_STEP, flash->platform_data->max_torch_intensity); flash->indicator_intensity = v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops, V4L2_CID_FLASH_INDICATOR_INTENSITY, 
ADP1653_INDICATOR_INTENSITY_MIN, flash->platform_data->max_indicator_intensity, ADP1653_INDICATOR_INTENSITY_STEP, ADP1653_INDICATOR_INTENSITY_MIN); fault = v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops, V4L2_CID_FLASH_FAULT, 0, V4L2_FLASH_FAULT_OVER_VOLTAGE | V4L2_FLASH_FAULT_OVER_TEMPERATURE | V4L2_FLASH_FAULT_SHORT_CIRCUIT, 0, 0); if (flash->ctrls.error) return flash->ctrls.error; fault->flags |= V4L2_CTRL_FLAG_VOLATILE; flash->subdev.ctrl_handler = &flash->ctrls; return 0; } /* -------------------------------------------------------------------------- * V4L2 subdev operations */ static int adp1653_init_device(struct adp1653_flash *flash) { struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev); int rval; /* Clear FAULT register by writing zero to OUT_SEL */ rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, 0); if (rval < 0) { dev_err(&client->dev, "failed writing fault register\n"); return -EIO; } mutex_lock(&flash->ctrls.lock); /* Reset faults before reading new ones. */ flash->fault = 0; rval = adp1653_get_fault(flash); mutex_unlock(&flash->ctrls.lock); if (rval > 0) { dev_err(&client->dev, "faults detected: 0x%1.1x\n", rval); return -EIO; } mutex_lock(&flash->ctrls.lock); rval = adp1653_update_hw(flash); mutex_unlock(&flash->ctrls.lock); if (rval) { dev_err(&client->dev, "adp1653_update_hw failed at %s\n", __func__); return -EIO; } return 0; } static int __adp1653_set_power(struct adp1653_flash *flash, int on) { int ret; ret = flash->platform_data->power(&flash->subdev, on); if (ret < 0) return ret; if (!on) return 0; ret = adp1653_init_device(flash); if (ret < 0) flash->platform_data->power(&flash->subdev, 0); return ret; } static int adp1653_set_power(struct v4l2_subdev *subdev, int on) { struct adp1653_flash *flash = to_adp1653_flash(subdev); int ret = 0; mutex_lock(&flash->power_lock); /* If the power count is modified from 0 to != 0 or from != 0 to 0, * update the power state. 
*/ if (flash->power_count == !on) { ret = __adp1653_set_power(flash, !!on); if (ret < 0) goto done; } /* Update the power count. */ flash->power_count += on ? 1 : -1; WARN_ON(flash->power_count < 0); done: mutex_unlock(&flash->power_lock); return ret; } static int adp1653_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { return adp1653_set_power(sd, 1); } static int adp1653_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { return adp1653_set_power(sd, 0); } static const struct v4l2_subdev_core_ops adp1653_core_ops = { .s_power = adp1653_set_power, }; static const struct v4l2_subdev_ops adp1653_ops = { .core = &adp1653_core_ops, }; static const struct v4l2_subdev_internal_ops adp1653_internal_ops = { .open = adp1653_open, .close = adp1653_close, }; /* -------------------------------------------------------------------------- * I2C driver */ #ifdef CONFIG_PM static int adp1653_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *subdev = i2c_get_clientdata(client); struct adp1653_flash *flash = to_adp1653_flash(subdev); if (!flash->power_count) return 0; return __adp1653_set_power(flash, 0); } static int adp1653_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *subdev = i2c_get_clientdata(client); struct adp1653_flash *flash = to_adp1653_flash(subdev); if (!flash->power_count) return 0; return __adp1653_set_power(flash, 1); } #else #define adp1653_suspend NULL #define adp1653_resume NULL #endif /* CONFIG_PM */ static int adp1653_probe(struct i2c_client *client, const struct i2c_device_id *devid) { struct adp1653_flash *flash; int ret; /* we couldn't work without platform data */ if (client->dev.platform_data == NULL) return -ENODEV; flash = kzalloc(sizeof(*flash), GFP_KERNEL); if (flash == NULL) return -ENOMEM; flash->platform_data = client->dev.platform_data; mutex_init(&flash->power_lock); v4l2_i2c_subdev_init(&flash->subdev, client, &adp1653_ops); 
flash->subdev.internal_ops = &adp1653_internal_ops; flash->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; ret = adp1653_init_controls(flash); if (ret) goto free_and_quit; ret = media_entity_init(&flash->subdev.entity, 0, NULL, 0); if (ret < 0) goto free_and_quit; flash->subdev.entity.type = MEDIA_ENT_T_V4L2_SUBDEV_FLASH; return 0; free_and_quit: v4l2_ctrl_handler_free(&flash->ctrls); kfree(flash); return ret; } static int __exit adp1653_remove(struct i2c_client *client) { struct v4l2_subdev *subdev = i2c_get_clientdata(client); struct adp1653_flash *flash = to_adp1653_flash(subdev); v4l2_device_unregister_subdev(&flash->subdev); v4l2_ctrl_handler_free(&flash->ctrls); media_entity_cleanup(&flash->subdev.entity); kfree(flash); return 0; } static const struct i2c_device_id adp1653_id_table[] = { { ADP1653_NAME, 0 }, { } }; MODULE_DEVICE_TABLE(i2c, adp1653_id_table); static struct dev_pm_ops adp1653_pm_ops = { .suspend = adp1653_suspend, .resume = adp1653_resume, }; static struct i2c_driver adp1653_i2c_driver = { .driver = { .name = ADP1653_NAME, .pm = &adp1653_pm_ops, }, .probe = adp1653_probe, .remove = __exit_p(adp1653_remove), .id_table = adp1653_id_table, }; module_i2c_driver(adp1653_i2c_driver); MODULE_AUTHOR("Sakari Ailus <sakari.ailus@nokia.com>"); MODULE_DESCRIPTION("Analog Devices ADP1653 LED flash driver"); MODULE_LICENSE("GPL");
gpl-2.0
thornbirdblue/MI3_kernel_code
arch/s390/hypfs/hypfs_vm.c
7600
6818
/* * Hypervisor filesystem for Linux on s390. z/VM implementation. * * Copyright (C) IBM Corp. 2006 * Author(s): Michael Holzheu <holzheu@de.ibm.com> */ #include <linux/types.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/vmalloc.h> #include <asm/ebcdic.h> #include <asm/timex.h> #include "hypfs.h" #define NAME_LEN 8 #define DBFS_D2FC_HDR_VERSION 0 static char local_guest[] = " "; static char all_guests[] = "* "; static char *guest_query; struct diag2fc_data { __u32 version; __u32 flags; __u64 used_cpu; __u64 el_time; __u64 mem_min_kb; __u64 mem_max_kb; __u64 mem_share_kb; __u64 mem_used_kb; __u32 pcpus; __u32 lcpus; __u32 vcpus; __u32 cpu_min; __u32 cpu_max; __u32 cpu_shares; __u32 cpu_use_samp; __u32 cpu_delay_samp; __u32 page_wait_samp; __u32 idle_samp; __u32 other_samp; __u32 total_samp; char guest_name[NAME_LEN]; }; struct diag2fc_parm_list { char userid[NAME_LEN]; char aci_grp[NAME_LEN]; __u64 addr; __u32 size; __u32 fmt; }; static int diag2fc(int size, char* query, void *addr) { unsigned long residual_cnt; unsigned long rc; struct diag2fc_parm_list parm_list; memcpy(parm_list.userid, query, NAME_LEN); ASCEBC(parm_list.userid, NAME_LEN); parm_list.addr = (unsigned long) addr ; parm_list.size = size; parm_list.fmt = 0x02; memset(parm_list.aci_grp, 0x40, NAME_LEN); rc = -1; asm volatile( " diag %0,%1,0x2fc\n" "0:\n" EX_TABLE(0b,0b) : "=d" (residual_cnt), "+d" (rc) : "0" (&parm_list) : "memory"); if ((rc != 0 ) && (rc != -2)) return rc; else return -residual_cnt; } /* * Allocate buffer for "query" and store diag 2fc at "offset" */ static void *diag2fc_store(char *query, unsigned int *count, int offset) { void *data; int size; do { size = diag2fc(0, query, NULL); if (size < 0) return ERR_PTR(-EACCES); data = vmalloc(size + offset); if (!data) return ERR_PTR(-ENOMEM); if (diag2fc(size, query, data + offset) == 0) break; vfree(data); } while (1); *count = (size / sizeof(struct diag2fc_data)); return data; } static void diag2fc_free(const void 
*data) { vfree(data); } #define ATTRIBUTE(sb, dir, name, member) \ do { \ void *rc; \ rc = hypfs_create_u64(sb, dir, name, member); \ if (IS_ERR(rc)) \ return PTR_ERR(rc); \ } while(0) static int hpyfs_vm_create_guest(struct super_block *sb, struct dentry *systems_dir, struct diag2fc_data *data) { char guest_name[NAME_LEN + 1] = {}; struct dentry *guest_dir, *cpus_dir, *samples_dir, *mem_dir; int dedicated_flag, capped_value; capped_value = (data->flags & 0x00000006) >> 1; dedicated_flag = (data->flags & 0x00000008) >> 3; /* guest dir */ memcpy(guest_name, data->guest_name, NAME_LEN); EBCASC(guest_name, NAME_LEN); strim(guest_name); guest_dir = hypfs_mkdir(sb, systems_dir, guest_name); if (IS_ERR(guest_dir)) return PTR_ERR(guest_dir); ATTRIBUTE(sb, guest_dir, "onlinetime_us", data->el_time); /* logical cpu information */ cpus_dir = hypfs_mkdir(sb, guest_dir, "cpus"); if (IS_ERR(cpus_dir)) return PTR_ERR(cpus_dir); ATTRIBUTE(sb, cpus_dir, "cputime_us", data->used_cpu); ATTRIBUTE(sb, cpus_dir, "capped", capped_value); ATTRIBUTE(sb, cpus_dir, "dedicated", dedicated_flag); ATTRIBUTE(sb, cpus_dir, "count", data->vcpus); ATTRIBUTE(sb, cpus_dir, "weight_min", data->cpu_min); ATTRIBUTE(sb, cpus_dir, "weight_max", data->cpu_max); ATTRIBUTE(sb, cpus_dir, "weight_cur", data->cpu_shares); /* memory information */ mem_dir = hypfs_mkdir(sb, guest_dir, "mem"); if (IS_ERR(mem_dir)) return PTR_ERR(mem_dir); ATTRIBUTE(sb, mem_dir, "min_KiB", data->mem_min_kb); ATTRIBUTE(sb, mem_dir, "max_KiB", data->mem_max_kb); ATTRIBUTE(sb, mem_dir, "used_KiB", data->mem_used_kb); ATTRIBUTE(sb, mem_dir, "share_KiB", data->mem_share_kb); /* samples */ samples_dir = hypfs_mkdir(sb, guest_dir, "samples"); if (IS_ERR(samples_dir)) return PTR_ERR(samples_dir); ATTRIBUTE(sb, samples_dir, "cpu_using", data->cpu_use_samp); ATTRIBUTE(sb, samples_dir, "cpu_delay", data->cpu_delay_samp); ATTRIBUTE(sb, samples_dir, "mem_delay", data->page_wait_samp); ATTRIBUTE(sb, samples_dir, "idle", data->idle_samp); 
ATTRIBUTE(sb, samples_dir, "other", data->other_samp); ATTRIBUTE(sb, samples_dir, "total", data->total_samp); return 0; } int hypfs_vm_create_files(struct super_block *sb, struct dentry *root) { struct dentry *dir, *file; struct diag2fc_data *data; unsigned int count = 0; int rc, i; data = diag2fc_store(guest_query, &count, 0); if (IS_ERR(data)) return PTR_ERR(data); /* Hpervisor Info */ dir = hypfs_mkdir(sb, root, "hyp"); if (IS_ERR(dir)) { rc = PTR_ERR(dir); goto failed; } file = hypfs_create_str(sb, dir, "type", "z/VM Hypervisor"); if (IS_ERR(file)) { rc = PTR_ERR(file); goto failed; } /* physical cpus */ dir = hypfs_mkdir(sb, root, "cpus"); if (IS_ERR(dir)) { rc = PTR_ERR(dir); goto failed; } file = hypfs_create_u64(sb, dir, "count", data->lcpus); if (IS_ERR(file)) { rc = PTR_ERR(file); goto failed; } /* guests */ dir = hypfs_mkdir(sb, root, "systems"); if (IS_ERR(dir)) { rc = PTR_ERR(dir); goto failed; } for (i = 0; i < count; i++) { rc = hpyfs_vm_create_guest(sb, dir, &(data[i])); if (rc) goto failed; } diag2fc_free(data); return 0; failed: diag2fc_free(data); return rc; } struct dbfs_d2fc_hdr { u64 len; /* Length of d2fc buffer without header */ u16 version; /* Version of header */ char tod_ext[16]; /* TOD clock for d2fc */ u64 count; /* Number of VM guests in d2fc buffer */ char reserved[30]; } __attribute__ ((packed)); struct dbfs_d2fc { struct dbfs_d2fc_hdr hdr; /* 64 byte header */ char buf[]; /* d2fc buffer */ } __attribute__ ((packed)); static int dbfs_diag2fc_create(void **data, void **data_free_ptr, size_t *size) { struct dbfs_d2fc *d2fc; unsigned int count; d2fc = diag2fc_store(guest_query, &count, sizeof(d2fc->hdr)); if (IS_ERR(d2fc)) return PTR_ERR(d2fc); get_clock_ext(d2fc->hdr.tod_ext); d2fc->hdr.len = count * sizeof(struct diag2fc_data); d2fc->hdr.version = DBFS_D2FC_HDR_VERSION; d2fc->hdr.count = count; memset(&d2fc->hdr.reserved, 0, sizeof(d2fc->hdr.reserved)); *data = d2fc; *data_free_ptr = d2fc; *size = d2fc->hdr.len + sizeof(struct 
dbfs_d2fc_hdr); return 0; } static struct hypfs_dbfs_file dbfs_file_2fc = { .name = "diag_2fc", .data_create = dbfs_diag2fc_create, .data_free = diag2fc_free, }; int hypfs_vm_init(void) { if (!MACHINE_IS_VM) return 0; if (diag2fc(0, all_guests, NULL) > 0) guest_query = all_guests; else if (diag2fc(0, local_guest, NULL) > 0) guest_query = local_guest; else return -EACCES; return hypfs_dbfs_create_file(&dbfs_file_2fc); } void hypfs_vm_exit(void) { if (!MACHINE_IS_VM) return; hypfs_dbfs_remove_file(&dbfs_file_2fc); }
gpl-2.0
aospan/linux-next-bcm4708-edgecore-ecw7220-l
net/netfilter/xt_ecn.c
9392
4541
/* * Xtables module for matching the value of the IPv4/IPv6 and TCP ECN bits * * (C) 2002 by Harald Welte <laforge@gnumonks.org> * (C) 2011 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/in.h> #include <linux/ip.h> #include <net/ip.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/tcp.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_ecn.h> #include <linux/netfilter_ipv4/ip_tables.h> #include <linux/netfilter_ipv6/ip6_tables.h> MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); MODULE_DESCRIPTION("Xtables: Explicit Congestion Notification (ECN) flag match"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_ecn"); MODULE_ALIAS("ip6t_ecn"); static bool match_tcp(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_ecn_info *einfo = par->matchinfo; struct tcphdr _tcph; const struct tcphdr *th; /* In practice, TCP match does this, so can't fail. But let's * be good citizens. 
*/ th = skb_header_pointer(skb, par->thoff, sizeof(_tcph), &_tcph); if (th == NULL) return false; if (einfo->operation & XT_ECN_OP_MATCH_ECE) { if (einfo->invert & XT_ECN_OP_MATCH_ECE) { if (th->ece == 1) return false; } else { if (th->ece == 0) return false; } } if (einfo->operation & XT_ECN_OP_MATCH_CWR) { if (einfo->invert & XT_ECN_OP_MATCH_CWR) { if (th->cwr == 1) return false; } else { if (th->cwr == 0) return false; } } return true; } static inline bool match_ip(const struct sk_buff *skb, const struct xt_ecn_info *einfo) { return ((ip_hdr(skb)->tos & XT_ECN_IP_MASK) == einfo->ip_ect) ^ !!(einfo->invert & XT_ECN_OP_MATCH_IP); } static bool ecn_mt4(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_ecn_info *info = par->matchinfo; if (info->operation & XT_ECN_OP_MATCH_IP && !match_ip(skb, info)) return false; if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) && !match_tcp(skb, par)) return false; return true; } static int ecn_mt_check4(const struct xt_mtchk_param *par) { const struct xt_ecn_info *info = par->matchinfo; const struct ipt_ip *ip = par->entryinfo; if (info->operation & XT_ECN_OP_MATCH_MASK) return -EINVAL; if (info->invert & XT_ECN_OP_MATCH_MASK) return -EINVAL; if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) && (ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) { pr_info("cannot match TCP bits in rule for non-tcp packets\n"); return -EINVAL; } return 0; } static inline bool match_ipv6(const struct sk_buff *skb, const struct xt_ecn_info *einfo) { return (((ipv6_hdr(skb)->flow_lbl[0] >> 4) & XT_ECN_IP_MASK) == einfo->ip_ect) ^ !!(einfo->invert & XT_ECN_OP_MATCH_IP); } static bool ecn_mt6(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_ecn_info *info = par->matchinfo; if (info->operation & XT_ECN_OP_MATCH_IP && !match_ipv6(skb, info)) return false; if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) && !match_tcp(skb, par)) return false; return 
true; } static int ecn_mt_check6(const struct xt_mtchk_param *par) { const struct xt_ecn_info *info = par->matchinfo; const struct ip6t_ip6 *ip = par->entryinfo; if (info->operation & XT_ECN_OP_MATCH_MASK) return -EINVAL; if (info->invert & XT_ECN_OP_MATCH_MASK) return -EINVAL; if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) && (ip->proto != IPPROTO_TCP || ip->invflags & IP6T_INV_PROTO)) { pr_info("cannot match TCP bits in rule for non-tcp packets\n"); return -EINVAL; } return 0; } static struct xt_match ecn_mt_reg[] __read_mostly = { { .name = "ecn", .family = NFPROTO_IPV4, .match = ecn_mt4, .matchsize = sizeof(struct xt_ecn_info), .checkentry = ecn_mt_check4, .me = THIS_MODULE, }, { .name = "ecn", .family = NFPROTO_IPV6, .match = ecn_mt6, .matchsize = sizeof(struct xt_ecn_info), .checkentry = ecn_mt_check6, .me = THIS_MODULE, }, }; static int __init ecn_mt_init(void) { return xt_register_matches(ecn_mt_reg, ARRAY_SIZE(ecn_mt_reg)); } static void __exit ecn_mt_exit(void) { xt_unregister_matches(ecn_mt_reg, ARRAY_SIZE(ecn_mt_reg)); } module_init(ecn_mt_init); module_exit(ecn_mt_exit);
gpl-2.0