repo_name
string
path
string
copies
string
size
string
content
string
license
string
GenetICS/lge_kernel_msm7x27
drivers/ssb/sprom.c
2416
5355
/*
 * Sonics Silicon Backplane
 * Common SPROM support routines
 *
 * Copyright (C) 2005-2008 Michael Buesch <mb@bu3sch.de>
 * Copyright (C) 2005 Martin Langer <martin-langer@gmx.de>
 * Copyright (C) 2005 Stefano Brivio <st3@riseup.net>
 * Copyright (C) 2005 Danny van Dyk <kugelfang@gentoo.org>
 * Copyright (C) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "ssb_private.h"

#include <linux/ctype.h>
#include <linux/slab.h>


/* Optional fallback SPROM provider, registered by architecture code. */
static int (*get_fallback_sprom)(struct ssb_bus *dev, struct ssb_sprom *out);


/* Render the SPROM image into @buf as one run of %04X words, byte-swapped,
 * terminated by a newline.  Returns the number of characters produced. */
static int sprom2hex(const u16 *sprom, char *buf, size_t buf_len,
		     size_t sprom_size_words)
{
	int word, pos = 0;

	for (word = 0; word < sprom_size_words; word++)
		pos += snprintf(buf + pos, buf_len - pos - 1,
				"%04X", swab16(sprom[word]) & 0xFFFF);
	pos += snprintf(buf + pos, buf_len - pos - 1, "\n");

	return pos + 1;
}

/* Parse a hex dump (the format sprom2hex emits) back into SPROM words.
 * Trailing whitespace is ignored; otherwise the length must match the
 * SPROM size exactly.  Returns 0 or a negative error code. */
static int hex2sprom(u16 *sprom, const char *dump, size_t len,
		     size_t sprom_size_words)
{
	char ch, word_buf[5] = { 0 };
	int err, idx = 0;
	unsigned long value;

	/* Discard whitespace (and stray NULs) at the end of the dump. */
	while (len) {
		ch = dump[len - 1];
		if (!isspace(ch) && ch != '\0')
			break;
		len--;
	}
	/* Four hex digits per 16-bit word -- no more, no less. */
	if (len != sprom_size_words * 4)
		return -EINVAL;

	while (idx < sprom_size_words) {
		memcpy(word_buf, dump, 4);
		dump += 4;
		err = strict_strtoul(word_buf, 16, &value);
		if (err)
			return err;
		sprom[idx++] = swab16((u16)value);
	}

	return 0;
}

/* Common sprom device-attribute show-handler */
ssize_t ssb_attr_sprom_show(struct ssb_bus *bus, char *buf,
			    int (*sprom_read)(struct ssb_bus *bus, u16 *sprom))
{
	u16 *sprom;
	int err = -ENOMEM;
	ssize_t count = 0;
	size_t sprom_size_words = bus->sprom_size;

	sprom = kcalloc(sprom_size_words, sizeof(u16), GFP_KERNEL);
	if (!sprom)
		goto out;

	/* Use interruptible locking, as the SPROM write might
	 * be holding the lock for several seconds. So allow userspace
	 * to cancel operation. */
	err = -ERESTARTSYS;
	if (mutex_lock_interruptible(&bus->sprom_mutex))
		goto out_kfree;
	err = sprom_read(bus, sprom);
	mutex_unlock(&bus->sprom_mutex);

	if (!err)
		count = sprom2hex(sprom, buf, PAGE_SIZE, sprom_size_words);

out_kfree:
	kfree(sprom);
out:
	return err ? err : count;
}

/* Common sprom device-attribute store-handler */
ssize_t ssb_attr_sprom_store(struct ssb_bus *bus,
			     const char *buf, size_t count,
			     int (*sprom_check_crc)(const u16 *sprom, size_t size),
			     int (*sprom_write)(struct ssb_bus *bus, const u16 *sprom))
{
	u16 *sprom;
	int res = 0, err = -ENOMEM;
	size_t sprom_size_words = bus->sprom_size;
	struct ssb_freeze_context freeze;

	sprom = kcalloc(bus->sprom_size, sizeof(u16), GFP_KERNEL);
	if (!sprom)
		goto out;

	/* Reject any dump that does not parse or whose CRC is wrong
	 * before touching the hardware. */
	err = hex2sprom(sprom, buf, count, sprom_size_words);
	if (err) {
		err = -EINVAL;
		goto out_kfree;
	}
	err = sprom_check_crc(sprom, sprom_size_words);
	if (err) {
		err = -EINVAL;
		goto out_kfree;
	}

	/* Use interruptible locking, as the SPROM write might
	 * be holding the lock for several seconds. So allow userspace
	 * to cancel operation. */
	err = -ERESTARTSYS;
	if (mutex_lock_interruptible(&bus->sprom_mutex))
		goto out_kfree;
	err = ssb_devices_freeze(bus, &freeze);
	if (err) {
		ssb_printk(KERN_ERR PFX "SPROM write: Could not freeze all devices\n");
		goto out_unlock;
	}
	res = sprom_write(bus, sprom);
	err = ssb_devices_thaw(&freeze);
	if (err)
		ssb_printk(KERN_ERR PFX "SPROM write: Could not thaw all devices\n");

out_unlock:
	mutex_unlock(&bus->sprom_mutex);
out_kfree:
	kfree(sprom);
out:
	if (res)
		return res;
	return err ? err : count;
}

/**
 * ssb_arch_register_fallback_sprom - Registers a method providing a
 * fallback SPROM if no SPROM is found.
 *
 * @sprom_callback: The callback function.
 *
 * With this function the architecture implementation may register a
 * callback handler which fills the SPROM data structure. The fallback is
 * only used for PCI based SSB devices, where no valid SPROM can be found
 * in the shadow registers.
 *
 * This function is useful for weird architectures that have a half-assed
 * SSB device hardwired to their PCI bus.
 *
 * Note that it does only work with PCI attached SSB devices. PCMCIA
 * devices currently don't use this fallback.
 * Architectures must provide the SPROM for native SSB devices anyway, so
 * the fallback also isn't used for native devices.
 *
 * This function is available for architecture code, only. So it is not
 * exported.
 */
int ssb_arch_register_fallback_sprom(int (*sprom_callback)(struct ssb_bus *bus,
							   struct ssb_sprom *out))
{
	/* Only one fallback provider may ever be registered. */
	if (get_fallback_sprom)
		return -EEXIST;

	get_fallback_sprom = sprom_callback;

	return 0;
}

int ssb_fill_sprom_with_fallback(struct ssb_bus *bus, struct ssb_sprom *out)
{
	if (!get_fallback_sprom)
		return -ENOENT;

	return get_fallback_sprom(bus, out);
}

/* http://bcm-v4.sipsolutions.net/802.11/IsSpromAvailable */
bool ssb_is_sprom_available(struct ssb_bus *bus)
{
	/* status register only exists on chipcomon rev >= 11 and we need check
	   for >= 31 only */
	/* this routine differs from specs as we do not access SPROM directly
	   on PCMCIA */
	if (bus->bustype == SSB_BUSTYPE_PCI &&
	    bus->chipco.dev &&	/* can be unavailable! */
	    bus->chipco.dev->id.revision >= 31)
		return bus->chipco.capabilities & SSB_CHIPCO_CAP_SPROM;

	return true;
}
gpl-2.0
sparkma/kernel
drivers/tty/serial/uartlite.c
2928
15598
/*
 * uartlite.c: Serial driver for Xilinx uartlite serial controller
 *
 * Copyright (C) 2006 Peter Korsgaard <jacmet@sunsite.dk>
 * Copyright (C) 2007 Secret Lab Technologies Ltd.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/tty.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <asm/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

#define ULITE_NAME		"ttyUL"
#define ULITE_MAJOR		204
#define ULITE_MINOR		187
#define ULITE_NR_UARTS		4

/* ---------------------------------------------------------------------
 * Register definitions
 *
 * For register details see datasheet:
 * http://www.xilinx.com/support/documentation/ip_documentation/opb_uartlite.pdf
 */
#define ULITE_RX		0x00
#define ULITE_TX		0x04
#define ULITE_STATUS		0x08
#define ULITE_CONTROL		0x0c

#define ULITE_REGION		16

#define ULITE_STATUS_RXVALID	0x01
#define ULITE_STATUS_RXFULL	0x02
#define ULITE_STATUS_TXEMPTY	0x04
#define ULITE_STATUS_TXFULL	0x08
#define ULITE_STATUS_IE		0x10
#define ULITE_STATUS_OVERRUN	0x20
#define ULITE_STATUS_FRAME	0x40
#define ULITE_STATUS_PARITY	0x80

#define ULITE_CONTROL_RST_TX	0x01
#define ULITE_CONTROL_RST_RX	0x02
#define ULITE_CONTROL_IE	0x10

/* One static port slot per possible uartlite instance. */
static struct uart_port ulite_ports[ULITE_NR_UARTS];

/* ---------------------------------------------------------------------
 * Core UART driver operations
 */

/* Pull one received character (or error event) off the device.
 * Returns 1 if anything was consumed, 0 if the RX side was idle. */
static int ulite_receive(struct uart_port *port, int stat)
{
	struct tty_struct *tty = port->state->port.tty;
	unsigned char ch = 0;
	char flag = TTY_NORMAL;

	if ((stat & (ULITE_STATUS_RXVALID | ULITE_STATUS_OVERRUN
		     | ULITE_STATUS_FRAME)) == 0)
		return 0;

	/* stats */
	if (stat & ULITE_STATUS_RXVALID) {
		port->icount.rx++;
		ch = ioread32be(port->membase + ULITE_RX);

		if (stat & ULITE_STATUS_PARITY)
			port->icount.parity++;
	}

	if (stat & ULITE_STATUS_OVERRUN)
		port->icount.overrun++;

	if (stat & ULITE_STATUS_FRAME)
		port->icount.frame++;


	/* drop byte with parity error if IGNPAR specificed */
	if (stat & port->ignore_status_mask & ULITE_STATUS_PARITY)
		stat &= ~ULITE_STATUS_RXVALID;

	stat &= port->read_status_mask;

	if (stat & ULITE_STATUS_PARITY)
		flag = TTY_PARITY;


	stat &= ~port->ignore_status_mask;

	if (stat & ULITE_STATUS_RXVALID)
		tty_insert_flip_char(tty, ch, flag);

	if (stat & ULITE_STATUS_FRAME)
		tty_insert_flip_char(tty, 0, TTY_FRAME);

	if (stat & ULITE_STATUS_OVERRUN)
		tty_insert_flip_char(tty, 0, TTY_OVERRUN);

	return 1;
}

/* Push one pending character to the device, if the TX fifo has room.
 * Returns 1 if a character was sent, 0 otherwise. */
static int ulite_transmit(struct uart_port *port, int stat)
{
	struct circ_buf *xmit = &port->state->xmit;

	if (stat & ULITE_STATUS_TXFULL)
		return 0;

	/* High-priority XON/XOFF character goes first. */
	if (port->x_char) {
		iowrite32be(port->x_char, port->membase + ULITE_TX);
		port->x_char = 0;
		port->icount.tx++;
		return 1;
	}

	if (uart_circ_empty(xmit) || uart_tx_stopped(port))
		return 0;

	iowrite32be(xmit->buf[xmit->tail], port->membase + ULITE_TX);
	xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE-1);
	port->icount.tx++;

	/* wake up */
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	return 1;
}

/* Interrupt handler: drain RX and feed TX until the device goes idle. */
static irqreturn_t ulite_isr(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;
	int busy, n = 0;

	do {
		int stat = ioread32be(port->membase + ULITE_STATUS);
		busy  = ulite_receive(port, stat);
		busy |= ulite_transmit(port, stat);
		n++;
	} while (busy);

	/* work done? */
	if (n > 1) {
		tty_flip_buffer_push(port->state->port.tty);
		return IRQ_HANDLED;
	} else {
		return IRQ_NONE;
	}
}

static unsigned int ulite_tx_empty(struct uart_port *port)
{
	unsigned long flags;
	unsigned int ret;

	spin_lock_irqsave(&port->lock, flags);
	ret = ioread32be(port->membase + ULITE_STATUS);
	spin_unlock_irqrestore(&port->lock, flags);

	return ret & ULITE_STATUS_TXEMPTY ? TIOCSER_TEMT : 0;
}

static unsigned int ulite_get_mctrl(struct uart_port *port)
{
	/* No modem control lines on this device; report all asserted. */
	return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
}

static void ulite_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	/* N/A */
}

static void ulite_stop_tx(struct uart_port *port)
{
	/* N/A */
}

static void ulite_start_tx(struct uart_port *port)
{
	ulite_transmit(port, ioread32be(port->membase + ULITE_STATUS));
}

static void ulite_stop_rx(struct uart_port *port)
{
	/* don't forward any more data (like !CREAD) */
	port->ignore_status_mask = ULITE_STATUS_RXVALID | ULITE_STATUS_PARITY
		| ULITE_STATUS_FRAME | ULITE_STATUS_OVERRUN;
}

static void ulite_enable_ms(struct uart_port *port)
{
	/* N/A */
}

static void ulite_break_ctl(struct uart_port *port, int ctl)
{
	/* N/A */
}

static int ulite_startup(struct uart_port *port)
{
	int ret;

	ret = request_irq(port->irq, ulite_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, "uartlite", port);
	if (ret)
		return ret;

	/* Reset both fifos, then enable interrupts. */
	iowrite32be(ULITE_CONTROL_RST_RX | ULITE_CONTROL_RST_TX,
		    port->membase + ULITE_CONTROL);
	iowrite32be(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL);

	return 0;
}

static void ulite_shutdown(struct uart_port *port)
{
	iowrite32be(0, port->membase + ULITE_CONTROL);
	ioread32be(port->membase + ULITE_CONTROL); /* dummy */
	free_irq(port->irq, port);
}

static void ulite_set_termios(struct uart_port *port, struct ktermios *termios,
			      struct ktermios *old)
{
	unsigned long flags;
	unsigned int baud;

	spin_lock_irqsave(&port->lock, flags);

	port->read_status_mask = ULITE_STATUS_RXVALID | ULITE_STATUS_OVERRUN
		| ULITE_STATUS_TXFULL;

	if (termios->c_iflag & INPCK)
		port->read_status_mask |=
			ULITE_STATUS_PARITY | ULITE_STATUS_FRAME;

	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= ULITE_STATUS_PARITY
			| ULITE_STATUS_FRAME | ULITE_STATUS_OVERRUN;

	/* ignore all characters if CREAD is not set */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |=
			ULITE_STATUS_RXVALID | ULITE_STATUS_PARITY
			| ULITE_STATUS_FRAME | ULITE_STATUS_OVERRUN;

	/* update timeout */
	baud = uart_get_baud_rate(port, termios, old, 0, 460800);
	uart_update_timeout(port, termios->c_cflag, baud);

	spin_unlock_irqrestore(&port->lock, flags);
}

static const char *ulite_type(struct uart_port *port)
{
	return port->type == PORT_UARTLITE ? "uartlite" : NULL;
}

static void ulite_release_port(struct uart_port *port)
{
	release_mem_region(port->mapbase, ULITE_REGION);
	iounmap(port->membase);
	port->membase = NULL;
}

static int ulite_request_port(struct uart_port *port)
{
	pr_debug("ulite console: port=%p; port->mapbase=%llx\n",
		 port, (unsigned long long) port->mapbase);

	if (!request_mem_region(port->mapbase, ULITE_REGION, "uartlite")) {
		dev_err(port->dev, "Memory region busy\n");
		return -EBUSY;
	}

	port->membase = ioremap(port->mapbase, ULITE_REGION);
	if (!port->membase) {
		dev_err(port->dev, "Unable to map registers\n");
		release_mem_region(port->mapbase, ULITE_REGION);
		return -EBUSY;
	}

	return 0;
}

static void ulite_config_port(struct uart_port *port, int flags)
{
	if (!ulite_request_port(port))
		port->type = PORT_UARTLITE;
}

static int ulite_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	/* we don't want the core code to modify any port params */
	return -EINVAL;
}

#ifdef CONFIG_CONSOLE_POLL
static int ulite_get_poll_char(struct uart_port *port)
{
	if (!(ioread32be(port->membase + ULITE_STATUS)
						& ULITE_STATUS_RXVALID))
		return NO_POLL_CHAR;

	return ioread32be(port->membase + ULITE_RX);
}

static void ulite_put_poll_char(struct uart_port *port, unsigned char ch)
{
	/* Busy-wait for TX fifo space. */
	while (ioread32be(port->membase + ULITE_STATUS) & ULITE_STATUS_TXFULL)
		cpu_relax();

	/* write char to device */
	iowrite32be(ch, port->membase + ULITE_TX);
}
#endif

static struct uart_ops ulite_ops = {
	.tx_empty	= ulite_tx_empty,
	.set_mctrl	= ulite_set_mctrl,
	.get_mctrl	= ulite_get_mctrl,
	.stop_tx	= ulite_stop_tx,
	.start_tx	= ulite_start_tx,
	.stop_rx	= ulite_stop_rx,
	.enable_ms	= ulite_enable_ms,
	.break_ctl	= ulite_break_ctl,
	.startup	= ulite_startup,
	.shutdown	= ulite_shutdown,
	.set_termios	= ulite_set_termios,
	.type		= ulite_type,
	.release_port	= ulite_release_port,
	.request_port	= ulite_request_port,
	.config_port	= ulite_config_port,
	.verify_port	= ulite_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char	= ulite_get_poll_char,
	.poll_put_char	= ulite_put_poll_char,
#endif
};

/* ---------------------------------------------------------------------
 * Console driver operations
 */

#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
static void ulite_console_wait_tx(struct uart_port *port)
{
	int i;
	u8 val;

	/* Spin waiting for TX fifo to have space available */
	for (i = 0; i < 100000; i++) {
		val = ioread32be(port->membase + ULITE_STATUS);
		if ((val & ULITE_STATUS_TXFULL) == 0)
			break;
		cpu_relax();
	}
}

static void ulite_console_putchar(struct uart_port *port, int ch)
{
	ulite_console_wait_tx(port);
	iowrite32be(ch, port->membase + ULITE_TX);
}

static void ulite_console_write(struct console *co, const char *s,
				unsigned int count)
{
	struct uart_port *port = &ulite_ports[co->index];
	unsigned long flags;
	unsigned int ier;
	int locked = 1;

	/* During an oops we must not deadlock on the port lock. */
	if (oops_in_progress) {
		locked = spin_trylock_irqsave(&port->lock, flags);
	} else
		spin_lock_irqsave(&port->lock, flags);

	/* save and disable interrupt */
	ier = ioread32be(port->membase + ULITE_STATUS) & ULITE_STATUS_IE;
	iowrite32be(0, port->membase + ULITE_CONTROL);

	uart_console_write(port, s, count, ulite_console_putchar);

	ulite_console_wait_tx(port);

	/* restore interrupt state */
	if (ier)
		iowrite32be(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL);

	if (locked)
		spin_unlock_irqrestore(&port->lock, flags);
}

static int __devinit ulite_console_setup(struct console *co, char *options)
{
	struct uart_port *port;
	int baud = 9600;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';

	if (co->index < 0 || co->index >= ULITE_NR_UARTS)
		return -EINVAL;

	port = &ulite_ports[co->index];

	/* Has the device been initialized yet? */
	if (!port->mapbase) {
		pr_debug("console on ttyUL%i not present\n", co->index);
		return -ENODEV;
	}

	/* not initialized yet? */
	if (!port->membase) {
		if (ulite_request_port(port))
			return -ENODEV;
	}

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	return uart_set_options(port, co, baud, parity, bits, flow);
}

static struct uart_driver ulite_uart_driver;

static struct console ulite_console = {
	.name	= ULITE_NAME,
	.write	= ulite_console_write,
	.device	= uart_console_device,
	.setup	= ulite_console_setup,
	.flags	= CON_PRINTBUFFER,
	.index	= -1, /* Specified on the cmdline (e.g. console=ttyUL0 ) */
	.data	= &ulite_uart_driver,
};

static int __init ulite_console_init(void)
{
	register_console(&ulite_console);
	return 0;
}

console_initcall(ulite_console_init);

#endif /* CONFIG_SERIAL_UARTLITE_CONSOLE */

static struct uart_driver ulite_uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= "uartlite",
	.dev_name	= ULITE_NAME,
	.major		= ULITE_MAJOR,
	.minor		= ULITE_MINOR,
	.nr		= ULITE_NR_UARTS,
#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
	.cons		= &ulite_console,
#endif
};

/* ---------------------------------------------------------------------
 * Port assignment functions (mapping devices to uart_port structures)
 */

/** ulite_assign: register a uartlite device with the driver
 *
 * @dev: pointer to device structure
 * @id: requested id number.  Pass -1 for automatic port assignment
 * @base: base address of uartlite registers
 * @irq: irq number for uartlite
 *
 * Returns: 0 on success, <0 otherwise
 */
static int __devinit ulite_assign(struct device *dev, int id, u32 base, int irq)
{
	struct uart_port *port;
	int rc;

	/* if id = -1; then scan for a free id and use that */
	if (id < 0) {
		for (id = 0; id < ULITE_NR_UARTS; id++)
			if (ulite_ports[id].mapbase == 0)
				break;
	}
	if (id < 0 || id >= ULITE_NR_UARTS) {
		dev_err(dev, "%s%i too large\n", ULITE_NAME, id);
		return -EINVAL;
	}

	if ((ulite_ports[id].mapbase) && (ulite_ports[id].mapbase != base)) {
		dev_err(dev, "cannot assign to %s%i; it is already in use\n",
			ULITE_NAME, id);
		return -EBUSY;
	}

	port = &ulite_ports[id];

	spin_lock_init(&port->lock);
	port->fifosize = 16;
	port->regshift = 2;
	port->iotype = UPIO_MEM;
	port->iobase = 1; /* mark port in use */
	port->mapbase = base;
	port->membase = NULL;
	port->ops = &ulite_ops;
	port->irq = irq;
	port->flags = UPF_BOOT_AUTOCONF;
	port->dev = dev;
	port->type = PORT_UNKNOWN;
	port->line = id;

	dev_set_drvdata(dev, port);

	/* Register the port */
	rc = uart_add_one_port(&ulite_uart_driver, port);
	if (rc) {
		dev_err(dev, "uart_add_one_port() failed; err=%i\n", rc);
		port->mapbase = 0;
		dev_set_drvdata(dev, NULL);
		return rc;
	}

	return 0;
}

/** ulite_release: register a uartlite device with the driver
 *
 * @dev: pointer to device structure
 */
static int __devexit ulite_release(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	int rc = 0;

	if (port) {
		rc = uart_remove_one_port(&ulite_uart_driver, port);
		dev_set_drvdata(dev, NULL);
		port->mapbase = 0;
	}

	return rc;
}

/* ---------------------------------------------------------------------
 * Platform bus binding
 */

#if defined(CONFIG_OF)
/* Match table for of_platform binding */
static struct of_device_id ulite_of_match[] __devinitdata = {
	{ .compatible = "xlnx,opb-uartlite-1.00.b", },
	{ .compatible = "xlnx,xps-uartlite-1.00.a", },
	{}
};
MODULE_DEVICE_TABLE(of, ulite_of_match);
#else /* CONFIG_OF */
#define ulite_of_match NULL
#endif /* CONFIG_OF */

static int __devinit ulite_probe(struct platform_device *pdev)
{
	struct resource *res, *res2;
	int id = pdev->id;
#ifdef CONFIG_OF
	const __be32 *prop;

	/* A "port-number" DT property, when present, overrides pdev->id. */
	prop = of_get_property(pdev->dev.of_node, "port-number", NULL);
	if (prop)
		id = be32_to_cpup(prop);
#endif

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res2)
		return -ENODEV;

	return ulite_assign(&pdev->dev, id, res->start, res2->start);
}

static int __devexit ulite_remove(struct platform_device *pdev)
{
	return ulite_release(&pdev->dev);
}

/* work with hotplug and coldplug */
MODULE_ALIAS("platform:uartlite");

static struct platform_driver ulite_platform_driver = {
	.probe = ulite_probe,
	.remove = __devexit_p(ulite_remove),
	.driver = {
		.owner = THIS_MODULE,
		.name  = "uartlite",
		.of_match_table = ulite_of_match,
	},
};

/* ---------------------------------------------------------------------
 * Module setup/teardown
 */

int __init ulite_init(void)
{
	int ret;

	pr_debug("uartlite: calling uart_register_driver()\n");
	ret = uart_register_driver(&ulite_uart_driver);
	if (ret)
		goto err_uart;

	pr_debug("uartlite: calling platform_driver_register()\n");
	ret = platform_driver_register(&ulite_platform_driver);
	if (ret)
		goto err_plat;

	return 0;

err_plat:
	uart_unregister_driver(&ulite_uart_driver);
err_uart:
	printk(KERN_ERR "registering uartlite driver failed: err=%i", ret);
	return ret;
}

void __exit ulite_exit(void)
{
	platform_driver_unregister(&ulite_platform_driver);
	uart_unregister_driver(&ulite_uart_driver);
}

module_init(ulite_init);
module_exit(ulite_exit);

MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
MODULE_DESCRIPTION("Xilinx uartlite serial driver");
MODULE_LICENSE("GPL");
gpl-2.0
krosk/android-omap-tuna-sideload
drivers/net/ibm_newemac/tah.c
3184
4144
/*
 * drivers/net/ibm_newemac/tah.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller, TAH support.
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright 2004 MontaVista Software, Inc.
 * Matt Porter <mporter@kernel.crashing.org>
 *
 * Copyright (c) 2005 Eugene Surovegin <ebs@ebshome.net>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <asm/io.h>

#include "emac.h"
#include "core.h"

/* Take a reference on the TAH instance for @channel. */
int __devinit tah_attach(struct platform_device *ofdev, int channel)
{
	struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);

	mutex_lock(&dev->lock);
	/* Reset has been done at probe() time... nothing else to do for now */
	++dev->users;
	mutex_unlock(&dev->lock);

	return 0;
}

/* Drop the reference taken by tah_attach(). */
void tah_detach(struct platform_device *ofdev, int channel)
{
	struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);

	mutex_lock(&dev->lock);
	--dev->users;
	mutex_unlock(&dev->lock);
}

/* Soft-reset the TAH and reprogram its mode register. */
void tah_reset(struct platform_device *ofdev)
{
	struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
	struct tah_regs __iomem *p = dev->base;
	int n;

	/* Reset TAH */
	out_be32(&p->mr, TAH_MR_SR);
	n = 100;
	while ((in_be32(&p->mr) & TAH_MR_SR) && n)
		--n;

	if (unlikely(!n))
		printk(KERN_ERR "%s: reset timeout\n",
		       ofdev->dev.of_node->full_name);

	/* 10KB TAH TX FIFO accommodates the max MTU of 9000 */
	out_be32(&p->mr,
		 TAH_MR_CVR | TAH_MR_ST_768 | TAH_MR_TFS_10KB | TAH_MR_DTFP |
		 TAH_MR_DIG);
}

int tah_get_regs_len(struct platform_device *ofdev)
{
	return sizeof(struct emac_ethtool_regs_subhdr) +
		sizeof(struct tah_regs);
}

/* Copy the register block into an ethtool dump buffer; returns the
 * first byte past what was written. */
void *tah_dump_regs(struct platform_device *ofdev, void *buf)
{
	struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
	struct emac_ethtool_regs_subhdr *hdr = buf;
	struct tah_regs *regs = (struct tah_regs *)(hdr + 1);

	hdr->version = 0;
	hdr->index = 0; /* for now, are there chips with more than one
			 * zmii ? if yes, then we'll add a cell_index
			 * like we do for emac
			 */
	memcpy_fromio(regs, dev->base, sizeof(struct tah_regs));
	return regs + 1;
}

static int __devinit tah_probe(struct platform_device *ofdev)
{
	struct device_node *np = ofdev->dev.of_node;
	struct tah_instance *dev;
	struct resource regs;
	int rc;

	rc = -ENOMEM;
	dev = kzalloc(sizeof(struct tah_instance), GFP_KERNEL);
	if (dev == NULL) {
		printk(KERN_ERR "%s: could not allocate TAH device!\n",
		       np->full_name);
		goto err_gone;
	}

	mutex_init(&dev->lock);
	dev->ofdev = ofdev;

	rc = -ENXIO;
	if (of_address_to_resource(np, 0, &regs)) {
		printk(KERN_ERR "%s: Can't get registers address\n",
		       np->full_name);
		goto err_free;
	}

	rc = -ENOMEM;
	dev->base = (struct tah_regs __iomem *)ioremap(regs.start,
					sizeof(struct tah_regs));
	if (dev->base == NULL) {
		printk(KERN_ERR "%s: Can't map device registers!\n",
		       np->full_name);
		goto err_free;
	}

	dev_set_drvdata(&ofdev->dev, dev);

	/* Initialize TAH and enable IPv4 checksum verification, no TSO yet */
	tah_reset(ofdev);

	printk(KERN_INFO
	       "TAH %s initialized\n", ofdev->dev.of_node->full_name);
	wmb();

	return 0;

err_free:
	kfree(dev);
err_gone:
	return rc;
}

static int __devexit tah_remove(struct platform_device *ofdev)
{
	struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);

	dev_set_drvdata(&ofdev->dev, NULL);

	WARN_ON(dev->users != 0);

	iounmap(dev->base);
	kfree(dev);

	return 0;
}

static struct of_device_id tah_match[] =
{
	{
		.compatible	= "ibm,tah",
	},
	/* For backward compat with old DT */
	{
		.type		= "tah",
	},
	{},
};

static struct platform_driver tah_driver = {
	.driver = {
		.name = "emac-tah",
		.owner = THIS_MODULE,
		.of_match_table = tah_match,
	},
	.probe = tah_probe,
	.remove = tah_remove,
};

int __init tah_init(void)
{
	return platform_driver_register(&tah_driver);
}

void tah_exit(void)
{
	platform_driver_unregister(&tah_driver);
}
gpl-2.0
jarpii/android_kernel_huawei_y530
drivers/net/ethernet/amd/am79c961a.c
4976
18387
/* * linux/drivers/net/ethernet/amd/am79c961a.c * * by Russell King <rmk@arm.linux.org.uk> 1995-2001. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Derived from various things including skeleton.c * * This is a special driver for the am79c961A Lance chip used in the * Intel (formally Digital Equipment Corp) EBSA110 platform. Please * note that this can not be built as a module (it doesn't make sense). */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/crc32.h> #include <linux/bitops.h> #include <linux/platform_device.h> #include <linux/io.h> #include <mach/hardware.h> #define TX_BUFFERS 15 #define RX_BUFFERS 25 #include "am79c961a.h" static irqreturn_t am79c961_interrupt (int irq, void *dev_id); static unsigned int net_debug = NET_DEBUG; static const char version[] = "am79c961 ethernet driver (C) 1995-2001 Russell King v0.04\n"; /* --------------------------------------------------------------------------- */ #ifdef __arm__ static void write_rreg(u_long base, u_int reg, u_int val) { asm volatile( "str%?h %1, [%2] @ NET_RAP\n\t" "str%?h %0, [%2, #-4] @ NET_RDP" : : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464)); } static inline unsigned short read_rreg(u_long base_addr, u_int reg) { unsigned short v; asm volatile( "str%?h %1, [%2] @ NET_RAP\n\t" "ldr%?h %0, [%2, #-4] @ NET_RDP" : "=r" (v) : "r" (reg), "r" (ISAIO_BASE + 0x0464)); return v; } static inline void write_ireg(u_long base, u_int reg, u_int val) { asm volatile( "str%?h %1, [%2] @ NET_RAP\n\t" "str%?h %0, [%2, #8] @ NET_IDP" : : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464)); } static inline unsigned short 
read_ireg(u_long base_addr, u_int reg) { u_short v; asm volatile( "str%?h %1, [%2] @ NAT_RAP\n\t" "ldr%?h %0, [%2, #8] @ NET_IDP\n\t" : "=r" (v) : "r" (reg), "r" (ISAIO_BASE + 0x0464)); return v; } #define am_writeword(dev,off,val) __raw_writew(val, ISAMEM_BASE + ((off) << 1)) #define am_readword(dev,off) __raw_readw(ISAMEM_BASE + ((off) << 1)) static void am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length) { offset = ISAMEM_BASE + (offset << 1); length = (length + 1) & ~1; if ((int)buf & 2) { asm volatile("str%?h %2, [%0], #4" : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8))); buf += 2; length -= 2; } while (length > 8) { register unsigned int tmp asm("r2"), tmp2 asm("r3"); asm volatile( "ldm%?ia %0!, {%1, %2}" : "+r" (buf), "=&r" (tmp), "=&r" (tmp2)); length -= 8; asm volatile( "str%?h %1, [%0], #4\n\t" "mov%? %1, %1, lsr #16\n\t" "str%?h %1, [%0], #4\n\t" "str%?h %2, [%0], #4\n\t" "mov%? %2, %2, lsr #16\n\t" "str%?h %2, [%0], #4" : "+r" (offset), "=&r" (tmp), "=&r" (tmp2)); } while (length > 0) { asm volatile("str%?h %2, [%0], #4" : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8))); buf += 2; length -= 2; } } static void am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length) { offset = ISAMEM_BASE + (offset << 1); length = (length + 1) & ~1; if ((int)buf & 2) { unsigned int tmp; asm volatile( "ldr%?h %2, [%0], #4\n\t" "str%?b %2, [%1], #1\n\t" "mov%? %2, %2, lsr #8\n\t" "str%?b %2, [%1], #1" : "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf)); length -= 2; } while (length > 8) { register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3; asm volatile( "ldr%?h %2, [%0], #4\n\t" "ldr%?h %4, [%0], #4\n\t" "ldr%?h %3, [%0], #4\n\t" "orr%? %2, %2, %4, lsl #16\n\t" "ldr%?h %4, [%0], #4\n\t" "orr%? 
%3, %3, %4, lsl #16\n\t" "stm%?ia %1!, {%2, %3}" : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3) : "0" (offset), "1" (buf)); length -= 8; } while (length > 0) { unsigned int tmp; asm volatile( "ldr%?h %2, [%0], #4\n\t" "str%?b %2, [%1], #1\n\t" "mov%? %2, %2, lsr #8\n\t" "str%?b %2, [%1], #1" : "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf)); length -= 2; } } #else #error Not compatible #endif static int am79c961_ramtest(struct net_device *dev, unsigned int val) { unsigned char *buffer = kmalloc (65536, GFP_KERNEL); int i, error = 0, errorcount = 0; if (!buffer) return 0; memset (buffer, val, 65536); am_writebuffer(dev, 0, buffer, 65536); memset (buffer, val ^ 255, 65536); am_readbuffer(dev, 0, buffer, 65536); for (i = 0; i < 65536; i++) { if (buffer[i] != val && !error) { printk ("%s: buffer error (%02X %02X) %05X - ", dev->name, val, buffer[i], i); error = 1; errorcount ++; } else if (error && buffer[i] == val) { printk ("%05X\n", i); error = 0; } } if (error) printk ("10000\n"); kfree (buffer); return errorcount; } static void am79c961_mc_hash(char *addr, u16 *hash) { int idx, bit; u32 crc; crc = ether_crc_le(ETH_ALEN, addr); idx = crc >> 30; bit = (crc >> 26) & 15; hash[idx] |= 1 << bit; } static unsigned int am79c961_get_rx_mode(struct net_device *dev, u16 *hash) { unsigned int mode = MODE_PORT_10BT; if (dev->flags & IFF_PROMISC) { mode |= MODE_PROMISC; memset(hash, 0xff, 4 * sizeof(*hash)); } else if (dev->flags & IFF_ALLMULTI) { memset(hash, 0xff, 4 * sizeof(*hash)); } else { struct netdev_hw_addr *ha; memset(hash, 0, 4 * sizeof(*hash)); netdev_for_each_mc_addr(ha, dev) am79c961_mc_hash(ha->addr, hash); } return mode; } static void am79c961_init_for_open(struct net_device *dev) { struct dev_priv *priv = netdev_priv(dev); unsigned long flags; unsigned char *p; u_int hdr_addr, first_free_addr; u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash); int i; /* * Stop the chip. 
*/ spin_lock_irqsave(&priv->chip_lock, flags); write_rreg (dev->base_addr, CSR0, CSR0_BABL|CSR0_CERR|CSR0_MISS|CSR0_MERR|CSR0_TINT|CSR0_RINT|CSR0_STOP); spin_unlock_irqrestore(&priv->chip_lock, flags); write_ireg (dev->base_addr, 5, 0x00a0); /* Receive address LED */ write_ireg (dev->base_addr, 6, 0x0081); /* Collision LED */ write_ireg (dev->base_addr, 7, 0x0090); /* XMIT LED */ write_ireg (dev->base_addr, 2, 0x0000); /* MODE register selects media */ for (i = LADRL; i <= LADRH; i++) write_rreg (dev->base_addr, i, multi_hash[i - LADRL]); for (i = PADRL, p = dev->dev_addr; i <= PADRH; i++, p += 2) write_rreg (dev->base_addr, i, p[0] | (p[1] << 8)); write_rreg (dev->base_addr, MODE, mode); write_rreg (dev->base_addr, POLLINT, 0); write_rreg (dev->base_addr, SIZERXR, -RX_BUFFERS); write_rreg (dev->base_addr, SIZETXR, -TX_BUFFERS); first_free_addr = RX_BUFFERS * 8 + TX_BUFFERS * 8 + 16; hdr_addr = 0; priv->rxhead = 0; priv->rxtail = 0; priv->rxhdr = hdr_addr; for (i = 0; i < RX_BUFFERS; i++) { priv->rxbuffer[i] = first_free_addr; am_writeword (dev, hdr_addr, first_free_addr); am_writeword (dev, hdr_addr + 2, RMD_OWN); am_writeword (dev, hdr_addr + 4, (-1600)); am_writeword (dev, hdr_addr + 6, 0); first_free_addr += 1600; hdr_addr += 8; } priv->txhead = 0; priv->txtail = 0; priv->txhdr = hdr_addr; for (i = 0; i < TX_BUFFERS; i++) { priv->txbuffer[i] = first_free_addr; am_writeword (dev, hdr_addr, first_free_addr); am_writeword (dev, hdr_addr + 2, TMD_STP|TMD_ENP); am_writeword (dev, hdr_addr + 4, 0xf000); am_writeword (dev, hdr_addr + 6, 0); first_free_addr += 1600; hdr_addr += 8; } write_rreg (dev->base_addr, BASERXL, priv->rxhdr); write_rreg (dev->base_addr, BASERXH, 0); write_rreg (dev->base_addr, BASETXL, priv->txhdr); write_rreg (dev->base_addr, BASERXH, 0); write_rreg (dev->base_addr, CSR0, CSR0_STOP); write_rreg (dev->base_addr, CSR3, CSR3_IDONM|CSR3_BABLM|CSR3_DXSUFLO); write_rreg (dev->base_addr, CSR4, 
CSR4_APAD_XMIT|CSR4_MFCOM|CSR4_RCVCCOM|CSR4_TXSTRTM|CSR4_JABM); write_rreg (dev->base_addr, CSR0, CSR0_IENA|CSR0_STRT); } static void am79c961_timer(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct dev_priv *priv = netdev_priv(dev); unsigned int lnkstat, carrier; unsigned long flags; spin_lock_irqsave(&priv->chip_lock, flags); lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST; spin_unlock_irqrestore(&priv->chip_lock, flags); carrier = netif_carrier_ok(dev); if (lnkstat && !carrier) { netif_carrier_on(dev); printk("%s: link up\n", dev->name); } else if (!lnkstat && carrier) { netif_carrier_off(dev); printk("%s: link down\n", dev->name); } mod_timer(&priv->timer, jiffies + msecs_to_jiffies(500)); } /* * Open/initialize the board. */ static int am79c961_open(struct net_device *dev) { struct dev_priv *priv = netdev_priv(dev); int ret; ret = request_irq(dev->irq, am79c961_interrupt, 0, dev->name, dev); if (ret) return ret; am79c961_init_for_open(dev); netif_carrier_off(dev); priv->timer.expires = jiffies; add_timer(&priv->timer); netif_start_queue(dev); return 0; } /* * The inverse routine to am79c961_open(). */ static int am79c961_close(struct net_device *dev) { struct dev_priv *priv = netdev_priv(dev); unsigned long flags; del_timer_sync(&priv->timer); netif_stop_queue(dev); netif_carrier_off(dev); spin_lock_irqsave(&priv->chip_lock, flags); write_rreg (dev->base_addr, CSR0, CSR0_STOP); write_rreg (dev->base_addr, CSR3, CSR3_MASKALL); spin_unlock_irqrestore(&priv->chip_lock, flags); free_irq (dev->irq, dev); return 0; } /* * Set or clear promiscuous/multicast mode filter for this adapter. 
*/ static void am79c961_setmulticastlist (struct net_device *dev) { struct dev_priv *priv = netdev_priv(dev); unsigned long flags; u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash); int i, stopped; spin_lock_irqsave(&priv->chip_lock, flags); stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP; if (!stopped) { /* * Put the chip into suspend mode */ write_rreg(dev->base_addr, CTRL1, CTRL1_SPND); /* * Spin waiting for chip to report suspend mode */ while ((read_rreg(dev->base_addr, CTRL1) & CTRL1_SPND) == 0) { spin_unlock_irqrestore(&priv->chip_lock, flags); nop(); spin_lock_irqsave(&priv->chip_lock, flags); } } /* * Update the multicast hash table */ for (i = 0; i < ARRAY_SIZE(multi_hash); i++) write_rreg(dev->base_addr, i + LADRL, multi_hash[i]); /* * Write the mode register */ write_rreg(dev->base_addr, MODE, mode); if (!stopped) { /* * Put the chip back into running mode */ write_rreg(dev->base_addr, CTRL1, 0); } spin_unlock_irqrestore(&priv->chip_lock, flags); } static void am79c961_timeout(struct net_device *dev) { printk(KERN_WARNING "%s: transmit timed out, network cable problem?\n", dev->name); /* * ought to do some setup of the tx side here */ netif_wake_queue(dev); } /* * Transmit a packet */ static int am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev) { struct dev_priv *priv = netdev_priv(dev); unsigned int hdraddr, bufaddr; unsigned int head; unsigned long flags; head = priv->txhead; hdraddr = priv->txhdr + (head << 3); bufaddr = priv->txbuffer[head]; head += 1; if (head >= TX_BUFFERS) head = 0; am_writebuffer (dev, bufaddr, skb->data, skb->len); am_writeword (dev, hdraddr + 4, -skb->len); am_writeword (dev, hdraddr + 2, TMD_OWN|TMD_STP|TMD_ENP); priv->txhead = head; spin_lock_irqsave(&priv->chip_lock, flags); write_rreg (dev->base_addr, CSR0, CSR0_TDMD|CSR0_IENA); spin_unlock_irqrestore(&priv->chip_lock, flags); /* * If the next packet is owned by the ethernet device, * then the tx ring is full and we can't add another * 
packet. */ if (am_readword(dev, priv->txhdr + (priv->txhead << 3) + 2) & TMD_OWN) netif_stop_queue(dev); dev_kfree_skb(skb); return NETDEV_TX_OK; } /* * If we have a good packet(s), get it/them out of the buffers. */ static void am79c961_rx(struct net_device *dev, struct dev_priv *priv) { do { struct sk_buff *skb; u_int hdraddr; u_int pktaddr; u_int status; int len; hdraddr = priv->rxhdr + (priv->rxtail << 3); pktaddr = priv->rxbuffer[priv->rxtail]; status = am_readword (dev, hdraddr + 2); if (status & RMD_OWN) /* do we own it? */ break; priv->rxtail ++; if (priv->rxtail >= RX_BUFFERS) priv->rxtail = 0; if ((status & (RMD_ERR|RMD_STP|RMD_ENP)) != (RMD_STP|RMD_ENP)) { am_writeword (dev, hdraddr + 2, RMD_OWN); dev->stats.rx_errors++; if (status & RMD_ERR) { if (status & RMD_FRAM) dev->stats.rx_frame_errors++; if (status & RMD_CRC) dev->stats.rx_crc_errors++; } else if (status & RMD_STP) dev->stats.rx_length_errors++; continue; } len = am_readword(dev, hdraddr + 6); skb = netdev_alloc_skb(dev, len + 2); if (skb) { skb_reserve(skb, 2); am_readbuffer(dev, pktaddr, skb_put(skb, len), len); am_writeword(dev, hdraddr + 2, RMD_OWN); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->stats.rx_bytes += len; dev->stats.rx_packets++; } else { am_writeword (dev, hdraddr + 2, RMD_OWN); printk (KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name); dev->stats.rx_dropped++; break; } } while (1); } /* * Update stats for the transmitted packet */ static void am79c961_tx(struct net_device *dev, struct dev_priv *priv) { do { short len; u_int hdraddr; u_int status; hdraddr = priv->txhdr + (priv->txtail << 3); status = am_readword (dev, hdraddr + 2); if (status & TMD_OWN) break; priv->txtail ++; if (priv->txtail >= TX_BUFFERS) priv->txtail = 0; if (status & TMD_ERR) { u_int status2; dev->stats.tx_errors++; status2 = am_readword (dev, hdraddr + 6); /* * Clear the error byte */ am_writeword (dev, hdraddr + 6, 0); if (status2 & TST_RTRY) dev->stats.collisions += 16; 
if (status2 & TST_LCOL) dev->stats.tx_window_errors++; if (status2 & TST_LCAR) dev->stats.tx_carrier_errors++; if (status2 & TST_UFLO) dev->stats.tx_fifo_errors++; continue; } dev->stats.tx_packets++; len = am_readword (dev, hdraddr + 4); dev->stats.tx_bytes += -len; } while (priv->txtail != priv->txhead); netif_wake_queue(dev); } static irqreturn_t am79c961_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct dev_priv *priv = netdev_priv(dev); u_int status, n = 100; int handled = 0; do { status = read_rreg(dev->base_addr, CSR0); write_rreg(dev->base_addr, CSR0, status & (CSR0_IENA|CSR0_TINT|CSR0_RINT| CSR0_MERR|CSR0_MISS|CSR0_CERR|CSR0_BABL)); if (status & CSR0_RINT) { handled = 1; am79c961_rx(dev, priv); } if (status & CSR0_TINT) { handled = 1; am79c961_tx(dev, priv); } if (status & CSR0_MISS) { handled = 1; dev->stats.rx_dropped++; } if (status & CSR0_CERR) { handled = 1; mod_timer(&priv->timer, jiffies); } } while (--n && status & (CSR0_RINT | CSR0_TINT)); return IRQ_RETVAL(handled); } #ifdef CONFIG_NET_POLL_CONTROLLER static void am79c961_poll_controller(struct net_device *dev) { unsigned long flags; local_irq_save(flags); am79c961_interrupt(dev->irq, dev); local_irq_restore(flags); } #endif /* * Initialise the chip. Note that we always expect * to be entered with interrupts enabled. 
*/ static int am79c961_hw_init(struct net_device *dev) { struct dev_priv *priv = netdev_priv(dev); spin_lock_irq(&priv->chip_lock); write_rreg (dev->base_addr, CSR0, CSR0_STOP); write_rreg (dev->base_addr, CSR3, CSR3_MASKALL); spin_unlock_irq(&priv->chip_lock); am79c961_ramtest(dev, 0x66); am79c961_ramtest(dev, 0x99); return 0; } static void __init am79c961_banner(void) { static unsigned version_printed; if (net_debug && version_printed++ == 0) printk(KERN_INFO "%s", version); } static const struct net_device_ops am79c961_netdev_ops = { .ndo_open = am79c961_open, .ndo_stop = am79c961_close, .ndo_start_xmit = am79c961_sendpacket, .ndo_set_rx_mode = am79c961_setmulticastlist, .ndo_tx_timeout = am79c961_timeout, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = am79c961_poll_controller, #endif }; static int __devinit am79c961_probe(struct platform_device *pdev) { struct resource *res; struct net_device *dev; struct dev_priv *priv; int i, ret; res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!res) return -ENODEV; dev = alloc_etherdev(sizeof(struct dev_priv)); ret = -ENOMEM; if (!dev) goto out; SET_NETDEV_DEV(dev, &pdev->dev); priv = netdev_priv(dev); /* * Fixed address and IRQ lines here. * The PNP initialisation should have been * done by the ether bootp loader. */ dev->base_addr = res->start; ret = platform_get_irq(pdev, 0); if (ret < 0) { ret = -ENODEV; goto nodev; } dev->irq = ret; ret = -ENODEV; if (!request_region(dev->base_addr, 0x18, dev->name)) goto nodev; /* * Reset the device. */ inb(dev->base_addr + NET_RESET); udelay(5); /* * Check the manufacturer part of the * ether address. 
*/ if (inb(dev->base_addr) != 0x08 || inb(dev->base_addr + 2) != 0x00 || inb(dev->base_addr + 4) != 0x2b) goto release; for (i = 0; i < 6; i++) dev->dev_addr[i] = inb(dev->base_addr + i * 2) & 0xff; am79c961_banner(); spin_lock_init(&priv->chip_lock); init_timer(&priv->timer); priv->timer.data = (unsigned long)dev; priv->timer.function = am79c961_timer; if (am79c961_hw_init(dev)) goto release; dev->netdev_ops = &am79c961_netdev_ops; ret = register_netdev(dev); if (ret == 0) { printk(KERN_INFO "%s: ether address %pM\n", dev->name, dev->dev_addr); return 0; } release: release_region(dev->base_addr, 0x18); nodev: free_netdev(dev); out: return ret; } static struct platform_driver am79c961_driver = { .probe = am79c961_probe, .driver = { .name = "am79c961", }, }; static int __init am79c961_init(void) { return platform_driver_register(&am79c961_driver); } __initcall(am79c961_init);
gpl-2.0
SamueleCiprietti/nova_kernel
arch/arm/plat-mxc/devices/platform-mxc_nand.c
8048
2388
/* * Copyright (C) 2009-2010 Pengutronix * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de> * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ #include <asm/sizes.h> #include <mach/hardware.h> #include <mach/devices-common.h> #define imx_mxc_nand_data_entry_single(soc, _size) \ { \ .iobase = soc ## _NFC_BASE_ADDR, \ .iosize = _size, \ .irq = soc ## _INT_NFC \ } #define imx_mxc_nandv3_data_entry_single(soc, _size) \ { \ .id = -1, \ .iobase = soc ## _NFC_BASE_ADDR, \ .iosize = _size, \ .axibase = soc ## _NFC_AXI_BASE_ADDR, \ .irq = soc ## _INT_NFC \ } #ifdef CONFIG_SOC_IMX21 const struct imx_mxc_nand_data imx21_mxc_nand_data __initconst = imx_mxc_nand_data_entry_single(MX21, SZ_4K); #endif /* ifdef CONFIG_SOC_IMX21 */ #ifdef CONFIG_SOC_IMX25 const struct imx_mxc_nand_data imx25_mxc_nand_data __initconst = imx_mxc_nand_data_entry_single(MX25, SZ_8K); #endif /* ifdef CONFIG_SOC_IMX25 */ #ifdef CONFIG_SOC_IMX27 const struct imx_mxc_nand_data imx27_mxc_nand_data __initconst = imx_mxc_nand_data_entry_single(MX27, SZ_4K); #endif /* ifdef CONFIG_SOC_IMX27 */ #ifdef CONFIG_SOC_IMX31 const struct imx_mxc_nand_data imx31_mxc_nand_data __initconst = imx_mxc_nand_data_entry_single(MX31, SZ_4K); #endif #ifdef CONFIG_SOC_IMX35 const struct imx_mxc_nand_data imx35_mxc_nand_data __initconst = imx_mxc_nand_data_entry_single(MX35, SZ_8K); #endif #ifdef CONFIG_SOC_IMX51 const struct imx_mxc_nand_data imx51_mxc_nand_data __initconst = imx_mxc_nandv3_data_entry_single(MX51, SZ_16K); #endif struct platform_device *__init imx_add_mxc_nand( const struct imx_mxc_nand_data *data, const struct mxc_nand_platform_data *pdata) { /* AXI has to come first, that's how the mxc_nand driver expect it */ struct resource res[] = { { .start = data->axibase, .end = data->axibase + SZ_16K - 1, .flags = IORESOURCE_MEM, }, { .start = data->iobase, .end = data->iobase + 
data->iosize - 1, .flags = IORESOURCE_MEM, }, { .start = data->irq, .end = data->irq, .flags = IORESOURCE_IRQ, }, }; return imx_add_platform_device("mxc_nand", data->id, res + !data->axibase, ARRAY_SIZE(res) - !data->axibase, pdata, sizeof(*pdata)); }
gpl-2.0
USBhost/Simple-Stream
arch/x86/math-emu/fpu_aux.c
12656
4394
/*---------------------------------------------------------------------------+ | fpu_aux.c | | | | Code to implement some of the FPU auxiliary instructions. | | | | Copyright (C) 1992,1993,1994,1997 | | W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia | | E-mail billm@suburbia.net | | | | | +---------------------------------------------------------------------------*/ #include "fpu_system.h" #include "exception.h" #include "fpu_emu.h" #include "status_w.h" #include "control_w.h" static void fnop(void) { } static void fclex(void) { partial_status &= ~(SW_Backward | SW_Summary | SW_Stack_Fault | SW_Precision | SW_Underflow | SW_Overflow | SW_Zero_Div | SW_Denorm_Op | SW_Invalid); no_ip_update = 1; } /* Needs to be externally visible */ void finit_soft_fpu(struct i387_soft_struct *soft) { struct address *oaddr, *iaddr; memset(soft, 0, sizeof(*soft)); soft->cwd = 0x037f; soft->swd = 0; soft->ftop = 0; /* We don't keep top in the status word internally. */ soft->twd = 0xffff; /* The behaviour is different from that detailed in Section 15.1.6 of the Intel manual */ oaddr = (struct address *)&soft->foo; oaddr->offset = 0; oaddr->selector = 0; iaddr = (struct address *)&soft->fip; iaddr->offset = 0; iaddr->selector = 0; iaddr->opcode = 0; soft->no_update = 1; } void finit(void) { finit_soft_fpu(&current->thread.fpu.state->soft); } /* * These are nops on the i387.. 
*/ #define feni fnop #define fdisi fnop #define fsetpm fnop static FUNC const finit_table[] = { feni, fdisi, fclex, finit, fsetpm, FPU_illegal, FPU_illegal, FPU_illegal }; void finit_(void) { (finit_table[FPU_rm]) (); } static void fstsw_ax(void) { *(short *)&FPU_EAX = status_word(); no_ip_update = 1; } static FUNC const fstsw_table[] = { fstsw_ax, FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal }; void fstsw_(void) { (fstsw_table[FPU_rm]) (); } static FUNC const fp_nop_table[] = { fnop, FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal }; void fp_nop(void) { (fp_nop_table[FPU_rm]) (); } void fld_i_(void) { FPU_REG *st_new_ptr; int i; u_char tag; if (STACK_OVERFLOW) { FPU_stack_overflow(); return; } /* fld st(i) */ i = FPU_rm; if (NOT_EMPTY(i)) { reg_copy(&st(i), st_new_ptr); tag = FPU_gettagi(i); push(); FPU_settag0(tag); } else { if (control_word & CW_Invalid) { /* The masked response */ FPU_stack_underflow(); } else EXCEPTION(EX_StackUnder); } } void fxch_i(void) { /* fxch st(i) */ FPU_REG t; int i = FPU_rm; FPU_REG *st0_ptr = &st(0), *sti_ptr = &st(i); long tag_word = fpu_tag_word; int regnr = top & 7, regnri = ((regnr + i) & 7); u_char st0_tag = (tag_word >> (regnr * 2)) & 3; u_char sti_tag = (tag_word >> (regnri * 2)) & 3; if (st0_tag == TAG_Empty) { if (sti_tag == TAG_Empty) { FPU_stack_underflow(); FPU_stack_underflow_i(i); return; } if (control_word & CW_Invalid) { /* Masked response */ FPU_copy_to_reg0(sti_ptr, sti_tag); } FPU_stack_underflow_i(i); return; } if (sti_tag == TAG_Empty) { if (control_word & CW_Invalid) { /* Masked response */ FPU_copy_to_regi(st0_ptr, st0_tag, i); } FPU_stack_underflow(); return; } clear_C1(); reg_copy(st0_ptr, &t); reg_copy(sti_ptr, st0_ptr); reg_copy(&t, sti_ptr); tag_word &= ~(3 << (regnr * 2)) & ~(3 << (regnri * 2)); tag_word |= (sti_tag << (regnr * 2)) | (st0_tag << (regnri * 2)); fpu_tag_word = tag_word; } void ffree_(void) { /* ffree 
st(i) */ FPU_settagi(FPU_rm, TAG_Empty); } void ffreep(void) { /* ffree st(i) + pop - unofficial code */ FPU_settagi(FPU_rm, TAG_Empty); FPU_pop(); } void fst_i_(void) { /* fst st(i) */ FPU_copy_to_regi(&st(0), FPU_gettag0(), FPU_rm); } void fstp_i(void) { /* fstp st(i) */ FPU_copy_to_regi(&st(0), FPU_gettag0(), FPU_rm); FPU_pop(); }
gpl-2.0
viaembedded/vab820-kernel-bsp
drivers/ide/falconide.c
14704
3961
/* * Atari Falcon IDE Driver * * Created 12 Jul 1997 by Geert Uytterhoeven * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/blkdev.h> #include <linux/ide.h> #include <linux/init.h> #include <asm/setup.h> #include <asm/atarihw.h> #include <asm/atariints.h> #include <asm/atari_stdma.h> #include <asm/ide.h> #define DRV_NAME "falconide" /* * Base of the IDE interface */ #define ATA_HD_BASE 0xfff00000 /* * Offsets from the above base */ #define ATA_HD_CONTROL 0x39 /* * falconide_intr_lock is used to obtain access to the IDE interrupt, * which is shared between several drivers. */ static int falconide_intr_lock; static void falconide_release_lock(void) { if (falconide_intr_lock == 0) { printk(KERN_ERR "%s: bug\n", __func__); return; } falconide_intr_lock = 0; stdma_release(); } static void falconide_get_lock(irq_handler_t handler, void *data) { if (falconide_intr_lock == 0) { if (in_interrupt() > 0) panic("Falcon IDE hasn't ST-DMA lock in interrupt"); stdma_lock(handler, data); falconide_intr_lock = 1; } } static void falconide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, unsigned int len) { unsigned long data_addr = drive->hwif->io_ports.data_addr; if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) { __ide_mm_insw(data_addr, buf, (len + 1) / 2); return; } raw_insw_swapw((u16 *)data_addr, buf, (len + 1) / 2); } static void falconide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, unsigned int len) { unsigned long data_addr = drive->hwif->io_ports.data_addr; if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) { __ide_mm_outsw(data_addr, buf, (len + 1) / 2); return; } raw_outsw_swapw((u16 *)data_addr, buf, (len + 1) / 2); } /* Atari has a byte-swapped IDE interface 
*/ static const struct ide_tp_ops falconide_tp_ops = { .exec_command = ide_exec_command, .read_status = ide_read_status, .read_altstatus = ide_read_altstatus, .write_devctl = ide_write_devctl, .dev_select = ide_dev_select, .tf_load = ide_tf_load, .tf_read = ide_tf_read, .input_data = falconide_input_data, .output_data = falconide_output_data, }; static const struct ide_port_info falconide_port_info = { .get_lock = falconide_get_lock, .release_lock = falconide_release_lock, .tp_ops = &falconide_tp_ops, .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_SERIALIZE | IDE_HFLAG_NO_DMA, .irq_flags = IRQF_SHARED, .chipset = ide_generic, }; static void __init falconide_setup_ports(struct ide_hw *hw) { int i; memset(hw, 0, sizeof(*hw)); hw->io_ports.data_addr = ATA_HD_BASE; for (i = 1; i < 8; i++) hw->io_ports_array[i] = ATA_HD_BASE + 1 + i * 4; hw->io_ports.ctl_addr = ATA_HD_BASE + ATA_HD_CONTROL; hw->irq = IRQ_MFP_IDE; } /* * Probe for a Falcon IDE interface */ static int __init falconide_init(void) { struct ide_host *host; struct ide_hw hw, *hws[] = { &hw }; int rc; if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE)) return -ENODEV; printk(KERN_INFO "ide: Falcon IDE controller\n"); if (!request_mem_region(ATA_HD_BASE, 0x40, DRV_NAME)) { printk(KERN_ERR "%s: resources busy\n", DRV_NAME); return -EBUSY; } falconide_setup_ports(&hw); host = ide_host_alloc(&falconide_port_info, hws, 1); if (host == NULL) { rc = -ENOMEM; goto err; } falconide_get_lock(NULL, NULL); rc = ide_host_register(host, &falconide_port_info, hws); falconide_release_lock(); if (rc) goto err_free; return 0; err_free: ide_host_free(host); err: release_mem_region(ATA_HD_BASE, 0x40); return rc; } module_init(falconide_init); MODULE_LICENSE("GPL");
gpl-2.0
sonicxml/Spectrum
drivers/ide/falconide.c
14704
3961
/* * Atari Falcon IDE Driver * * Created 12 Jul 1997 by Geert Uytterhoeven * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/blkdev.h> #include <linux/ide.h> #include <linux/init.h> #include <asm/setup.h> #include <asm/atarihw.h> #include <asm/atariints.h> #include <asm/atari_stdma.h> #include <asm/ide.h> #define DRV_NAME "falconide" /* * Base of the IDE interface */ #define ATA_HD_BASE 0xfff00000 /* * Offsets from the above base */ #define ATA_HD_CONTROL 0x39 /* * falconide_intr_lock is used to obtain access to the IDE interrupt, * which is shared between several drivers. */ static int falconide_intr_lock; static void falconide_release_lock(void) { if (falconide_intr_lock == 0) { printk(KERN_ERR "%s: bug\n", __func__); return; } falconide_intr_lock = 0; stdma_release(); } static void falconide_get_lock(irq_handler_t handler, void *data) { if (falconide_intr_lock == 0) { if (in_interrupt() > 0) panic("Falcon IDE hasn't ST-DMA lock in interrupt"); stdma_lock(handler, data); falconide_intr_lock = 1; } } static void falconide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, unsigned int len) { unsigned long data_addr = drive->hwif->io_ports.data_addr; if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) { __ide_mm_insw(data_addr, buf, (len + 1) / 2); return; } raw_insw_swapw((u16 *)data_addr, buf, (len + 1) / 2); } static void falconide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, unsigned int len) { unsigned long data_addr = drive->hwif->io_ports.data_addr; if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) { __ide_mm_outsw(data_addr, buf, (len + 1) / 2); return; } raw_outsw_swapw((u16 *)data_addr, buf, (len + 1) / 2); } /* Atari has a byte-swapped IDE interface 
*/ static const struct ide_tp_ops falconide_tp_ops = { .exec_command = ide_exec_command, .read_status = ide_read_status, .read_altstatus = ide_read_altstatus, .write_devctl = ide_write_devctl, .dev_select = ide_dev_select, .tf_load = ide_tf_load, .tf_read = ide_tf_read, .input_data = falconide_input_data, .output_data = falconide_output_data, }; static const struct ide_port_info falconide_port_info = { .get_lock = falconide_get_lock, .release_lock = falconide_release_lock, .tp_ops = &falconide_tp_ops, .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_SERIALIZE | IDE_HFLAG_NO_DMA, .irq_flags = IRQF_SHARED, .chipset = ide_generic, }; static void __init falconide_setup_ports(struct ide_hw *hw) { int i; memset(hw, 0, sizeof(*hw)); hw->io_ports.data_addr = ATA_HD_BASE; for (i = 1; i < 8; i++) hw->io_ports_array[i] = ATA_HD_BASE + 1 + i * 4; hw->io_ports.ctl_addr = ATA_HD_BASE + ATA_HD_CONTROL; hw->irq = IRQ_MFP_IDE; } /* * Probe for a Falcon IDE interface */ static int __init falconide_init(void) { struct ide_host *host; struct ide_hw hw, *hws[] = { &hw }; int rc; if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE)) return -ENODEV; printk(KERN_INFO "ide: Falcon IDE controller\n"); if (!request_mem_region(ATA_HD_BASE, 0x40, DRV_NAME)) { printk(KERN_ERR "%s: resources busy\n", DRV_NAME); return -EBUSY; } falconide_setup_ports(&hw); host = ide_host_alloc(&falconide_port_info, hws, 1); if (host == NULL) { rc = -ENOMEM; goto err; } falconide_get_lock(NULL, NULL); rc = ide_host_register(host, &falconide_port_info, hws); falconide_release_lock(); if (rc) goto err_free; return 0; err_free: ide_host_free(host); err: release_mem_region(ATA_HD_BASE, 0x40); return rc; } module_init(falconide_init); MODULE_LICENSE("GPL");
gpl-2.0
mdeejay/shooter_u-gb-crc
scripts/kconfig/images.c
16496
6565
/* * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org> * Released under the terms of the GNU GPL v2.0. */ static const char *xpm_load[] = { "22 22 5 1", ". c None", "# c #000000", "c c #838100", "a c #ffff00", "b c #ffffff", "......................", "......................", "......................", "............####....#.", "...........#....##.##.", "..................###.", ".................####.", ".####...........#####.", "#abab##########.......", "#babababababab#.......", "#ababababababa#.......", "#babababababab#.......", "#ababab###############", "#babab##cccccccccccc##", "#abab##cccccccccccc##.", "#bab##cccccccccccc##..", "#ab##cccccccccccc##...", "#b##cccccccccccc##....", "###cccccccccccc##.....", "##cccccccccccc##......", "###############.......", "......................"}; static const char *xpm_save[] = { "22 22 5 1", ". c None", "# c #000000", "a c #838100", "b c #c5c2c5", "c c #cdb6d5", "......................", ".####################.", ".#aa#bbbbbbbbbbbb#bb#.", ".#aa#bbbbbbbbbbbb#bb#.", ".#aa#bbbbbbbbbcbb####.", ".#aa#bbbccbbbbbbb#aa#.", ".#aa#bbbccbbbbbbb#aa#.", ".#aa#bbbbbbbbbbbb#aa#.", ".#aa#bbbbbbbbbbbb#aa#.", ".#aa#bbbbbbbbbbbb#aa#.", ".#aa#bbbbbbbbbbbb#aa#.", ".#aaa############aaa#.", ".#aaaaaaaaaaaaaaaaaa#.", ".#aaaaaaaaaaaaaaaaaa#.", ".#aaa#############aa#.", ".#aaa#########bbb#aa#.", ".#aaa#########bbb#aa#.", ".#aaa#########bbb#aa#.", ".#aaa#########bbb#aa#.", ".#aaa#########bbb#aa#.", "..##################..", "......................"}; static const char *xpm_back[] = { "22 22 3 1", ". 
c None", "# c #000083", "a c #838183", "......................", "......................", "......................", "......................", "......................", "...........######a....", "..#......##########...", "..##...####......##a..", "..###.###.........##..", "..######..........##..", "..#####...........##..", "..######..........##..", "..#######.........##..", "..########.......##a..", "...............a###...", "...............###....", "......................", "......................", "......................", "......................", "......................", "......................"}; static const char *xpm_tree_view[] = { "22 22 2 1", ". c None", "# c #000000", "......................", "......................", "......#...............", "......#...............", "......#...............", "......#...............", "......#...............", "......########........", "......#...............", "......#...............", "......#...............", "......#...............", "......#...............", "......########........", "......#...............", "......#...............", "......#...............", "......#...............", "......#...............", "......########........", "......................", "......................"}; static const char *xpm_single_view[] = { "22 22 2 1", ". c None", "# c #000000", "......................", "......................", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "......................", "......................"}; static const char *xpm_split_view[] = { "22 22 2 1", ". 
c None", "# c #000000", "......................", "......................", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......................", "......................"}; static const char *xpm_symbol_no[] = { "12 12 2 1", " c white", ". c black", " ", " .......... ", " . . ", " . . ", " . . ", " . . ", " . . ", " . . ", " . . ", " . . ", " .......... ", " "}; static const char *xpm_symbol_mod[] = { "12 12 2 1", " c white", ". c black", " ", " .......... ", " . . ", " . . ", " . .. . ", " . .... . ", " . .... . ", " . .. . ", " . . ", " . . ", " .......... ", " "}; static const char *xpm_symbol_yes[] = { "12 12 2 1", " c white", ". c black", " ", " .......... ", " . . ", " . . ", " . . . ", " . .. . ", " . . .. . ", " . .... . ", " . .. . ", " . . ", " .......... ", " "}; static const char *xpm_choice_no[] = { "12 12 2 1", " c white", ". c black", " ", " .... ", " .. .. ", " . . ", " . . ", " . . ", " . . ", " . . ", " . . ", " .. .. ", " .... ", " "}; static const char *xpm_choice_yes[] = { "12 12 2 1", " c white", ". c black", " ", " .... ", " .. .. ", " . . ", " . .. . ", " . .... . ", " . .... . ", " . .. . ", " . . ", " .. .. ", " .... ", " "}; static const char *xpm_menu[] = { "12 12 2 1", " c white", ". c black", " ", " .......... ", " . . ", " . .. . ", " . .... . ", " . ...... . ", " . ...... . ", " . .... . ", " . .. . ", " . . ", " .......... ", " "}; static const char *xpm_menu_inv[] = { "12 12 2 1", " c white", ". c black", " ", " .......... ", " .......... ", " .. ...... ", " .. .... ", " .. .. ", " .. .. ", " .. .... ", " .. ...... 
", " .......... ", " .......... ", " "}; static const char *xpm_menuback[] = { "12 12 2 1", " c white", ". c black", " ", " .......... ", " . . ", " . .. . ", " . .... . ", " . ...... . ", " . ...... . ", " . .... . ", " . .. . ", " . . ", " .......... ", " "}; static const char *xpm_void[] = { "12 12 2 1", " c white", ". c black", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "};
gpl-2.0
antonblanchard/linux
drivers/media/pci/ttpci/av7110_hw.c
113
31559
// SPDX-License-Identifier: GPL-2.0-or-later /* * av7110_hw.c: av7110 low level hardware access and firmware interface * * Copyright (C) 1999-2002 Ralph Metzler * & Marcus Metzler for convergence integrated media GmbH * * originally based on code by: * Copyright (C) 1998,1999 Christian Theiss <mistert@rz.fh-augsburg.de> * * the project's page is at https://linuxtv.org */ /* for debugging ARM communication: */ //#define COM_DEBUG #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/fs.h> #include "av7110.h" #include "av7110_hw.h" #define _NOHANDSHAKE /* * Max transfer size done by av7110_fw_cmd() * * The maximum size passed to this function is 6 bytes. The buffer also * uses two additional ones for type and size. So, 8 bytes is enough. */ #define MAX_XFER_SIZE 8 /**************************************************************************** * DEBI functions ****************************************************************************/ /* This DEBI code is based on the Stradis driver by Nathan Laredo <laredo@gnu.org> */ int av7110_debiwrite(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count) { struct saa7146_dev *dev = av7110->dev; if (count > 32764) { printk("%s: invalid count %d\n", __func__, count); return -1; } if (saa7146_wait_for_debi_done(av7110->dev, 0) < 0) { printk("%s: wait_for_debi_done failed\n", __func__); return -1; } saa7146_write(dev, DEBI_CONFIG, config); if (count <= 4) /* immediate transfer */ saa7146_write(dev, DEBI_AD, val); else /* block transfer */ saa7146_write(dev, DEBI_AD, av7110->debi_bus); saa7146_write(dev, DEBI_COMMAND, (count << 17) | (addr & 0xffff)); saa7146_write(dev, MC2, (2 << 16) | 2); return 0; } u32 av7110_debiread(struct av7110 *av7110, u32 config, int addr, unsigned int count) { struct saa7146_dev *dev = av7110->dev; u32 result = 0; if (count > 32764) { printk("%s: invalid count %d\n", __func__, count); return 0; } if 
(saa7146_wait_for_debi_done(av7110->dev, 0) < 0) { printk("%s: wait_for_debi_done #1 failed\n", __func__); return 0; } saa7146_write(dev, DEBI_AD, av7110->debi_bus); saa7146_write(dev, DEBI_COMMAND, (count << 17) | 0x10000 | (addr & 0xffff)); saa7146_write(dev, DEBI_CONFIG, config); saa7146_write(dev, MC2, (2 << 16) | 2); if (count > 4) return count; if (saa7146_wait_for_debi_done(av7110->dev, 0) < 0) { printk("%s: wait_for_debi_done #2 failed\n", __func__); return 0; } result = saa7146_read(dev, DEBI_AD); result &= (0xffffffffUL >> ((4 - count) * 8)); return result; } /* av7110 ARM core boot stuff */ #if 0 void av7110_reset_arm(struct av7110 *av7110) { saa7146_setgpio(av7110->dev, RESET_LINE, SAA7146_GPIO_OUTLO); /* Disable DEBI and GPIO irq */ SAA7146_IER_DISABLE(av7110->dev, MASK_19 | MASK_03); SAA7146_ISR_CLEAR(av7110->dev, MASK_19 | MASK_03); saa7146_setgpio(av7110->dev, RESET_LINE, SAA7146_GPIO_OUTHI); msleep(30); /* the firmware needs some time to initialize */ ARM_ResetMailBox(av7110); SAA7146_ISR_CLEAR(av7110->dev, MASK_19 | MASK_03); SAA7146_IER_ENABLE(av7110->dev, MASK_03); av7110->arm_ready = 1; dprintk(1, "reset ARM\n"); } #endif /* 0 */ static int waitdebi(struct av7110 *av7110, int adr, int state) { int k; dprintk(4, "%p\n", av7110); for (k = 0; k < 100; k++) { if (irdebi(av7110, DEBINOSWAP, adr, 0, 2) == state) return 0; udelay(5); } return -ETIMEDOUT; } static int load_dram(struct av7110 *av7110, u32 *data, int len) { int i; int blocks, rest; u32 base, bootblock = AV7110_BOOT_BLOCK; dprintk(4, "%p\n", av7110); blocks = len / AV7110_BOOT_MAX_SIZE; rest = len % AV7110_BOOT_MAX_SIZE; base = DRAM_START_CODE; for (i = 0; i < blocks; i++) { if (waitdebi(av7110, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_EMPTY) < 0) { printk(KERN_ERR "dvb-ttpci: load_dram(): timeout at block %d\n", i); return -ETIMEDOUT; } dprintk(4, "writing DRAM block %d\n", i); mwdebi(av7110, DEBISWAB, bootblock, ((u8 *)data) + i * AV7110_BOOT_MAX_SIZE, AV7110_BOOT_MAX_SIZE); bootblock ^= 
0x1400; iwdebi(av7110, DEBISWAB, AV7110_BOOT_BASE, swab32(base), 4); iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_SIZE, AV7110_BOOT_MAX_SIZE, 2); iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_FULL, 2); base += AV7110_BOOT_MAX_SIZE; } if (rest > 0) { if (waitdebi(av7110, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_EMPTY) < 0) { printk(KERN_ERR "dvb-ttpci: load_dram(): timeout at last block\n"); return -ETIMEDOUT; } if (rest > 4) mwdebi(av7110, DEBISWAB, bootblock, ((u8 *)data) + i * AV7110_BOOT_MAX_SIZE, rest); else mwdebi(av7110, DEBISWAB, bootblock, ((u8 *)data) + i * AV7110_BOOT_MAX_SIZE - 4, rest + 4); iwdebi(av7110, DEBISWAB, AV7110_BOOT_BASE, swab32(base), 4); iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_SIZE, rest, 2); iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_FULL, 2); } if (waitdebi(av7110, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_EMPTY) < 0) { printk(KERN_ERR "dvb-ttpci: load_dram(): timeout after last block\n"); return -ETIMEDOUT; } iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_SIZE, 0, 2); iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_FULL, 2); if (waitdebi(av7110, AV7110_BOOT_STATE, BOOTSTATE_AV7110_BOOT_COMPLETE) < 0) { printk(KERN_ERR "dvb-ttpci: load_dram(): final handshake timeout\n"); return -ETIMEDOUT; } return 0; } /* we cannot write av7110 DRAM directly, so load a bootloader into * the DPRAM which implements a simple boot protocol */ int av7110_bootarm(struct av7110 *av7110) { const struct firmware *fw; const char *fw_name = "av7110/bootcode.bin"; struct saa7146_dev *dev = av7110->dev; u32 ret; int i; dprintk(4, "%p\n", av7110); av7110->arm_ready = 0; saa7146_setgpio(dev, RESET_LINE, SAA7146_GPIO_OUTLO); /* Disable DEBI and GPIO irq */ SAA7146_IER_DISABLE(av7110->dev, MASK_03 | MASK_19); SAA7146_ISR_CLEAR(av7110->dev, MASK_19 | MASK_03); /* enable DEBI */ saa7146_write(av7110->dev, MC1, 0x08800880); saa7146_write(av7110->dev, DD1_STREAM_B, 0x00000000); saa7146_write(av7110->dev, MC2, (MASK_09 | MASK_25 | MASK_10 | 
MASK_26)); /* test DEBI */ iwdebi(av7110, DEBISWAP, DPRAM_BASE, 0x76543210, 4); /* FIXME: Why does Nexus CA require 2x iwdebi for first init? */ iwdebi(av7110, DEBISWAP, DPRAM_BASE, 0x76543210, 4); if ((ret=irdebi(av7110, DEBINOSWAP, DPRAM_BASE, 0, 4)) != 0x10325476) { printk(KERN_ERR "dvb-ttpci: debi test in av7110_bootarm() failed: %08x != %08x (check your BIOS 'Plug&Play OS' settings)\n", ret, 0x10325476); return -1; } for (i = 0; i < 8192; i += 4) iwdebi(av7110, DEBISWAP, DPRAM_BASE + i, 0x00, 4); dprintk(2, "debi test OK\n"); /* boot */ dprintk(1, "load boot code\n"); saa7146_setgpio(dev, ARM_IRQ_LINE, SAA7146_GPIO_IRQLO); //saa7146_setgpio(dev, DEBI_DONE_LINE, SAA7146_GPIO_INPUT); //saa7146_setgpio(dev, 3, SAA7146_GPIO_INPUT); ret = request_firmware(&fw, fw_name, &dev->pci->dev); if (ret) { printk(KERN_ERR "dvb-ttpci: Failed to load firmware \"%s\"\n", fw_name); return ret; } mwdebi(av7110, DEBISWAB, DPRAM_BASE, fw->data, fw->size); release_firmware(fw); iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_FULL, 2); if (saa7146_wait_for_debi_done(av7110->dev, 1)) { printk(KERN_ERR "dvb-ttpci: av7110_bootarm(): saa7146_wait_for_debi_done() timed out\n"); return -ETIMEDOUT; } saa7146_setgpio(dev, RESET_LINE, SAA7146_GPIO_OUTHI); mdelay(1); dprintk(1, "load dram code\n"); if (load_dram(av7110, (u32 *)av7110->bin_root, av7110->size_root) < 0) { printk(KERN_ERR "dvb-ttpci: av7110_bootarm(): load_dram() failed\n"); return -1; } saa7146_setgpio(dev, RESET_LINE, SAA7146_GPIO_OUTLO); mdelay(1); dprintk(1, "load dpram code\n"); mwdebi(av7110, DEBISWAB, DPRAM_BASE, av7110->bin_dpram, av7110->size_dpram); if (saa7146_wait_for_debi_done(av7110->dev, 1)) { printk(KERN_ERR "dvb-ttpci: av7110_bootarm(): saa7146_wait_for_debi_done() timed out after loading DRAM\n"); return -ETIMEDOUT; } saa7146_setgpio(dev, RESET_LINE, SAA7146_GPIO_OUTHI); msleep(30); /* the firmware needs some time to initialize */ //ARM_ClearIrq(av7110); ARM_ResetMailBox(av7110); 
SAA7146_ISR_CLEAR(av7110->dev, MASK_19 | MASK_03); SAA7146_IER_ENABLE(av7110->dev, MASK_03); av7110->arm_errors = 0; av7110->arm_ready = 1; return 0; } MODULE_FIRMWARE("av7110/bootcode.bin"); /**************************************************************************** * DEBI command polling ****************************************************************************/ int av7110_wait_msgstate(struct av7110 *av7110, u16 flags) { unsigned long start; u32 stat; int err; if (FW_VERSION(av7110->arm_app) <= 0x261c) { /* not supported by old firmware */ msleep(50); return 0; } /* new firmware */ start = jiffies; for (;;) { err = time_after(jiffies, start + ARM_WAIT_FREE); if (mutex_lock_interruptible(&av7110->dcomlock)) return -ERESTARTSYS; stat = rdebi(av7110, DEBINOSWAP, MSGSTATE, 0, 2); mutex_unlock(&av7110->dcomlock); if ((stat & flags) == 0) break; if (err) { printk(KERN_ERR "%s: timeout waiting for MSGSTATE %04x\n", __func__, stat & flags); return -ETIMEDOUT; } msleep(1); } return 0; } static int __av7110_send_fw_cmd(struct av7110 *av7110, u16* buf, int length) { int i; unsigned long start; char *type = NULL; u16 flags[2] = {0, 0}; u32 stat; int err; // dprintk(4, "%p\n", av7110); if (!av7110->arm_ready) { dprintk(1, "arm not ready.\n"); return -ENXIO; } start = jiffies; while (1) { err = time_after(jiffies, start + ARM_WAIT_FREE); if (rdebi(av7110, DEBINOSWAP, COMMAND, 0, 2) == 0) break; if (err) { printk(KERN_ERR "dvb-ttpci: %s(): timeout waiting for COMMAND idle\n", __func__); av7110->arm_errors++; return -ETIMEDOUT; } msleep(1); } if (FW_VERSION(av7110->arm_app) <= 0x261f) wdebi(av7110, DEBINOSWAP, COM_IF_LOCK, 0xffff, 2); #ifndef _NOHANDSHAKE start = jiffies; while (1) { err = time_after(jiffies, start + ARM_WAIT_SHAKE); if (rdebi(av7110, DEBINOSWAP, HANDSHAKE_REG, 0, 2) == 0) break; if (err) { printk(KERN_ERR "dvb-ttpci: %s(): timeout waiting for HANDSHAKE_REG\n", __func__); return -ETIMEDOUT; } msleep(1); } #endif switch ((buf[0] >> 8) & 0xff) { case 
COMTYPE_PIDFILTER: case COMTYPE_ENCODER: case COMTYPE_REC_PLAY: case COMTYPE_MPEGDECODER: type = "MSG"; flags[0] = GPMQOver; flags[1] = GPMQFull; break; case COMTYPE_OSD: type = "OSD"; flags[0] = OSDQOver; flags[1] = OSDQFull; break; case COMTYPE_MISC: if (FW_VERSION(av7110->arm_app) >= 0x261d) { type = "MSG"; flags[0] = GPMQOver; flags[1] = GPMQBusy; } break; default: break; } if (type != NULL) { /* non-immediate COMMAND type */ start = jiffies; for (;;) { err = time_after(jiffies, start + ARM_WAIT_FREE); stat = rdebi(av7110, DEBINOSWAP, MSGSTATE, 0, 2); if (stat & flags[0]) { printk(KERN_ERR "%s: %s QUEUE overflow\n", __func__, type); return -1; } if ((stat & flags[1]) == 0) break; if (err) { printk(KERN_ERR "%s: timeout waiting on busy %s QUEUE\n", __func__, type); av7110->arm_errors++; return -ETIMEDOUT; } msleep(1); } } for (i = 2; i < length; i++) wdebi(av7110, DEBINOSWAP, COMMAND + 2 * i, (u32) buf[i], 2); if (length) wdebi(av7110, DEBINOSWAP, COMMAND + 2, (u32) buf[1], 2); else wdebi(av7110, DEBINOSWAP, COMMAND + 2, 0, 2); wdebi(av7110, DEBINOSWAP, COMMAND, (u32) buf[0], 2); if (FW_VERSION(av7110->arm_app) <= 0x261f) wdebi(av7110, DEBINOSWAP, COM_IF_LOCK, 0x0000, 2); #ifdef COM_DEBUG start = jiffies; while (1) { err = time_after(jiffies, start + ARM_WAIT_FREE); if (rdebi(av7110, DEBINOSWAP, COMMAND, 0, 2) == 0) break; if (err) { printk(KERN_ERR "dvb-ttpci: %s(): timeout waiting for COMMAND %d to complete\n", __func__, (buf[0] >> 8) & 0xff); return -ETIMEDOUT; } msleep(1); } stat = rdebi(av7110, DEBINOSWAP, MSGSTATE, 0, 2); if (stat & GPMQOver) { printk(KERN_ERR "dvb-ttpci: %s(): GPMQOver\n", __func__); return -ENOSPC; } else if (stat & OSDQOver) { printk(KERN_ERR "dvb-ttpci: %s(): OSDQOver\n", __func__); return -ENOSPC; } #endif return 0; } static int av7110_send_fw_cmd(struct av7110 *av7110, u16* buf, int length) { int ret; // dprintk(4, "%p\n", av7110); if (!av7110->arm_ready) { dprintk(1, "arm not ready.\n"); return -1; } if 
(mutex_lock_interruptible(&av7110->dcomlock)) return -ERESTARTSYS; ret = __av7110_send_fw_cmd(av7110, buf, length); mutex_unlock(&av7110->dcomlock); if (ret && ret!=-ERESTARTSYS) printk(KERN_ERR "dvb-ttpci: %s(): av7110_send_fw_cmd error %d\n", __func__, ret); return ret; } int av7110_fw_cmd(struct av7110 *av7110, int type, int com, int num, ...) { va_list args; u16 buf[MAX_XFER_SIZE]; int i, ret; // dprintk(4, "%p\n", av7110); if (2 + num > ARRAY_SIZE(buf)) { printk(KERN_WARNING "%s: %s len=%d is too big!\n", KBUILD_MODNAME, __func__, num); return -EINVAL; } buf[0] = ((type << 8) | com); buf[1] = num; if (num) { va_start(args, num); for (i = 0; i < num; i++) buf[i + 2] = va_arg(args, u32); va_end(args); } ret = av7110_send_fw_cmd(av7110, buf, num + 2); if (ret && ret != -ERESTARTSYS) printk(KERN_ERR "dvb-ttpci: av7110_fw_cmd error %d\n", ret); return ret; } #if 0 int av7110_send_ci_cmd(struct av7110 *av7110, u8 subcom, u8 *buf, u8 len) { int i, ret; u16 cmd[18] = { ((COMTYPE_COMMON_IF << 8) + subcom), 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; dprintk(4, "%p\n", av7110); for(i = 0; i < len && i < 32; i++) { if(i % 2 == 0) cmd[(i / 2) + 2] = (u16)(buf[i]) << 8; else cmd[(i / 2) + 2] |= buf[i]; } ret = av7110_send_fw_cmd(av7110, cmd, 18); if (ret && ret != -ERESTARTSYS) printk(KERN_ERR "dvb-ttpci: av7110_send_ci_cmd error %d\n", ret); return ret; } #endif /* 0 */ int av7110_fw_request(struct av7110 *av7110, u16 *request_buf, int request_buf_len, u16 *reply_buf, int reply_buf_len) { int err; s16 i; unsigned long start; #ifdef COM_DEBUG u32 stat; #endif dprintk(4, "%p\n", av7110); if (!av7110->arm_ready) { dprintk(1, "arm not ready.\n"); return -1; } if (mutex_lock_interruptible(&av7110->dcomlock)) return -ERESTARTSYS; if ((err = __av7110_send_fw_cmd(av7110, request_buf, request_buf_len)) < 0) { mutex_unlock(&av7110->dcomlock); printk(KERN_ERR "dvb-ttpci: av7110_fw_request error %d\n", err); return err; } start = jiffies; while (1) { err = 
time_after(jiffies, start + ARM_WAIT_FREE); if (rdebi(av7110, DEBINOSWAP, COMMAND, 0, 2) == 0) break; if (err) { printk(KERN_ERR "%s: timeout waiting for COMMAND to complete\n", __func__); mutex_unlock(&av7110->dcomlock); return -ETIMEDOUT; } #ifdef _NOHANDSHAKE msleep(1); #endif } #ifndef _NOHANDSHAKE start = jiffies; while (1) { err = time_after(jiffies, start + ARM_WAIT_SHAKE); if (rdebi(av7110, DEBINOSWAP, HANDSHAKE_REG, 0, 2) == 0) break; if (err) { printk(KERN_ERR "%s: timeout waiting for HANDSHAKE_REG\n", __func__); mutex_unlock(&av7110->dcomlock); return -ETIMEDOUT; } msleep(1); } #endif #ifdef COM_DEBUG stat = rdebi(av7110, DEBINOSWAP, MSGSTATE, 0, 2); if (stat & GPMQOver) { printk(KERN_ERR "%s: GPMQOver\n", __func__); mutex_unlock(&av7110->dcomlock); return -1; } else if (stat & OSDQOver) { printk(KERN_ERR "%s: OSDQOver\n", __func__); mutex_unlock(&av7110->dcomlock); return -1; } #endif for (i = 0; i < reply_buf_len; i++) reply_buf[i] = rdebi(av7110, DEBINOSWAP, COM_BUFF + 2 * i, 0, 2); mutex_unlock(&av7110->dcomlock); return 0; } static int av7110_fw_query(struct av7110 *av7110, u16 tag, u16* buf, s16 length) { int ret; ret = av7110_fw_request(av7110, &tag, 0, buf, length); if (ret) printk(KERN_ERR "dvb-ttpci: av7110_fw_query error %d\n", ret); return ret; } /**************************************************************************** * Firmware commands ****************************************************************************/ /* get version of the firmware ROM, RTSL, video ucode and ARM application */ int av7110_firmversion(struct av7110 *av7110) { u16 buf[20]; u16 tag = ((COMTYPE_REQUEST << 8) + ReqVersion); dprintk(4, "%p\n", av7110); if (av7110_fw_query(av7110, tag, buf, 16)) { printk("dvb-ttpci: failed to boot firmware @ card %d\n", av7110->dvb_adapter.num); return -EIO; } av7110->arm_fw = (buf[0] << 16) + buf[1]; av7110->arm_rtsl = (buf[2] << 16) + buf[3]; av7110->arm_vid = (buf[4] << 16) + buf[5]; av7110->arm_app = (buf[6] << 16) + buf[7]; 
av7110->avtype = (buf[8] << 16) + buf[9]; printk("dvb-ttpci: info @ card %d: firm %08x, rtsl %08x, vid %08x, app %08x\n", av7110->dvb_adapter.num, av7110->arm_fw, av7110->arm_rtsl, av7110->arm_vid, av7110->arm_app); /* print firmware capabilities */ if (FW_CI_LL_SUPPORT(av7110->arm_app)) printk("dvb-ttpci: firmware @ card %d supports CI link layer interface\n", av7110->dvb_adapter.num); else printk("dvb-ttpci: no firmware support for CI link layer interface @ card %d\n", av7110->dvb_adapter.num); return 0; } int av7110_diseqc_send(struct av7110 *av7110, int len, u8 *msg, unsigned long burst) { int i, ret; u16 buf[18] = { ((COMTYPE_AUDIODAC << 8) + SendDiSEqC), 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; dprintk(4, "%p\n", av7110); if (len > 10) len = 10; buf[1] = len + 2; buf[2] = len; if (burst != -1) buf[3] = burst ? 0x01 : 0x00; else buf[3] = 0xffff; for (i = 0; i < len; i++) buf[i + 4] = msg[i]; ret = av7110_send_fw_cmd(av7110, buf, 18); if (ret && ret!=-ERESTARTSYS) printk(KERN_ERR "dvb-ttpci: av7110_diseqc_send error %d\n", ret); return ret; } #ifdef CONFIG_DVB_AV7110_OSD static inline int SetColorBlend(struct av7110 *av7110, u8 windownr) { return av7110_fw_cmd(av7110, COMTYPE_OSD, SetCBlend, 1, windownr); } static inline int SetBlend_(struct av7110 *av7110, u8 windownr, enum av7110_osd_palette_type colordepth, u16 index, u8 blending) { return av7110_fw_cmd(av7110, COMTYPE_OSD, SetBlend, 4, windownr, colordepth, index, blending); } static inline int SetColor_(struct av7110 *av7110, u8 windownr, enum av7110_osd_palette_type colordepth, u16 index, u16 colorhi, u16 colorlo) { return av7110_fw_cmd(av7110, COMTYPE_OSD, SetColor, 5, windownr, colordepth, index, colorhi, colorlo); } static inline int SetFont(struct av7110 *av7110, u8 windownr, u8 fontsize, u16 colorfg, u16 colorbg) { return av7110_fw_cmd(av7110, COMTYPE_OSD, Set_Font, 4, windownr, fontsize, colorfg, colorbg); } static int FlushText(struct av7110 *av7110) { unsigned long start; int err; if 
(mutex_lock_interruptible(&av7110->dcomlock)) return -ERESTARTSYS; start = jiffies; while (1) { err = time_after(jiffies, start + ARM_WAIT_OSD); if (rdebi(av7110, DEBINOSWAP, BUFF1_BASE, 0, 2) == 0) break; if (err) { printk(KERN_ERR "dvb-ttpci: %s(): timeout waiting for BUFF1_BASE == 0\n", __func__); mutex_unlock(&av7110->dcomlock); return -ETIMEDOUT; } msleep(1); } mutex_unlock(&av7110->dcomlock); return 0; } static int WriteText(struct av7110 *av7110, u8 win, u16 x, u16 y, char *buf) { int i, ret; unsigned long start; int length = strlen(buf) + 1; u16 cbuf[5] = { (COMTYPE_OSD << 8) + DText, 3, win, x, y }; if (mutex_lock_interruptible(&av7110->dcomlock)) return -ERESTARTSYS; start = jiffies; while (1) { ret = time_after(jiffies, start + ARM_WAIT_OSD); if (rdebi(av7110, DEBINOSWAP, BUFF1_BASE, 0, 2) == 0) break; if (ret) { printk(KERN_ERR "dvb-ttpci: %s: timeout waiting for BUFF1_BASE == 0\n", __func__); mutex_unlock(&av7110->dcomlock); return -ETIMEDOUT; } msleep(1); } #ifndef _NOHANDSHAKE start = jiffies; while (1) { ret = time_after(jiffies, start + ARM_WAIT_SHAKE); if (rdebi(av7110, DEBINOSWAP, HANDSHAKE_REG, 0, 2) == 0) break; if (ret) { printk(KERN_ERR "dvb-ttpci: %s: timeout waiting for HANDSHAKE_REG\n", __func__); mutex_unlock(&av7110->dcomlock); return -ETIMEDOUT; } msleep(1); } #endif for (i = 0; i < length / 2; i++) wdebi(av7110, DEBINOSWAP, BUFF1_BASE + i * 2, swab16(*(u16 *)(buf + 2 * i)), 2); if (length & 1) wdebi(av7110, DEBINOSWAP, BUFF1_BASE + i * 2, 0, 2); ret = __av7110_send_fw_cmd(av7110, cbuf, 5); mutex_unlock(&av7110->dcomlock); if (ret && ret!=-ERESTARTSYS) printk(KERN_ERR "dvb-ttpci: WriteText error %d\n", ret); return ret; } static inline int DrawLine(struct av7110 *av7110, u8 windownr, u16 x, u16 y, u16 dx, u16 dy, u16 color) { return av7110_fw_cmd(av7110, COMTYPE_OSD, DLine, 6, windownr, x, y, dx, dy, color); } static inline int DrawBlock(struct av7110 *av7110, u8 windownr, u16 x, u16 y, u16 dx, u16 dy, u16 color) { return 
av7110_fw_cmd(av7110, COMTYPE_OSD, DBox, 6, windownr, x, y, dx, dy, color); } static inline int HideWindow(struct av7110 *av7110, u8 windownr) { return av7110_fw_cmd(av7110, COMTYPE_OSD, WHide, 1, windownr); } static inline int MoveWindowRel(struct av7110 *av7110, u8 windownr, u16 x, u16 y) { return av7110_fw_cmd(av7110, COMTYPE_OSD, WMoveD, 3, windownr, x, y); } static inline int MoveWindowAbs(struct av7110 *av7110, u8 windownr, u16 x, u16 y) { return av7110_fw_cmd(av7110, COMTYPE_OSD, WMoveA, 3, windownr, x, y); } static inline int DestroyOSDWindow(struct av7110 *av7110, u8 windownr) { return av7110_fw_cmd(av7110, COMTYPE_OSD, WDestroy, 1, windownr); } static inline int CreateOSDWindow(struct av7110 *av7110, u8 windownr, osd_raw_window_t disptype, u16 width, u16 height) { return av7110_fw_cmd(av7110, COMTYPE_OSD, WCreate, 4, windownr, disptype, width, height); } static enum av7110_osd_palette_type bpp2pal[8] = { Pal1Bit, Pal2Bit, 0, Pal4Bit, 0, 0, 0, Pal8Bit }; static osd_raw_window_t bpp2bit[8] = { OSD_BITMAP1, OSD_BITMAP2, 0, OSD_BITMAP4, 0, 0, 0, OSD_BITMAP8 }; static inline int WaitUntilBmpLoaded(struct av7110 *av7110) { int ret = wait_event_timeout(av7110->bmpq, av7110->bmp_state != BMP_LOADING, 10*HZ); if (ret == 0) { printk("dvb-ttpci: warning: timeout waiting in LoadBitmap: %d, %d\n", ret, av7110->bmp_state); av7110->bmp_state = BMP_NONE; return -ETIMEDOUT; } return 0; } static inline int LoadBitmap(struct av7110 *av7110, u16 dx, u16 dy, int inc, u8 __user * data) { u16 format; int bpp; int i; int d, delta; u8 c; int ret; dprintk(4, "%p\n", av7110); format = bpp2bit[av7110->osdbpp[av7110->osdwin]]; av7110->bmp_state = BMP_LOADING; if (format == OSD_BITMAP8) { bpp=8; delta = 1; } else if (format == OSD_BITMAP4) { bpp=4; delta = 2; } else if (format == OSD_BITMAP2) { bpp=2; delta = 4; } else if (format == OSD_BITMAP1) { bpp=1; delta = 8; } else { av7110->bmp_state = BMP_NONE; return -EINVAL; } av7110->bmplen = ((dx * dy * bpp + 7) & ~7) / 8; av7110->bmpp = 
0; if (av7110->bmplen > 32768) { av7110->bmp_state = BMP_NONE; return -EINVAL; } for (i = 0; i < dy; i++) { if (copy_from_user(av7110->bmpbuf + 1024 + i * dx, data + i * inc, dx)) { av7110->bmp_state = BMP_NONE; return -EINVAL; } } if (format != OSD_BITMAP8) { for (i = 0; i < dx * dy / delta; i++) { c = ((u8 *)av7110->bmpbuf)[1024 + i * delta + delta - 1]; for (d = delta - 2; d >= 0; d--) { c |= (((u8 *)av7110->bmpbuf)[1024 + i * delta + d] << ((delta - d - 1) * bpp)); ((u8 *)av7110->bmpbuf)[1024 + i] = c; } } } av7110->bmplen += 1024; dprintk(4, "av7110_fw_cmd: LoadBmp size %d\n", av7110->bmplen); ret = av7110_fw_cmd(av7110, COMTYPE_OSD, LoadBmp, 3, format, dx, dy); if (!ret) ret = WaitUntilBmpLoaded(av7110); return ret; } static int BlitBitmap(struct av7110 *av7110, u16 x, u16 y) { dprintk(4, "%p\n", av7110); return av7110_fw_cmd(av7110, COMTYPE_OSD, BlitBmp, 4, av7110->osdwin, x, y, 0); } static inline int ReleaseBitmap(struct av7110 *av7110) { dprintk(4, "%p\n", av7110); if (av7110->bmp_state != BMP_LOADED && FW_VERSION(av7110->arm_app) < 0x261e) return -1; if (av7110->bmp_state == BMP_LOADING) dprintk(1,"ReleaseBitmap called while BMP_LOADING\n"); av7110->bmp_state = BMP_NONE; return av7110_fw_cmd(av7110, COMTYPE_OSD, ReleaseBmp, 0); } static u32 RGB2YUV(u16 R, u16 G, u16 B) { u16 y, u, v; u16 Y, Cr, Cb; y = R * 77 + G * 150 + B * 29; /* Luma=0.299R+0.587G+0.114B 0..65535 */ u = 2048 + B * 8 -(y >> 5); /* Cr 0..4095 */ v = 2048 + R * 8 -(y >> 5); /* Cb 0..4095 */ Y = y / 256; Cb = u / 16; Cr = v / 16; return Cr | (Cb << 16) | (Y << 8); } static int OSDSetColor(struct av7110 *av7110, u8 color, u8 r, u8 g, u8 b, u8 blend) { int ret; u16 ch, cl; u32 yuv; yuv = blend ? 
RGB2YUV(r,g,b) : 0; cl = (yuv & 0xffff); ch = ((yuv >> 16) & 0xffff); ret = SetColor_(av7110, av7110->osdwin, bpp2pal[av7110->osdbpp[av7110->osdwin]], color, ch, cl); if (!ret) ret = SetBlend_(av7110, av7110->osdwin, bpp2pal[av7110->osdbpp[av7110->osdwin]], color, ((blend >> 4) & 0x0f)); return ret; } static int OSDSetPalette(struct av7110 *av7110, u32 __user * colors, u8 first, u8 last) { int i; int length = last - first + 1; if (length * 4 > DATA_BUFF3_SIZE) return -EINVAL; for (i = 0; i < length; i++) { u32 color, blend, yuv; if (get_user(color, colors + i)) return -EFAULT; blend = (color & 0xF0000000) >> 4; yuv = blend ? RGB2YUV(color & 0xFF, (color >> 8) & 0xFF, (color >> 16) & 0xFF) | blend : 0; yuv = ((yuv & 0xFFFF0000) >> 16) | ((yuv & 0x0000FFFF) << 16); wdebi(av7110, DEBINOSWAP, DATA_BUFF3_BASE + i * 4, yuv, 4); } return av7110_fw_cmd(av7110, COMTYPE_OSD, Set_Palette, 4, av7110->osdwin, bpp2pal[av7110->osdbpp[av7110->osdwin]], first, last); } static int OSDSetBlock(struct av7110 *av7110, int x0, int y0, int x1, int y1, int inc, u8 __user * data) { uint w, h, bpp, bpl, size, lpb, bnum, brest; int i; int rc,release_rc; w = x1 - x0 + 1; h = y1 - y0 + 1; if (inc <= 0) inc = w; if (w <= 0 || w > 720 || h <= 0 || h > 576) return -EINVAL; bpp = av7110->osdbpp[av7110->osdwin] + 1; bpl = ((w * bpp + 7) & ~7) / 8; size = h * bpl; lpb = (32 * 1024) / bpl; bnum = size / (lpb * bpl); brest = size - bnum * lpb * bpl; if (av7110->bmp_state == BMP_LOADING) { /* possible if syscall is repeated by -ERESTARTSYS and if firmware cannot abort */ BUG_ON (FW_VERSION(av7110->arm_app) >= 0x261e); rc = WaitUntilBmpLoaded(av7110); if (rc) return rc; /* just continue. 
This should work for all fw versions * if bnum==1 && !brest && LoadBitmap was successful */ } rc = 0; for (i = 0; i < bnum; i++) { rc = LoadBitmap(av7110, w, lpb, inc, data); if (rc) break; rc = BlitBitmap(av7110, x0, y0 + i * lpb); if (rc) break; data += lpb * inc; } if (!rc && brest) { rc = LoadBitmap(av7110, w, brest / bpl, inc, data); if (!rc) rc = BlitBitmap(av7110, x0, y0 + bnum * lpb); } release_rc = ReleaseBitmap(av7110); if (!rc) rc = release_rc; if (rc) dprintk(1,"returns %d\n",rc); return rc; } int av7110_osd_cmd(struct av7110 *av7110, osd_cmd_t *dc) { int ret; if (mutex_lock_interruptible(&av7110->osd_mutex)) return -ERESTARTSYS; switch (dc->cmd) { case OSD_Close: ret = DestroyOSDWindow(av7110, av7110->osdwin); break; case OSD_Open: av7110->osdbpp[av7110->osdwin] = (dc->color - 1) & 7; ret = CreateOSDWindow(av7110, av7110->osdwin, bpp2bit[av7110->osdbpp[av7110->osdwin]], dc->x1 - dc->x0 + 1, dc->y1 - dc->y0 + 1); if (ret) break; if (!dc->data) { ret = MoveWindowAbs(av7110, av7110->osdwin, dc->x0, dc->y0); if (ret) break; ret = SetColorBlend(av7110, av7110->osdwin); } break; case OSD_Show: ret = MoveWindowRel(av7110, av7110->osdwin, 0, 0); break; case OSD_Hide: ret = HideWindow(av7110, av7110->osdwin); break; case OSD_Clear: ret = DrawBlock(av7110, av7110->osdwin, 0, 0, 720, 576, 0); break; case OSD_Fill: ret = DrawBlock(av7110, av7110->osdwin, 0, 0, 720, 576, dc->color); break; case OSD_SetColor: ret = OSDSetColor(av7110, dc->color, dc->x0, dc->y0, dc->x1, dc->y1); break; case OSD_SetPalette: if (FW_VERSION(av7110->arm_app) >= 0x2618) ret = OSDSetPalette(av7110, dc->data, dc->color, dc->x0); else { int i, len = dc->x0-dc->color+1; u8 __user *colors = (u8 __user *)dc->data; u8 r, g = 0, b = 0, blend = 0; ret = 0; for (i = 0; i<len; i++) { if (get_user(r, colors + i * 4) || get_user(g, colors + i * 4 + 1) || get_user(b, colors + i * 4 + 2) || get_user(blend, colors + i * 4 + 3)) { ret = -EFAULT; break; } ret = OSDSetColor(av7110, dc->color + i, r, g, b, 
blend); if (ret) break; } } break; case OSD_SetPixel: ret = DrawLine(av7110, av7110->osdwin, dc->x0, dc->y0, 0, 0, dc->color); break; case OSD_SetRow: dc->y1 = dc->y0; /* fall through */ case OSD_SetBlock: ret = OSDSetBlock(av7110, dc->x0, dc->y0, dc->x1, dc->y1, dc->color, dc->data); break; case OSD_FillRow: ret = DrawBlock(av7110, av7110->osdwin, dc->x0, dc->y0, dc->x1-dc->x0+1, dc->y1, dc->color); break; case OSD_FillBlock: ret = DrawBlock(av7110, av7110->osdwin, dc->x0, dc->y0, dc->x1 - dc->x0 + 1, dc->y1 - dc->y0 + 1, dc->color); break; case OSD_Line: ret = DrawLine(av7110, av7110->osdwin, dc->x0, dc->y0, dc->x1 - dc->x0, dc->y1 - dc->y0, dc->color); break; case OSD_Text: { char textbuf[240]; if (strncpy_from_user(textbuf, dc->data, 240) < 0) { ret = -EFAULT; break; } textbuf[239] = 0; if (dc->x1 > 3) dc->x1 = 3; ret = SetFont(av7110, av7110->osdwin, dc->x1, (u16) (dc->color & 0xffff), (u16) (dc->color >> 16)); if (!ret) ret = FlushText(av7110); if (!ret) ret = WriteText(av7110, av7110->osdwin, dc->x0, dc->y0, textbuf); break; } case OSD_SetWindow: if (dc->x0 < 1 || dc->x0 > 7) ret = -EINVAL; else { av7110->osdwin = dc->x0; ret = 0; } break; case OSD_MoveWindow: ret = MoveWindowAbs(av7110, av7110->osdwin, dc->x0, dc->y0); if (!ret) ret = SetColorBlend(av7110, av7110->osdwin); break; case OSD_OpenRaw: if (dc->color < OSD_BITMAP1 || dc->color > OSD_CURSOR) { ret = -EINVAL; break; } if (dc->color >= OSD_BITMAP1 && dc->color <= OSD_BITMAP8HR) av7110->osdbpp[av7110->osdwin] = (1 << (dc->color & 3)) - 1; else av7110->osdbpp[av7110->osdwin] = 0; ret = CreateOSDWindow(av7110, av7110->osdwin, (osd_raw_window_t)dc->color, dc->x1 - dc->x0 + 1, dc->y1 - dc->y0 + 1); if (ret) break; if (!dc->data) { ret = MoveWindowAbs(av7110, av7110->osdwin, dc->x0, dc->y0); if (!ret) ret = SetColorBlend(av7110, av7110->osdwin); } break; default: ret = -EINVAL; break; } mutex_unlock(&av7110->osd_mutex); if (ret==-ERESTARTSYS) dprintk(1, "av7110_osd_cmd(%d) returns with 
-ERESTARTSYS\n",dc->cmd); else if (ret) dprintk(1, "av7110_osd_cmd(%d) returns with %d\n",dc->cmd,ret); return ret; } int av7110_osd_capability(struct av7110 *av7110, osd_cap_t *cap) { switch (cap->cmd) { case OSD_CAP_MEMSIZE: if (FW_4M_SDRAM(av7110->arm_app)) cap->val = 1000000; else cap->val = 92000; return 0; default: return -EINVAL; } } #endif /* CONFIG_DVB_AV7110_OSD */
gpl-2.0
truefitness/vlc
compat/tdestroy.c
113
6268
/***************************************************************************** * tdestroy.c : implement every t* fuctions *****************************************************************************/ #ifdef HAVE_CONFIG_H # include <config.h> #endif /** search.h is not present so every t* functions has to be implemented */ #ifndef HAVE_SEARCH_H #include <assert.h> #include <stdlib.h> typedef struct node { char *key; struct node *llink, *rlink; } node_t; /* $NetBSD: tdelete.c,v 1.4 2006/03/19 01:12:08 christos Exp $ */ /* * Tree search generalized from Knuth (6.2.2) Algorithm T just like * the AT&T man page says. * * The node_t structure is for internal use only, lint doesn't grok it. * * Written by reading the System V Interface Definition, not the code. * * Totally public domain. */ /* delete node with given key */ void * tdelete(vkey, vrootp, compar) const void *vkey; /* key to be deleted */ void **vrootp; /* address of the root of tree */ int (*compar) (const void *, const void *); { node_t **rootp = (node_t **)vrootp; node_t *p, *q, *r; int cmp; assert(vkey != NULL); assert(compar != NULL); if (rootp == NULL || (p = *rootp) == NULL) return NULL; while ((cmp = (*compar)(vkey, (*rootp)->key)) != 0) { p = *rootp; rootp = (cmp < 0) ? &(*rootp)->llink : /* follow llink branch */ &(*rootp)->rlink; /* follow rlink branch */ if (*rootp == NULL) return NULL; /* key not found */ } r = (*rootp)->rlink; /* D1: */ if ((q = (*rootp)->llink) == NULL) /* Left NULL? */ q = r; else if (r != NULL) { /* Right link is NULL? 
*/ if (r->llink == NULL) { /* D2: Find successor */ r->llink = q; q = r; } else { /* D3: Find NULL link */ for (q = r->llink; q->llink != NULL; q = r->llink) r = q; r->llink = q->rlink; q->llink = (*rootp)->llink; q->rlink = (*rootp)->rlink; } } if (p != *rootp) free(*rootp); /* D4: Free node */ *rootp = q; /* link parent to new node */ return p; } /* $NetBSD: tdestroy.c,v 1.2 1999/09/16 11:45:37 lukem Exp $ */ /* * Tree search generalized from Knuth (6.2.2) Algorithm T just like * the AT&T man page says. * * The node_t structure is for internal use only, lint doesn't grok it. * * Written by reading the System V Interface Definition, not the code. * * Totally public domain. */ /* Walk the nodes of a tree */ static void tdestroy_recurse(node_t* root, void (*free_action)(void *)) { if (root->llink != NULL) tdestroy_recurse(root->llink, free_action); if (root->rlink != NULL) tdestroy_recurse(root->rlink, free_action); (*free_action) ((void *) root->key); free(root); } void tdestroy(vrootp, freefct) void *vrootp; void (*freefct)(void *); { node_t *root = (node_t *) vrootp; if (root != NULL) tdestroy_recurse(root, freefct); } /* $NetBSD: tfind.c,v 1.5 2005/03/23 08:16:53 kleink Exp $ */ /* * Tree search generalized from Knuth (6.2.2) Algorithm T just like * the AT&T man page says. * * The node_t structure is for internal use only, lint doesn't grok it. * * Written by reading the System V Interface Definition, not the code. * * Totally public domain. */ /* find a node, or return 0 */ void * tfind(vkey, vrootp, compar) const void *vkey; /* key to be found */ const void **vrootp; /* address of the tree root */ int (*compar) (const void *, const void *); { node_t * const *rootp = (node_t * const*)vrootp; assert(vkey != NULL); assert(compar != NULL); if (rootp == NULL) return NULL; while (*rootp != NULL) { /* T1: */ int r; if ((r = (*compar)(vkey, (*rootp)->key)) == 0) /* T2: */ return *rootp; /* key found */ rootp = (r < 0) ? 
&(*rootp)->llink : /* T3: follow left branch */ &(*rootp)->rlink; /* T4: follow right branch */ } return NULL; } /* $NetBSD: tsearch.c,v 1.5 2005/11/29 03:12:00 christos Exp $ */ /* * Tree search generalized from Knuth (6.2.2) Algorithm T just like * the AT&T man page says. * * The node_t structure is for internal use only, lint doesn't grok it. * * Written by reading the System V Interface Definition, not the code. * * Totally public domain. */ /* find or insert datum into search tree */ void * tsearch(vkey, vrootp, compar) const void *vkey; /* key to be located */ void **vrootp; /* address of tree root */ int (*compar) (const void *, const void *); { node_t *q; node_t **rootp = (node_t **)vrootp; assert(vkey != NULL); assert(compar != NULL); if (rootp == NULL) return NULL; while (*rootp != NULL) { /* Knuth's T1: */ int r; if ((r = (*compar)(vkey, (*rootp)->key)) == 0) /* T2: */ return *rootp; /* we found it! */ rootp = (r < 0) ? &(*rootp)->llink : /* T3: follow left branch */ &(*rootp)->rlink; /* T4: follow right branch */ } q = malloc(sizeof(node_t)); /* T5: key not found */ if (q != 0) { /* make new node */ *rootp = q; /* link new node to old */ q->key = (void*)vkey; /* initialize new node */ q->llink = q->rlink = NULL; } return q; } /* $NetBSD: twalk.c,v 1.2 1999/09/16 11:45:37 lukem Exp $ */ /* * Tree search generalized from Knuth (6.2.2) Algorithm T just like * the AT&T man page says. * * The node_t structure is for internal use only, lint doesn't grok it. * * Written by reading the System V Interface Definition, not the code. * * Totally public domain. 
*/ /* Walk the nodes of a tree */ static void twalk_recurse(root, action, level) const node_t *root; /* Root of the tree to be walked */ void (*action) (const void *, VISIT, int); int level; { assert(root != NULL); assert(action != NULL); if (root->llink == NULL && root->rlink == NULL) (*action)(root, leaf, level); else { (*action)(root, preorder, level); if (root->llink != NULL) twalk_recurse(root->llink, action, level + 1); (*action)(root, postorder, level); if (root->rlink != NULL) twalk_recurse(root->rlink, action, level + 1); (*action)(root, endorder, level); } } /* Walk the nodes of a tree */ void twalk(vroot, action) const void *vroot; /* Root of the tree to be walked */ void (*action) (const void *, VISIT, int); { if (vroot != NULL && action != NULL) twalk_recurse(vroot, action, 0); } #endif // HAVE_SEARCH_H
gpl-2.0
dhcstruggle/jz2440-kernel
arch/i386/math-emu/fpu_aux.c
113
4342
/*---------------------------------------------------------------------------+ | fpu_aux.c | | | | Code to implement some of the FPU auxiliary instructions. | | | | Copyright (C) 1992,1993,1994,1997 | | W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia | | E-mail billm@suburbia.net | | | | | +---------------------------------------------------------------------------*/ #include "fpu_system.h" #include "exception.h" #include "fpu_emu.h" #include "status_w.h" #include "control_w.h" static void fnop(void) { } static void fclex(void) { partial_status &= ~(SW_Backward|SW_Summary|SW_Stack_Fault|SW_Precision| SW_Underflow|SW_Overflow|SW_Zero_Div|SW_Denorm_Op| SW_Invalid); no_ip_update = 1; } /* Needs to be externally visible */ void finit(void) { control_word = 0x037f; partial_status = 0; top = 0; /* We don't keep top in the status word internally. */ fpu_tag_word = 0xffff; /* The behaviour is different from that detailed in Section 15.1.6 of the Intel manual */ operand_address.offset = 0; operand_address.selector = 0; instruction_address.offset = 0; instruction_address.selector = 0; instruction_address.opcode = 0; no_ip_update = 1; } /* * These are nops on the i387.. 
*/ #define feni fnop #define fdisi fnop #define fsetpm fnop static FUNC const finit_table[] = { feni, fdisi, fclex, finit, fsetpm, FPU_illegal, FPU_illegal, FPU_illegal }; void finit_(void) { (finit_table[FPU_rm])(); } static void fstsw_ax(void) { *(short *) &FPU_EAX = status_word(); no_ip_update = 1; } static FUNC const fstsw_table[] = { fstsw_ax, FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal }; void fstsw_(void) { (fstsw_table[FPU_rm])(); } static FUNC const fp_nop_table[] = { fnop, FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal }; void fp_nop(void) { (fp_nop_table[FPU_rm])(); } void fld_i_(void) { FPU_REG *st_new_ptr; int i; u_char tag; if ( STACK_OVERFLOW ) { FPU_stack_overflow(); return; } /* fld st(i) */ i = FPU_rm; if ( NOT_EMPTY(i) ) { reg_copy(&st(i), st_new_ptr); tag = FPU_gettagi(i); push(); FPU_settag0(tag); } else { if ( control_word & CW_Invalid ) { /* The masked response */ FPU_stack_underflow(); } else EXCEPTION(EX_StackUnder); } } void fxch_i(void) { /* fxch st(i) */ FPU_REG t; int i = FPU_rm; FPU_REG *st0_ptr = &st(0), *sti_ptr = &st(i); long tag_word = fpu_tag_word; int regnr = top & 7, regnri = ((regnr + i) & 7); u_char st0_tag = (tag_word >> (regnr*2)) & 3; u_char sti_tag = (tag_word >> (regnri*2)) & 3; if ( st0_tag == TAG_Empty ) { if ( sti_tag == TAG_Empty ) { FPU_stack_underflow(); FPU_stack_underflow_i(i); return; } if ( control_word & CW_Invalid ) { /* Masked response */ FPU_copy_to_reg0(sti_ptr, sti_tag); } FPU_stack_underflow_i(i); return; } if ( sti_tag == TAG_Empty ) { if ( control_word & CW_Invalid ) { /* Masked response */ FPU_copy_to_regi(st0_ptr, st0_tag, i); } FPU_stack_underflow(); return; } clear_C1(); reg_copy(st0_ptr, &t); reg_copy(sti_ptr, st0_ptr); reg_copy(&t, sti_ptr); tag_word &= ~(3 << (regnr*2)) & ~(3 << (regnri*2)); tag_word |= (sti_tag << (regnr*2)) | (st0_tag << (regnri*2)); fpu_tag_word = tag_word; } void ffree_(void) { /* ffree 
st(i) */ FPU_settagi(FPU_rm, TAG_Empty); } void ffreep(void) { /* ffree st(i) + pop - unofficial code */ FPU_settagi(FPU_rm, TAG_Empty); FPU_pop(); } void fst_i_(void) { /* fst st(i) */ FPU_copy_to_regi(&st(0), FPU_gettag0(), FPU_rm); } void fstp_i(void) { /* fstp st(i) */ FPU_copy_to_regi(&st(0), FPU_gettag0(), FPU_rm); FPU_pop(); }
gpl-2.0
val2k/linux
net/netfilter/xt_state.c
113
1999
/* Kernel module to match connection tracking information. */ /* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2005 Netfilter Core Team <coreteam@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/skbuff.h> #include <net/netfilter/nf_conntrack.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_state.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>"); MODULE_DESCRIPTION("ip[6]_tables connection tracking state match module"); MODULE_ALIAS("ipt_state"); MODULE_ALIAS("ip6t_state"); static bool state_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_state_info *sinfo = par->matchinfo; enum ip_conntrack_info ctinfo; unsigned int statebit; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); if (ct) statebit = XT_STATE_BIT(ctinfo); else if (ctinfo == IP_CT_UNTRACKED) statebit = XT_STATE_UNTRACKED; else statebit = XT_STATE_INVALID; return (sinfo->statemask & statebit); } static int state_mt_check(const struct xt_mtchk_param *par) { int ret; ret = nf_ct_netns_get(par->net, par->family); if (ret < 0) pr_info("cannot load conntrack support for proto=%u\n", par->family); return ret; } static void state_mt_destroy(const struct xt_mtdtor_param *par) { nf_ct_netns_put(par->net, par->family); } static struct xt_match state_mt_reg __read_mostly = { .name = "state", .family = NFPROTO_UNSPEC, .checkentry = state_mt_check, .match = state_mt, .destroy = state_mt_destroy, .matchsize = sizeof(struct xt_state_info), .me = THIS_MODULE, }; static int __init state_mt_init(void) { return xt_register_match(&state_mt_reg); } static void __exit state_mt_exit(void) { xt_unregister_match(&state_mt_reg); } module_init(state_mt_init); module_exit(state_mt_exit);
gpl-2.0
ff94315/AA
target/linux/ramips/files/arch/mips/ralink/rt305x/setup.c
113
2077
/* * Ralink RT305x SoC specific setup * * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org> * * Parts of this file are based on Ralink's 2.6.21 BSP * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/err.h> #include <linux/clk.h> #include <asm/mips_machine.h> #include <asm/reboot.h> #include <asm/time.h> #include <asm/mach-ralink/common.h> #include <asm/mach-ralink/rt305x.h> #include <asm/mach-ralink/rt305x_regs.h> #include "common.h" static void rt305x_restart(char *command) { rt305x_sysc_wr(RT305X_RESET_SYSTEM, SYSC_REG_RESET_CTRL); while (1) if (cpu_wait) cpu_wait(); } static void rt305x_halt(void) { while (1) if (cpu_wait) cpu_wait(); } unsigned int __cpuinit get_c0_compare_irq(void) { return CP0_LEGACY_COMPARE_IRQ; } void __init ramips_soc_setup(void) { struct clk *clk; rt305x_sysc_base = ioremap_nocache(RT305X_SYSC_BASE, PAGE_SIZE); rt305x_memc_base = ioremap_nocache(RT305X_MEMC_BASE, PAGE_SIZE); rt305x_clocks_init(); clk = clk_get(NULL, "cpu"); if (IS_ERR(clk)) panic("unable to get CPU clock, err=%ld", PTR_ERR(clk)); printk(KERN_INFO "%s running at %lu.%02lu MHz\n", ramips_sys_type, clk_get_rate(clk) / 1000000, (clk_get_rate(clk) % 1000000) * 100 / 1000000); _machine_restart = rt305x_restart; _machine_halt = rt305x_halt; pm_power_off = rt305x_halt; clk = clk_get(NULL, "uart"); if (IS_ERR(clk)) panic("unable to get UART clock, err=%ld", PTR_ERR(clk)); ramips_early_serial_setup(0, RT305X_UART0_BASE, clk_get_rate(clk), RT305X_INTC_IRQ_UART0); ramips_early_serial_setup(1, RT305X_UART1_BASE, clk_get_rate(clk), RT305X_INTC_IRQ_UART1); } void __init plat_time_init(void) { struct clk *clk; clk = clk_get(NULL, "cpu"); if (IS_ERR(clk)) panic("unable to get CPU clock, err=%ld", PTR_ERR(clk)); mips_hpt_frequency = clk_get_rate(clk) / 2; }
gpl-2.0
ktd2004/linux-stable
arch/x86/kernel/kgdb.c
881
21355
/* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * */ /* * Copyright (C) 2004 Amit S. Kale <amitkale@linsyssoft.com> * Copyright (C) 2000-2001 VERITAS Software Corporation. * Copyright (C) 2002 Andi Kleen, SuSE Labs * Copyright (C) 2004 LinSysSoft Technologies Pvt. Ltd. * Copyright (C) 2007 MontaVista Software, Inc. * Copyright (C) 2007-2008 Jason Wessel, Wind River Systems, Inc. */ /**************************************************************************** * Contributor: Lake Stevens Instrument Division$ * Written by: Glenn Engel $ * Updated by: Amit Kale<akale@veritas.com> * Updated by: Tom Rini <trini@kernel.crashing.org> * Updated by: Jason Wessel <jason.wessel@windriver.com> * Modified for 386 by Jim Kingdon, Cygnus Support. 
* Origianl kgdb, compatibility with 2.1.xx kernel by * David Grothe <dave@gcom.com> * Integrated into 2.2.5 kernel by Tigran Aivazian <tigran@sco.com> * X86_64 changes from Andi Kleen's patch merged by Jim Houston */ #include <linux/spinlock.h> #include <linux/kdebug.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/ptrace.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/kgdb.h> #include <linux/smp.h> #include <linux/nmi.h> #include <linux/hw_breakpoint.h> #include <linux/uaccess.h> #include <linux/memory.h> #include <asm/debugreg.h> #include <asm/apicdef.h> #include <asm/apic.h> #include <asm/nmi.h> struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = { #ifdef CONFIG_X86_32 { "ax", 4, offsetof(struct pt_regs, ax) }, { "cx", 4, offsetof(struct pt_regs, cx) }, { "dx", 4, offsetof(struct pt_regs, dx) }, { "bx", 4, offsetof(struct pt_regs, bx) }, { "sp", 4, offsetof(struct pt_regs, sp) }, { "bp", 4, offsetof(struct pt_regs, bp) }, { "si", 4, offsetof(struct pt_regs, si) }, { "di", 4, offsetof(struct pt_regs, di) }, { "ip", 4, offsetof(struct pt_regs, ip) }, { "flags", 4, offsetof(struct pt_regs, flags) }, { "cs", 4, offsetof(struct pt_regs, cs) }, { "ss", 4, offsetof(struct pt_regs, ss) }, { "ds", 4, offsetof(struct pt_regs, ds) }, { "es", 4, offsetof(struct pt_regs, es) }, #else { "ax", 8, offsetof(struct pt_regs, ax) }, { "bx", 8, offsetof(struct pt_regs, bx) }, { "cx", 8, offsetof(struct pt_regs, cx) }, { "dx", 8, offsetof(struct pt_regs, dx) }, { "si", 8, offsetof(struct pt_regs, dx) }, { "di", 8, offsetof(struct pt_regs, di) }, { "bp", 8, offsetof(struct pt_regs, bp) }, { "sp", 8, offsetof(struct pt_regs, sp) }, { "r8", 8, offsetof(struct pt_regs, r8) }, { "r9", 8, offsetof(struct pt_regs, r9) }, { "r10", 8, offsetof(struct pt_regs, r10) }, { "r11", 8, offsetof(struct pt_regs, r11) }, { "r12", 8, offsetof(struct pt_regs, r12) }, { "r13", 8, offsetof(struct pt_regs, r13) }, { "r14", 8, offsetof(struct pt_regs, r14) }, { 
"r15", 8, offsetof(struct pt_regs, r15) }, { "ip", 8, offsetof(struct pt_regs, ip) }, { "flags", 4, offsetof(struct pt_regs, flags) }, { "cs", 4, offsetof(struct pt_regs, cs) }, { "ss", 4, offsetof(struct pt_regs, ss) }, { "ds", 4, -1 }, { "es", 4, -1 }, #endif { "fs", 4, -1 }, { "gs", 4, -1 }, }; int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) { if ( #ifdef CONFIG_X86_32 regno == GDB_SS || regno == GDB_FS || regno == GDB_GS || #endif regno == GDB_SP || regno == GDB_ORIG_AX) return 0; if (dbg_reg_def[regno].offset != -1) memcpy((void *)regs + dbg_reg_def[regno].offset, mem, dbg_reg_def[regno].size); return 0; } char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs) { if (regno == GDB_ORIG_AX) { memcpy(mem, &regs->orig_ax, sizeof(regs->orig_ax)); return "orig_ax"; } if (regno >= DBG_MAX_REG_NUM || regno < 0) return NULL; if (dbg_reg_def[regno].offset != -1) memcpy(mem, (void *)regs + dbg_reg_def[regno].offset, dbg_reg_def[regno].size); #ifdef CONFIG_X86_32 switch (regno) { case GDB_SS: if (!user_mode_vm(regs)) *(unsigned long *)mem = __KERNEL_DS; break; case GDB_SP: if (!user_mode_vm(regs)) *(unsigned long *)mem = kernel_stack_pointer(regs); break; case GDB_GS: case GDB_FS: *(unsigned long *)mem = 0xFFFF; break; } #endif return dbg_reg_def[regno].name; } /** * sleeping_thread_to_gdb_regs - Convert ptrace regs to GDB regs * @gdb_regs: A pointer to hold the registers in the order GDB wants. * @p: The &struct task_struct of the desired process. * * Convert the register values of the sleeping process in @p to * the format that GDB expects. * This function is called when kgdb does not have access to the * &struct pt_regs and therefore it should fill the gdb registers * @gdb_regs with what has been saved in &struct thread_struct * thread field during switch_to. 
*/ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) { #ifndef CONFIG_X86_32 u32 *gdb_regs32 = (u32 *)gdb_regs; #endif gdb_regs[GDB_AX] = 0; gdb_regs[GDB_BX] = 0; gdb_regs[GDB_CX] = 0; gdb_regs[GDB_DX] = 0; gdb_regs[GDB_SI] = 0; gdb_regs[GDB_DI] = 0; gdb_regs[GDB_BP] = *(unsigned long *)p->thread.sp; #ifdef CONFIG_X86_32 gdb_regs[GDB_DS] = __KERNEL_DS; gdb_regs[GDB_ES] = __KERNEL_DS; gdb_regs[GDB_PS] = 0; gdb_regs[GDB_CS] = __KERNEL_CS; gdb_regs[GDB_PC] = p->thread.ip; gdb_regs[GDB_SS] = __KERNEL_DS; gdb_regs[GDB_FS] = 0xFFFF; gdb_regs[GDB_GS] = 0xFFFF; #else gdb_regs32[GDB_PS] = *(unsigned long *)(p->thread.sp + 8); gdb_regs32[GDB_CS] = __KERNEL_CS; gdb_regs32[GDB_SS] = __KERNEL_DS; gdb_regs[GDB_PC] = 0; gdb_regs[GDB_R8] = 0; gdb_regs[GDB_R9] = 0; gdb_regs[GDB_R10] = 0; gdb_regs[GDB_R11] = 0; gdb_regs[GDB_R12] = 0; gdb_regs[GDB_R13] = 0; gdb_regs[GDB_R14] = 0; gdb_regs[GDB_R15] = 0; #endif gdb_regs[GDB_SP] = p->thread.sp; } static struct hw_breakpoint { unsigned enabled; unsigned long addr; int len; int type; struct perf_event * __percpu *pev; } breakinfo[HBP_NUM]; static unsigned long early_dr7; static void kgdb_correct_hw_break(void) { int breakno; for (breakno = 0; breakno < HBP_NUM; breakno++) { struct perf_event *bp; struct arch_hw_breakpoint *info; int val; int cpu = raw_smp_processor_id(); if (!breakinfo[breakno].enabled) continue; if (dbg_is_early) { set_debugreg(breakinfo[breakno].addr, breakno); early_dr7 |= encode_dr7(breakno, breakinfo[breakno].len, breakinfo[breakno].type); set_debugreg(early_dr7, 7); continue; } bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu); info = counter_arch_bp(bp); if (bp->attr.disabled != 1) continue; bp->attr.bp_addr = breakinfo[breakno].addr; bp->attr.bp_len = breakinfo[breakno].len; bp->attr.bp_type = breakinfo[breakno].type; info->address = breakinfo[breakno].addr; info->len = breakinfo[breakno].len; info->type = breakinfo[breakno].type; val = arch_install_hw_breakpoint(bp); if (!val) 
bp->attr.disabled = 0; } if (!dbg_is_early) hw_breakpoint_restore(); } static int hw_break_reserve_slot(int breakno) { int cpu; int cnt = 0; struct perf_event **pevent; if (dbg_is_early) return 0; for_each_online_cpu(cpu) { cnt++; pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu); if (dbg_reserve_bp_slot(*pevent)) goto fail; } return 0; fail: for_each_online_cpu(cpu) { cnt--; if (!cnt) break; pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu); dbg_release_bp_slot(*pevent); } return -1; } static int hw_break_release_slot(int breakno) { struct perf_event **pevent; int cpu; if (dbg_is_early) return 0; for_each_online_cpu(cpu) { pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu); if (dbg_release_bp_slot(*pevent)) /* * The debugger is responsible for handing the retry on * remove failure. */ return -1; } return 0; } static int kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype) { int i; for (i = 0; i < HBP_NUM; i++) if (breakinfo[i].addr == addr && breakinfo[i].enabled) break; if (i == HBP_NUM) return -1; if (hw_break_release_slot(i)) { printk(KERN_ERR "Cannot remove hw breakpoint at %lx\n", addr); return -1; } breakinfo[i].enabled = 0; return 0; } static void kgdb_remove_all_hw_break(void) { int i; int cpu = raw_smp_processor_id(); struct perf_event *bp; for (i = 0; i < HBP_NUM; i++) { if (!breakinfo[i].enabled) continue; bp = *per_cpu_ptr(breakinfo[i].pev, cpu); if (!bp->attr.disabled) { arch_uninstall_hw_breakpoint(bp); bp->attr.disabled = 1; continue; } if (dbg_is_early) early_dr7 &= ~encode_dr7(i, breakinfo[i].len, breakinfo[i].type); else if (hw_break_release_slot(i)) printk(KERN_ERR "KGDB: hw bpt remove failed %lx\n", breakinfo[i].addr); breakinfo[i].enabled = 0; } } static int kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype) { int i; for (i = 0; i < HBP_NUM; i++) if (!breakinfo[i].enabled) break; if (i == HBP_NUM) return -1; switch (bptype) { case BP_HARDWARE_BREAKPOINT: len = 1; breakinfo[i].type = 
X86_BREAKPOINT_EXECUTE; break; case BP_WRITE_WATCHPOINT: breakinfo[i].type = X86_BREAKPOINT_WRITE; break; case BP_ACCESS_WATCHPOINT: breakinfo[i].type = X86_BREAKPOINT_RW; break; default: return -1; } switch (len) { case 1: breakinfo[i].len = X86_BREAKPOINT_LEN_1; break; case 2: breakinfo[i].len = X86_BREAKPOINT_LEN_2; break; case 4: breakinfo[i].len = X86_BREAKPOINT_LEN_4; break; #ifdef CONFIG_X86_64 case 8: breakinfo[i].len = X86_BREAKPOINT_LEN_8; break; #endif default: return -1; } breakinfo[i].addr = addr; if (hw_break_reserve_slot(i)) { breakinfo[i].addr = 0; return -1; } breakinfo[i].enabled = 1; return 0; } /** * kgdb_disable_hw_debug - Disable hardware debugging while we in kgdb. * @regs: Current &struct pt_regs. * * This function will be called if the particular architecture must * disable hardware debugging while it is processing gdb packets or * handling exception. */ static void kgdb_disable_hw_debug(struct pt_regs *regs) { int i; int cpu = raw_smp_processor_id(); struct perf_event *bp; /* Disable hardware debugging while we are in kgdb: */ set_debugreg(0UL, 7); for (i = 0; i < HBP_NUM; i++) { if (!breakinfo[i].enabled) continue; if (dbg_is_early) { early_dr7 &= ~encode_dr7(i, breakinfo[i].len, breakinfo[i].type); continue; } bp = *per_cpu_ptr(breakinfo[i].pev, cpu); if (bp->attr.disabled == 1) continue; arch_uninstall_hw_breakpoint(bp); bp->attr.disabled = 1; } } #ifdef CONFIG_SMP /** * kgdb_roundup_cpus - Get other CPUs into a holding pattern * @flags: Current IRQ state * * On SMP systems, we need to get the attention of the other CPUs * and get them be in a known state. This should do what is needed * to get the other CPUs to call kgdb_wait(). Note that on some arches, * the NMI approach is not used for rounding up all the CPUs. For example, * in case of MIPS, smp_call_function() is used to roundup CPUs. In * this case, we have to make sure that interrupts are enabled before * calling smp_call_function(). 
The argument to this function is * the flags that will be used when restoring the interrupts. There is * local_irq_save() call before kgdb_roundup_cpus(). * * On non-SMP systems, this is not called. */ void kgdb_roundup_cpus(unsigned long flags) { apic->send_IPI_allbutself(APIC_DM_NMI); } #endif /** * kgdb_arch_handle_exception - Handle architecture specific GDB packets. * @e_vector: The error vector of the exception that happened. * @signo: The signal number of the exception that happened. * @err_code: The error code of the exception that happened. * @remcomInBuffer: The buffer of the packet we have read. * @remcomOutBuffer: The buffer of %BUFMAX bytes to write a packet into. * @linux_regs: The &struct pt_regs of the current process. * * This function MUST handle the 'c' and 's' command packets, * as well packets to set / remove a hardware breakpoint, if used. * If there are additional packets which the hardware needs to handle, * they are handled here. The code should return -1 if it wants to * process more packets, and a %0 or %1 if it wants to exit from the * kgdb callback. 
*/ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, char *remcomInBuffer, char *remcomOutBuffer, struct pt_regs *linux_regs) { unsigned long addr; char *ptr; switch (remcomInBuffer[0]) { case 'c': case 's': /* try to read optional parameter, pc unchanged if no parm */ ptr = &remcomInBuffer[1]; if (kgdb_hex2long(&ptr, &addr)) linux_regs->ip = addr; case 'D': case 'k': /* clear the trace bit */ linux_regs->flags &= ~X86_EFLAGS_TF; atomic_set(&kgdb_cpu_doing_single_step, -1); /* set the trace bit if we're stepping */ if (remcomInBuffer[0] == 's') { linux_regs->flags |= X86_EFLAGS_TF; atomic_set(&kgdb_cpu_doing_single_step, raw_smp_processor_id()); } return 0; } /* this means that we do not want to exit from the handler: */ return -1; } static inline int single_step_cont(struct pt_regs *regs, struct die_args *args) { /* * Single step exception from kernel space to user space so * eat the exception and continue the process: */ printk(KERN_ERR "KGDB: trap/step from kernel to user space, " "resuming...\n"); kgdb_arch_handle_exception(args->trapnr, args->signr, args->err, "c", "", regs); /* * Reset the BS bit in dr6 (pointed by args->err) to * denote completion of processing */ (*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP; return NOTIFY_STOP; } static int was_in_debug_nmi[NR_CPUS]; static int kgdb_nmi_handler(unsigned int cmd, struct pt_regs *regs) { switch (cmd) { case NMI_LOCAL: if (atomic_read(&kgdb_active) != -1) { /* KGDB CPU roundup */ kgdb_nmicallback(raw_smp_processor_id(), regs); was_in_debug_nmi[raw_smp_processor_id()] = 1; touch_nmi_watchdog(); return NMI_HANDLED; } break; case NMI_UNKNOWN: if (was_in_debug_nmi[raw_smp_processor_id()]) { was_in_debug_nmi[raw_smp_processor_id()] = 0; return NMI_HANDLED; } break; default: /* do nothing */ break; } return NMI_DONE; } static int __kgdb_notify(struct die_args *args, unsigned long cmd) { struct pt_regs *regs = args->regs; switch (cmd) { case DIE_DEBUG: if 
(atomic_read(&kgdb_cpu_doing_single_step) != -1) { if (user_mode(regs)) return single_step_cont(regs, args); break; } else if (test_thread_flag(TIF_SINGLESTEP)) /* This means a user thread is single stepping * a system call which should be ignored */ return NOTIFY_DONE; /* fall through */ default: if (user_mode(regs)) return NOTIFY_DONE; } if (kgdb_handle_exception(args->trapnr, args->signr, cmd, regs)) return NOTIFY_DONE; /* Must touch watchdog before return to normal operation */ touch_nmi_watchdog(); return NOTIFY_STOP; } int kgdb_ll_trap(int cmd, const char *str, struct pt_regs *regs, long err, int trap, int sig) { struct die_args args = { .regs = regs, .str = str, .err = err, .trapnr = trap, .signr = sig, }; if (!kgdb_io_module_registered) return NOTIFY_DONE; return __kgdb_notify(&args, cmd); } static int kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr) { unsigned long flags; int ret; local_irq_save(flags); ret = __kgdb_notify(ptr, cmd); local_irq_restore(flags); return ret; } static struct notifier_block kgdb_notifier = { .notifier_call = kgdb_notify, }; /** * kgdb_arch_init - Perform any architecture specific initalization. * * This function will handle the initalization of any architecture * specific callbacks. 
*/ int kgdb_arch_init(void) { int retval; retval = register_die_notifier(&kgdb_notifier); if (retval) goto out; retval = register_nmi_handler(NMI_LOCAL, kgdb_nmi_handler, 0, "kgdb"); if (retval) goto out1; retval = register_nmi_handler(NMI_UNKNOWN, kgdb_nmi_handler, 0, "kgdb"); if (retval) goto out2; return retval; out2: unregister_nmi_handler(NMI_LOCAL, "kgdb"); out1: unregister_die_notifier(&kgdb_notifier); out: return retval; } static void kgdb_hw_overflow_handler(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs) { struct task_struct *tsk = current; int i; for (i = 0; i < 4; i++) if (breakinfo[i].enabled) tsk->thread.debugreg6 |= (DR_TRAP0 << i); } void kgdb_arch_late(void) { int i, cpu; struct perf_event_attr attr; struct perf_event **pevent; /* * Pre-allocate the hw breakpoint structions in the non-atomic * portion of kgdb because this operation requires mutexs to * complete. */ hw_breakpoint_init(&attr); attr.bp_addr = (unsigned long)kgdb_arch_init; attr.bp_len = HW_BREAKPOINT_LEN_1; attr.bp_type = HW_BREAKPOINT_W; attr.disabled = 1; for (i = 0; i < HBP_NUM; i++) { if (breakinfo[i].pev) continue; breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL); if (IS_ERR((void * __force)breakinfo[i].pev)) { printk(KERN_ERR "kgdb: Could not allocate hw" "breakpoints\nDisabling the kernel debugger\n"); breakinfo[i].pev = NULL; kgdb_arch_exit(); return; } for_each_online_cpu(cpu) { pevent = per_cpu_ptr(breakinfo[i].pev, cpu); pevent[0]->hw.sample_period = 1; pevent[0]->overflow_handler = kgdb_hw_overflow_handler; if (pevent[0]->destroy != NULL) { pevent[0]->destroy = NULL; release_bp_slot(*pevent); } } } } /** * kgdb_arch_exit - Perform any architecture specific uninitalization. * * This function will handle the uninitalization of any architecture * specific callbacks, for dynamic registration and unregistration. 
*/ void kgdb_arch_exit(void) { int i; for (i = 0; i < 4; i++) { if (breakinfo[i].pev) { unregister_wide_hw_breakpoint(breakinfo[i].pev); breakinfo[i].pev = NULL; } } unregister_nmi_handler(NMI_UNKNOWN, "kgdb"); unregister_nmi_handler(NMI_LOCAL, "kgdb"); unregister_die_notifier(&kgdb_notifier); } /** * * kgdb_skipexception - Bail out of KGDB when we've been triggered. * @exception: Exception vector number * @regs: Current &struct pt_regs. * * On some architectures we need to skip a breakpoint exception when * it occurs after a breakpoint has been removed. * * Skip an int3 exception when it occurs after a breakpoint has been * removed. Backtrack eip by 1 since the int3 would have caused it to * increment by 1. */ int kgdb_skipexception(int exception, struct pt_regs *regs) { if (exception == 3 && kgdb_isremovedbreak(regs->ip - 1)) { regs->ip -= 1; return 1; } return 0; } unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs) { if (exception == 3) return instruction_pointer(regs) - 1; return instruction_pointer(regs); } void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip) { regs->ip = ip; } int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) { int err; #ifdef CONFIG_DEBUG_RODATA char opc[BREAK_INSTR_SIZE]; #endif /* CONFIG_DEBUG_RODATA */ bpt->type = BP_BREAKPOINT; err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE); if (err) return err; err = probe_kernel_write((char *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE); #ifdef CONFIG_DEBUG_RODATA if (!err) return err; /* * It is safe to call text_poke() because normal kernel execution * is stopped on all cores, so long as the text_mutex is not locked. 
*/ if (mutex_is_locked(&text_mutex)) return -EBUSY; text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE); err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE); if (err) return err; if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE)) return -EINVAL; bpt->type = BP_POKE_BREAKPOINT; #endif /* CONFIG_DEBUG_RODATA */ return err; } int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) { #ifdef CONFIG_DEBUG_RODATA int err; char opc[BREAK_INSTR_SIZE]; if (bpt->type != BP_POKE_BREAKPOINT) goto knl_write; /* * It is safe to call text_poke() because normal kernel execution * is stopped on all cores, so long as the text_mutex is not locked. */ if (mutex_is_locked(&text_mutex)) goto knl_write; text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE); err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE); if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE)) goto knl_write; return err; knl_write: #endif /* CONFIG_DEBUG_RODATA */ return probe_kernel_write((char *)bpt->bpt_addr, (char *)bpt->saved_instr, BREAK_INSTR_SIZE); } struct kgdb_arch arch_kgdb_ops = { /* Breakpoint instruction: */ .gdb_bpt_instr = { 0xcc }, .flags = KGDB_HW_BREAKPOINT, .set_hw_breakpoint = kgdb_set_hw_break, .remove_hw_breakpoint = kgdb_remove_hw_break, .disable_hw_break = kgdb_disable_hw_debug, .remove_all_hw_break = kgdb_remove_all_hw_break, .correct_hw_break = kgdb_correct_hw_break, };
gpl-2.0
ugur2323/WhisperKernelAveaInTouch4
arch/s390/oprofile/hwsampler.c
2161
25105
/* * Copyright IBM Corp. 2010 * Author: Heinz Graalfs <graalfs@de.ibm.com> */ #include <linux/kernel_stat.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/smp.h> #include <linux/errno.h> #include <linux/workqueue.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <linux/cpu.h> #include <linux/semaphore.h> #include <linux/oom.h> #include <linux/oprofile.h> #include <asm/facility.h> #include <asm/cpu_mf.h> #include <asm/irq.h> #include "hwsampler.h" #include "op_counter.h" #define MAX_NUM_SDB 511 #define MIN_NUM_SDB 1 #define ALERT_REQ_MASK 0x4000000000000000ul #define BUFFER_FULL_MASK 0x8000000000000000ul DECLARE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer); struct hws_execute_parms { void *buffer; signed int rc; }; DEFINE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer); EXPORT_PER_CPU_SYMBOL(sampler_cpu_buffer); static DEFINE_MUTEX(hws_sem); static DEFINE_MUTEX(hws_sem_oom); static unsigned char hws_flush_all; static unsigned int hws_oom; static struct workqueue_struct *hws_wq; static unsigned int hws_state; enum { HWS_INIT = 1, HWS_DEALLOCATED, HWS_STOPPED, HWS_STARTED, HWS_STOPPING }; /* set to 1 if called by kernel during memory allocation */ static unsigned char oom_killer_was_active; /* size of SDBT and SDB as of allocate API */ static unsigned long num_sdbt = 100; static unsigned long num_sdb = 511; /* sampling interval (machine cycles) */ static unsigned long interval; static unsigned long min_sampler_rate; static unsigned long max_sampler_rate; static int ssctl(void *buffer) { int cc; /* set in order to detect a program check */ cc = 1; asm volatile( "0: .insn s,0xB2870000,0(%1)\n" "1: ipm %0\n" " srl %0,28\n" "2:\n" EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) : "+d" (cc), "+a" (buffer) : "m" (*((struct hws_ssctl_request_block *)buffer)) : "cc", "memory"); return cc ? 
-EINVAL : 0 ; } static int qsi(void *buffer) { int cc; cc = 1; asm volatile( "0: .insn s,0xB2860000,0(%1)\n" "1: lhi %0,0\n" "2:\n" EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) : "=d" (cc), "+a" (buffer) : "m" (*((struct hws_qsi_info_block *)buffer)) : "cc", "memory"); return cc ? -EINVAL : 0; } static void execute_qsi(void *parms) { struct hws_execute_parms *ep = parms; ep->rc = qsi(ep->buffer); } static void execute_ssctl(void *parms) { struct hws_execute_parms *ep = parms; ep->rc = ssctl(ep->buffer); } static int smp_ctl_ssctl_stop(int cpu) { int rc; struct hws_execute_parms ep; struct hws_cpu_buffer *cb; cb = &per_cpu(sampler_cpu_buffer, cpu); cb->ssctl.es = 0; cb->ssctl.cs = 0; ep.buffer = &cb->ssctl; smp_call_function_single(cpu, execute_ssctl, &ep, 1); rc = ep.rc; if (rc) { printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu); dump_stack(); } ep.buffer = &cb->qsi; smp_call_function_single(cpu, execute_qsi, &ep, 1); if (cb->qsi.es || cb->qsi.cs) { printk(KERN_EMERG "CPUMF sampling did not stop properly.\n"); dump_stack(); } return rc; } static int smp_ctl_ssctl_deactivate(int cpu) { int rc; struct hws_execute_parms ep; struct hws_cpu_buffer *cb; cb = &per_cpu(sampler_cpu_buffer, cpu); cb->ssctl.es = 1; cb->ssctl.cs = 0; ep.buffer = &cb->ssctl; smp_call_function_single(cpu, execute_ssctl, &ep, 1); rc = ep.rc; if (rc) printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu); ep.buffer = &cb->qsi; smp_call_function_single(cpu, execute_qsi, &ep, 1); if (cb->qsi.cs) printk(KERN_EMERG "CPUMF sampling was not set inactive.\n"); return rc; } static int smp_ctl_ssctl_enable_activate(int cpu, unsigned long interval) { int rc; struct hws_execute_parms ep; struct hws_cpu_buffer *cb; cb = &per_cpu(sampler_cpu_buffer, cpu); cb->ssctl.h = 1; cb->ssctl.tear = cb->first_sdbt; cb->ssctl.dear = *(unsigned long *) cb->first_sdbt; cb->ssctl.interval = interval; cb->ssctl.es = 1; cb->ssctl.cs = 1; ep.buffer = &cb->ssctl; smp_call_function_single(cpu, execute_ssctl, &ep, 1); 
rc = ep.rc; if (rc) printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu); ep.buffer = &cb->qsi; smp_call_function_single(cpu, execute_qsi, &ep, 1); if (ep.rc) printk(KERN_ERR "hwsampler: CPU %d CPUMF QSI failed.\n", cpu); return rc; } static int smp_ctl_qsi(int cpu) { struct hws_execute_parms ep; struct hws_cpu_buffer *cb; cb = &per_cpu(sampler_cpu_buffer, cpu); ep.buffer = &cb->qsi; smp_call_function_single(cpu, execute_qsi, &ep, 1); return ep.rc; } static inline unsigned long *trailer_entry_ptr(unsigned long v) { void *ret; ret = (void *)v; ret += PAGE_SIZE; ret -= sizeof(struct hws_trailer_entry); return (unsigned long *) ret; } static void hws_ext_handler(struct ext_code ext_code, unsigned int param32, unsigned long param64) { struct hws_cpu_buffer *cb = &__get_cpu_var(sampler_cpu_buffer); if (!(param32 & CPU_MF_INT_SF_MASK)) return; inc_irq_stat(IRQEXT_CMS); atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32); if (hws_wq) queue_work(hws_wq, &cb->worker); } static void worker(struct work_struct *work); static void add_samples_to_oprofile(unsigned cpu, unsigned long *, unsigned long *dear); static void init_all_cpu_buffers(void) { int cpu; struct hws_cpu_buffer *cb; for_each_online_cpu(cpu) { cb = &per_cpu(sampler_cpu_buffer, cpu); memset(cb, 0, sizeof(struct hws_cpu_buffer)); } } static int is_link_entry(unsigned long *s) { return *s & 0x1ul ? 
1 : 0; } static unsigned long *get_next_sdbt(unsigned long *s) { return (unsigned long *) (*s & ~0x1ul); } static int prepare_cpu_buffers(void) { int cpu; int rc; struct hws_cpu_buffer *cb; rc = 0; for_each_online_cpu(cpu) { cb = &per_cpu(sampler_cpu_buffer, cpu); atomic_set(&cb->ext_params, 0); cb->worker_entry = 0; cb->sample_overflow = 0; cb->req_alert = 0; cb->incorrect_sdbt_entry = 0; cb->invalid_entry_address = 0; cb->loss_of_sample_data = 0; cb->sample_auth_change_alert = 0; cb->finish = 0; cb->oom = 0; cb->stop_mode = 0; } return rc; } /* * allocate_sdbt() - allocate sampler memory * @cpu: the cpu for which sampler memory is allocated * * A 4K page is allocated for each requested SDBT. * A maximum of 511 4K pages are allocated for the SDBs in each of the SDBTs. * Set ALERT_REQ mask in each SDBs trailer. * Returns zero if successful, <0 otherwise. */ static int allocate_sdbt(int cpu) { int j, k, rc; unsigned long *sdbt; unsigned long sdb; unsigned long *tail; unsigned long *trailer; struct hws_cpu_buffer *cb; cb = &per_cpu(sampler_cpu_buffer, cpu); if (cb->first_sdbt) return -EINVAL; sdbt = NULL; tail = sdbt; for (j = 0; j < num_sdbt; j++) { sdbt = (unsigned long *)get_zeroed_page(GFP_KERNEL); mutex_lock(&hws_sem_oom); /* OOM killer might have been activated */ barrier(); if (oom_killer_was_active || !sdbt) { if (sdbt) free_page((unsigned long)sdbt); goto allocate_sdbt_error; } if (cb->first_sdbt == 0) cb->first_sdbt = (unsigned long)sdbt; /* link current page to tail of chain */ if (tail) *tail = (unsigned long)(void *)sdbt + 1; mutex_unlock(&hws_sem_oom); for (k = 0; k < num_sdb; k++) { /* get and set SDB page */ sdb = get_zeroed_page(GFP_KERNEL); mutex_lock(&hws_sem_oom); /* OOM killer might have been activated */ barrier(); if (oom_killer_was_active || !sdb) { if (sdb) free_page(sdb); goto allocate_sdbt_error; } *sdbt = sdb; trailer = trailer_entry_ptr(*sdbt); *trailer = ALERT_REQ_MASK; sdbt++; mutex_unlock(&hws_sem_oom); } tail = sdbt; } 
mutex_lock(&hws_sem_oom); if (oom_killer_was_active) goto allocate_sdbt_error; rc = 0; if (tail) *tail = (unsigned long) ((void *)cb->first_sdbt) + 1; allocate_sdbt_exit: mutex_unlock(&hws_sem_oom); return rc; allocate_sdbt_error: rc = -ENOMEM; goto allocate_sdbt_exit; } /* * deallocate_sdbt() - deallocate all sampler memory * * For each online CPU all SDBT trees are deallocated. * Returns the number of freed pages. */ static int deallocate_sdbt(void) { int cpu; int counter; counter = 0; for_each_online_cpu(cpu) { unsigned long start; unsigned long sdbt; unsigned long *curr; struct hws_cpu_buffer *cb; cb = &per_cpu(sampler_cpu_buffer, cpu); if (!cb->first_sdbt) continue; sdbt = cb->first_sdbt; curr = (unsigned long *) sdbt; start = sdbt; /* we'll free the SDBT after all SDBs are processed... */ while (1) { if (!*curr || !sdbt) break; /* watch for link entry reset if found */ if (is_link_entry(curr)) { curr = get_next_sdbt(curr); if (sdbt) free_page(sdbt); /* we are done if we reach the start */ if ((unsigned long) curr == start) break; else sdbt = (unsigned long) curr; } else { /* process SDB pointer */ if (*curr) { free_page(*curr); curr++; } } counter++; } cb->first_sdbt = 0; } return counter; } static int start_sampling(int cpu) { int rc; struct hws_cpu_buffer *cb; cb = &per_cpu(sampler_cpu_buffer, cpu); rc = smp_ctl_ssctl_enable_activate(cpu, interval); if (rc) { printk(KERN_INFO "hwsampler: CPU %d ssctl failed.\n", cpu); goto start_exit; } rc = -EINVAL; if (!cb->qsi.es) { printk(KERN_INFO "hwsampler: CPU %d ssctl not enabled.\n", cpu); goto start_exit; } if (!cb->qsi.cs) { printk(KERN_INFO "hwsampler: CPU %d ssctl not active.\n", cpu); goto start_exit; } printk(KERN_INFO "hwsampler: CPU %d, CPUMF Sampling started, interval %lu.\n", cpu, interval); rc = 0; start_exit: return rc; } static int stop_sampling(int cpu) { unsigned long v; int rc; struct hws_cpu_buffer *cb; rc = smp_ctl_qsi(cpu); WARN_ON(rc); cb = &per_cpu(sampler_cpu_buffer, cpu); if (!rc && 
!cb->qsi.es) printk(KERN_INFO "hwsampler: CPU %d, already stopped.\n", cpu); rc = smp_ctl_ssctl_stop(cpu); if (rc) { printk(KERN_INFO "hwsampler: CPU %d, ssctl stop error %d.\n", cpu, rc); goto stop_exit; } printk(KERN_INFO "hwsampler: CPU %d, CPUMF Sampling stopped.\n", cpu); stop_exit: v = cb->req_alert; if (v) printk(KERN_ERR "hwsampler: CPU %d CPUMF Request alert," " count=%lu.\n", cpu, v); v = cb->loss_of_sample_data; if (v) printk(KERN_ERR "hwsampler: CPU %d CPUMF Loss of sample data," " count=%lu.\n", cpu, v); v = cb->invalid_entry_address; if (v) printk(KERN_ERR "hwsampler: CPU %d CPUMF Invalid entry address," " count=%lu.\n", cpu, v); v = cb->incorrect_sdbt_entry; if (v) printk(KERN_ERR "hwsampler: CPU %d CPUMF Incorrect SDBT address," " count=%lu.\n", cpu, v); v = cb->sample_auth_change_alert; if (v) printk(KERN_ERR "hwsampler: CPU %d CPUMF Sample authorization change," " count=%lu.\n", cpu, v); return rc; } static int check_hardware_prerequisites(void) { if (!test_facility(68)) return -EOPNOTSUPP; return 0; } /* * hws_oom_callback() - the OOM callback function * * In case the callback is invoked during memory allocation for the * hw sampler, all obtained memory is deallocated and a flag is set * so main sampler memory allocation can exit with a failure code. * In case the callback is invoked during sampling the hw sampler * is deactivated for all CPUs. 
*/ static int hws_oom_callback(struct notifier_block *nfb, unsigned long dummy, void *parm) { unsigned long *freed; int cpu; struct hws_cpu_buffer *cb; freed = parm; mutex_lock(&hws_sem_oom); if (hws_state == HWS_DEALLOCATED) { /* during memory allocation */ if (oom_killer_was_active == 0) { oom_killer_was_active = 1; *freed += deallocate_sdbt(); } } else { int i; cpu = get_cpu(); cb = &per_cpu(sampler_cpu_buffer, cpu); if (!cb->oom) { for_each_online_cpu(i) { smp_ctl_ssctl_deactivate(i); cb->oom = 1; } cb->finish = 1; printk(KERN_INFO "hwsampler: CPU %d, OOM notify during CPUMF Sampling.\n", cpu); } } mutex_unlock(&hws_sem_oom); return NOTIFY_OK; } static struct notifier_block hws_oom_notifier = { .notifier_call = hws_oom_callback }; static int hws_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { /* We do not have sampler space available for all possible CPUs. All CPUs should be online when hw sampling is activated. */ return (hws_state <= HWS_DEALLOCATED) ? NOTIFY_OK : NOTIFY_BAD; } static struct notifier_block hws_cpu_notifier = { .notifier_call = hws_cpu_callback }; /** * hwsampler_deactivate() - set hardware sampling temporarily inactive * @cpu: specifies the CPU to be set inactive. * * Returns 0 on success, !0 on failure. */ int hwsampler_deactivate(unsigned int cpu) { /* * Deactivate hw sampling temporarily and flush the buffer * by pushing all the pending samples to oprofile buffer. * * This function can be called under one of the following conditions: * Memory unmap, task is exiting. 
*/ int rc; struct hws_cpu_buffer *cb; rc = 0; mutex_lock(&hws_sem); cb = &per_cpu(sampler_cpu_buffer, cpu); if (hws_state == HWS_STARTED) { rc = smp_ctl_qsi(cpu); WARN_ON(rc); if (cb->qsi.cs) { rc = smp_ctl_ssctl_deactivate(cpu); if (rc) { printk(KERN_INFO "hwsampler: CPU %d, CPUMF Deactivation failed.\n", cpu); cb->finish = 1; hws_state = HWS_STOPPING; } else { hws_flush_all = 1; /* Add work to queue to read pending samples.*/ queue_work_on(cpu, hws_wq, &cb->worker); } } } mutex_unlock(&hws_sem); if (hws_wq) flush_workqueue(hws_wq); return rc; } /** * hwsampler_activate() - activate/resume hardware sampling which was deactivated * @cpu: specifies the CPU to be set active. * * Returns 0 on success, !0 on failure. */ int hwsampler_activate(unsigned int cpu) { /* * Re-activate hw sampling. This should be called in pair with * hwsampler_deactivate(). */ int rc; struct hws_cpu_buffer *cb; rc = 0; mutex_lock(&hws_sem); cb = &per_cpu(sampler_cpu_buffer, cpu); if (hws_state == HWS_STARTED) { rc = smp_ctl_qsi(cpu); WARN_ON(rc); if (!cb->qsi.cs) { hws_flush_all = 0; rc = smp_ctl_ssctl_enable_activate(cpu, interval); if (rc) { printk(KERN_ERR "CPU %d, CPUMF activate sampling failed.\n", cpu); } } } mutex_unlock(&hws_sem); return rc; } static int check_qsi_on_setup(void) { int rc; unsigned int cpu; struct hws_cpu_buffer *cb; for_each_online_cpu(cpu) { cb = &per_cpu(sampler_cpu_buffer, cpu); rc = smp_ctl_qsi(cpu); WARN_ON(rc); if (rc) return -EOPNOTSUPP; if (!cb->qsi.as) { printk(KERN_INFO "hwsampler: CPUMF sampling is not authorized.\n"); return -EINVAL; } if (cb->qsi.es) { printk(KERN_WARNING "hwsampler: CPUMF is still enabled.\n"); rc = smp_ctl_ssctl_stop(cpu); if (rc) return -EINVAL; printk(KERN_INFO "CPU %d, CPUMF Sampling stopped now.\n", cpu); } } return 0; } static int check_qsi_on_start(void) { unsigned int cpu; int rc; struct hws_cpu_buffer *cb; for_each_online_cpu(cpu) { cb = &per_cpu(sampler_cpu_buffer, cpu); rc = smp_ctl_qsi(cpu); WARN_ON(rc); if (!cb->qsi.as) 
return -EINVAL; if (cb->qsi.es) return -EINVAL; if (cb->qsi.cs) return -EINVAL; } return 0; } static void worker_on_start(unsigned int cpu) { struct hws_cpu_buffer *cb; cb = &per_cpu(sampler_cpu_buffer, cpu); cb->worker_entry = cb->first_sdbt; } static int worker_check_error(unsigned int cpu, int ext_params) { int rc; unsigned long *sdbt; struct hws_cpu_buffer *cb; rc = 0; cb = &per_cpu(sampler_cpu_buffer, cpu); sdbt = (unsigned long *) cb->worker_entry; if (!sdbt || !*sdbt) return -EINVAL; if (ext_params & CPU_MF_INT_SF_PRA) cb->req_alert++; if (ext_params & CPU_MF_INT_SF_LSDA) cb->loss_of_sample_data++; if (ext_params & CPU_MF_INT_SF_IAE) { cb->invalid_entry_address++; rc = -EINVAL; } if (ext_params & CPU_MF_INT_SF_ISE) { cb->incorrect_sdbt_entry++; rc = -EINVAL; } if (ext_params & CPU_MF_INT_SF_SACA) { cb->sample_auth_change_alert++; rc = -EINVAL; } return rc; } static void worker_on_finish(unsigned int cpu) { int rc, i; struct hws_cpu_buffer *cb; cb = &per_cpu(sampler_cpu_buffer, cpu); if (cb->finish) { rc = smp_ctl_qsi(cpu); WARN_ON(rc); if (cb->qsi.es) { printk(KERN_INFO "hwsampler: CPU %d, CPUMF Stop/Deactivate sampling.\n", cpu); rc = smp_ctl_ssctl_stop(cpu); if (rc) printk(KERN_INFO "hwsampler: CPU %d, CPUMF Deactivation failed.\n", cpu); for_each_online_cpu(i) { if (i == cpu) continue; if (!cb->finish) { cb->finish = 1; queue_work_on(i, hws_wq, &cb->worker); } } } } } static void worker_on_interrupt(unsigned int cpu) { unsigned long *sdbt; unsigned char done; struct hws_cpu_buffer *cb; cb = &per_cpu(sampler_cpu_buffer, cpu); sdbt = (unsigned long *) cb->worker_entry; done = 0; /* do not proceed if stop was entered, * forget the buffers not yet processed */ while (!done && !cb->stop_mode) { unsigned long *trailer; struct hws_trailer_entry *te; unsigned long *dear = 0; trailer = trailer_entry_ptr(*sdbt); /* leave loop if no more work to do */ if (!(*trailer & BUFFER_FULL_MASK)) { done = 1; if (!hws_flush_all) continue; } te = (struct hws_trailer_entry 
*)trailer; cb->sample_overflow += te->overflow; add_samples_to_oprofile(cpu, sdbt, dear); /* reset trailer */ xchg((unsigned char *) te, 0x40); /* advance to next sdb slot in current sdbt */ sdbt++; /* in case link bit is set use address w/o link bit */ if (is_link_entry(sdbt)) sdbt = get_next_sdbt(sdbt); cb->worker_entry = (unsigned long)sdbt; } } static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt, unsigned long *dear) { struct hws_data_entry *sample_data_ptr; unsigned long *trailer; trailer = trailer_entry_ptr(*sdbt); if (dear) { if (dear > trailer) return; trailer = dear; } sample_data_ptr = (struct hws_data_entry *)(*sdbt); while ((unsigned long *)sample_data_ptr < trailer) { struct pt_regs *regs = NULL; struct task_struct *tsk = NULL; /* * Check sampling mode, 1 indicates basic (=customer) sampling * mode. */ if (sample_data_ptr->def != 1) { /* sample slot is not yet written */ break; } else { /* make sure we don't use it twice, * the next time the sampler will set it again */ sample_data_ptr->def = 0; } /* Get pt_regs. 
*/ if (sample_data_ptr->P == 1) { /* userspace sample */ unsigned int pid = sample_data_ptr->prim_asn; if (!counter_config.user) goto skip_sample; rcu_read_lock(); tsk = pid_task(find_vpid(pid), PIDTYPE_PID); if (tsk) regs = task_pt_regs(tsk); rcu_read_unlock(); } else { /* kernelspace sample */ if (!counter_config.kernel) goto skip_sample; regs = task_pt_regs(current); } mutex_lock(&hws_sem); oprofile_add_ext_hw_sample(sample_data_ptr->ia, regs, 0, !sample_data_ptr->P, tsk); mutex_unlock(&hws_sem); skip_sample: sample_data_ptr++; } } static void worker(struct work_struct *work) { unsigned int cpu; int ext_params; struct hws_cpu_buffer *cb; cb = container_of(work, struct hws_cpu_buffer, worker); cpu = smp_processor_id(); ext_params = atomic_xchg(&cb->ext_params, 0); if (!cb->worker_entry) worker_on_start(cpu); if (worker_check_error(cpu, ext_params)) return; if (!cb->finish) worker_on_interrupt(cpu); if (cb->finish) worker_on_finish(cpu); } /** * hwsampler_allocate() - allocate memory for the hardware sampler * @sdbt: number of SDBTs per online CPU (must be > 0) * @sdb: number of SDBs per SDBT (minimum 1, maximum 511) * * Returns 0 on success, !0 on failure. 
*/ int hwsampler_allocate(unsigned long sdbt, unsigned long sdb) { int cpu, rc; mutex_lock(&hws_sem); rc = -EINVAL; if (hws_state != HWS_DEALLOCATED) goto allocate_exit; if (sdbt < 1) goto allocate_exit; if (sdb > MAX_NUM_SDB || sdb < MIN_NUM_SDB) goto allocate_exit; num_sdbt = sdbt; num_sdb = sdb; oom_killer_was_active = 0; register_oom_notifier(&hws_oom_notifier); for_each_online_cpu(cpu) { if (allocate_sdbt(cpu)) { unregister_oom_notifier(&hws_oom_notifier); goto allocate_error; } } unregister_oom_notifier(&hws_oom_notifier); if (oom_killer_was_active) goto allocate_error; hws_state = HWS_STOPPED; rc = 0; allocate_exit: mutex_unlock(&hws_sem); return rc; allocate_error: rc = -ENOMEM; printk(KERN_ERR "hwsampler: CPUMF Memory allocation failed.\n"); goto allocate_exit; } /** * hwsampler_deallocate() - deallocate hardware sampler memory * * Returns 0 on success, !0 on failure. */ int hwsampler_deallocate(void) { int rc; mutex_lock(&hws_sem); rc = -EINVAL; if (hws_state != HWS_STOPPED) goto deallocate_exit; measurement_alert_subclass_unregister(); deallocate_sdbt(); hws_state = HWS_DEALLOCATED; rc = 0; deallocate_exit: mutex_unlock(&hws_sem); return rc; } unsigned long hwsampler_query_min_interval(void) { return min_sampler_rate; } unsigned long hwsampler_query_max_interval(void) { return max_sampler_rate; } unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu) { struct hws_cpu_buffer *cb; cb = &per_cpu(sampler_cpu_buffer, cpu); return cb->sample_overflow; } int hwsampler_setup(void) { int rc; int cpu; struct hws_cpu_buffer *cb; mutex_lock(&hws_sem); rc = -EINVAL; if (hws_state) goto setup_exit; hws_state = HWS_INIT; init_all_cpu_buffers(); rc = check_hardware_prerequisites(); if (rc) goto setup_exit; rc = check_qsi_on_setup(); if (rc) goto setup_exit; rc = -EINVAL; hws_wq = create_workqueue("hwsampler"); if (!hws_wq) goto setup_exit; register_cpu_notifier(&hws_cpu_notifier); for_each_online_cpu(cpu) { cb = &per_cpu(sampler_cpu_buffer, cpu); 
INIT_WORK(&cb->worker, worker); rc = smp_ctl_qsi(cpu); WARN_ON(rc); if (min_sampler_rate != cb->qsi.min_sampl_rate) { if (min_sampler_rate) { printk(KERN_WARNING "hwsampler: different min sampler rate values.\n"); if (min_sampler_rate < cb->qsi.min_sampl_rate) min_sampler_rate = cb->qsi.min_sampl_rate; } else min_sampler_rate = cb->qsi.min_sampl_rate; } if (max_sampler_rate != cb->qsi.max_sampl_rate) { if (max_sampler_rate) { printk(KERN_WARNING "hwsampler: different max sampler rate values.\n"); if (max_sampler_rate > cb->qsi.max_sampl_rate) max_sampler_rate = cb->qsi.max_sampl_rate; } else max_sampler_rate = cb->qsi.max_sampl_rate; } } register_external_interrupt(0x1407, hws_ext_handler); hws_state = HWS_DEALLOCATED; rc = 0; setup_exit: mutex_unlock(&hws_sem); return rc; } int hwsampler_shutdown(void) { int rc; mutex_lock(&hws_sem); rc = -EINVAL; if (hws_state == HWS_DEALLOCATED || hws_state == HWS_STOPPED) { mutex_unlock(&hws_sem); if (hws_wq) flush_workqueue(hws_wq); mutex_lock(&hws_sem); if (hws_state == HWS_STOPPED) { measurement_alert_subclass_unregister(); deallocate_sdbt(); } if (hws_wq) { destroy_workqueue(hws_wq); hws_wq = NULL; } unregister_external_interrupt(0x1407, hws_ext_handler); hws_state = HWS_INIT; rc = 0; } mutex_unlock(&hws_sem); unregister_cpu_notifier(&hws_cpu_notifier); return rc; } /** * hwsampler_start_all() - start hardware sampling on all online CPUs * @rate: specifies the used interval when samples are taken * * Returns 0 on success, !0 on failure. 
*/ int hwsampler_start_all(unsigned long rate) { int rc, cpu; mutex_lock(&hws_sem); hws_oom = 0; rc = -EINVAL; if (hws_state != HWS_STOPPED) goto start_all_exit; interval = rate; /* fail if rate is not valid */ if (interval < min_sampler_rate || interval > max_sampler_rate) goto start_all_exit; rc = check_qsi_on_start(); if (rc) goto start_all_exit; rc = prepare_cpu_buffers(); if (rc) goto start_all_exit; for_each_online_cpu(cpu) { rc = start_sampling(cpu); if (rc) break; } if (rc) { for_each_online_cpu(cpu) { stop_sampling(cpu); } goto start_all_exit; } hws_state = HWS_STARTED; rc = 0; start_all_exit: mutex_unlock(&hws_sem); if (rc) return rc; register_oom_notifier(&hws_oom_notifier); hws_oom = 1; hws_flush_all = 0; /* now let them in, 1407 CPUMF external interrupts */ measurement_alert_subclass_register(); return 0; } /** * hwsampler_stop_all() - stop hardware sampling on all online CPUs * * Returns 0 on success, !0 on failure. */ int hwsampler_stop_all(void) { int tmp_rc, rc, cpu; struct hws_cpu_buffer *cb; mutex_lock(&hws_sem); rc = 0; if (hws_state == HWS_INIT) { mutex_unlock(&hws_sem); return rc; } hws_state = HWS_STOPPING; mutex_unlock(&hws_sem); for_each_online_cpu(cpu) { cb = &per_cpu(sampler_cpu_buffer, cpu); cb->stop_mode = 1; tmp_rc = stop_sampling(cpu); if (tmp_rc) rc = tmp_rc; } if (hws_wq) flush_workqueue(hws_wq); mutex_lock(&hws_sem); if (hws_oom) { unregister_oom_notifier(&hws_oom_notifier); hws_oom = 0; } hws_state = HWS_STOPPED; mutex_unlock(&hws_sem); return rc; }
gpl-2.0
Split-Screen/android_kernel_asus_fugu
arch/arm/mach-s5pv210/clock.c
2161
34192
/* linux/arch/arm/mach-s5pv210/clock.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * S5PV210 - Clock support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/device.h> #include <linux/io.h> #include <mach/map.h> #include <plat/cpu-freq.h> #include <mach/regs-clock.h> #include <plat/clock.h> #include <plat/cpu.h> #include <plat/pll.h> #include <plat/s5p-clock.h> #include <plat/clock-clksrc.h> #include "common.h" static unsigned long xtal; static struct clksrc_clk clk_mout_apll = { .clk = { .name = "mout_apll", }, .sources = &clk_src_apll, .reg_src = { .reg = S5P_CLK_SRC0, .shift = 0, .size = 1 }, }; static struct clksrc_clk clk_mout_epll = { .clk = { .name = "mout_epll", }, .sources = &clk_src_epll, .reg_src = { .reg = S5P_CLK_SRC0, .shift = 8, .size = 1 }, }; static struct clksrc_clk clk_mout_mpll = { .clk = { .name = "mout_mpll", }, .sources = &clk_src_mpll, .reg_src = { .reg = S5P_CLK_SRC0, .shift = 4, .size = 1 }, }; static struct clk *clkset_armclk_list[] = { [0] = &clk_mout_apll.clk, [1] = &clk_mout_mpll.clk, }; static struct clksrc_sources clkset_armclk = { .sources = clkset_armclk_list, .nr_sources = ARRAY_SIZE(clkset_armclk_list), }; static struct clksrc_clk clk_armclk = { .clk = { .name = "armclk", }, .sources = &clkset_armclk, .reg_src = { .reg = S5P_CLK_SRC0, .shift = 16, .size = 1 }, .reg_div = { .reg = S5P_CLK_DIV0, .shift = 0, .size = 3 }, }; static struct clksrc_clk clk_hclk_msys = { .clk = { .name = "hclk_msys", .parent = &clk_armclk.clk, }, .reg_div = { .reg = S5P_CLK_DIV0, .shift = 8, .size = 3 }, }; static struct clksrc_clk clk_pclk_msys = { .clk = { .name = "pclk_msys", .parent = 
&clk_hclk_msys.clk, }, .reg_div = { .reg = S5P_CLK_DIV0, .shift = 12, .size = 3 }, }; static struct clksrc_clk clk_sclk_a2m = { .clk = { .name = "sclk_a2m", .parent = &clk_mout_apll.clk, }, .reg_div = { .reg = S5P_CLK_DIV0, .shift = 4, .size = 3 }, }; static struct clk *clkset_hclk_sys_list[] = { [0] = &clk_mout_mpll.clk, [1] = &clk_sclk_a2m.clk, }; static struct clksrc_sources clkset_hclk_sys = { .sources = clkset_hclk_sys_list, .nr_sources = ARRAY_SIZE(clkset_hclk_sys_list), }; static struct clksrc_clk clk_hclk_dsys = { .clk = { .name = "hclk_dsys", }, .sources = &clkset_hclk_sys, .reg_src = { .reg = S5P_CLK_SRC0, .shift = 20, .size = 1 }, .reg_div = { .reg = S5P_CLK_DIV0, .shift = 16, .size = 4 }, }; static struct clksrc_clk clk_pclk_dsys = { .clk = { .name = "pclk_dsys", .parent = &clk_hclk_dsys.clk, }, .reg_div = { .reg = S5P_CLK_DIV0, .shift = 20, .size = 3 }, }; static struct clksrc_clk clk_hclk_psys = { .clk = { .name = "hclk_psys", }, .sources = &clkset_hclk_sys, .reg_src = { .reg = S5P_CLK_SRC0, .shift = 24, .size = 1 }, .reg_div = { .reg = S5P_CLK_DIV0, .shift = 24, .size = 4 }, }; static struct clksrc_clk clk_pclk_psys = { .clk = { .name = "pclk_psys", .parent = &clk_hclk_psys.clk, }, .reg_div = { .reg = S5P_CLK_DIV0, .shift = 28, .size = 3 }, }; static int s5pv210_clk_ip0_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_IP0, clk, enable); } static int s5pv210_clk_ip1_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_IP1, clk, enable); } static int s5pv210_clk_ip2_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_IP2, clk, enable); } static int s5pv210_clk_ip3_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_IP3, clk, enable); } static int s5pv210_clk_mask0_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLK_SRC_MASK0, clk, enable); } static int s5pv210_clk_mask1_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLK_SRC_MASK1, clk, enable); } static int 
s5pv210_clk_hdmiphy_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_HDMI_PHY_CONTROL, clk, enable); } static int exynos4_clk_dac_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_DAC_PHY_CONTROL, clk, enable); } static struct clk clk_sclk_hdmi27m = { .name = "sclk_hdmi27m", .rate = 27000000, }; static struct clk clk_sclk_hdmiphy = { .name = "sclk_hdmiphy", }; static struct clk clk_sclk_usbphy0 = { .name = "sclk_usbphy0", }; static struct clk clk_sclk_usbphy1 = { .name = "sclk_usbphy1", }; static struct clk clk_pcmcdclk0 = { .name = "pcmcdclk", }; static struct clk clk_pcmcdclk1 = { .name = "pcmcdclk", }; static struct clk clk_pcmcdclk2 = { .name = "pcmcdclk", }; static struct clk *clkset_vpllsrc_list[] = { [0] = &clk_fin_vpll, [1] = &clk_sclk_hdmi27m, }; static struct clksrc_sources clkset_vpllsrc = { .sources = clkset_vpllsrc_list, .nr_sources = ARRAY_SIZE(clkset_vpllsrc_list), }; static struct clksrc_clk clk_vpllsrc = { .clk = { .name = "vpll_src", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 7), }, .sources = &clkset_vpllsrc, .reg_src = { .reg = S5P_CLK_SRC1, .shift = 28, .size = 1 }, }; static struct clk *clkset_sclk_vpll_list[] = { [0] = &clk_vpllsrc.clk, [1] = &clk_fout_vpll, }; static struct clksrc_sources clkset_sclk_vpll = { .sources = clkset_sclk_vpll_list, .nr_sources = ARRAY_SIZE(clkset_sclk_vpll_list), }; static struct clksrc_clk clk_sclk_vpll = { .clk = { .name = "sclk_vpll", }, .sources = &clkset_sclk_vpll, .reg_src = { .reg = S5P_CLK_SRC0, .shift = 12, .size = 1 }, }; static struct clk *clkset_moutdmc0src_list[] = { [0] = &clk_sclk_a2m.clk, [1] = &clk_mout_mpll.clk, [2] = NULL, [3] = NULL, }; static struct clksrc_sources clkset_moutdmc0src = { .sources = clkset_moutdmc0src_list, .nr_sources = ARRAY_SIZE(clkset_moutdmc0src_list), }; static struct clksrc_clk clk_mout_dmc0 = { .clk = { .name = "mout_dmc0", }, .sources = &clkset_moutdmc0src, .reg_src = { .reg = S5P_CLK_SRC6, .shift = 24, .size = 2 }, }; static struct 
clksrc_clk clk_sclk_dmc0 = { .clk = { .name = "sclk_dmc0", .parent = &clk_mout_dmc0.clk, }, .reg_div = { .reg = S5P_CLK_DIV6, .shift = 28, .size = 4 }, }; static unsigned long s5pv210_clk_imem_get_rate(struct clk *clk) { return clk_get_rate(clk->parent) / 2; } static struct clk_ops clk_hclk_imem_ops = { .get_rate = s5pv210_clk_imem_get_rate, }; static unsigned long s5pv210_clk_fout_apll_get_rate(struct clk *clk) { return s5p_get_pll45xx(xtal, __raw_readl(S5P_APLL_CON), pll_4508); } static struct clk_ops clk_fout_apll_ops = { .get_rate = s5pv210_clk_fout_apll_get_rate, }; static struct clk init_clocks_off[] = { { .name = "rot", .parent = &clk_hclk_dsys.clk, .enable = s5pv210_clk_ip0_ctrl, .ctrlbit = (1<<29), }, { .name = "fimc", .devname = "s5pv210-fimc.0", .parent = &clk_hclk_dsys.clk, .enable = s5pv210_clk_ip0_ctrl, .ctrlbit = (1 << 24), }, { .name = "fimc", .devname = "s5pv210-fimc.1", .parent = &clk_hclk_dsys.clk, .enable = s5pv210_clk_ip0_ctrl, .ctrlbit = (1 << 25), }, { .name = "fimc", .devname = "s5pv210-fimc.2", .parent = &clk_hclk_dsys.clk, .enable = s5pv210_clk_ip0_ctrl, .ctrlbit = (1 << 26), }, { .name = "jpeg", .parent = &clk_hclk_dsys.clk, .enable = s5pv210_clk_ip0_ctrl, .ctrlbit = (1 << 28), }, { .name = "mfc", .devname = "s5p-mfc", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip0_ctrl, .ctrlbit = (1 << 16), }, { .name = "dac", .devname = "s5p-sdo", .parent = &clk_hclk_dsys.clk, .enable = s5pv210_clk_ip1_ctrl, .ctrlbit = (1 << 10), }, { .name = "mixer", .devname = "s5p-mixer", .parent = &clk_hclk_dsys.clk, .enable = s5pv210_clk_ip1_ctrl, .ctrlbit = (1 << 9), }, { .name = "vp", .devname = "s5p-mixer", .parent = &clk_hclk_dsys.clk, .enable = s5pv210_clk_ip1_ctrl, .ctrlbit = (1 << 8), }, { .name = "hdmi", .devname = "s5pv210-hdmi", .parent = &clk_hclk_dsys.clk, .enable = s5pv210_clk_ip1_ctrl, .ctrlbit = (1 << 11), }, { .name = "hdmiphy", .devname = "s5pv210-hdmi", .enable = s5pv210_clk_hdmiphy_ctrl, .ctrlbit = (1 << 0), }, { .name = "dacphy", 
.devname = "s5p-sdo", .enable = exynos4_clk_dac_ctrl, .ctrlbit = (1 << 0), }, { .name = "otg", .parent = &clk_hclk_psys.clk, .enable = s5pv210_clk_ip1_ctrl, .ctrlbit = (1<<16), }, { .name = "usb-host", .parent = &clk_hclk_psys.clk, .enable = s5pv210_clk_ip1_ctrl, .ctrlbit = (1<<17), }, { .name = "lcd", .parent = &clk_hclk_dsys.clk, .enable = s5pv210_clk_ip1_ctrl, .ctrlbit = (1<<0), }, { .name = "cfcon", .parent = &clk_hclk_psys.clk, .enable = s5pv210_clk_ip1_ctrl, .ctrlbit = (1<<25), }, { .name = "systimer", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1<<16), }, { .name = "watchdog", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1<<22), }, { .name = "rtc", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1<<15), }, { .name = "i2c", .devname = "s3c2440-i2c.0", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1<<7), }, { .name = "i2c", .devname = "s3c2440-i2c.1", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1 << 10), }, { .name = "i2c", .devname = "s3c2440-i2c.2", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1<<9), }, { .name = "i2c", .devname = "s3c2440-hdmiphy-i2c", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1 << 11), }, { .name = "spi", .devname = "s5pv210-spi.0", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1<<12), }, { .name = "spi", .devname = "s5pv210-spi.1", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1<<13), }, { .name = "spi", .devname = "s5pv210-spi.2", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1<<14), }, { .name = "timers", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1<<23), }, { .name = "adc", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1<<24), }, { .name = "keypad", .parent = &clk_pclk_psys.clk, .enable = 
s5pv210_clk_ip3_ctrl, .ctrlbit = (1<<21), }, { .name = "iis", .devname = "samsung-i2s.0", .parent = &clk_p, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1<<4), }, { .name = "iis", .devname = "samsung-i2s.1", .parent = &clk_p, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1 << 5), }, { .name = "iis", .devname = "samsung-i2s.2", .parent = &clk_p, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1 << 6), }, { .name = "spdif", .parent = &clk_p, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1 << 0), }, }; static struct clk init_clocks[] = { { .name = "hclk_imem", .parent = &clk_hclk_msys.clk, .ctrlbit = (1 << 5), .enable = s5pv210_clk_ip0_ctrl, .ops = &clk_hclk_imem_ops, }, { .name = "uart", .devname = "s5pv210-uart.0", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1 << 17), }, { .name = "uart", .devname = "s5pv210-uart.1", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1 << 18), }, { .name = "uart", .devname = "s5pv210-uart.2", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1 << 19), }, { .name = "uart", .devname = "s5pv210-uart.3", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1 << 20), }, { .name = "sromc", .parent = &clk_hclk_psys.clk, .enable = s5pv210_clk_ip1_ctrl, .ctrlbit = (1 << 26), }, }; static struct clk clk_hsmmc0 = { .name = "hsmmc", .devname = "s3c-sdhci.0", .parent = &clk_hclk_psys.clk, .enable = s5pv210_clk_ip2_ctrl, .ctrlbit = (1<<16), }; static struct clk clk_hsmmc1 = { .name = "hsmmc", .devname = "s3c-sdhci.1", .parent = &clk_hclk_psys.clk, .enable = s5pv210_clk_ip2_ctrl, .ctrlbit = (1<<17), }; static struct clk clk_hsmmc2 = { .name = "hsmmc", .devname = "s3c-sdhci.2", .parent = &clk_hclk_psys.clk, .enable = s5pv210_clk_ip2_ctrl, .ctrlbit = (1<<18), }; static struct clk clk_hsmmc3 = { .name = "hsmmc", .devname = "s3c-sdhci.3", .parent = &clk_hclk_psys.clk, .enable = s5pv210_clk_ip2_ctrl, .ctrlbit = (1<<19), }; static struct clk clk_pdma0 = { .name = 
"pdma0", .parent = &clk_hclk_psys.clk, .enable = s5pv210_clk_ip0_ctrl, .ctrlbit = (1 << 3), }; static struct clk clk_pdma1 = { .name = "pdma1", .parent = &clk_hclk_psys.clk, .enable = s5pv210_clk_ip0_ctrl, .ctrlbit = (1 << 4), }; static struct clk *clkset_uart_list[] = { [6] = &clk_mout_mpll.clk, [7] = &clk_mout_epll.clk, }; static struct clksrc_sources clkset_uart = { .sources = clkset_uart_list, .nr_sources = ARRAY_SIZE(clkset_uart_list), }; static struct clk *clkset_group1_list[] = { [0] = &clk_sclk_a2m.clk, [1] = &clk_mout_mpll.clk, [2] = &clk_mout_epll.clk, [3] = &clk_sclk_vpll.clk, }; static struct clksrc_sources clkset_group1 = { .sources = clkset_group1_list, .nr_sources = ARRAY_SIZE(clkset_group1_list), }; static struct clk *clkset_sclk_onenand_list[] = { [0] = &clk_hclk_psys.clk, [1] = &clk_hclk_dsys.clk, }; static struct clksrc_sources clkset_sclk_onenand = { .sources = clkset_sclk_onenand_list, .nr_sources = ARRAY_SIZE(clkset_sclk_onenand_list), }; static struct clk *clkset_sclk_dac_list[] = { [0] = &clk_sclk_vpll.clk, [1] = &clk_sclk_hdmiphy, }; static struct clksrc_sources clkset_sclk_dac = { .sources = clkset_sclk_dac_list, .nr_sources = ARRAY_SIZE(clkset_sclk_dac_list), }; static struct clksrc_clk clk_sclk_dac = { .clk = { .name = "sclk_dac", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 2), }, .sources = &clkset_sclk_dac, .reg_src = { .reg = S5P_CLK_SRC1, .shift = 8, .size = 1 }, }; static struct clksrc_clk clk_sclk_pixel = { .clk = { .name = "sclk_pixel", .parent = &clk_sclk_vpll.clk, }, .reg_div = { .reg = S5P_CLK_DIV1, .shift = 0, .size = 4}, }; static struct clk *clkset_sclk_hdmi_list[] = { [0] = &clk_sclk_pixel.clk, [1] = &clk_sclk_hdmiphy, }; static struct clksrc_sources clkset_sclk_hdmi = { .sources = clkset_sclk_hdmi_list, .nr_sources = ARRAY_SIZE(clkset_sclk_hdmi_list), }; static struct clksrc_clk clk_sclk_hdmi = { .clk = { .name = "sclk_hdmi", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 0), }, .sources = &clkset_sclk_hdmi, 
.reg_src = { .reg = S5P_CLK_SRC1, .shift = 0, .size = 1 }, }; static struct clk *clkset_sclk_mixer_list[] = { [0] = &clk_sclk_dac.clk, [1] = &clk_sclk_hdmi.clk, }; static struct clksrc_sources clkset_sclk_mixer = { .sources = clkset_sclk_mixer_list, .nr_sources = ARRAY_SIZE(clkset_sclk_mixer_list), }; static struct clksrc_clk clk_sclk_mixer = { .clk = { .name = "sclk_mixer", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 1), }, .sources = &clkset_sclk_mixer, .reg_src = { .reg = S5P_CLK_SRC1, .shift = 4, .size = 1 }, }; static struct clksrc_clk *sclk_tv[] = { &clk_sclk_dac, &clk_sclk_pixel, &clk_sclk_hdmi, &clk_sclk_mixer, }; static struct clk *clkset_sclk_audio0_list[] = { [0] = &clk_ext_xtal_mux, [1] = &clk_pcmcdclk0, [2] = &clk_sclk_hdmi27m, [3] = &clk_sclk_usbphy0, [4] = &clk_sclk_usbphy1, [5] = &clk_sclk_hdmiphy, [6] = &clk_mout_mpll.clk, [7] = &clk_mout_epll.clk, [8] = &clk_sclk_vpll.clk, }; static struct clksrc_sources clkset_sclk_audio0 = { .sources = clkset_sclk_audio0_list, .nr_sources = ARRAY_SIZE(clkset_sclk_audio0_list), }; static struct clksrc_clk clk_sclk_audio0 = { .clk = { .name = "sclk_audio", .devname = "soc-audio.0", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 24), }, .sources = &clkset_sclk_audio0, .reg_src = { .reg = S5P_CLK_SRC6, .shift = 0, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV6, .shift = 0, .size = 4 }, }; static struct clk *clkset_sclk_audio1_list[] = { [0] = &clk_ext_xtal_mux, [1] = &clk_pcmcdclk1, [2] = &clk_sclk_hdmi27m, [3] = &clk_sclk_usbphy0, [4] = &clk_sclk_usbphy1, [5] = &clk_sclk_hdmiphy, [6] = &clk_mout_mpll.clk, [7] = &clk_mout_epll.clk, [8] = &clk_sclk_vpll.clk, }; static struct clksrc_sources clkset_sclk_audio1 = { .sources = clkset_sclk_audio1_list, .nr_sources = ARRAY_SIZE(clkset_sclk_audio1_list), }; static struct clksrc_clk clk_sclk_audio1 = { .clk = { .name = "sclk_audio", .devname = "soc-audio.1", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 25), }, .sources = &clkset_sclk_audio1, .reg_src = { .reg 
= S5P_CLK_SRC6, .shift = 4, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV6, .shift = 4, .size = 4 }, }; static struct clk *clkset_sclk_audio2_list[] = { [0] = &clk_ext_xtal_mux, [1] = &clk_pcmcdclk0, [2] = &clk_sclk_hdmi27m, [3] = &clk_sclk_usbphy0, [4] = &clk_sclk_usbphy1, [5] = &clk_sclk_hdmiphy, [6] = &clk_mout_mpll.clk, [7] = &clk_mout_epll.clk, [8] = &clk_sclk_vpll.clk, }; static struct clksrc_sources clkset_sclk_audio2 = { .sources = clkset_sclk_audio2_list, .nr_sources = ARRAY_SIZE(clkset_sclk_audio2_list), }; static struct clksrc_clk clk_sclk_audio2 = { .clk = { .name = "sclk_audio", .devname = "soc-audio.2", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 26), }, .sources = &clkset_sclk_audio2, .reg_src = { .reg = S5P_CLK_SRC6, .shift = 8, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV6, .shift = 8, .size = 4 }, }; static struct clk *clkset_sclk_spdif_list[] = { [0] = &clk_sclk_audio0.clk, [1] = &clk_sclk_audio1.clk, [2] = &clk_sclk_audio2.clk, }; static struct clksrc_sources clkset_sclk_spdif = { .sources = clkset_sclk_spdif_list, .nr_sources = ARRAY_SIZE(clkset_sclk_spdif_list), }; static struct clksrc_clk clk_sclk_spdif = { .clk = { .name = "sclk_spdif", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 27), .ops = &s5p_sclk_spdif_ops, }, .sources = &clkset_sclk_spdif, .reg_src = { .reg = S5P_CLK_SRC6, .shift = 12, .size = 2 }, }; static struct clk *clkset_group2_list[] = { [0] = &clk_ext_xtal_mux, [1] = &clk_xusbxti, [2] = &clk_sclk_hdmi27m, [3] = &clk_sclk_usbphy0, [4] = &clk_sclk_usbphy1, [5] = &clk_sclk_hdmiphy, [6] = &clk_mout_mpll.clk, [7] = &clk_mout_epll.clk, [8] = &clk_sclk_vpll.clk, }; static struct clksrc_sources clkset_group2 = { .sources = clkset_group2_list, .nr_sources = ARRAY_SIZE(clkset_group2_list), }; static struct clksrc_clk clksrcs[] = { { .clk = { .name = "sclk_dmc", }, .sources = &clkset_group1, .reg_src = { .reg = S5P_CLK_SRC6, .shift = 24, .size = 2 }, .reg_div = { .reg = S5P_CLK_DIV6, .shift = 28, .size = 4 }, }, { .clk = { 
.name = "sclk_onenand", }, .sources = &clkset_sclk_onenand, .reg_src = { .reg = S5P_CLK_SRC0, .shift = 28, .size = 1 }, .reg_div = { .reg = S5P_CLK_DIV6, .shift = 12, .size = 3 }, }, { .clk = { .name = "sclk_fimc", .devname = "s5pv210-fimc.0", .enable = s5pv210_clk_mask1_ctrl, .ctrlbit = (1 << 2), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC3, .shift = 12, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV3, .shift = 12, .size = 4 }, }, { .clk = { .name = "sclk_fimc", .devname = "s5pv210-fimc.1", .enable = s5pv210_clk_mask1_ctrl, .ctrlbit = (1 << 3), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC3, .shift = 16, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV3, .shift = 16, .size = 4 }, }, { .clk = { .name = "sclk_fimc", .devname = "s5pv210-fimc.2", .enable = s5pv210_clk_mask1_ctrl, .ctrlbit = (1 << 4), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC3, .shift = 20, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV3, .shift = 20, .size = 4 }, }, { .clk = { .name = "sclk_cam0", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 3), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC1, .shift = 12, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV1, .shift = 12, .size = 4 }, }, { .clk = { .name = "sclk_cam1", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 4), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC1, .shift = 16, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV1, .shift = 16, .size = 4 }, }, { .clk = { .name = "sclk_fimd", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 5), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC1, .shift = 20, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV1, .shift = 20, .size = 4 }, }, { .clk = { .name = "sclk_mfc", .devname = "s5p-mfc", .enable = s5pv210_clk_ip0_ctrl, .ctrlbit = (1 << 16), }, .sources = &clkset_group1, .reg_src = { .reg = S5P_CLK_SRC2, .shift = 4, .size = 2 }, .reg_div = { .reg = S5P_CLK_DIV2, .shift = 4, .size = 4 }, }, { .clk = { .name = "sclk_g2d", 
.enable = s5pv210_clk_ip0_ctrl, .ctrlbit = (1 << 12), }, .sources = &clkset_group1, .reg_src = { .reg = S5P_CLK_SRC2, .shift = 8, .size = 2 }, .reg_div = { .reg = S5P_CLK_DIV2, .shift = 8, .size = 4 }, }, { .clk = { .name = "sclk_g3d", .enable = s5pv210_clk_ip0_ctrl, .ctrlbit = (1 << 8), }, .sources = &clkset_group1, .reg_src = { .reg = S5P_CLK_SRC2, .shift = 0, .size = 2 }, .reg_div = { .reg = S5P_CLK_DIV2, .shift = 0, .size = 4 }, }, { .clk = { .name = "sclk_csis", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 6), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC1, .shift = 24, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV1, .shift = 28, .size = 4 }, }, { .clk = { .name = "sclk_pwi", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 29), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC6, .shift = 20, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV6, .shift = 24, .size = 4 }, }, { .clk = { .name = "sclk_pwm", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 19), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC5, .shift = 12, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV5, .shift = 12, .size = 4 }, }, }; static struct clksrc_clk clk_sclk_uart0 = { .clk = { .name = "uclk1", .devname = "s5pv210-uart.0", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 12), }, .sources = &clkset_uart, .reg_src = { .reg = S5P_CLK_SRC4, .shift = 16, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV4, .shift = 16, .size = 4 }, }; static struct clksrc_clk clk_sclk_uart1 = { .clk = { .name = "uclk1", .devname = "s5pv210-uart.1", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 13), }, .sources = &clkset_uart, .reg_src = { .reg = S5P_CLK_SRC4, .shift = 20, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV4, .shift = 20, .size = 4 }, }; static struct clksrc_clk clk_sclk_uart2 = { .clk = { .name = "uclk1", .devname = "s5pv210-uart.2", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 14), }, .sources = &clkset_uart, .reg_src = { .reg = S5P_CLK_SRC4, .shift 
= 24, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV4, .shift = 24, .size = 4 }, }; static struct clksrc_clk clk_sclk_uart3 = { .clk = { .name = "uclk1", .devname = "s5pv210-uart.3", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 15), }, .sources = &clkset_uart, .reg_src = { .reg = S5P_CLK_SRC4, .shift = 28, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV4, .shift = 28, .size = 4 }, }; static struct clksrc_clk clk_sclk_mmc0 = { .clk = { .name = "sclk_mmc", .devname = "s3c-sdhci.0", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 8), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC4, .shift = 0, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV4, .shift = 0, .size = 4 }, }; static struct clksrc_clk clk_sclk_mmc1 = { .clk = { .name = "sclk_mmc", .devname = "s3c-sdhci.1", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 9), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC4, .shift = 4, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV4, .shift = 4, .size = 4 }, }; static struct clksrc_clk clk_sclk_mmc2 = { .clk = { .name = "sclk_mmc", .devname = "s3c-sdhci.2", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 10), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC4, .shift = 8, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV4, .shift = 8, .size = 4 }, }; static struct clksrc_clk clk_sclk_mmc3 = { .clk = { .name = "sclk_mmc", .devname = "s3c-sdhci.3", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 11), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC4, .shift = 12, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV4, .shift = 12, .size = 4 }, }; static struct clksrc_clk clk_sclk_spi0 = { .clk = { .name = "sclk_spi", .devname = "s5pv210-spi.0", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 16), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC5, .shift = 0, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV5, .shift = 0, .size = 4 }, }; static struct clksrc_clk clk_sclk_spi1 = { .clk = { .name = "sclk_spi", .devname = 
"s5pv210-spi.1", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 17), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC5, .shift = 4, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV5, .shift = 4, .size = 4 }, }; static struct clksrc_clk *clksrc_cdev[] = { &clk_sclk_uart0, &clk_sclk_uart1, &clk_sclk_uart2, &clk_sclk_uart3, &clk_sclk_mmc0, &clk_sclk_mmc1, &clk_sclk_mmc2, &clk_sclk_mmc3, &clk_sclk_spi0, &clk_sclk_spi1, }; static struct clk *clk_cdev[] = { &clk_hsmmc0, &clk_hsmmc1, &clk_hsmmc2, &clk_hsmmc3, &clk_pdma0, &clk_pdma1, }; /* Clock initialisation code */ static struct clksrc_clk *sysclks[] = { &clk_mout_apll, &clk_mout_epll, &clk_mout_mpll, &clk_armclk, &clk_hclk_msys, &clk_sclk_a2m, &clk_hclk_dsys, &clk_hclk_psys, &clk_pclk_msys, &clk_pclk_dsys, &clk_pclk_psys, &clk_vpllsrc, &clk_sclk_vpll, &clk_mout_dmc0, &clk_sclk_dmc0, &clk_sclk_audio0, &clk_sclk_audio1, &clk_sclk_audio2, &clk_sclk_spdif, }; static u32 epll_div[][6] = { { 48000000, 0, 48, 3, 3, 0 }, { 96000000, 0, 48, 3, 2, 0 }, { 144000000, 1, 72, 3, 2, 0 }, { 192000000, 0, 48, 3, 1, 0 }, { 288000000, 1, 72, 3, 1, 0 }, { 32750000, 1, 65, 3, 4, 35127 }, { 32768000, 1, 65, 3, 4, 35127 }, { 45158400, 0, 45, 3, 3, 10355 }, { 45000000, 0, 45, 3, 3, 10355 }, { 45158000, 0, 45, 3, 3, 10355 }, { 49125000, 0, 49, 3, 3, 9961 }, { 49152000, 0, 49, 3, 3, 9961 }, { 67737600, 1, 67, 3, 3, 48366 }, { 67738000, 1, 67, 3, 3, 48366 }, { 73800000, 1, 73, 3, 3, 47710 }, { 73728000, 1, 73, 3, 3, 47710 }, { 36000000, 1, 32, 3, 4, 0 }, { 60000000, 1, 60, 3, 3, 0 }, { 72000000, 1, 72, 3, 3, 0 }, { 80000000, 1, 80, 3, 3, 0 }, { 84000000, 0, 42, 3, 2, 0 }, { 50000000, 0, 50, 3, 3, 0 }, }; static int s5pv210_epll_set_rate(struct clk *clk, unsigned long rate) { unsigned int epll_con, epll_con_k; unsigned int i; /* Return if nothing changed */ if (clk->rate == rate) return 0; epll_con = __raw_readl(S5P_EPLL_CON); epll_con_k = __raw_readl(S5P_EPLL_CON1); epll_con_k &= ~PLL46XX_KDIV_MASK; epll_con &= ~(1 << 27 | 
PLL46XX_MDIV_MASK << PLL46XX_MDIV_SHIFT | PLL46XX_PDIV_MASK << PLL46XX_PDIV_SHIFT | PLL46XX_SDIV_MASK << PLL46XX_SDIV_SHIFT); for (i = 0; i < ARRAY_SIZE(epll_div); i++) { if (epll_div[i][0] == rate) { epll_con_k |= epll_div[i][5] << 0; epll_con |= (epll_div[i][1] << 27 | epll_div[i][2] << PLL46XX_MDIV_SHIFT | epll_div[i][3] << PLL46XX_PDIV_SHIFT | epll_div[i][4] << PLL46XX_SDIV_SHIFT); break; } } if (i == ARRAY_SIZE(epll_div)) { printk(KERN_ERR "%s: Invalid Clock EPLL Frequency\n", __func__); return -EINVAL; } __raw_writel(epll_con, S5P_EPLL_CON); __raw_writel(epll_con_k, S5P_EPLL_CON1); printk(KERN_WARNING "EPLL Rate changes from %lu to %lu\n", clk->rate, rate); clk->rate = rate; return 0; } static struct clk_ops s5pv210_epll_ops = { .set_rate = s5pv210_epll_set_rate, .get_rate = s5p_epll_get_rate, }; static u32 vpll_div[][5] = { { 54000000, 3, 53, 3, 0 }, { 108000000, 3, 53, 2, 0 }, }; static unsigned long s5pv210_vpll_get_rate(struct clk *clk) { return clk->rate; } static int s5pv210_vpll_set_rate(struct clk *clk, unsigned long rate) { unsigned int vpll_con; unsigned int i; /* Return if nothing changed */ if (clk->rate == rate) return 0; vpll_con = __raw_readl(S5P_VPLL_CON); vpll_con &= ~(0x1 << 27 | \ PLL90XX_MDIV_MASK << PLL90XX_MDIV_SHIFT | \ PLL90XX_PDIV_MASK << PLL90XX_PDIV_SHIFT | \ PLL90XX_SDIV_MASK << PLL90XX_SDIV_SHIFT); for (i = 0; i < ARRAY_SIZE(vpll_div); i++) { if (vpll_div[i][0] == rate) { vpll_con |= vpll_div[i][1] << PLL90XX_PDIV_SHIFT; vpll_con |= vpll_div[i][2] << PLL90XX_MDIV_SHIFT; vpll_con |= vpll_div[i][3] << PLL90XX_SDIV_SHIFT; vpll_con |= vpll_div[i][4] << 27; break; } } if (i == ARRAY_SIZE(vpll_div)) { printk(KERN_ERR "%s: Invalid Clock VPLL Frequency\n", __func__); return -EINVAL; } __raw_writel(vpll_con, S5P_VPLL_CON); /* Wait for VPLL lock */ while (!(__raw_readl(S5P_VPLL_CON) & (1 << PLL90XX_LOCKED_SHIFT))) continue; clk->rate = rate; return 0; } static struct clk_ops s5pv210_vpll_ops = { .get_rate = s5pv210_vpll_get_rate, .set_rate 
= s5pv210_vpll_set_rate, }; void __init_or_cpufreq s5pv210_setup_clocks(void) { struct clk *xtal_clk; unsigned long vpllsrc; unsigned long armclk; unsigned long hclk_msys; unsigned long hclk_dsys; unsigned long hclk_psys; unsigned long pclk_msys; unsigned long pclk_dsys; unsigned long pclk_psys; unsigned long apll; unsigned long mpll; unsigned long epll; unsigned long vpll; unsigned int ptr; u32 clkdiv0, clkdiv1; /* Set functions for clk_fout_epll */ clk_fout_epll.enable = s5p_epll_enable; clk_fout_epll.ops = &s5pv210_epll_ops; printk(KERN_DEBUG "%s: registering clocks\n", __func__); clkdiv0 = __raw_readl(S5P_CLK_DIV0); clkdiv1 = __raw_readl(S5P_CLK_DIV1); printk(KERN_DEBUG "%s: clkdiv0 = %08x, clkdiv1 = %08x\n", __func__, clkdiv0, clkdiv1); xtal_clk = clk_get(NULL, "xtal"); BUG_ON(IS_ERR(xtal_clk)); xtal = clk_get_rate(xtal_clk); clk_put(xtal_clk); printk(KERN_DEBUG "%s: xtal is %ld\n", __func__, xtal); apll = s5p_get_pll45xx(xtal, __raw_readl(S5P_APLL_CON), pll_4508); mpll = s5p_get_pll45xx(xtal, __raw_readl(S5P_MPLL_CON), pll_4502); epll = s5p_get_pll46xx(xtal, __raw_readl(S5P_EPLL_CON), __raw_readl(S5P_EPLL_CON1), pll_4600); vpllsrc = clk_get_rate(&clk_vpllsrc.clk); vpll = s5p_get_pll45xx(vpllsrc, __raw_readl(S5P_VPLL_CON), pll_4502); clk_fout_apll.ops = &clk_fout_apll_ops; clk_fout_mpll.rate = mpll; clk_fout_epll.rate = epll; clk_fout_vpll.ops = &s5pv210_vpll_ops; clk_fout_vpll.rate = vpll; printk(KERN_INFO "S5PV210: PLL settings, A=%ld, M=%ld, E=%ld V=%ld", apll, mpll, epll, vpll); armclk = clk_get_rate(&clk_armclk.clk); hclk_msys = clk_get_rate(&clk_hclk_msys.clk); hclk_dsys = clk_get_rate(&clk_hclk_dsys.clk); hclk_psys = clk_get_rate(&clk_hclk_psys.clk); pclk_msys = clk_get_rate(&clk_pclk_msys.clk); pclk_dsys = clk_get_rate(&clk_pclk_dsys.clk); pclk_psys = clk_get_rate(&clk_pclk_psys.clk); printk(KERN_INFO "S5PV210: ARMCLK=%ld, HCLKM=%ld, HCLKD=%ld\n" "HCLKP=%ld, PCLKM=%ld, PCLKD=%ld, PCLKP=%ld\n", armclk, hclk_msys, hclk_dsys, hclk_psys, pclk_msys, 
pclk_dsys, pclk_psys); clk_f.rate = armclk; clk_h.rate = hclk_psys; clk_p.rate = pclk_psys; for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++) s3c_set_clksrc(&clksrcs[ptr], true); } static struct clk *clks[] __initdata = { &clk_sclk_hdmi27m, &clk_sclk_hdmiphy, &clk_sclk_usbphy0, &clk_sclk_usbphy1, &clk_pcmcdclk0, &clk_pcmcdclk1, &clk_pcmcdclk2, }; static struct clk_lookup s5pv210_clk_lookup[] = { CLKDEV_INIT(NULL, "clk_uart_baud0", &clk_p), CLKDEV_INIT("s5pv210-uart.0", "clk_uart_baud1", &clk_sclk_uart0.clk), CLKDEV_INIT("s5pv210-uart.1", "clk_uart_baud1", &clk_sclk_uart1.clk), CLKDEV_INIT("s5pv210-uart.2", "clk_uart_baud1", &clk_sclk_uart2.clk), CLKDEV_INIT("s5pv210-uart.3", "clk_uart_baud1", &clk_sclk_uart3.clk), CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.0", &clk_hsmmc0), CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.0", &clk_hsmmc1), CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.0", &clk_hsmmc2), CLKDEV_INIT("s3c-sdhci.3", "mmc_busclk.0", &clk_hsmmc3), CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &clk_sclk_mmc0.clk), CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &clk_sclk_mmc1.clk), CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &clk_sclk_mmc2.clk), CLKDEV_INIT("s3c-sdhci.3", "mmc_busclk.2", &clk_sclk_mmc3.clk), CLKDEV_INIT(NULL, "spi_busclk0", &clk_p), CLKDEV_INIT("s5pv210-spi.0", "spi_busclk1", &clk_sclk_spi0.clk), CLKDEV_INIT("s5pv210-spi.1", "spi_busclk1", &clk_sclk_spi1.clk), CLKDEV_INIT("dma-pl330.0", "apb_pclk", &clk_pdma0), CLKDEV_INIT("dma-pl330.1", "apb_pclk", &clk_pdma1), }; void __init s5pv210_register_clocks(void) { int ptr; s3c24xx_register_clocks(clks, ARRAY_SIZE(clks)); for (ptr = 0; ptr < ARRAY_SIZE(sysclks); ptr++) s3c_register_clksrc(sysclks[ptr], 1); for (ptr = 0; ptr < ARRAY_SIZE(sclk_tv); ptr++) s3c_register_clksrc(sclk_tv[ptr], 1); for (ptr = 0; ptr < ARRAY_SIZE(clksrc_cdev); ptr++) s3c_register_clksrc(clksrc_cdev[ptr], 1); s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs)); s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks)); s3c_register_clocks(init_clocks_off, 
ARRAY_SIZE(init_clocks_off)); s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); clkdev_add_table(s5pv210_clk_lookup, ARRAY_SIZE(s5pv210_clk_lookup)); s3c24xx_register_clocks(clk_cdev, ARRAY_SIZE(clk_cdev)); for (ptr = 0; ptr < ARRAY_SIZE(clk_cdev); ptr++) s3c_disable_clocks(clk_cdev[ptr], 1); s3c_pwmclk_init(); }
gpl-2.0
microzans/android_samsung_sm-g355h_kernel
drivers/ssb/driver_chipcommon_sflash.c
2161
3400
/*
 * Sonics Silicon Backplane
 * ChipCommon serial flash interface
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include <linux/ssb/ssb.h>

#include "ssb_private.h"

/*
 * One entry per supported serial flash chip. A NULL name terminates
 * each table, so lookups can walk until e->name is NULL.
 */
struct ssb_sflash_tbl_e {
	char *name;		/* chip marketing name; NULL = end of table */
	u32 id;			/* device id as read via RES/STATUS command */
	u32 blocksize;		/* erase block size in bytes */
	u16 numblocks;		/* number of erase blocks */
};

/* ST (and compatible) serial flash chips, matched on the primary id byte. */
static const struct ssb_sflash_tbl_e ssb_sflash_st_tbl[] = {
	{ "M25P20", 0x11, 0x10000, 4, },
	{ "M25P40", 0x12, 0x10000, 8, },

	{ "M25P16", 0x14, 0x10000, 32, },
	{ "M25P32", 0x15, 0x10000, 64, },
	{ "M25P64", 0x16, 0x10000, 128, },
	{ "M25FL128", 0x17, 0x10000, 256, },
	{ NULL },
};

/* SST chips report vendor id 0xbf; these are matched on the second id byte. */
static const struct ssb_sflash_tbl_e ssb_sflash_sst_tbl[] = {
	{ "SST25WF512", 1, 0x1000, 16, },
	{ "SST25VF512", 0x48, 0x1000, 16, },
	{ "SST25WF010", 2, 0x1000, 32, },
	{ "SST25VF010", 0x49, 0x1000, 32, },
	{ "SST25WF020", 3, 0x1000, 64, },
	{ "SST25VF020", 0x43, 0x1000, 64, },
	{ "SST25WF040", 4, 0x1000, 128, },
	{ "SST25VF040", 0x44, 0x1000, 128, },
	{ "SST25VF040B", 0x8d, 0x1000, 128, },
	{ "SST25WF080", 5, 0x1000, 256, },
	{ "SST25VF080B", 0x8e, 0x1000, 256, },
	{ "SST25VF016", 0x41, 0x1000, 512, },
	{ "SST25VF032", 0x4a, 0x1000, 1024, },
	{ "SST25VF064", 0x4b, 0x1000, 2048, },
	{ NULL },
};

/* Atmel AT45DB chips, matched on the masked status register value. */
static const struct ssb_sflash_tbl_e ssb_sflash_at_tbl[] = {
	{ "AT45DB011", 0xc, 256, 512, },
	{ "AT45DB021", 0x14, 256, 1024, },
	{ "AT45DB041", 0x1c, 256, 2048, },
	{ "AT45DB081", 0x24, 256, 4096, },
	{ "AT45DB161", 0x2c, 512, 4096, },
	{ "AT45DB321", 0x34, 512, 8192, },
	{ "AT45DB642", 0x3c, 1024, 8192, },
	{ NULL },
};

/*
 * Issue a flash control command and busy-wait (bounded) for the
 * controller to clear its BUSY bit. Logs an error on timeout.
 */
static void ssb_sflash_cmd(struct ssb_chipcommon *cc, u32 opcode)
{
	int i;

	chipco_write32(cc, SSB_CHIPCO_FLASHCTL,
		       SSB_CHIPCO_FLASHCTL_START | opcode);
	for (i = 0; i < 1000; i++) {
		if (!(chipco_read32(cc, SSB_CHIPCO_FLASHCTL) &
		      SSB_CHIPCO_FLASHCTL_BUSY))
			return;
		cpu_relax();
	}
	pr_err("SFLASH control command failed (timeout)!\n");
}

/* Initialize serial flash access */
int ssb_sflash_init(struct ssb_chipcommon *cc)
{
	const struct ssb_sflash_tbl_e *e;
	u32 id, id2;

	switch (cc->capabilities & SSB_CHIPCO_CAP_FLASHT) {
	case SSB_CHIPCO_FLASHT_STSER:
		/* Wake the chip, then read the two id bytes via RES. */
		ssb_sflash_cmd(cc, SSB_CHIPCO_FLASHCTL_ST_DP);

		chipco_write32(cc, SSB_CHIPCO_FLASHADDR, 0);
		ssb_sflash_cmd(cc, SSB_CHIPCO_FLASHCTL_ST_RES);
		id = chipco_read32(cc, SSB_CHIPCO_FLASHDATA);

		chipco_write32(cc, SSB_CHIPCO_FLASHADDR, 1);
		ssb_sflash_cmd(cc, SSB_CHIPCO_FLASHCTL_ST_RES);
		id2 = chipco_read32(cc, SSB_CHIPCO_FLASHDATA);

		switch (id) {
		case 0xbf:
			/* SST vendor id: match on the second id byte. */
			for (e = ssb_sflash_sst_tbl; e->name; e++) {
				if (e->id == id2)
					break;
			}
			break;
		case 0x13:
			return -ENOTSUPP;
		default:
			for (e = ssb_sflash_st_tbl; e->name; e++) {
				if (e->id == id)
					break;
			}
			break;
		}
		if (!e->name) {
			pr_err("Unsupported ST serial flash (id: 0x%X, id2: 0x%X)\n",
			       id, id2);
			return -ENOTSUPP;
		}

		break;
	case SSB_CHIPCO_FLASHT_ATSER:
		ssb_sflash_cmd(cc, SSB_CHIPCO_FLASHCTL_AT_STATUS);
		id = chipco_read32(cc, SSB_CHIPCO_FLASHDATA) & 0x3c;

		for (e = ssb_sflash_at_tbl; e->name; e++) {
			if (e->id == id)
				break;
		}
		if (!e->name) {
			pr_err("Unsupported Atmel serial flash (id: 0x%X)\n",
			       id);
			return -ENOTSUPP;
		}

		break;
	default:
		pr_err("Unsupported flash type\n");
		return -ENOTSUPP;
	}

	pr_info("Found %s serial flash (blocksize: 0x%X, blocks: %d)\n",
		e->name, e->blocksize, e->numblocks);

	pr_err("Serial flash support is not implemented yet!\n");

	return -ENOTSUPP;
}
gpl-2.0
zenfone-legacy/android_kernel_asus_T00F
drivers/pinctrl/pinctrl-falcon.c
2161
14748
/* * linux/drivers/pinctrl/pinmux-falcon.c * based on linux/drivers/pinctrl/pinmux-pxa910.c * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * Copyright (C) 2012 Thomas Langer <thomas.langer@lantiq.com> * Copyright (C) 2012 John Crispin <blogic@openwrt.org> */ #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/err.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/of_address.h> #include <linux/of_gpio.h> #include <linux/platform_device.h> #include "pinctrl-lantiq.h" #include <lantiq_soc.h> /* Multiplexer Control Register */ #define LTQ_PADC_MUX(x) (x * 0x4) /* Pull Up Enable Register */ #define LTQ_PADC_PUEN 0x80 /* Pull Down Enable Register */ #define LTQ_PADC_PDEN 0x84 /* Slew Rate Control Register */ #define LTQ_PADC_SRC 0x88 /* Drive Current Control Register */ #define LTQ_PADC_DCC 0x8C /* Pad Control Availability Register */ #define LTQ_PADC_AVAIL 0xF0 #define pad_r32(p, reg) ltq_r32(p + reg) #define pad_w32(p, val, reg) ltq_w32(val, p + reg) #define pad_w32_mask(c, clear, set, reg) \ pad_w32(c, (pad_r32(c, reg) & ~(clear)) | (set), reg) #define pad_getbit(m, r, p) (!!(ltq_r32(m + r) & (1 << p))) #define PORTS 5 #define PINS 32 #define PORT(x) (x / PINS) #define PORT_PIN(x) (x % PINS) #define MFP_FALCON(a, f0, f1, f2, f3) \ { \ .name = #a, \ .pin = a, \ .func = { \ FALCON_MUX_##f0, \ FALCON_MUX_##f1, \ FALCON_MUX_##f2, \ FALCON_MUX_##f3, \ }, \ } #define GRP_MUX(a, m, p) \ { \ .name = a, \ .mux = FALCON_MUX_##m, \ .pins = p, \ .npins = ARRAY_SIZE(p), \ } enum falcon_mux { FALCON_MUX_GPIO = 0, FALCON_MUX_RST, FALCON_MUX_NTR, FALCON_MUX_MDIO, FALCON_MUX_LED, FALCON_MUX_SPI, FALCON_MUX_ASC, FALCON_MUX_I2C, FALCON_MUX_HOSTIF, FALCON_MUX_SLIC, FALCON_MUX_JTAG, FALCON_MUX_PCM, FALCON_MUX_MII, FALCON_MUX_PHY, 
FALCON_MUX_NONE = 0xffff, }; static struct pinctrl_pin_desc falcon_pads[PORTS * PINS]; static int pad_count[PORTS]; static void lantiq_load_pin_desc(struct pinctrl_pin_desc *d, int bank, int len) { int base = bank * PINS; int i; for (i = 0; i < len; i++) { /* strlen("ioXYZ") + 1 = 6 */ char *name = kzalloc(6, GFP_KERNEL); snprintf(name, 6, "io%d", base + i); d[i].number = base + i; d[i].name = name; } pad_count[bank] = len; } static struct ltq_mfp_pin falcon_mfp[] = { /* pin f0 f1 f2 f3 */ MFP_FALCON(GPIO0, RST, GPIO, NONE, NONE), MFP_FALCON(GPIO1, GPIO, GPIO, NONE, NONE), MFP_FALCON(GPIO2, GPIO, GPIO, NONE, NONE), MFP_FALCON(GPIO3, GPIO, GPIO, NONE, NONE), MFP_FALCON(GPIO4, NTR, GPIO, NONE, NONE), MFP_FALCON(GPIO5, NTR, GPIO, NONE, NONE), MFP_FALCON(GPIO6, RST, GPIO, NONE, NONE), MFP_FALCON(GPIO7, MDIO, GPIO, NONE, NONE), MFP_FALCON(GPIO8, MDIO, GPIO, NONE, NONE), MFP_FALCON(GPIO9, LED, GPIO, NONE, NONE), MFP_FALCON(GPIO10, LED, GPIO, NONE, NONE), MFP_FALCON(GPIO11, LED, GPIO, NONE, NONE), MFP_FALCON(GPIO12, LED, GPIO, NONE, NONE), MFP_FALCON(GPIO13, LED, GPIO, NONE, NONE), MFP_FALCON(GPIO14, LED, GPIO, NONE, NONE), MFP_FALCON(GPIO32, ASC, GPIO, NONE, NONE), MFP_FALCON(GPIO33, ASC, GPIO, NONE, NONE), MFP_FALCON(GPIO34, SPI, GPIO, NONE, NONE), MFP_FALCON(GPIO35, SPI, GPIO, NONE, NONE), MFP_FALCON(GPIO36, SPI, GPIO, NONE, NONE), MFP_FALCON(GPIO37, SPI, GPIO, NONE, NONE), MFP_FALCON(GPIO38, SPI, GPIO, NONE, NONE), MFP_FALCON(GPIO39, I2C, GPIO, NONE, NONE), MFP_FALCON(GPIO40, I2C, GPIO, NONE, NONE), MFP_FALCON(GPIO41, HOSTIF, GPIO, HOSTIF, JTAG), MFP_FALCON(GPIO42, HOSTIF, GPIO, HOSTIF, NONE), MFP_FALCON(GPIO43, SLIC, GPIO, NONE, NONE), MFP_FALCON(GPIO44, SLIC, GPIO, PCM, ASC), MFP_FALCON(GPIO45, SLIC, GPIO, PCM, ASC), MFP_FALCON(GPIO64, MII, GPIO, NONE, NONE), MFP_FALCON(GPIO65, MII, GPIO, NONE, NONE), MFP_FALCON(GPIO66, MII, GPIO, NONE, NONE), MFP_FALCON(GPIO67, MII, GPIO, NONE, NONE), MFP_FALCON(GPIO68, MII, GPIO, NONE, NONE), MFP_FALCON(GPIO69, MII, GPIO, NONE, 
NONE), MFP_FALCON(GPIO70, MII, GPIO, NONE, NONE), MFP_FALCON(GPIO71, MII, GPIO, NONE, NONE), MFP_FALCON(GPIO72, MII, GPIO, NONE, NONE), MFP_FALCON(GPIO73, MII, GPIO, NONE, NONE), MFP_FALCON(GPIO74, MII, GPIO, NONE, NONE), MFP_FALCON(GPIO75, MII, GPIO, NONE, NONE), MFP_FALCON(GPIO76, MII, GPIO, NONE, NONE), MFP_FALCON(GPIO77, MII, GPIO, NONE, NONE), MFP_FALCON(GPIO78, MII, GPIO, NONE, NONE), MFP_FALCON(GPIO79, MII, GPIO, NONE, NONE), MFP_FALCON(GPIO80, MII, GPIO, NONE, NONE), MFP_FALCON(GPIO81, MII, GPIO, NONE, NONE), MFP_FALCON(GPIO82, MII, GPIO, NONE, NONE), MFP_FALCON(GPIO83, MII, GPIO, NONE, NONE), MFP_FALCON(GPIO84, MII, GPIO, NONE, NONE), MFP_FALCON(GPIO85, MII, GPIO, NONE, NONE), MFP_FALCON(GPIO86, MII, GPIO, NONE, NONE), MFP_FALCON(GPIO87, MII, GPIO, NONE, NONE), MFP_FALCON(GPIO88, PHY, GPIO, NONE, NONE), }; static const unsigned pins_por[] = {GPIO0}; static const unsigned pins_ntr[] = {GPIO4}; static const unsigned pins_ntr8k[] = {GPIO5}; static const unsigned pins_hrst[] = {GPIO6}; static const unsigned pins_mdio[] = {GPIO7, GPIO8}; static const unsigned pins_bled[] = {GPIO9, GPIO10, GPIO11, GPIO12, GPIO13, GPIO14}; static const unsigned pins_asc0[] = {GPIO32, GPIO33}; static const unsigned pins_spi[] = {GPIO34, GPIO35, GPIO36}; static const unsigned pins_spi_cs0[] = {GPIO37}; static const unsigned pins_spi_cs1[] = {GPIO38}; static const unsigned pins_i2c[] = {GPIO39, GPIO40}; static const unsigned pins_jtag[] = {GPIO41}; static const unsigned pins_slic[] = {GPIO43, GPIO44, GPIO45}; static const unsigned pins_pcm[] = {GPIO44, GPIO45}; static const unsigned pins_asc1[] = {GPIO44, GPIO45}; static struct ltq_pin_group falcon_grps[] = { GRP_MUX("por", RST, pins_por), GRP_MUX("ntr", NTR, pins_ntr), GRP_MUX("ntr8k", NTR, pins_ntr8k), GRP_MUX("hrst", RST, pins_hrst), GRP_MUX("mdio", MDIO, pins_mdio), GRP_MUX("bootled", LED, pins_bled), GRP_MUX("asc0", ASC, pins_asc0), GRP_MUX("spi", SPI, pins_spi), GRP_MUX("spi cs0", SPI, pins_spi_cs0), GRP_MUX("spi cs1", SPI, 
pins_spi_cs1), GRP_MUX("i2c", I2C, pins_i2c), GRP_MUX("jtag", JTAG, pins_jtag), GRP_MUX("slic", SLIC, pins_slic), GRP_MUX("pcm", PCM, pins_pcm), GRP_MUX("asc1", ASC, pins_asc1), }; static const char * const ltq_rst_grps[] = {"por", "hrst"}; static const char * const ltq_ntr_grps[] = {"ntr", "ntr8k"}; static const char * const ltq_mdio_grps[] = {"mdio"}; static const char * const ltq_bled_grps[] = {"bootled"}; static const char * const ltq_asc_grps[] = {"asc0", "asc1"}; static const char * const ltq_spi_grps[] = {"spi", "spi cs0", "spi cs1"}; static const char * const ltq_i2c_grps[] = {"i2c"}; static const char * const ltq_jtag_grps[] = {"jtag"}; static const char * const ltq_slic_grps[] = {"slic"}; static const char * const ltq_pcm_grps[] = {"pcm"}; static struct ltq_pmx_func falcon_funcs[] = { {"rst", ARRAY_AND_SIZE(ltq_rst_grps)}, {"ntr", ARRAY_AND_SIZE(ltq_ntr_grps)}, {"mdio", ARRAY_AND_SIZE(ltq_mdio_grps)}, {"led", ARRAY_AND_SIZE(ltq_bled_grps)}, {"asc", ARRAY_AND_SIZE(ltq_asc_grps)}, {"spi", ARRAY_AND_SIZE(ltq_spi_grps)}, {"i2c", ARRAY_AND_SIZE(ltq_i2c_grps)}, {"jtag", ARRAY_AND_SIZE(ltq_jtag_grps)}, {"slic", ARRAY_AND_SIZE(ltq_slic_grps)}, {"pcm", ARRAY_AND_SIZE(ltq_pcm_grps)}, }; /* --------- pinconf related code --------- */ static int falcon_pinconf_group_get(struct pinctrl_dev *pctrldev, unsigned group, unsigned long *config) { return -ENOTSUPP; } static int falcon_pinconf_group_set(struct pinctrl_dev *pctrldev, unsigned group, unsigned long config) { return -ENOTSUPP; } static int falcon_pinconf_get(struct pinctrl_dev *pctrldev, unsigned pin, unsigned long *config) { struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev); enum ltq_pinconf_param param = LTQ_PINCONF_UNPACK_PARAM(*config); void __iomem *mem = info->membase[PORT(pin)]; switch (param) { case LTQ_PINCONF_PARAM_DRIVE_CURRENT: *config = LTQ_PINCONF_PACK(param, !!pad_getbit(mem, LTQ_PADC_DCC, PORT_PIN(pin))); break; case LTQ_PINCONF_PARAM_SLEW_RATE: *config = LTQ_PINCONF_PACK(param, 
!!pad_getbit(mem, LTQ_PADC_SRC, PORT_PIN(pin))); break; case LTQ_PINCONF_PARAM_PULL: if (pad_getbit(mem, LTQ_PADC_PDEN, PORT_PIN(pin))) *config = LTQ_PINCONF_PACK(param, 1); else if (pad_getbit(mem, LTQ_PADC_PUEN, PORT_PIN(pin))) *config = LTQ_PINCONF_PACK(param, 2); else *config = LTQ_PINCONF_PACK(param, 0); break; default: return -ENOTSUPP; } return 0; } static int falcon_pinconf_set(struct pinctrl_dev *pctrldev, unsigned pin, unsigned long config) { enum ltq_pinconf_param param = LTQ_PINCONF_UNPACK_PARAM(config); int arg = LTQ_PINCONF_UNPACK_ARG(config); struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev); void __iomem *mem = info->membase[PORT(pin)]; u32 reg; switch (param) { case LTQ_PINCONF_PARAM_DRIVE_CURRENT: reg = LTQ_PADC_DCC; break; case LTQ_PINCONF_PARAM_SLEW_RATE: reg = LTQ_PADC_SRC; break; case LTQ_PINCONF_PARAM_PULL: if (arg == 1) reg = LTQ_PADC_PDEN; else reg = LTQ_PADC_PUEN; break; default: pr_err("%s: Invalid config param %04x\n", pinctrl_dev_get_name(pctrldev), param); return -ENOTSUPP; } pad_w32(mem, BIT(PORT_PIN(pin)), reg); if (!(pad_r32(mem, reg) & BIT(PORT_PIN(pin)))) return -ENOTSUPP; return 0; } static void falcon_pinconf_dbg_show(struct pinctrl_dev *pctrldev, struct seq_file *s, unsigned offset) { unsigned long config; struct pin_desc *desc; struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev); int port = PORT(offset); seq_printf(s, " (port %d) mux %d -- ", port, pad_r32(info->membase[port], LTQ_PADC_MUX(PORT_PIN(offset)))); config = LTQ_PINCONF_PACK(LTQ_PINCONF_PARAM_PULL, 0); if (!falcon_pinconf_get(pctrldev, offset, &config)) seq_printf(s, "pull %d ", (int)LTQ_PINCONF_UNPACK_ARG(config)); config = LTQ_PINCONF_PACK(LTQ_PINCONF_PARAM_DRIVE_CURRENT, 0); if (!falcon_pinconf_get(pctrldev, offset, &config)) seq_printf(s, "drive-current %d ", (int)LTQ_PINCONF_UNPACK_ARG(config)); config = LTQ_PINCONF_PACK(LTQ_PINCONF_PARAM_SLEW_RATE, 0); if (!falcon_pinconf_get(pctrldev, offset, &config)) seq_printf(s, "slew-rate %d 
", (int)LTQ_PINCONF_UNPACK_ARG(config)); desc = pin_desc_get(pctrldev, offset); if (desc) { if (desc->gpio_owner) seq_printf(s, " owner: %s", desc->gpio_owner); } else { seq_printf(s, " not registered"); } } static void falcon_pinconf_group_dbg_show(struct pinctrl_dev *pctrldev, struct seq_file *s, unsigned selector) { } static const struct pinconf_ops falcon_pinconf_ops = { .pin_config_get = falcon_pinconf_get, .pin_config_set = falcon_pinconf_set, .pin_config_group_get = falcon_pinconf_group_get, .pin_config_group_set = falcon_pinconf_group_set, .pin_config_dbg_show = falcon_pinconf_dbg_show, .pin_config_group_dbg_show = falcon_pinconf_group_dbg_show, }; static struct pinctrl_desc falcon_pctrl_desc = { .owner = THIS_MODULE, .pins = falcon_pads, .confops = &falcon_pinconf_ops, }; static inline int falcon_mux_apply(struct pinctrl_dev *pctrldev, int mfp, int mux) { struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev); int port = PORT(info->mfp[mfp].pin); if ((port >= PORTS) || (!info->membase[port])) return -ENODEV; pad_w32(info->membase[port], mux, LTQ_PADC_MUX(PORT_PIN(info->mfp[mfp].pin))); return 0; } static const struct ltq_cfg_param falcon_cfg_params[] = { {"lantiq,pull", LTQ_PINCONF_PARAM_PULL}, {"lantiq,drive-current", LTQ_PINCONF_PARAM_DRIVE_CURRENT}, {"lantiq,slew-rate", LTQ_PINCONF_PARAM_SLEW_RATE}, }; static struct ltq_pinmux_info falcon_info = { .desc = &falcon_pctrl_desc, .apply_mux = falcon_mux_apply, .params = falcon_cfg_params, .num_params = ARRAY_SIZE(falcon_cfg_params), }; /* --------- register the pinctrl layer --------- */ int pinctrl_falcon_get_range_size(int id) { u32 avail; if ((id >= PORTS) || (!falcon_info.membase[id])) return -EINVAL; avail = pad_r32(falcon_info.membase[id], LTQ_PADC_AVAIL); return fls(avail); } void pinctrl_falcon_add_gpio_range(struct pinctrl_gpio_range *range) { pinctrl_add_gpio_range(falcon_info.pctrl, range); } static int pinctrl_falcon_probe(struct platform_device *pdev) { struct device_node *np; int 
pad_count = 0; int ret = 0; /* load and remap the pad resources of the different banks */ for_each_compatible_node(np, NULL, "lantiq,pad-falcon") { struct platform_device *ppdev = of_find_device_by_node(np); const __be32 *bank = of_get_property(np, "lantiq,bank", NULL); struct resource res; u32 avail; int pins; if (!of_device_is_available(np)) continue; if (!ppdev) { dev_err(&pdev->dev, "failed to find pad pdev\n"); continue; } if (!bank || *bank >= PORTS) continue; if (of_address_to_resource(np, 0, &res)) continue; falcon_info.clk[*bank] = clk_get(&ppdev->dev, NULL); if (IS_ERR(falcon_info.clk[*bank])) { dev_err(&ppdev->dev, "failed to get clock\n"); return PTR_ERR(falcon_info.clk[*bank]); } falcon_info.membase[*bank] = devm_ioremap_resource(&pdev->dev, &res); if (IS_ERR(falcon_info.membase[*bank])) return PTR_ERR(falcon_info.membase[*bank]); avail = pad_r32(falcon_info.membase[*bank], LTQ_PADC_AVAIL); pins = fls(avail); lantiq_load_pin_desc(&falcon_pads[pad_count], *bank, pins); pad_count += pins; clk_enable(falcon_info.clk[*bank]); dev_dbg(&pdev->dev, "found %s with %d pads\n", res.name, pins); } dev_dbg(&pdev->dev, "found a total of %d pads\n", pad_count); falcon_pctrl_desc.name = dev_name(&pdev->dev); falcon_pctrl_desc.npins = pad_count; falcon_info.mfp = falcon_mfp; falcon_info.num_mfp = ARRAY_SIZE(falcon_mfp); falcon_info.grps = falcon_grps; falcon_info.num_grps = ARRAY_SIZE(falcon_grps); falcon_info.funcs = falcon_funcs; falcon_info.num_funcs = ARRAY_SIZE(falcon_funcs); ret = ltq_pinctrl_register(pdev, &falcon_info); if (!ret) dev_info(&pdev->dev, "Init done\n"); return ret; } static const struct of_device_id falcon_match[] = { { .compatible = "lantiq,pinctrl-falcon" }, {}, }; MODULE_DEVICE_TABLE(of, falcon_match); static struct platform_driver pinctrl_falcon_driver = { .probe = pinctrl_falcon_probe, .driver = { .name = "pinctrl-falcon", .owner = THIS_MODULE, .of_match_table = falcon_match, }, }; int __init pinctrl_falcon_init(void) { return 
platform_driver_register(&pinctrl_falcon_driver); } core_initcall_sync(pinctrl_falcon_init);
gpl-2.0
AnonymousMediatekTeam/android_kernel_aio_otfp
drivers/usb/host/ehci-ps3.c
2161
6934
/* * PS3 EHCI Host Controller driver * * Copyright (C) 2006 Sony Computer Entertainment Inc. * Copyright 2006 Sony Corp. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <asm/firmware.h> #include <asm/ps3.h> static void ps3_ehci_setup_insnreg(struct ehci_hcd *ehci) { /* PS3 HC internal setup register offsets. */ enum ps3_ehci_hc_insnreg { ps3_ehci_hc_insnreg01 = 0x084, ps3_ehci_hc_insnreg02 = 0x088, ps3_ehci_hc_insnreg03 = 0x08c, }; /* PS3 EHCI HC errata fix 316 - The PS3 EHCI HC will reset its * internal INSNREGXX setup regs back to the chip default values * on Host Controller Reset (CMD_RESET) or Light Host Controller * Reset (CMD_LRESET). The work-around for this is for the HC * driver to re-initialise these regs when ever the HC is reset. */ /* Set burst transfer counts to 256 out, 32 in. */ writel_be(0x01000020, (void __iomem *)ehci->regs + ps3_ehci_hc_insnreg01); /* Enable burst transfer counts. 
*/ writel_be(0x00000001, (void __iomem *)ehci->regs + ps3_ehci_hc_insnreg03); } static int ps3_ehci_hc_reset(struct usb_hcd *hcd) { int result; struct ehci_hcd *ehci = hcd_to_ehci(hcd); ehci->big_endian_mmio = 1; ehci->caps = hcd->regs; result = ehci_setup(hcd); if (result) return result; ps3_ehci_setup_insnreg(ehci); return result; } static const struct hc_driver ps3_ehci_hc_driver = { .description = hcd_name, .product_desc = "PS3 EHCI Host Controller", .hcd_priv_size = sizeof(struct ehci_hcd), .irq = ehci_irq, .flags = HCD_MEMORY | HCD_USB2, .reset = ps3_ehci_hc_reset, .start = ehci_run, .stop = ehci_stop, .shutdown = ehci_shutdown, .urb_enqueue = ehci_urb_enqueue, .urb_dequeue = ehci_urb_dequeue, .endpoint_disable = ehci_endpoint_disable, .endpoint_reset = ehci_endpoint_reset, .get_frame_number = ehci_get_frame, .hub_status_data = ehci_hub_status_data, .hub_control = ehci_hub_control, #if defined(CONFIG_PM) .bus_suspend = ehci_bus_suspend, .bus_resume = ehci_bus_resume, #endif .relinquish_port = ehci_relinquish_port, .port_handed_over = ehci_port_handed_over, .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, }; static int ps3_ehci_probe(struct ps3_system_bus_device *dev) { int result; struct usb_hcd *hcd; unsigned int virq; static u64 dummy_mask = DMA_BIT_MASK(32); if (usb_disabled()) { result = -ENODEV; goto fail_start; } result = ps3_open_hv_device(dev); if (result) { dev_dbg(&dev->core, "%s:%d: ps3_open_hv_device failed\n", __func__, __LINE__); goto fail_open; } result = ps3_dma_region_create(dev->d_region); if (result) { dev_dbg(&dev->core, "%s:%d: ps3_dma_region_create failed: " "(%d)\n", __func__, __LINE__, result); BUG_ON("check region type"); goto fail_dma_region; } result = ps3_mmio_region_create(dev->m_region); if (result) { dev_dbg(&dev->core, "%s:%d: ps3_map_mmio_region failed\n", __func__, __LINE__); result = -EPERM; goto fail_mmio_region; } dev_dbg(&dev->core, "%s:%d: mmio mapped_addr %lxh\n", __func__, __LINE__, dev->m_region->lpar_addr); 
result = ps3_io_irq_setup(PS3_BINDING_CPU_ANY, dev->interrupt_id, &virq); if (result) { dev_dbg(&dev->core, "%s:%d: ps3_construct_io_irq(%d) failed.\n", __func__, __LINE__, virq); result = -EPERM; goto fail_irq; } dev->core.dma_mask = &dummy_mask; /* FIXME: for improper usb code */ hcd = usb_create_hcd(&ps3_ehci_hc_driver, &dev->core, dev_name(&dev->core)); if (!hcd) { dev_dbg(&dev->core, "%s:%d: usb_create_hcd failed\n", __func__, __LINE__); result = -ENOMEM; goto fail_create_hcd; } hcd->rsrc_start = dev->m_region->lpar_addr; hcd->rsrc_len = dev->m_region->len; if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) dev_dbg(&dev->core, "%s:%d: request_mem_region failed\n", __func__, __LINE__); hcd->regs = ioremap(dev->m_region->lpar_addr, dev->m_region->len); if (!hcd->regs) { dev_dbg(&dev->core, "%s:%d: ioremap failed\n", __func__, __LINE__); result = -EPERM; goto fail_ioremap; } dev_dbg(&dev->core, "%s:%d: hcd->rsrc_start %lxh\n", __func__, __LINE__, (unsigned long)hcd->rsrc_start); dev_dbg(&dev->core, "%s:%d: hcd->rsrc_len %lxh\n", __func__, __LINE__, (unsigned long)hcd->rsrc_len); dev_dbg(&dev->core, "%s:%d: hcd->regs %lxh\n", __func__, __LINE__, (unsigned long)hcd->regs); dev_dbg(&dev->core, "%s:%d: virq %lu\n", __func__, __LINE__, (unsigned long)virq); ps3_system_bus_set_drvdata(dev, hcd); result = usb_add_hcd(hcd, virq, 0); if (result) { dev_dbg(&dev->core, "%s:%d: usb_add_hcd failed (%d)\n", __func__, __LINE__, result); goto fail_add_hcd; } return result; fail_add_hcd: iounmap(hcd->regs); fail_ioremap: release_mem_region(hcd->rsrc_start, hcd->rsrc_len); usb_put_hcd(hcd); fail_create_hcd: ps3_io_irq_destroy(virq); fail_irq: ps3_free_mmio_region(dev->m_region); fail_mmio_region: ps3_dma_region_free(dev->d_region); fail_dma_region: ps3_close_hv_device(dev); fail_open: fail_start: return result; } static int ps3_ehci_remove(struct ps3_system_bus_device *dev) { unsigned int tmp; struct usb_hcd *hcd = ps3_system_bus_get_drvdata(dev); BUG_ON(!hcd); 
dev_dbg(&dev->core, "%s:%d: regs %p\n", __func__, __LINE__, hcd->regs); dev_dbg(&dev->core, "%s:%d: irq %u\n", __func__, __LINE__, hcd->irq); tmp = hcd->irq; usb_remove_hcd(hcd); ps3_system_bus_set_drvdata(dev, NULL); BUG_ON(!hcd->regs); iounmap(hcd->regs); release_mem_region(hcd->rsrc_start, hcd->rsrc_len); usb_put_hcd(hcd); ps3_io_irq_destroy(tmp); ps3_free_mmio_region(dev->m_region); ps3_dma_region_free(dev->d_region); ps3_close_hv_device(dev); return 0; } static int __init ps3_ehci_driver_register(struct ps3_system_bus_driver *drv) { return firmware_has_feature(FW_FEATURE_PS3_LV1) ? ps3_system_bus_driver_register(drv) : 0; } static void ps3_ehci_driver_unregister(struct ps3_system_bus_driver *drv) { if (firmware_has_feature(FW_FEATURE_PS3_LV1)) ps3_system_bus_driver_unregister(drv); } MODULE_ALIAS(PS3_MODULE_ALIAS_EHCI); static struct ps3_system_bus_driver ps3_ehci_driver = { .core.name = "ps3-ehci-driver", .core.owner = THIS_MODULE, .match_id = PS3_MATCH_ID_EHCI, .probe = ps3_ehci_probe, .remove = ps3_ehci_remove, .shutdown = ps3_ehci_remove, };
gpl-2.0
jonypx09/kernel_ba2x_2.0
net/ipv4/netfilter/ipt_MASQUERADE.c
2673
4527
/* Masquerade. Simple mapping which alters range to a local IP address (depending on route). */ /* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include <linux/inetdevice.h> #include <linux/ip.h> #include <linux/timer.h> #include <linux/module.h> #include <linux/netfilter.h> #include <net/protocol.h> #include <net/ip.h> #include <net/checksum.h> #include <net/route.h> #include <net/netfilter/nf_nat_rule.h> #include <linux/netfilter_ipv4.h> #include <linux/netfilter/x_tables.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); MODULE_DESCRIPTION("Xtables: automatic-address SNAT"); /* FIXME: Multiple targets. --RR */ static int masquerade_tg_check(const struct xt_tgchk_param *par) { const struct nf_nat_multi_range_compat *mr = par->targinfo; if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) { pr_debug("bad MAP_IPS.\n"); return -EINVAL; } if (mr->rangesize != 1) { pr_debug("bad rangesize %u\n", mr->rangesize); return -EINVAL; } return 0; } static unsigned int masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par) { struct nf_conn *ct; struct nf_conn_nat *nat; enum ip_conntrack_info ctinfo; struct nf_nat_range newrange; const struct nf_nat_multi_range_compat *mr; const struct rtable *rt; __be32 newsrc; NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING); ct = nf_ct_get(skb, &ctinfo); nat = nfct_nat(ct); NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY)); /* Source address is 0.0.0.0 - locally generated packet that is * probably not supposed to be masqueraded. 
*/ if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 0) return NF_ACCEPT; mr = par->targinfo; rt = skb_rtable(skb); newsrc = inet_select_addr(par->out, rt->rt_gateway, RT_SCOPE_UNIVERSE); if (!newsrc) { pr_info("%s ate my IP address\n", par->out->name); return NF_DROP; } nat->masq_index = par->out->ifindex; /* Transfer from original range. */ newrange = ((struct nf_nat_range) { mr->range[0].flags | IP_NAT_RANGE_MAP_IPS, newsrc, newsrc, mr->range[0].min, mr->range[0].max }); /* Hand modified range to generic setup. */ return nf_nat_setup_info(ct, &newrange, IP_NAT_MANIP_SRC); } static int device_cmp(struct nf_conn *i, void *ifindex) { const struct nf_conn_nat *nat = nfct_nat(i); if (!nat) return 0; return nat->masq_index == (int)(long)ifindex; } static int masq_device_event(struct notifier_block *this, unsigned long event, void *ptr) { const struct net_device *dev = ptr; struct net *net = dev_net(dev); if (event == NETDEV_DOWN) { /* Device was downed. Search entire table for conntracks which were associated with that device, and forget them. 
*/ NF_CT_ASSERT(dev->ifindex != 0); nf_ct_iterate_cleanup(net, device_cmp, (void *)(long)dev->ifindex); } return NOTIFY_DONE; } static int masq_inet_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev; return masq_device_event(this, event, dev); } static struct notifier_block masq_dev_notifier = { .notifier_call = masq_device_event, }; static struct notifier_block masq_inet_notifier = { .notifier_call = masq_inet_event, }; static struct xt_target masquerade_tg_reg __read_mostly = { .name = "MASQUERADE", .family = NFPROTO_IPV4, .target = masquerade_tg, .targetsize = sizeof(struct nf_nat_multi_range_compat), .table = "nat", .hooks = 1 << NF_INET_POST_ROUTING, .checkentry = masquerade_tg_check, .me = THIS_MODULE, }; static int __init masquerade_tg_init(void) { int ret; ret = xt_register_target(&masquerade_tg_reg); if (ret == 0) { /* Register for device down reports */ register_netdevice_notifier(&masq_dev_notifier); /* Register IP address change reports */ register_inetaddr_notifier(&masq_inet_notifier); } return ret; } static void __exit masquerade_tg_exit(void) { xt_unregister_target(&masquerade_tg_reg); unregister_netdevice_notifier(&masq_dev_notifier); unregister_inetaddr_notifier(&masq_inet_notifier); } module_init(masquerade_tg_init); module_exit(masquerade_tg_exit);
gpl-2.0
dianlujitao/android_kernel_huawei_msm8610
drivers/block/loop.c
2929
47366
/* * linux/drivers/block/loop.c * * Written by Theodore Ts'o, 3/29/93 * * Copyright 1993 by Theodore Ts'o. Redistribution of this file is * permitted under the GNU General Public License. * * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993 * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996 * * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994 * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996 * * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997 * * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998 * * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998 * * Loadable modules and other fixes by AK, 1998 * * Make real block number available to downstream transfer functions, enables * CBC (and relatives) mode encryption requiring unique IVs per data block. * Reed H. Petty, rhp@draper.net * * Maximum number of loop devices now dynamic via max_loop module parameter. * Russell Kroll <rkroll@exploits.org> 19990701 * * Maximum number of loop devices when compiled-in now selectable by passing * max_loop=<1-255> to the kernel on boot. * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999 * * Completely rewrite request handling to be make_request_fn style and * non blocking, pushing work to a helper thread. Lots of fixes from * Al Viro too. * Jens Axboe <axboe@suse.de>, Nov 2000 * * Support up to 256 loop devices * Heinz Mauelshagen <mge@sistina.com>, Feb 2002 * * Support for falling back on the write file operation when the address space * operations write_begin is not available on the backing filesystem. * Anton Altaparmakov, 16 Feb 2005 * * Still To Fix: * - Advisory locking is ignored here. 
* - Should use an own CAP_* category instead of CAP_SYS_ADMIN * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/stat.h> #include <linux/errno.h> #include <linux/major.h> #include <linux/wait.h> #include <linux/blkdev.h> #include <linux/blkpg.h> #include <linux/init.h> #include <linux/swap.h> #include <linux/slab.h> #include <linux/loop.h> #include <linux/compat.h> #include <linux/suspend.h> #include <linux/freezer.h> #include <linux/mutex.h> #include <linux/writeback.h> #include <linux/completion.h> #include <linux/highmem.h> #include <linux/kthread.h> #include <linux/splice.h> #include <linux/sysfs.h> #include <linux/miscdevice.h> #include <linux/falloc.h> #include <asm/uaccess.h> static DEFINE_IDR(loop_index_idr); static DEFINE_MUTEX(loop_index_mutex); static int max_part; static int part_shift; /* * Transfer functions */ static int transfer_none(struct loop_device *lo, int cmd, struct page *raw_page, unsigned raw_off, struct page *loop_page, unsigned loop_off, int size, sector_t real_block) { char *raw_buf = kmap_atomic(raw_page) + raw_off; char *loop_buf = kmap_atomic(loop_page) + loop_off; if (cmd == READ) memcpy(loop_buf, raw_buf, size); else memcpy(raw_buf, loop_buf, size); kunmap_atomic(loop_buf); kunmap_atomic(raw_buf); cond_resched(); return 0; } static int transfer_xor(struct loop_device *lo, int cmd, struct page *raw_page, unsigned raw_off, struct page *loop_page, unsigned loop_off, int size, sector_t real_block) { char *raw_buf = kmap_atomic(raw_page) + raw_off; char *loop_buf = kmap_atomic(loop_page) + loop_off; char *in, *out, *key; int i, keysize; if (cmd == READ) { in = raw_buf; out = loop_buf; } else { in = loop_buf; out = raw_buf; } key = lo->lo_encrypt_key; keysize = lo->lo_encrypt_key_size; for (i = 0; i < size; i++) *out++ = *in++ ^ key[(i & 511) % keysize]; kunmap_atomic(loop_buf); kunmap_atomic(raw_buf); cond_resched(); return 0; } static int 
xor_init(struct loop_device *lo, const struct loop_info64 *info) { if (unlikely(info->lo_encrypt_key_size <= 0)) return -EINVAL; return 0; } static struct loop_func_table none_funcs = { .number = LO_CRYPT_NONE, .transfer = transfer_none, }; static struct loop_func_table xor_funcs = { .number = LO_CRYPT_XOR, .transfer = transfer_xor, .init = xor_init }; /* xfer_funcs[0] is special - its release function is never called */ static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = { &none_funcs, &xor_funcs }; static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file) { loff_t size, loopsize; /* Compute loopsize in bytes */ size = i_size_read(file->f_mapping->host); loopsize = size - offset; /* offset is beyond i_size, wierd but possible */ if (loopsize < 0) return 0; if (sizelimit > 0 && sizelimit < loopsize) loopsize = sizelimit; /* * Unfortunately, if we want to do I/O on the device, * the number of 512-byte sectors has to fit into a sector_t. */ return loopsize >> 9; } static loff_t get_loop_size(struct loop_device *lo, struct file *file) { return get_size(lo->lo_offset, lo->lo_sizelimit, file); } static int figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit) { loff_t size = get_size(offset, sizelimit, lo->lo_backing_file); sector_t x = (sector_t)size; if (unlikely((loff_t)x != size)) return -EFBIG; if (lo->lo_offset != offset) lo->lo_offset = offset; if (lo->lo_sizelimit != sizelimit) lo->lo_sizelimit = sizelimit; set_capacity(lo->lo_disk, x); return 0; } static inline int lo_do_transfer(struct loop_device *lo, int cmd, struct page *rpage, unsigned roffs, struct page *lpage, unsigned loffs, int size, sector_t rblock) { if (unlikely(!lo->transfer)) return 0; return lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock); } /** * __do_lo_send_write - helper for writing data to a loop device * * This helper just factors out common code between do_lo_send_direct_write() * and do_lo_send_write(). 
*/ static int __do_lo_send_write(struct file *file, u8 *buf, const int len, loff_t pos) { ssize_t bw; mm_segment_t old_fs = get_fs(); set_fs(get_ds()); bw = file->f_op->write(file, buf, len, &pos); set_fs(old_fs); if (likely(bw == len)) return 0; printk(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n", (unsigned long long)pos, len); if (bw >= 0) bw = -EIO; return bw; } /** * do_lo_send_direct_write - helper for writing data to a loop device * * This is the fast, non-transforming version that does not need double * buffering. */ static int do_lo_send_direct_write(struct loop_device *lo, struct bio_vec *bvec, loff_t pos, struct page *page) { ssize_t bw = __do_lo_send_write(lo->lo_backing_file, kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len, pos); kunmap(bvec->bv_page); cond_resched(); return bw; } /** * do_lo_send_write - helper for writing data to a loop device * * This is the slow, transforming version that needs to double buffer the * data as it cannot do the transformations in place without having direct * access to the destination pages of the backing file. 
*/ static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec, loff_t pos, struct page *page) { int ret = lo_do_transfer(lo, WRITE, page, 0, bvec->bv_page, bvec->bv_offset, bvec->bv_len, pos >> 9); if (likely(!ret)) return __do_lo_send_write(lo->lo_backing_file, page_address(page), bvec->bv_len, pos); printk(KERN_ERR "loop: Transfer error at byte offset %llu, " "length %i.\n", (unsigned long long)pos, bvec->bv_len); if (ret > 0) ret = -EIO; return ret; } static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos) { int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t, struct page *page); struct bio_vec *bvec; struct page *page = NULL; int i, ret = 0; if (lo->transfer != transfer_none) { page = alloc_page(GFP_NOIO | __GFP_HIGHMEM); if (unlikely(!page)) goto fail; kmap(page); do_lo_send = do_lo_send_write; } else { do_lo_send = do_lo_send_direct_write; } bio_for_each_segment(bvec, bio, i) { ret = do_lo_send(lo, bvec, pos, page); if (ret < 0) break; pos += bvec->bv_len; } if (page) { kunmap(page); __free_page(page); } out: return ret; fail: printk(KERN_ERR "loop: Failed to allocate temporary page for write.\n"); ret = -ENOMEM; goto out; } struct lo_read_data { struct loop_device *lo; struct page *page; unsigned offset; int bsize; }; static int lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf, struct splice_desc *sd) { struct lo_read_data *p = sd->u.data; struct loop_device *lo = p->lo; struct page *page = buf->page; sector_t IV; int size; IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9)) + (buf->offset >> 9); size = sd->len; if (size > p->bsize) size = p->bsize; if (lo_do_transfer(lo, READ, page, buf->offset, p->page, p->offset, size, IV)) { printk(KERN_ERR "loop: transfer error block %ld\n", page->index); size = -EINVAL; } flush_dcache_page(p->page); if (size > 0) p->offset += size; return size; } static int lo_direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd) { return 
__splice_from_pipe(pipe, sd, lo_splice_actor); } static ssize_t do_lo_receive(struct loop_device *lo, struct bio_vec *bvec, int bsize, loff_t pos) { struct lo_read_data cookie; struct splice_desc sd; struct file *file; ssize_t retval; cookie.lo = lo; cookie.page = bvec->bv_page; cookie.offset = bvec->bv_offset; cookie.bsize = bsize; sd.len = 0; sd.total_len = bvec->bv_len; sd.flags = 0; sd.pos = pos; sd.u.data = &cookie; file = lo->lo_backing_file; retval = splice_direct_to_actor(file, &sd, lo_direct_splice_actor); return retval; } static int lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos) { struct bio_vec *bvec; ssize_t s; int i; bio_for_each_segment(bvec, bio, i) { s = do_lo_receive(lo, bvec, bsize, pos); if (s < 0) return s; if (s != bvec->bv_len) { zero_fill_bio(bio); break; } pos += bvec->bv_len; } return 0; } static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) { loff_t pos; int ret; pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; if (bio_rw(bio) == WRITE) { struct file *file = lo->lo_backing_file; if (bio->bi_rw & REQ_FLUSH) { ret = vfs_fsync(file, 0); if (unlikely(ret && ret != -EINVAL)) { ret = -EIO; goto out; } } /* * We use punch hole to reclaim the free space used by the * image a.k.a. discard. However we do not support discard if * encryption is enabled, because it may give an attacker * useful information. 
*/ if (bio->bi_rw & REQ_DISCARD) { struct file *file = lo->lo_backing_file; int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE; if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) { ret = -EOPNOTSUPP; goto out; } ret = file->f_op->fallocate(file, mode, pos, bio->bi_size); if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP)) ret = -EIO; goto out; } ret = lo_send(lo, bio, pos); if ((bio->bi_rw & REQ_FUA) && !ret) { ret = vfs_fsync(file, 0); if (unlikely(ret && ret != -EINVAL)) ret = -EIO; } } else ret = lo_receive(lo, bio, lo->lo_blocksize, pos); out: return ret; } /* * Add bio to back of pending list */ static void loop_add_bio(struct loop_device *lo, struct bio *bio) { bio_list_add(&lo->lo_bio_list, bio); } /* * Grab first pending buffer */ static struct bio *loop_get_bio(struct loop_device *lo) { return bio_list_pop(&lo->lo_bio_list); } static void loop_make_request(struct request_queue *q, struct bio *old_bio) { struct loop_device *lo = q->queuedata; int rw = bio_rw(old_bio); if (rw == READA) rw = READ; BUG_ON(!lo || (rw != READ && rw != WRITE)); spin_lock_irq(&lo->lo_lock); if (lo->lo_state != Lo_bound) goto out; if (unlikely(rw == WRITE && (lo->lo_flags & LO_FLAGS_READ_ONLY))) goto out; loop_add_bio(lo, old_bio); wake_up(&lo->lo_event); spin_unlock_irq(&lo->lo_lock); return; out: spin_unlock_irq(&lo->lo_lock); bio_io_error(old_bio); } struct switch_request { struct file *file; struct completion wait; }; static void do_loop_switch(struct loop_device *, struct switch_request *); static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio) { if (unlikely(!bio->bi_bdev)) { do_loop_switch(lo, bio->bi_private); bio_put(bio); } else { int ret = do_bio_filebacked(lo, bio); bio_endio(bio, ret); } } /* * worker thread that handles reads/writes to file backed loop devices, * to avoid blocking in our make_request_fn. 
it also does loop decrypting * on reads for block backed loop, as that is too heavy to do from * b_end_io context where irqs may be disabled. * * Loop explanation: loop_clr_fd() sets lo_state to Lo_rundown before * calling kthread_stop(). Therefore once kthread_should_stop() is * true, make_request will not place any more requests. Therefore * once kthread_should_stop() is true and lo_bio is NULL, we are * done with the loop. */ static int loop_thread(void *data) { struct loop_device *lo = data; struct bio *bio; set_user_nice(current, -20); while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) { wait_event_interruptible(lo->lo_event, !bio_list_empty(&lo->lo_bio_list) || kthread_should_stop()); if (bio_list_empty(&lo->lo_bio_list)) continue; spin_lock_irq(&lo->lo_lock); bio = loop_get_bio(lo); spin_unlock_irq(&lo->lo_lock); BUG_ON(!bio); loop_handle_bio(lo, bio); } return 0; } /* * loop_switch performs the hard work of switching a backing store. * First it needs to flush existing IO, it does this by sending a magic * BIO down the pipe. The completion of this BIO does the actual switch. 
*/ static int loop_switch(struct loop_device *lo, struct file *file) { struct switch_request w; struct bio *bio = bio_alloc(GFP_KERNEL, 0); if (!bio) return -ENOMEM; init_completion(&w.wait); w.file = file; bio->bi_private = &w; bio->bi_bdev = NULL; loop_make_request(lo->lo_queue, bio); wait_for_completion(&w.wait); return 0; } /* * Helper to flush the IOs in loop, but keeping loop thread running */ static int loop_flush(struct loop_device *lo) { /* loop not yet configured, no running thread, nothing to flush */ if (!lo->lo_thread) return 0; return loop_switch(lo, NULL); } /* * Do the actual switch; called from the BIO completion routine */ static void do_loop_switch(struct loop_device *lo, struct switch_request *p) { struct file *file = p->file; struct file *old_file = lo->lo_backing_file; struct address_space *mapping; /* if no new file, only flush of queued bios requested */ if (!file) goto out; mapping = file->f_mapping; mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask); lo->lo_backing_file = file; lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ? mapping->host->i_bdev->bd_block_size : PAGE_SIZE; lo->old_gfp_mask = mapping_gfp_mask(mapping); mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); out: complete(&p->wait); } /* * loop_change_fd switched the backing store of a loopback device to * a new file. This is useful for operating system installers to free up * the original file and in High Availability environments to switch to * an alternative location for the content in case of server meltdown. * This can only work if the loop device is used read-only, and if the * new backing store is the same size and type as the old backing store. 
*/ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, unsigned int arg) { struct file *file, *old_file; struct inode *inode; int error; error = -ENXIO; if (lo->lo_state != Lo_bound) goto out; /* the loop device has to be read-only */ error = -EINVAL; if (!(lo->lo_flags & LO_FLAGS_READ_ONLY)) goto out; error = -EBADF; file = fget(arg); if (!file) goto out; inode = file->f_mapping->host; old_file = lo->lo_backing_file; error = -EINVAL; if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) goto out_putf; /* size of the new backing store needs to be the same */ if (get_loop_size(lo, file) != get_loop_size(lo, old_file)) goto out_putf; /* and ... switch */ error = loop_switch(lo, file); if (error) goto out_putf; fput(old_file); if (lo->lo_flags & LO_FLAGS_PARTSCAN) ioctl_by_bdev(bdev, BLKRRPART, 0); return 0; out_putf: fput(file); out: return error; } static inline int is_loop_device(struct file *file) { struct inode *i = file->f_mapping->host; return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR; } /* loop sysfs attributes */ static ssize_t loop_attr_show(struct device *dev, char *page, ssize_t (*callback)(struct loop_device *, char *)) { struct gendisk *disk = dev_to_disk(dev); struct loop_device *lo = disk->private_data; return callback(lo, page); } #define LOOP_ATTR_RO(_name) \ static ssize_t loop_attr_##_name##_show(struct loop_device *, char *); \ static ssize_t loop_attr_do_show_##_name(struct device *d, \ struct device_attribute *attr, char *b) \ { \ return loop_attr_show(d, b, loop_attr_##_name##_show); \ } \ static struct device_attribute loop_attr_##_name = \ __ATTR(_name, S_IRUGO, loop_attr_do_show_##_name, NULL); static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf) { ssize_t ret; char *p = NULL; spin_lock_irq(&lo->lo_lock); if (lo->lo_backing_file) p = d_path(&lo->lo_backing_file->f_path, buf, PAGE_SIZE - 1); spin_unlock_irq(&lo->lo_lock); if (IS_ERR_OR_NULL(p)) ret = PTR_ERR(p); else { ret = 
strlen(p); memmove(buf, p, ret); buf[ret++] = '\n'; buf[ret] = 0; } return ret; } static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf) { return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset); } static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf) { return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit); } static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf) { int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR); return sprintf(buf, "%s\n", autoclear ? "1" : "0"); } static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf) { int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN); return sprintf(buf, "%s\n", partscan ? "1" : "0"); } LOOP_ATTR_RO(backing_file); LOOP_ATTR_RO(offset); LOOP_ATTR_RO(sizelimit); LOOP_ATTR_RO(autoclear); LOOP_ATTR_RO(partscan); static struct attribute *loop_attrs[] = { &loop_attr_backing_file.attr, &loop_attr_offset.attr, &loop_attr_sizelimit.attr, &loop_attr_autoclear.attr, &loop_attr_partscan.attr, NULL, }; static struct attribute_group loop_attribute_group = { .name = "loop", .attrs= loop_attrs, }; static int loop_sysfs_init(struct loop_device *lo) { return sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj, &loop_attribute_group); } static void loop_sysfs_exit(struct loop_device *lo) { sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj, &loop_attribute_group); } static void loop_config_discard(struct loop_device *lo) { struct file *file = lo->lo_backing_file; struct inode *inode = file->f_mapping->host; struct request_queue *q = lo->lo_queue; /* * We use punch hole to reclaim the free space used by the * image a.k.a. discard. However we do support discard if * encryption is enabled, because it may give an attacker * useful information. 
*/ if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) { q->limits.discard_granularity = 0; q->limits.discard_alignment = 0; q->limits.max_discard_sectors = 0; q->limits.discard_zeroes_data = 0; queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); return; } q->limits.discard_granularity = inode->i_sb->s_blocksize; q->limits.discard_alignment = 0; q->limits.max_discard_sectors = UINT_MAX >> 9; q->limits.discard_zeroes_data = 1; queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); } static int loop_set_fd(struct loop_device *lo, fmode_t mode, struct block_device *bdev, unsigned int arg) { struct file *file, *f; struct inode *inode; struct address_space *mapping; unsigned lo_blocksize; int lo_flags = 0; int error; loff_t size; /* This is safe, since we have a reference from open(). */ __module_get(THIS_MODULE); error = -EBADF; file = fget(arg); if (!file) goto out; error = -EBUSY; if (lo->lo_state != Lo_unbound) goto out_putf; /* Avoid recursion */ f = file; while (is_loop_device(f)) { struct loop_device *l; if (f->f_mapping->host->i_bdev == bdev) goto out_putf; l = f->f_mapping->host->i_bdev->bd_disk->private_data; if (l->lo_state == Lo_unbound) { error = -EINVAL; goto out_putf; } f = l->lo_backing_file; } mapping = file->f_mapping; inode = mapping->host; error = -EINVAL; if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) goto out_putf; if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) || !file->f_op->write) lo_flags |= LO_FLAGS_READ_ONLY; lo_blocksize = S_ISBLK(inode->i_mode) ? 
inode->i_bdev->bd_block_size : PAGE_SIZE; error = -EFBIG; size = get_loop_size(lo, file); if ((loff_t)(sector_t)size != size) goto out_putf; error = 0; set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0); lo->lo_blocksize = lo_blocksize; lo->lo_device = bdev; lo->lo_flags = lo_flags; lo->lo_backing_file = file; lo->transfer = transfer_none; lo->ioctl = NULL; lo->lo_sizelimit = 0; lo->old_gfp_mask = mapping_gfp_mask(mapping); mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); bio_list_init(&lo->lo_bio_list); /* * set queue make_request_fn, and add limits based on lower level * device */ blk_queue_make_request(lo->lo_queue, loop_make_request); lo->lo_queue->queuedata = lo; if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) blk_queue_flush(lo->lo_queue, REQ_FLUSH); set_capacity(lo->lo_disk, size); bd_set_size(bdev, size << 9); loop_sysfs_init(lo); /* let user-space know about the new size */ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); set_blocksize(bdev, lo_blocksize); lo->lo_thread = kthread_create(loop_thread, lo, "loop%d", lo->lo_number); if (IS_ERR(lo->lo_thread)) { error = PTR_ERR(lo->lo_thread); goto out_clr; } lo->lo_state = Lo_bound; wake_up_process(lo->lo_thread); if (part_shift) lo->lo_flags |= LO_FLAGS_PARTSCAN; if (lo->lo_flags & LO_FLAGS_PARTSCAN) ioctl_by_bdev(bdev, BLKRRPART, 0); return 0; out_clr: loop_sysfs_exit(lo); lo->lo_thread = NULL; lo->lo_device = NULL; lo->lo_backing_file = NULL; lo->lo_flags = 0; set_capacity(lo->lo_disk, 0); invalidate_bdev(bdev); bd_set_size(bdev, 0); kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); mapping_set_gfp_mask(mapping, lo->old_gfp_mask); lo->lo_state = Lo_unbound; out_putf: fput(file); out: /* This is safe: open() is still holding a reference. 
*/ module_put(THIS_MODULE); return error; } static int loop_release_xfer(struct loop_device *lo) { int err = 0; struct loop_func_table *xfer = lo->lo_encryption; if (xfer) { if (xfer->release) err = xfer->release(lo); lo->transfer = NULL; lo->lo_encryption = NULL; module_put(xfer->owner); } return err; } static int loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer, const struct loop_info64 *i) { int err = 0; if (xfer) { struct module *owner = xfer->owner; if (!try_module_get(owner)) return -EINVAL; if (xfer->init) err = xfer->init(lo, i); if (err) module_put(owner); else lo->lo_encryption = xfer; } return err; } static int loop_clr_fd(struct loop_device *lo) { struct file *filp = lo->lo_backing_file; gfp_t gfp = lo->old_gfp_mask; struct block_device *bdev = lo->lo_device; if (lo->lo_state != Lo_bound) return -ENXIO; if (lo->lo_refcnt > 1) /* we needed one fd for the ioctl */ return -EBUSY; if (filp == NULL) return -EINVAL; spin_lock_irq(&lo->lo_lock); lo->lo_state = Lo_rundown; spin_unlock_irq(&lo->lo_lock); kthread_stop(lo->lo_thread); spin_lock_irq(&lo->lo_lock); lo->lo_backing_file = NULL; spin_unlock_irq(&lo->lo_lock); loop_release_xfer(lo); lo->transfer = NULL; lo->ioctl = NULL; lo->lo_device = NULL; lo->lo_encryption = NULL; lo->lo_offset = 0; lo->lo_sizelimit = 0; lo->lo_encrypt_key_size = 0; lo->lo_thread = NULL; memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); memset(lo->lo_file_name, 0, LO_NAME_SIZE); if (bdev) invalidate_bdev(bdev); set_capacity(lo->lo_disk, 0); loop_sysfs_exit(lo); if (bdev) { bd_set_size(bdev, 0); /* let user-space know about this change */ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); } mapping_set_gfp_mask(filp->f_mapping, gfp); lo->lo_state = Lo_unbound; /* This is safe: open() is still holding a reference. 
*/ module_put(THIS_MODULE); if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev) ioctl_by_bdev(bdev, BLKRRPART, 0); lo->lo_flags = 0; if (!part_shift) lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN; mutex_unlock(&lo->lo_ctl_mutex); /* * Need not hold lo_ctl_mutex to fput backing file. * Calling fput holding lo_ctl_mutex triggers a circular * lock dependency possibility warning as fput can take * bd_mutex which is usually taken before lo_ctl_mutex. */ fput(filp); return 0; } static int loop_set_status(struct loop_device *lo, const struct loop_info64 *info) { int err; struct loop_func_table *xfer; uid_t uid = current_uid(); if (lo->lo_encrypt_key_size && lo->lo_key_owner != uid && !capable(CAP_SYS_ADMIN)) return -EPERM; if (lo->lo_state != Lo_bound) return -ENXIO; if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) return -EINVAL; err = loop_release_xfer(lo); if (err) return err; if (info->lo_encrypt_type) { unsigned int type = info->lo_encrypt_type; if (type >= MAX_LO_CRYPT) return -EINVAL; xfer = xfer_funcs[type]; if (xfer == NULL) return -EINVAL; } else xfer = NULL; err = loop_init_xfer(lo, xfer, info); if (err) return err; if (lo->lo_offset != info->lo_offset || lo->lo_sizelimit != info->lo_sizelimit) { if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) return -EFBIG; } loop_config_discard(lo); memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE); lo->lo_file_name[LO_NAME_SIZE-1] = 0; lo->lo_crypt_name[LO_NAME_SIZE-1] = 0; if (!xfer) xfer = &none_funcs; lo->transfer = xfer->transfer; lo->ioctl = xfer->ioctl; if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) != (info->lo_flags & LO_FLAGS_AUTOCLEAR)) lo->lo_flags ^= LO_FLAGS_AUTOCLEAR; if ((info->lo_flags & LO_FLAGS_PARTSCAN) && !(lo->lo_flags & LO_FLAGS_PARTSCAN)) { lo->lo_flags |= LO_FLAGS_PARTSCAN; lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN; ioctl_by_bdev(lo->lo_device, BLKRRPART, 0); } lo->lo_encrypt_key_size = info->lo_encrypt_key_size; 
lo->lo_init[0] = info->lo_init[0]; lo->lo_init[1] = info->lo_init[1]; if (info->lo_encrypt_key_size) { memcpy(lo->lo_encrypt_key, info->lo_encrypt_key, info->lo_encrypt_key_size); lo->lo_key_owner = uid; } return 0; } static int loop_get_status(struct loop_device *lo, struct loop_info64 *info) { struct file *file = lo->lo_backing_file; struct kstat stat; int error; if (lo->lo_state != Lo_bound) return -ENXIO; error = vfs_getattr(file->f_path.mnt, file->f_path.dentry, &stat); if (error) return error; memset(info, 0, sizeof(*info)); info->lo_number = lo->lo_number; info->lo_device = huge_encode_dev(stat.dev); info->lo_inode = stat.ino; info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev); info->lo_offset = lo->lo_offset; info->lo_sizelimit = lo->lo_sizelimit; info->lo_flags = lo->lo_flags; memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE); memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE); info->lo_encrypt_type = lo->lo_encryption ? lo->lo_encryption->number : 0; if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) { info->lo_encrypt_key_size = lo->lo_encrypt_key_size; memcpy(info->lo_encrypt_key, lo->lo_encrypt_key, lo->lo_encrypt_key_size); } return 0; } static void loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64) { memset(info64, 0, sizeof(*info64)); info64->lo_number = info->lo_number; info64->lo_device = info->lo_device; info64->lo_inode = info->lo_inode; info64->lo_rdevice = info->lo_rdevice; info64->lo_offset = info->lo_offset; info64->lo_sizelimit = 0; info64->lo_encrypt_type = info->lo_encrypt_type; info64->lo_encrypt_key_size = info->lo_encrypt_key_size; info64->lo_flags = info->lo_flags; info64->lo_init[0] = info->lo_init[0]; info64->lo_init[1] = info->lo_init[1]; if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI) memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE); else memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE); memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, 
LO_KEY_SIZE); } static int loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info) { memset(info, 0, sizeof(*info)); info->lo_number = info64->lo_number; info->lo_device = info64->lo_device; info->lo_inode = info64->lo_inode; info->lo_rdevice = info64->lo_rdevice; info->lo_offset = info64->lo_offset; info->lo_encrypt_type = info64->lo_encrypt_type; info->lo_encrypt_key_size = info64->lo_encrypt_key_size; info->lo_flags = info64->lo_flags; info->lo_init[0] = info64->lo_init[0]; info->lo_init[1] = info64->lo_init[1]; if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI) memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE); else memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE); memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE); /* error in case values were truncated */ if (info->lo_device != info64->lo_device || info->lo_rdevice != info64->lo_rdevice || info->lo_inode != info64->lo_inode || info->lo_offset != info64->lo_offset) return -EOVERFLOW; return 0; } static int loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg) { struct loop_info info; struct loop_info64 info64; if (copy_from_user(&info, arg, sizeof (struct loop_info))) return -EFAULT; loop_info64_from_old(&info, &info64); return loop_set_status(lo, &info64); } static int loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg) { struct loop_info64 info64; if (copy_from_user(&info64, arg, sizeof (struct loop_info64))) return -EFAULT; return loop_set_status(lo, &info64); } static int loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) { struct loop_info info; struct loop_info64 info64; int err = 0; if (!arg) err = -EINVAL; if (!err) err = loop_get_status(lo, &info64); if (!err) err = loop_info64_to_old(&info64, &info); if (!err && copy_to_user(arg, &info, sizeof(info))) err = -EFAULT; return err; } static int loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) { struct 
loop_info64 info64; int err = 0; if (!arg) err = -EINVAL; if (!err) err = loop_get_status(lo, &info64); if (!err && copy_to_user(arg, &info64, sizeof(info64))) err = -EFAULT; return err; } static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev) { int err; sector_t sec; loff_t sz; err = -ENXIO; if (unlikely(lo->lo_state != Lo_bound)) goto out; err = figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit); if (unlikely(err)) goto out; sec = get_capacity(lo->lo_disk); /* the width of sector_t may be narrow for bit-shift */ sz = sec; sz <<= 9; mutex_lock(&bdev->bd_mutex); bd_set_size(bdev, sz); /* let user-space know about the new size */ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); mutex_unlock(&bdev->bd_mutex); out: return err; } static int lo_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct loop_device *lo = bdev->bd_disk->private_data; int err; mutex_lock_nested(&lo->lo_ctl_mutex, 1); switch (cmd) { case LOOP_SET_FD: err = loop_set_fd(lo, mode, bdev, arg); break; case LOOP_CHANGE_FD: err = loop_change_fd(lo, bdev, arg); break; case LOOP_CLR_FD: /* loop_clr_fd would have unlocked lo_ctl_mutex on success */ err = loop_clr_fd(lo); if (!err) goto out_unlocked; break; case LOOP_SET_STATUS: err = -EPERM; if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) err = loop_set_status_old(lo, (struct loop_info __user *)arg); break; case LOOP_GET_STATUS: err = loop_get_status_old(lo, (struct loop_info __user *) arg); break; case LOOP_SET_STATUS64: err = -EPERM; if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) err = loop_set_status64(lo, (struct loop_info64 __user *) arg); break; case LOOP_GET_STATUS64: err = loop_get_status64(lo, (struct loop_info64 __user *) arg); break; case LOOP_SET_CAPACITY: err = -EPERM; if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) err = loop_set_capacity(lo, bdev); break; default: err = lo->ioctl ? 
lo->ioctl(lo, cmd, arg) : -EINVAL; } mutex_unlock(&lo->lo_ctl_mutex); out_unlocked: return err; } #ifdef CONFIG_COMPAT struct compat_loop_info { compat_int_t lo_number; /* ioctl r/o */ compat_dev_t lo_device; /* ioctl r/o */ compat_ulong_t lo_inode; /* ioctl r/o */ compat_dev_t lo_rdevice; /* ioctl r/o */ compat_int_t lo_offset; compat_int_t lo_encrypt_type; compat_int_t lo_encrypt_key_size; /* ioctl w/o */ compat_int_t lo_flags; /* ioctl r/o */ char lo_name[LO_NAME_SIZE]; unsigned char lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */ compat_ulong_t lo_init[2]; char reserved[4]; }; /* * Transfer 32-bit compatibility structure in userspace to 64-bit loop info * - noinlined to reduce stack space usage in main part of driver */ static noinline int loop_info64_from_compat(const struct compat_loop_info __user *arg, struct loop_info64 *info64) { struct compat_loop_info info; if (copy_from_user(&info, arg, sizeof(info))) return -EFAULT; memset(info64, 0, sizeof(*info64)); info64->lo_number = info.lo_number; info64->lo_device = info.lo_device; info64->lo_inode = info.lo_inode; info64->lo_rdevice = info.lo_rdevice; info64->lo_offset = info.lo_offset; info64->lo_sizelimit = 0; info64->lo_encrypt_type = info.lo_encrypt_type; info64->lo_encrypt_key_size = info.lo_encrypt_key_size; info64->lo_flags = info.lo_flags; info64->lo_init[0] = info.lo_init[0]; info64->lo_init[1] = info.lo_init[1]; if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI) memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE); else memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE); memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE); return 0; } /* * Transfer 64-bit loop info to 32-bit compatibility structure in userspace * - noinlined to reduce stack space usage in main part of driver */ static noinline int loop_info64_to_compat(const struct loop_info64 *info64, struct compat_loop_info __user *arg) { struct compat_loop_info info; memset(&info, 0, sizeof(info)); info.lo_number = 
info64->lo_number; info.lo_device = info64->lo_device; info.lo_inode = info64->lo_inode; info.lo_rdevice = info64->lo_rdevice; info.lo_offset = info64->lo_offset; info.lo_encrypt_type = info64->lo_encrypt_type; info.lo_encrypt_key_size = info64->lo_encrypt_key_size; info.lo_flags = info64->lo_flags; info.lo_init[0] = info64->lo_init[0]; info.lo_init[1] = info64->lo_init[1]; if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI) memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE); else memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE); memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE); /* error in case values were truncated */ if (info.lo_device != info64->lo_device || info.lo_rdevice != info64->lo_rdevice || info.lo_inode != info64->lo_inode || info.lo_offset != info64->lo_offset || info.lo_init[0] != info64->lo_init[0] || info.lo_init[1] != info64->lo_init[1]) return -EOVERFLOW; if (copy_to_user(arg, &info, sizeof(info))) return -EFAULT; return 0; } static int loop_set_status_compat(struct loop_device *lo, const struct compat_loop_info __user *arg) { struct loop_info64 info64; int ret; ret = loop_info64_from_compat(arg, &info64); if (ret < 0) return ret; return loop_set_status(lo, &info64); } static int loop_get_status_compat(struct loop_device *lo, struct compat_loop_info __user *arg) { struct loop_info64 info64; int err = 0; if (!arg) err = -EINVAL; if (!err) err = loop_get_status(lo, &info64); if (!err) err = loop_info64_to_compat(&info64, arg); return err; } static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct loop_device *lo = bdev->bd_disk->private_data; int err; switch(cmd) { case LOOP_SET_STATUS: mutex_lock(&lo->lo_ctl_mutex); err = loop_set_status_compat( lo, (const struct compat_loop_info __user *) arg); mutex_unlock(&lo->lo_ctl_mutex); break; case LOOP_GET_STATUS: mutex_lock(&lo->lo_ctl_mutex); err = loop_get_status_compat( lo, (struct compat_loop_info __user *) arg); 
mutex_unlock(&lo->lo_ctl_mutex); break; case LOOP_SET_CAPACITY: case LOOP_CLR_FD: case LOOP_GET_STATUS64: case LOOP_SET_STATUS64: arg = (unsigned long) compat_ptr(arg); case LOOP_SET_FD: case LOOP_CHANGE_FD: err = lo_ioctl(bdev, mode, cmd, arg); break; default: err = -ENOIOCTLCMD; break; } return err; } #endif static int lo_open(struct block_device *bdev, fmode_t mode) { struct loop_device *lo; int err = 0; mutex_lock(&loop_index_mutex); lo = bdev->bd_disk->private_data; if (!lo) { err = -ENXIO; goto out; } mutex_lock(&lo->lo_ctl_mutex); lo->lo_refcnt++; mutex_unlock(&lo->lo_ctl_mutex); out: mutex_unlock(&loop_index_mutex); return err; } static int lo_release(struct gendisk *disk, fmode_t mode) { struct loop_device *lo = disk->private_data; int err; mutex_lock(&lo->lo_ctl_mutex); if (--lo->lo_refcnt) goto out; if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) { /* * In autoclear mode, stop the loop thread * and remove configuration after last close. */ err = loop_clr_fd(lo); if (!err) goto out_unlocked; } else { /* * Otherwise keep thread (if running) and config, * but flush possible ongoing bios in thread. */ loop_flush(lo); } out: mutex_unlock(&lo->lo_ctl_mutex); out_unlocked: return 0; } static const struct block_device_operations lo_fops = { .owner = THIS_MODULE, .open = lo_open, .release = lo_release, .ioctl = lo_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = lo_compat_ioctl, #endif }; /* * And now the modules code and kernel interface. 
*/ static int max_loop; module_param(max_loop, int, S_IRUGO); MODULE_PARM_DESC(max_loop, "Maximum number of loop devices"); module_param(max_part, int, S_IRUGO); MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device"); MODULE_LICENSE("GPL"); MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR); int loop_register_transfer(struct loop_func_table *funcs) { unsigned int n = funcs->number; if (n >= MAX_LO_CRYPT || xfer_funcs[n]) return -EINVAL; xfer_funcs[n] = funcs; return 0; } static int unregister_transfer_cb(int id, void *ptr, void *data) { struct loop_device *lo = ptr; struct loop_func_table *xfer = data; mutex_lock(&lo->lo_ctl_mutex); if (lo->lo_encryption == xfer) loop_release_xfer(lo); mutex_unlock(&lo->lo_ctl_mutex); return 0; } int loop_unregister_transfer(int number) { unsigned int n = number; struct loop_func_table *xfer; if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL) return -EINVAL; xfer_funcs[n] = NULL; idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer); return 0; } EXPORT_SYMBOL(loop_register_transfer); EXPORT_SYMBOL(loop_unregister_transfer); static int loop_add(struct loop_device **l, int i) { struct loop_device *lo; struct gendisk *disk; int err; lo = kzalloc(sizeof(*lo), GFP_KERNEL); if (!lo) { err = -ENOMEM; goto out; } err = idr_pre_get(&loop_index_idr, GFP_KERNEL); if (err < 0) goto out_free_dev; if (i >= 0) { int m; /* create specific i in the index */ err = idr_get_new_above(&loop_index_idr, lo, i, &m); if (err >= 0 && i != m) { idr_remove(&loop_index_idr, m); err = -EEXIST; } } else if (i == -1) { int m; /* get next free nr */ err = idr_get_new(&loop_index_idr, lo, &m); if (err >= 0) i = m; } else { err = -EINVAL; } if (err < 0) goto out_free_dev; lo->lo_queue = blk_alloc_queue(GFP_KERNEL); if (!lo->lo_queue) goto out_free_dev; disk = lo->lo_disk = alloc_disk(1 << part_shift); if (!disk) goto out_free_queue; /* * Disable partition scanning by default. 
The in-kernel partition * scanning can be requested individually per-device during its * setup. Userspace can always add and remove partitions from all * devices. The needed partition minors are allocated from the * extended minor space, the main loop device numbers will continue * to match the loop minors, regardless of the number of partitions * used. * * If max_part is given, partition scanning is globally enabled for * all loop devices. The minors for the main loop devices will be * multiples of max_part. * * Note: Global-for-all-devices, set-only-at-init, read-only module * parameteters like 'max_loop' and 'max_part' make things needlessly * complicated, are too static, inflexible and may surprise * userspace tools. Parameters like this in general should be avoided. */ if (!part_shift) disk->flags |= GENHD_FL_NO_PART_SCAN; disk->flags |= GENHD_FL_EXT_DEVT; mutex_init(&lo->lo_ctl_mutex); lo->lo_number = i; lo->lo_thread = NULL; init_waitqueue_head(&lo->lo_event); spin_lock_init(&lo->lo_lock); disk->major = LOOP_MAJOR; disk->first_minor = i << part_shift; disk->fops = &lo_fops; disk->private_data = lo; disk->queue = lo->lo_queue; sprintf(disk->disk_name, "loop%d", i); add_disk(disk); *l = lo; return lo->lo_number; out_free_queue: blk_cleanup_queue(lo->lo_queue); out_free_dev: kfree(lo); out: return err; } static void loop_remove(struct loop_device *lo) { del_gendisk(lo->lo_disk); blk_cleanup_queue(lo->lo_queue); put_disk(lo->lo_disk); kfree(lo); } static int find_free_cb(int id, void *ptr, void *data) { struct loop_device *lo = ptr; struct loop_device **l = data; if (lo->lo_state == Lo_unbound) { *l = lo; return 1; } return 0; } static int loop_lookup(struct loop_device **l, int i) { struct loop_device *lo; int ret = -ENODEV; if (i < 0) { int err; err = idr_for_each(&loop_index_idr, &find_free_cb, &lo); if (err == 1) { *l = lo; ret = lo->lo_number; } goto out; } /* lookup and return a specific i */ lo = idr_find(&loop_index_idr, i); if (lo) { *l = lo; ret = 
lo->lo_number; } out: return ret; } static struct kobject *loop_probe(dev_t dev, int *part, void *data) { struct loop_device *lo; struct kobject *kobj; int err; mutex_lock(&loop_index_mutex); err = loop_lookup(&lo, MINOR(dev) >> part_shift); if (err < 0) err = loop_add(&lo, MINOR(dev) >> part_shift); if (err < 0) kobj = ERR_PTR(err); else kobj = get_disk(lo->lo_disk); mutex_unlock(&loop_index_mutex); *part = 0; return kobj; } static long loop_control_ioctl(struct file *file, unsigned int cmd, unsigned long parm) { struct loop_device *lo; int ret = -ENOSYS; mutex_lock(&loop_index_mutex); switch (cmd) { case LOOP_CTL_ADD: ret = loop_lookup(&lo, parm); if (ret >= 0) { ret = -EEXIST; break; } ret = loop_add(&lo, parm); break; case LOOP_CTL_REMOVE: ret = loop_lookup(&lo, parm); if (ret < 0) break; mutex_lock(&lo->lo_ctl_mutex); if (lo->lo_state != Lo_unbound) { ret = -EBUSY; mutex_unlock(&lo->lo_ctl_mutex); break; } if (lo->lo_refcnt > 0) { ret = -EBUSY; mutex_unlock(&lo->lo_ctl_mutex); break; } lo->lo_disk->private_data = NULL; mutex_unlock(&lo->lo_ctl_mutex); idr_remove(&loop_index_idr, lo->lo_number); loop_remove(lo); break; case LOOP_CTL_GET_FREE: ret = loop_lookup(&lo, -1); if (ret >= 0) break; ret = loop_add(&lo, -1); } mutex_unlock(&loop_index_mutex); return ret; } static const struct file_operations loop_ctl_fops = { .open = nonseekable_open, .unlocked_ioctl = loop_control_ioctl, .compat_ioctl = loop_control_ioctl, .owner = THIS_MODULE, .llseek = noop_llseek, }; static struct miscdevice loop_misc = { .minor = LOOP_CTRL_MINOR, .name = "loop-control", .fops = &loop_ctl_fops, }; MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR); MODULE_ALIAS("devname:loop-control"); static int __init loop_init(void) { int i, nr; unsigned long range; struct loop_device *lo; int err; err = misc_register(&loop_misc); if (err < 0) return err; part_shift = 0; if (max_part > 0) { part_shift = fls(max_part); /* * Adjust max_part according to part_shift as it is exported * to user space so that user 
can decide correct minor number * if [s]he want to create more devices. * * Note that -1 is required because partition 0 is reserved * for the whole disk. */ max_part = (1UL << part_shift) - 1; } if ((1UL << part_shift) > DISK_MAX_PARTS) return -EINVAL; if (max_loop > 1UL << (MINORBITS - part_shift)) return -EINVAL; /* * If max_loop is specified, create that many devices upfront. * This also becomes a hard limit. If max_loop is not specified, * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module * init time. Loop devices can be requested on-demand with the * /dev/loop-control interface, or be instantiated by accessing * a 'dead' device node. */ if (max_loop) { nr = max_loop; range = max_loop << part_shift; } else { nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT; range = 1UL << MINORBITS; } if (register_blkdev(LOOP_MAJOR, "loop")) return -EIO; blk_register_region(MKDEV(LOOP_MAJOR, 0), range, THIS_MODULE, loop_probe, NULL, NULL); /* pre-create number of devices given by config or max_loop */ mutex_lock(&loop_index_mutex); for (i = 0; i < nr; i++) loop_add(&lo, i); mutex_unlock(&loop_index_mutex); printk(KERN_INFO "loop: module loaded\n"); return 0; } static int loop_exit_cb(int id, void *ptr, void *data) { struct loop_device *lo = ptr; loop_remove(lo); return 0; } static void __exit loop_exit(void) { unsigned long range; range = max_loop ? max_loop << part_shift : 1UL << MINORBITS; idr_for_each(&loop_index_idr, &loop_exit_cb, NULL); idr_remove_all(&loop_index_idr); idr_destroy(&loop_index_idr); blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range); unregister_blkdev(LOOP_MAJOR, "loop"); misc_deregister(&loop_misc); } module_init(loop_init); module_exit(loop_exit); #ifndef MODULE static int __init max_loop_setup(char *str) { max_loop = simple_strtol(str, NULL, 0); return 1; } __setup("max_loop=", max_loop_setup); #endif
gpl-2.0
omnirom/android_kernel_asus_tegra3
arch/arm/mach-lpc32xx/timer.c
2929
5084
/* * arch/arm/mach-lpc32xx/timer.c * * Author: Kevin Wells <kevin.wells@nxp.com> * * Copyright (C) 2009 - 2010 NXP Semiconductors * Copyright (C) 2009 Fontys University of Applied Sciences, Eindhoven * Ed Schouten <e.schouten@fontys.nl> * Laurens Timmermans <l.timmermans@fontys.nl> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/time.h> #include <linux/err.h> #include <linux/clockchips.h> #include <asm/mach/time.h> #include <mach/hardware.h> #include <mach/platform.h> #include "common.h" static int lpc32xx_clkevt_next_event(unsigned long delta, struct clock_event_device *dev) { __raw_writel(LCP32XX_TIMER_CNTR_TCR_RESET, LCP32XX_TIMER_TCR(LPC32XX_TIMER0_BASE)); __raw_writel(delta, LCP32XX_TIMER_PR(LPC32XX_TIMER0_BASE)); __raw_writel(LCP32XX_TIMER_CNTR_TCR_EN, LCP32XX_TIMER_TCR(LPC32XX_TIMER0_BASE)); return 0; } static void lpc32xx_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev) { switch (mode) { case CLOCK_EVT_MODE_PERIODIC: WARN_ON(1); break; case CLOCK_EVT_MODE_ONESHOT: case CLOCK_EVT_MODE_SHUTDOWN: /* * Disable the timer. When using oneshot, we must also * disable the timer to wait for the first call to * set_next_event(). 
*/ __raw_writel(0, LCP32XX_TIMER_TCR(LPC32XX_TIMER0_BASE)); break; case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_RESUME: break; } } static struct clock_event_device lpc32xx_clkevt = { .name = "lpc32xx_clkevt", .features = CLOCK_EVT_FEAT_ONESHOT, .shift = 32, .rating = 300, .set_next_event = lpc32xx_clkevt_next_event, .set_mode = lpc32xx_clkevt_mode, }; static irqreturn_t lpc32xx_timer_interrupt(int irq, void *dev_id) { struct clock_event_device *evt = &lpc32xx_clkevt; /* Clear match */ __raw_writel(LCP32XX_TIMER_CNTR_MTCH_BIT(0), LCP32XX_TIMER_IR(LPC32XX_TIMER0_BASE)); evt->event_handler(evt); return IRQ_HANDLED; } static struct irqaction lpc32xx_timer_irq = { .name = "LPC32XX Timer Tick", .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .handler = lpc32xx_timer_interrupt, }; /* * The clock management driver isn't initialized at this point, so the * clocks need to be enabled here manually and then tagged as used in * the clock driver initialization */ static void __init lpc32xx_timer_init(void) { u32 clkrate, pllreg; /* Enable timer clock */ __raw_writel(LPC32XX_CLKPWR_TMRPWMCLK_TIMER0_EN | LPC32XX_CLKPWR_TMRPWMCLK_TIMER1_EN, LPC32XX_CLKPWR_TIMERS_PWMS_CLK_CTRL_1); /* * The clock driver isn't initialized at this point. So determine if * the SYSCLK is driven from the PLL397 or main oscillator and then use * it to compute the PLL frequency and the PCLK divider to get the base * timer rates. This rate is needed to compute the tick rate. 
*/ if (clk_is_sysclk_mainosc() != 0) clkrate = LPC32XX_MAIN_OSC_FREQ; else clkrate = 397 * LPC32XX_CLOCK_OSC_FREQ; /* Get ARM HCLKPLL register and convert it into a frequency */ pllreg = __raw_readl(LPC32XX_CLKPWR_HCLKPLL_CTRL) & 0x1FFFF; clkrate = clk_get_pllrate_from_reg(clkrate, pllreg); /* Get PCLK divider and divide ARM PLL clock by it to get timer rate */ clkrate = clkrate / clk_get_pclk_div(); /* Initial timer setup */ __raw_writel(0, LCP32XX_TIMER_TCR(LPC32XX_TIMER0_BASE)); __raw_writel(LCP32XX_TIMER_CNTR_MTCH_BIT(0), LCP32XX_TIMER_IR(LPC32XX_TIMER0_BASE)); __raw_writel(1, LCP32XX_TIMER_MR0(LPC32XX_TIMER0_BASE)); __raw_writel(LCP32XX_TIMER_CNTR_MCR_MTCH(0) | LCP32XX_TIMER_CNTR_MCR_STOP(0) | LCP32XX_TIMER_CNTR_MCR_RESET(0), LCP32XX_TIMER_MCR(LPC32XX_TIMER0_BASE)); /* Setup tick interrupt */ setup_irq(IRQ_LPC32XX_TIMER0, &lpc32xx_timer_irq); /* Setup the clockevent structure. */ lpc32xx_clkevt.mult = div_sc(clkrate, NSEC_PER_SEC, lpc32xx_clkevt.shift); lpc32xx_clkevt.max_delta_ns = clockevent_delta2ns(-1, &lpc32xx_clkevt); lpc32xx_clkevt.min_delta_ns = clockevent_delta2ns(1, &lpc32xx_clkevt) + 1; lpc32xx_clkevt.cpumask = cpumask_of(0); clockevents_register_device(&lpc32xx_clkevt); /* Use timer1 as clock source. */ __raw_writel(LCP32XX_TIMER_CNTR_TCR_RESET, LCP32XX_TIMER_TCR(LPC32XX_TIMER1_BASE)); __raw_writel(0, LCP32XX_TIMER_PR(LPC32XX_TIMER1_BASE)); __raw_writel(0, LCP32XX_TIMER_MCR(LPC32XX_TIMER1_BASE)); __raw_writel(LCP32XX_TIMER_CNTR_TCR_EN, LCP32XX_TIMER_TCR(LPC32XX_TIMER1_BASE)); clocksource_mmio_init(LCP32XX_TIMER_TC(LPC32XX_TIMER1_BASE), "lpc32xx_clksrc", clkrate, 300, 32, clocksource_mmio_readl_up); } struct sys_timer lpc32xx_timer = { .init = &lpc32xx_timer_init, };
gpl-2.0
wangshaowei/ok6410-linux
fs/udf/truncate.c
2929
8193
/* * truncate.c * * PURPOSE * Truncate handling routines for the OSTA-UDF(tm) filesystem. * * COPYRIGHT * This file is distributed under the terms of the GNU General Public * License (GPL). Copies of the GPL can be obtained from: * ftp://prep.ai.mit.edu/pub/gnu/GPL * Each contributing author retains all rights to their own work. * * (C) 1999-2004 Ben Fennema * (C) 1999 Stelias Computing Inc * * HISTORY * * 02/24/99 blf Created. * */ #include "udfdecl.h" #include <linux/fs.h> #include <linux/mm.h> #include <linux/buffer_head.h> #include "udf_i.h" #include "udf_sb.h" static void extent_trunc(struct inode *inode, struct extent_position *epos, struct kernel_lb_addr *eloc, int8_t etype, uint32_t elen, uint32_t nelen) { struct kernel_lb_addr neloc = {}; int last_block = (elen + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits; int first_block = (nelen + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits; if (nelen) { if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { udf_free_blocks(inode->i_sb, inode, eloc, 0, last_block); etype = (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30); } else neloc = *eloc; nelen = (etype << 30) | nelen; } if (elen != nelen) { udf_write_aext(inode, epos, &neloc, nelen, 0); if (last_block - first_block > 0) { if (etype == (EXT_RECORDED_ALLOCATED >> 30)) mark_inode_dirty(inode); if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) udf_free_blocks(inode->i_sb, inode, eloc, first_block, last_block - first_block); } } } /* * Truncate the last extent to match i_size. This function assumes * that preallocation extent is already truncated. */ void udf_truncate_tail_extent(struct inode *inode) { struct extent_position epos = {}; struct kernel_lb_addr eloc; uint32_t elen, nelen; uint64_t lbcount = 0; int8_t etype = -1, netype; int adsize; struct udf_inode_info *iinfo = UDF_I(inode); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB || inode->i_size == iinfo->i_lenExtents) return; /* Are we going to delete the file anyway? 
*/ if (inode->i_nlink == 0) return; if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) adsize = sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) adsize = sizeof(struct long_ad); else BUG(); /* Find the last extent in the file */ while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) { etype = netype; lbcount += elen; if (lbcount > inode->i_size) { if (lbcount - inode->i_size >= inode->i_sb->s_blocksize) printk(KERN_WARNING "udf_truncate_tail_extent(): Too long " "extent after EOF in inode %u: i_size: " "%Ld lbcount: %Ld extent %u+%u\n", (unsigned)inode->i_ino, (long long)inode->i_size, (long long)lbcount, (unsigned)eloc.logicalBlockNum, (unsigned)elen); nelen = elen - (lbcount - inode->i_size); epos.offset -= adsize; extent_trunc(inode, &epos, &eloc, etype, elen, nelen); epos.offset += adsize; if (udf_next_aext(inode, &epos, &eloc, &elen, 1) != -1) printk(KERN_ERR "udf_truncate_tail_extent(): " "Extent after EOF in inode %u.\n", (unsigned)inode->i_ino); break; } } /* This inode entry is in-memory only and thus we don't have to mark * the inode dirty */ iinfo->i_lenExtents = inode->i_size; brelse(epos.bh); } void udf_discard_prealloc(struct inode *inode) { struct extent_position epos = { NULL, 0, {0, 0} }; struct kernel_lb_addr eloc; uint32_t elen; uint64_t lbcount = 0; int8_t etype = -1, netype; int adsize; struct udf_inode_info *iinfo = UDF_I(inode); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB || inode->i_size == iinfo->i_lenExtents) return; if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) adsize = sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) adsize = sizeof(struct long_ad); else adsize = 0; epos.block = iinfo->i_location; /* Find the last extent in the file */ while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) { etype = netype; lbcount += elen; } if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { epos.offset -= adsize; lbcount -= elen; extent_trunc(inode, &epos, 
&eloc, etype, elen, 0); if (!epos.bh) { iinfo->i_lenAlloc = epos.offset - udf_file_entry_alloc_offset(inode); mark_inode_dirty(inode); } else { struct allocExtDesc *aed = (struct allocExtDesc *)(epos.bh->b_data); aed->lengthAllocDescs = cpu_to_le32(epos.offset - sizeof(struct allocExtDesc)); if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) udf_update_tag(epos.bh->b_data, epos.offset); else udf_update_tag(epos.bh->b_data, sizeof(struct allocExtDesc)); mark_buffer_dirty_inode(epos.bh, inode); } } /* This inode entry is in-memory only and thus we don't have to mark * the inode dirty */ iinfo->i_lenExtents = lbcount; brelse(epos.bh); } static void udf_update_alloc_ext_desc(struct inode *inode, struct extent_position *epos, u32 lenalloc) { struct super_block *sb = inode->i_sb; struct udf_sb_info *sbi = UDF_SB(sb); struct allocExtDesc *aed = (struct allocExtDesc *) (epos->bh->b_data); int len = sizeof(struct allocExtDesc); aed->lengthAllocDescs = cpu_to_le32(lenalloc); if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) || sbi->s_udfrev >= 0x0201) len += lenalloc; udf_update_tag(epos->bh->b_data, len); mark_buffer_dirty_inode(epos->bh, inode); } /* * Truncate extents of inode to inode->i_size. This function can be used only * for making file shorter. For making file longer, udf_extend_file() has to * be used. 
*/ void udf_truncate_extents(struct inode *inode) { struct extent_position epos; struct kernel_lb_addr eloc, neloc = {}; uint32_t elen, nelen = 0, indirect_ext_len = 0, lenalloc; int8_t etype; struct super_block *sb = inode->i_sb; sector_t first_block = inode->i_size >> sb->s_blocksize_bits, offset; loff_t byte_offset; int adsize; struct udf_inode_info *iinfo = UDF_I(inode); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) adsize = sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) adsize = sizeof(struct long_ad); else BUG(); etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset); byte_offset = (offset << sb->s_blocksize_bits) + (inode->i_size & (sb->s_blocksize - 1)); if (etype == -1) { /* We should extend the file? */ WARN_ON(byte_offset); return; } epos.offset -= adsize; extent_trunc(inode, &epos, &eloc, etype, elen, byte_offset); epos.offset += adsize; if (byte_offset) lenalloc = epos.offset; else lenalloc = epos.offset - adsize; if (!epos.bh) lenalloc -= udf_file_entry_alloc_offset(inode); else lenalloc -= sizeof(struct allocExtDesc); while ((etype = udf_current_aext(inode, &epos, &eloc, &elen, 0)) != -1) { if (etype == (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) { udf_write_aext(inode, &epos, &neloc, nelen, 0); if (indirect_ext_len) { /* We managed to free all extents in the * indirect extent - free it too */ BUG_ON(!epos.bh); udf_free_blocks(sb, inode, &epos.block, 0, indirect_ext_len); } else if (!epos.bh) { iinfo->i_lenAlloc = lenalloc; mark_inode_dirty(inode); } else udf_update_alloc_ext_desc(inode, &epos, lenalloc); brelse(epos.bh); epos.offset = sizeof(struct allocExtDesc); epos.block = eloc; epos.bh = udf_tread(sb, udf_get_lb_pblock(sb, &eloc, 0)); if (elen) indirect_ext_len = (elen + sb->s_blocksize - 1) >> sb->s_blocksize_bits; else indirect_ext_len = 1; } else { extent_trunc(inode, &epos, &eloc, etype, elen, 0); epos.offset += adsize; } } if (indirect_ext_len) { BUG_ON(!epos.bh); udf_free_blocks(sb, inode, 
&epos.block, 0, indirect_ext_len); } else if (!epos.bh) { iinfo->i_lenAlloc = lenalloc; mark_inode_dirty(inode); } else udf_update_alloc_ext_desc(inode, &epos, lenalloc); iinfo->i_lenExtents = inode->i_size; brelse(epos.bh); }
gpl-2.0
gromaudio/linux-imx6
net/tipc/node.c
2929
13984
/* * net/tipc/node.c: TIPC node management routines * * Copyright (c) 2000-2006, Ericsson AB * Copyright (c) 2005-2006, 2010-2011, Wind River Systems * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "core.h" #include "config.h" #include "node.h" #include "name_distr.h" static void node_lost_contact(struct tipc_node *n_ptr); static void node_established_contact(struct tipc_node *n_ptr); static DEFINE_SPINLOCK(node_create_lock); static struct hlist_head node_htable[NODE_HTABLE_SIZE]; LIST_HEAD(tipc_node_list); static u32 tipc_num_nodes; static atomic_t tipc_num_links = ATOMIC_INIT(0); u32 tipc_own_tag; /** * tipc_node_find - locate specified node object, if it exists */ struct tipc_node *tipc_node_find(u32 addr) { struct tipc_node *node; struct hlist_node *pos; if (unlikely(!in_own_cluster(addr))) return NULL; hlist_for_each_entry(node, pos, &node_htable[tipc_hashfn(addr)], hash) { if (node->addr == addr) return node; } return NULL; } /** * tipc_node_create - create neighboring node * * Currently, this routine is called by neighbor discovery code, which holds * net_lock for reading only. We must take node_create_lock to ensure a node * isn't created twice if two different bearers discover the node at the same * time. (It would be preferable to switch to holding net_lock in write mode, * but this is a non-trivial change.) 
*/ struct tipc_node *tipc_node_create(u32 addr) { struct tipc_node *n_ptr, *temp_node; spin_lock_bh(&node_create_lock); n_ptr = tipc_node_find(addr); if (n_ptr) { spin_unlock_bh(&node_create_lock); return n_ptr; } n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC); if (!n_ptr) { spin_unlock_bh(&node_create_lock); warn("Node creation failed, no memory\n"); return NULL; } n_ptr->addr = addr; spin_lock_init(&n_ptr->lock); INIT_HLIST_NODE(&n_ptr->hash); INIT_LIST_HEAD(&n_ptr->list); INIT_LIST_HEAD(&n_ptr->nsub); hlist_add_head(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]); list_for_each_entry(temp_node, &tipc_node_list, list) { if (n_ptr->addr < temp_node->addr) break; } list_add_tail(&n_ptr->list, &temp_node->list); tipc_num_nodes++; spin_unlock_bh(&node_create_lock); return n_ptr; } void tipc_node_delete(struct tipc_node *n_ptr) { list_del(&n_ptr->list); hlist_del(&n_ptr->hash); kfree(n_ptr); tipc_num_nodes--; } /** * tipc_node_link_up - handle addition of link * * Link becomes active (alone or shared) or standby, depending on its priority. 
*/ void tipc_node_link_up(struct tipc_node *n_ptr, struct link *l_ptr) { struct link **active = &n_ptr->active_links[0]; n_ptr->working_links++; info("Established link <%s> on network plane %c\n", l_ptr->name, l_ptr->b_ptr->net_plane); if (!active[0]) { active[0] = active[1] = l_ptr; node_established_contact(n_ptr); return; } if (l_ptr->priority < active[0]->priority) { info("New link <%s> becomes standby\n", l_ptr->name); return; } tipc_link_send_duplicate(active[0], l_ptr); if (l_ptr->priority == active[0]->priority) { active[0] = l_ptr; return; } info("Old link <%s> becomes standby\n", active[0]->name); if (active[1] != active[0]) info("Old link <%s> becomes standby\n", active[1]->name); active[0] = active[1] = l_ptr; } /** * node_select_active_links - select active link */ static void node_select_active_links(struct tipc_node *n_ptr) { struct link **active = &n_ptr->active_links[0]; u32 i; u32 highest_prio = 0; active[0] = active[1] = NULL; for (i = 0; i < MAX_BEARERS; i++) { struct link *l_ptr = n_ptr->links[i]; if (!l_ptr || !tipc_link_is_up(l_ptr) || (l_ptr->priority < highest_prio)) continue; if (l_ptr->priority > highest_prio) { highest_prio = l_ptr->priority; active[0] = active[1] = l_ptr; } else { active[1] = l_ptr; } } } /** * tipc_node_link_down - handle loss of link */ void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr) { struct link **active; n_ptr->working_links--; if (!tipc_link_is_active(l_ptr)) { info("Lost standby link <%s> on network plane %c\n", l_ptr->name, l_ptr->b_ptr->net_plane); return; } info("Lost link <%s> on network plane %c\n", l_ptr->name, l_ptr->b_ptr->net_plane); active = &n_ptr->active_links[0]; if (active[0] == l_ptr) active[0] = active[1]; if (active[1] == l_ptr) active[1] = active[0]; if (active[0] == l_ptr) node_select_active_links(n_ptr); if (tipc_node_is_up(n_ptr)) tipc_link_changeover(l_ptr); else node_lost_contact(n_ptr); } int tipc_node_active_links(struct tipc_node *n_ptr) { return 
n_ptr->active_links[0] != NULL; } int tipc_node_redundant_links(struct tipc_node *n_ptr) { return n_ptr->working_links > 1; } int tipc_node_is_up(struct tipc_node *n_ptr) { return tipc_node_active_links(n_ptr); } void tipc_node_attach_link(struct tipc_node *n_ptr, struct link *l_ptr) { n_ptr->links[l_ptr->b_ptr->identity] = l_ptr; atomic_inc(&tipc_num_links); n_ptr->link_cnt++; } void tipc_node_detach_link(struct tipc_node *n_ptr, struct link *l_ptr) { n_ptr->links[l_ptr->b_ptr->identity] = NULL; atomic_dec(&tipc_num_links); n_ptr->link_cnt--; } /* * Routing table management - five cases to handle: * * 1: A link towards a zone/cluster external node comes up. * => Send a multicast message updating routing tables of all * system nodes within own cluster that the new destination * can be reached via this node. * (node.establishedContact()=>cluster.multicastNewRoute()) * * 2: A link towards a slave node comes up. * => Send a multicast message updating routing tables of all * system nodes within own cluster that the new destination * can be reached via this node. * (node.establishedContact()=>cluster.multicastNewRoute()) * => Send a message to the slave node about existence * of all system nodes within cluster: * (node.establishedContact()=>cluster.sendLocalRoutes()) * * 3: A new cluster local system node becomes available. * => Send message(s) to this particular node containing * information about all cluster external and slave * nodes which can be reached via this node. * (node.establishedContact()==>network.sendExternalRoutes()) * (node.establishedContact()==>network.sendSlaveRoutes()) * => Send messages to all directly connected slave nodes * containing information about the existence of the new node * (node.establishedContact()=>cluster.multicastNewRoute()) * * 4: The link towards a zone/cluster external node or slave * node goes down. 
* => Send a multcast message updating routing tables of all * nodes within cluster that the new destination can not any * longer be reached via this node. * (node.lostAllLinks()=>cluster.bcastLostRoute()) * * 5: A cluster local system node becomes unavailable. * => Remove all references to this node from the local * routing tables. Note: This is a completely node * local operation. * (node.lostAllLinks()=>network.removeAsRouter()) * => Send messages to all directly connected slave nodes * containing information about loss of the node * (node.establishedContact()=>cluster.multicastLostRoute()) * */ static void node_established_contact(struct tipc_node *n_ptr) { tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr); /* Syncronize broadcast acks */ n_ptr->bclink.acked = tipc_bclink_get_last_sent(); if (n_ptr->bclink.supported) { tipc_nmap_add(&tipc_bcast_nmap, n_ptr->addr); if (n_ptr->addr < tipc_own_addr) tipc_own_tag++; } } static void node_cleanup_finished(unsigned long node_addr) { struct tipc_node *n_ptr; read_lock_bh(&tipc_net_lock); n_ptr = tipc_node_find(node_addr); if (n_ptr) { tipc_node_lock(n_ptr); n_ptr->cleanup_required = 0; tipc_node_unlock(n_ptr); } read_unlock_bh(&tipc_net_lock); } static void node_lost_contact(struct tipc_node *n_ptr) { char addr_string[16]; u32 i; /* Clean up broadcast reception remains */ n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0; while (n_ptr->bclink.deferred_head) { struct sk_buff *buf = n_ptr->bclink.deferred_head; n_ptr->bclink.deferred_head = buf->next; buf_discard(buf); } if (n_ptr->bclink.defragm) { buf_discard(n_ptr->bclink.defragm); n_ptr->bclink.defragm = NULL; } if (n_ptr->bclink.supported) { tipc_bclink_acknowledge(n_ptr, mod(n_ptr->bclink.acked + 10000)); tipc_nmap_remove(&tipc_bcast_nmap, n_ptr->addr); if (n_ptr->addr < tipc_own_addr) tipc_own_tag--; } info("Lost contact with %s\n", tipc_addr_string_fill(addr_string, n_ptr->addr)); /* Abort link changeover */ for (i = 0; i < MAX_BEARERS; i++) { struct link 
*l_ptr = n_ptr->links[i]; if (!l_ptr) continue; l_ptr->reset_checkpoint = l_ptr->next_in_no; l_ptr->exp_msg_count = 0; tipc_link_reset_fragments(l_ptr); } /* Notify subscribers */ tipc_nodesub_notify(n_ptr); /* Prevent re-contact with node until all cleanup is done */ n_ptr->cleanup_required = 1; tipc_k_signal((Handler)node_cleanup_finished, n_ptr->addr); } struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space) { u32 domain; struct sk_buff *buf; struct tipc_node *n_ptr; struct tipc_node_info node_info; u32 payload_size; if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); if (!tipc_addr_domain_valid(domain)) return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE " (network address)"); read_lock_bh(&tipc_net_lock); if (!tipc_num_nodes) { read_unlock_bh(&tipc_net_lock); return tipc_cfg_reply_none(); } /* For now, get space for all other nodes */ payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes; if (payload_size > 32768u) { read_unlock_bh(&tipc_net_lock); return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED " (too many nodes)"); } buf = tipc_cfg_reply_alloc(payload_size); if (!buf) { read_unlock_bh(&tipc_net_lock); return NULL; } /* Add TLVs for all nodes in scope */ list_for_each_entry(n_ptr, &tipc_node_list, list) { if (!tipc_in_scope(domain, n_ptr->addr)) continue; node_info.addr = htonl(n_ptr->addr); node_info.up = htonl(tipc_node_is_up(n_ptr)); tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO, &node_info, sizeof(node_info)); } read_unlock_bh(&tipc_net_lock); return buf; } struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space) { u32 domain; struct sk_buff *buf; struct tipc_node *n_ptr; struct tipc_link_info link_info; u32 payload_size; if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); domain = ntohl(*(__be32 
*)TLV_DATA(req_tlv_area)); if (!tipc_addr_domain_valid(domain)) return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE " (network address)"); if (tipc_mode != TIPC_NET_MODE) return tipc_cfg_reply_none(); read_lock_bh(&tipc_net_lock); /* Get space for all unicast links + multicast link */ payload_size = TLV_SPACE(sizeof(link_info)) * (atomic_read(&tipc_num_links) + 1); if (payload_size > 32768u) { read_unlock_bh(&tipc_net_lock); return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED " (too many links)"); } buf = tipc_cfg_reply_alloc(payload_size); if (!buf) { read_unlock_bh(&tipc_net_lock); return NULL; } /* Add TLV for broadcast link */ link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr)); link_info.up = htonl(1); strlcpy(link_info.str, tipc_bclink_name, TIPC_MAX_LINK_NAME); tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info)); /* Add TLVs for any other links in scope */ list_for_each_entry(n_ptr, &tipc_node_list, list) { u32 i; if (!tipc_in_scope(domain, n_ptr->addr)) continue; tipc_node_lock(n_ptr); for (i = 0; i < MAX_BEARERS; i++) { if (!n_ptr->links[i]) continue; link_info.dest = htonl(n_ptr->addr); link_info.up = htonl(tipc_link_is_up(n_ptr->links[i])); strcpy(link_info.str, n_ptr->links[i]->name); tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info)); } tipc_node_unlock(n_ptr); } read_unlock_bh(&tipc_net_lock); return buf; }
gpl-2.0
sfumato77/rk3x_kernel_3.0.36
sound/pci/oxygen/xonar_dg.c
3185
16469
/* * card driver for the Xonar DG * * Copyright (c) Clemens Ladisch <clemens@ladisch.de> * * * This driver is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2. * * This driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this driver; if not, see <http://www.gnu.org/licenses/>. */ /* * Xonar DG * -------- * * CMI8788: * * SPI 0 -> CS4245 * * I²S 1 -> CS4245 * I²S 2 -> CS4361 (center/LFE) * I²S 3 -> CS4361 (surround) * I²S 4 -> CS4361 (front) * * GPIO 3 <- ? * GPIO 4 <- headphone detect * GPIO 5 -> route input jack to line-in (0) or mic-in (1) * GPIO 6 -> route input jack to line-in (0) or mic-in (1) * GPIO 7 -> enable rear headphone amp * GPIO 8 -> enable output to speakers * * CS4245: * * input 1 <- aux * input 2 <- front mic * input 4 <- line/mic * DAC out -> headphones * aux out -> front panel headphones */ #include <linux/pci.h> #include <linux/delay.h> #include <sound/control.h> #include <sound/core.h> #include <sound/info.h> #include <sound/pcm.h> #include <sound/tlv.h> #include "oxygen.h" #include "xonar_dg.h" #include "cs4245.h" #define GPIO_MAGIC 0x0008 #define GPIO_HP_DETECT 0x0010 #define GPIO_INPUT_ROUTE 0x0060 #define GPIO_HP_REAR 0x0080 #define GPIO_OUTPUT_ENABLE 0x0100 struct dg { unsigned int output_sel; s8 input_vol[4][2]; unsigned int input_sel; u8 hp_vol_att; u8 cs4245_regs[0x11]; }; static void cs4245_write(struct oxygen *chip, unsigned int reg, u8 value) { struct dg *data = chip->model_data; oxygen_write_spi(chip, OXYGEN_SPI_TRIGGER | OXYGEN_SPI_DATA_LENGTH_3 | OXYGEN_SPI_CLOCK_1280 | (0 << OXYGEN_SPI_CODEC_SHIFT) | OXYGEN_SPI_CEN_LATCH_CLOCK_HI, CS4245_SPI_ADDRESS | CS4245_SPI_WRITE | (reg << 8) | value); 
data->cs4245_regs[reg] = value; } static void cs4245_write_cached(struct oxygen *chip, unsigned int reg, u8 value) { struct dg *data = chip->model_data; if (value != data->cs4245_regs[reg]) cs4245_write(chip, reg, value); } static void cs4245_registers_init(struct oxygen *chip) { struct dg *data = chip->model_data; cs4245_write(chip, CS4245_POWER_CTRL, CS4245_PDN); cs4245_write(chip, CS4245_DAC_CTRL_1, data->cs4245_regs[CS4245_DAC_CTRL_1]); cs4245_write(chip, CS4245_ADC_CTRL, data->cs4245_regs[CS4245_ADC_CTRL]); cs4245_write(chip, CS4245_SIGNAL_SEL, data->cs4245_regs[CS4245_SIGNAL_SEL]); cs4245_write(chip, CS4245_PGA_B_CTRL, data->cs4245_regs[CS4245_PGA_B_CTRL]); cs4245_write(chip, CS4245_PGA_A_CTRL, data->cs4245_regs[CS4245_PGA_A_CTRL]); cs4245_write(chip, CS4245_ANALOG_IN, data->cs4245_regs[CS4245_ANALOG_IN]); cs4245_write(chip, CS4245_DAC_A_CTRL, data->cs4245_regs[CS4245_DAC_A_CTRL]); cs4245_write(chip, CS4245_DAC_B_CTRL, data->cs4245_regs[CS4245_DAC_B_CTRL]); cs4245_write(chip, CS4245_DAC_CTRL_2, CS4245_DAC_SOFT | CS4245_DAC_ZERO | CS4245_INVERT_DAC); cs4245_write(chip, CS4245_INT_MASK, 0); cs4245_write(chip, CS4245_POWER_CTRL, 0); } static void cs4245_init(struct oxygen *chip) { struct dg *data = chip->model_data; data->cs4245_regs[CS4245_DAC_CTRL_1] = CS4245_DAC_FM_SINGLE | CS4245_DAC_DIF_LJUST; data->cs4245_regs[CS4245_ADC_CTRL] = CS4245_ADC_FM_SINGLE | CS4245_ADC_DIF_LJUST; data->cs4245_regs[CS4245_SIGNAL_SEL] = CS4245_A_OUT_SEL_HIZ | CS4245_ASYNCH; data->cs4245_regs[CS4245_PGA_B_CTRL] = 0; data->cs4245_regs[CS4245_PGA_A_CTRL] = 0; data->cs4245_regs[CS4245_ANALOG_IN] = CS4245_PGA_SOFT | CS4245_PGA_ZERO | CS4245_SEL_INPUT_4; data->cs4245_regs[CS4245_DAC_A_CTRL] = 0; data->cs4245_regs[CS4245_DAC_B_CTRL] = 0; cs4245_registers_init(chip); snd_component_add(chip->card, "CS4245"); } static void dg_output_enable(struct oxygen *chip) { msleep(2500); oxygen_set_bits16(chip, OXYGEN_GPIO_DATA, GPIO_OUTPUT_ENABLE); } static void dg_init(struct oxygen *chip) { struct dg 
*data = chip->model_data; data->output_sel = 0; data->input_sel = 3; data->hp_vol_att = 2 * 16; cs4245_init(chip); oxygen_clear_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_MAGIC | GPIO_HP_DETECT); oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_INPUT_ROUTE | GPIO_HP_REAR | GPIO_OUTPUT_ENABLE); oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, GPIO_INPUT_ROUTE | GPIO_HP_REAR); dg_output_enable(chip); } static void dg_cleanup(struct oxygen *chip) { oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, GPIO_OUTPUT_ENABLE); } static void dg_suspend(struct oxygen *chip) { dg_cleanup(chip); } static void dg_resume(struct oxygen *chip) { cs4245_registers_init(chip); dg_output_enable(chip); } static void set_cs4245_dac_params(struct oxygen *chip, struct snd_pcm_hw_params *params) { struct dg *data = chip->model_data; u8 value; value = data->cs4245_regs[CS4245_DAC_CTRL_1] & ~CS4245_DAC_FM_MASK; if (params_rate(params) <= 50000) value |= CS4245_DAC_FM_SINGLE; else if (params_rate(params) <= 100000) value |= CS4245_DAC_FM_DOUBLE; else value |= CS4245_DAC_FM_QUAD; cs4245_write_cached(chip, CS4245_DAC_CTRL_1, value); } static void set_cs4245_adc_params(struct oxygen *chip, struct snd_pcm_hw_params *params) { struct dg *data = chip->model_data; u8 value; value = data->cs4245_regs[CS4245_ADC_CTRL] & ~CS4245_ADC_FM_MASK; if (params_rate(params) <= 50000) value |= CS4245_ADC_FM_SINGLE; else if (params_rate(params) <= 100000) value |= CS4245_ADC_FM_DOUBLE; else value |= CS4245_ADC_FM_QUAD; cs4245_write_cached(chip, CS4245_ADC_CTRL, value); } static inline unsigned int shift_bits(unsigned int value, unsigned int shift_from, unsigned int shift_to, unsigned int mask) { if (shift_from < shift_to) return (value << (shift_to - shift_from)) & mask; else return (value >> (shift_from - shift_to)) & mask; } static unsigned int adjust_dg_dac_routing(struct oxygen *chip, unsigned int play_routing) { return (play_routing & OXYGEN_PLAY_DAC0_SOURCE_MASK) | shift_bits(play_routing, OXYGEN_PLAY_DAC2_SOURCE_SHIFT, 
OXYGEN_PLAY_DAC1_SOURCE_SHIFT, OXYGEN_PLAY_DAC1_SOURCE_MASK) | shift_bits(play_routing, OXYGEN_PLAY_DAC1_SOURCE_SHIFT, OXYGEN_PLAY_DAC2_SOURCE_SHIFT, OXYGEN_PLAY_DAC2_SOURCE_MASK) | shift_bits(play_routing, OXYGEN_PLAY_DAC0_SOURCE_SHIFT, OXYGEN_PLAY_DAC3_SOURCE_SHIFT, OXYGEN_PLAY_DAC3_SOURCE_MASK); } static int output_switch_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { static const char *const names[3] = { "Speakers", "Headphones", "FP Headphones" }; return snd_ctl_enum_info(info, 1, 3, names); } static int output_switch_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct dg *data = chip->model_data; mutex_lock(&chip->mutex); value->value.enumerated.item[0] = data->output_sel; mutex_unlock(&chip->mutex); return 0; } static int output_switch_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct dg *data = chip->model_data; u8 reg; int changed; if (value->value.enumerated.item[0] > 2) return -EINVAL; mutex_lock(&chip->mutex); changed = value->value.enumerated.item[0] != data->output_sel; if (changed) { data->output_sel = value->value.enumerated.item[0]; reg = data->cs4245_regs[CS4245_SIGNAL_SEL] & ~CS4245_A_OUT_SEL_MASK; reg |= data->output_sel == 2 ? CS4245_A_OUT_SEL_DAC : CS4245_A_OUT_SEL_HIZ; cs4245_write_cached(chip, CS4245_SIGNAL_SEL, reg); cs4245_write_cached(chip, CS4245_DAC_A_CTRL, data->output_sel ? data->hp_vol_att : 0); cs4245_write_cached(chip, CS4245_DAC_B_CTRL, data->output_sel ? data->hp_vol_att : 0); oxygen_write16_masked(chip, OXYGEN_GPIO_DATA, data->output_sel == 1 ? 
GPIO_HP_REAR : 0, GPIO_HP_REAR); } mutex_unlock(&chip->mutex); return changed; } static int hp_volume_offset_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { static const char *const names[3] = { "< 64 ohms", "64-150 ohms", "150-300 ohms" }; return snd_ctl_enum_info(info, 1, 3, names); } static int hp_volume_offset_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct dg *data = chip->model_data; mutex_lock(&chip->mutex); if (data->hp_vol_att > 2 * 7) value->value.enumerated.item[0] = 0; else if (data->hp_vol_att > 0) value->value.enumerated.item[0] = 1; else value->value.enumerated.item[0] = 2; mutex_unlock(&chip->mutex); return 0; } static int hp_volume_offset_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { static const s8 atts[3] = { 2 * 16, 2 * 7, 0 }; struct oxygen *chip = ctl->private_data; struct dg *data = chip->model_data; s8 att; int changed; if (value->value.enumerated.item[0] > 2) return -EINVAL; att = atts[value->value.enumerated.item[0]]; mutex_lock(&chip->mutex); changed = att != data->hp_vol_att; if (changed) { data->hp_vol_att = att; if (data->output_sel) { cs4245_write_cached(chip, CS4245_DAC_A_CTRL, att); cs4245_write_cached(chip, CS4245_DAC_B_CTRL, att); } } mutex_unlock(&chip->mutex); return changed; } static int input_vol_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { info->type = SNDRV_CTL_ELEM_TYPE_INTEGER; info->count = 2; info->value.integer.min = 2 * -12; info->value.integer.max = 2 * 12; return 0; } static int input_vol_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct dg *data = chip->model_data; unsigned int idx = ctl->private_value; mutex_lock(&chip->mutex); value->value.integer.value[0] = data->input_vol[idx][0]; value->value.integer.value[1] = data->input_vol[idx][1]; mutex_unlock(&chip->mutex); return 0; } static int input_vol_put(struct snd_kcontrol *ctl, 
struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct dg *data = chip->model_data; unsigned int idx = ctl->private_value; int changed = 0; if (value->value.integer.value[0] < 2 * -12 || value->value.integer.value[0] > 2 * 12 || value->value.integer.value[1] < 2 * -12 || value->value.integer.value[1] > 2 * 12) return -EINVAL; mutex_lock(&chip->mutex); changed = data->input_vol[idx][0] != value->value.integer.value[0] || data->input_vol[idx][1] != value->value.integer.value[1]; if (changed) { data->input_vol[idx][0] = value->value.integer.value[0]; data->input_vol[idx][1] = value->value.integer.value[1]; if (idx == data->input_sel) { cs4245_write_cached(chip, CS4245_PGA_A_CTRL, data->input_vol[idx][0]); cs4245_write_cached(chip, CS4245_PGA_B_CTRL, data->input_vol[idx][1]); } } mutex_unlock(&chip->mutex); return changed; } static DECLARE_TLV_DB_SCALE(cs4245_pga_db_scale, -1200, 50, 0); static int input_sel_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { static const char *const names[4] = { "Mic", "Aux", "Front Mic", "Line" }; return snd_ctl_enum_info(info, 1, 4, names); } static int input_sel_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct dg *data = chip->model_data; mutex_lock(&chip->mutex); value->value.enumerated.item[0] = data->input_sel; mutex_unlock(&chip->mutex); return 0; } static int input_sel_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { static const u8 sel_values[4] = { CS4245_SEL_MIC, CS4245_SEL_INPUT_1, CS4245_SEL_INPUT_2, CS4245_SEL_INPUT_4 }; struct oxygen *chip = ctl->private_data; struct dg *data = chip->model_data; int changed; if (value->value.enumerated.item[0] > 3) return -EINVAL; mutex_lock(&chip->mutex); changed = value->value.enumerated.item[0] != data->input_sel; if (changed) { data->input_sel = value->value.enumerated.item[0]; cs4245_write(chip, CS4245_ANALOG_IN, (data->cs4245_regs[CS4245_ANALOG_IN] & 
~CS4245_SEL_MASK) | sel_values[data->input_sel]); cs4245_write_cached(chip, CS4245_PGA_A_CTRL, data->input_vol[data->input_sel][0]); cs4245_write_cached(chip, CS4245_PGA_B_CTRL, data->input_vol[data->input_sel][1]); oxygen_write16_masked(chip, OXYGEN_GPIO_DATA, data->input_sel ? 0 : GPIO_INPUT_ROUTE, GPIO_INPUT_ROUTE); } mutex_unlock(&chip->mutex); return changed; } static int hpf_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { static const char *const names[2] = { "Active", "Frozen" }; return snd_ctl_enum_info(info, 1, 2, names); } static int hpf_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct dg *data = chip->model_data; value->value.enumerated.item[0] = !!(data->cs4245_regs[CS4245_ADC_CTRL] & CS4245_HPF_FREEZE); return 0; } static int hpf_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; struct dg *data = chip->model_data; u8 reg; int changed; mutex_lock(&chip->mutex); reg = data->cs4245_regs[CS4245_ADC_CTRL] & ~CS4245_HPF_FREEZE; if (value->value.enumerated.item[0]) reg |= CS4245_HPF_FREEZE; changed = reg != data->cs4245_regs[CS4245_ADC_CTRL]; if (changed) cs4245_write(chip, CS4245_ADC_CTRL, reg); mutex_unlock(&chip->mutex); return changed; } #define INPUT_VOLUME(xname, index) { \ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ .name = xname, \ .info = input_vol_info, \ .get = input_vol_get, \ .put = input_vol_put, \ .tlv = { .p = cs4245_pga_db_scale }, \ .private_value = index, \ } static const struct snd_kcontrol_new dg_controls[] = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Analog Output Playback Enum", .info = output_switch_info, .get = output_switch_get, .put = output_switch_put, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Headphones Impedance Playback Enum", .info = hp_volume_offset_info, .get = hp_volume_offset_get, .put = hp_volume_offset_put, }, INPUT_VOLUME("Mic Capture Volume", 0), INPUT_VOLUME("Aux Capture 
Volume", 1), INPUT_VOLUME("Front Mic Capture Volume", 2), INPUT_VOLUME("Line Capture Volume", 3), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Capture Source", .info = input_sel_info, .get = input_sel_get, .put = input_sel_put, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "ADC High-pass Filter Capture Enum", .info = hpf_info, .get = hpf_get, .put = hpf_put, }, }; static int dg_control_filter(struct snd_kcontrol_new *template) { if (!strncmp(template->name, "Master Playback ", 16)) return 1; return 0; } static int dg_mixer_init(struct oxygen *chip) { unsigned int i; int err; for (i = 0; i < ARRAY_SIZE(dg_controls); ++i) { err = snd_ctl_add(chip->card, snd_ctl_new1(&dg_controls[i], chip)); if (err < 0) return err; } return 0; } static void dump_cs4245_registers(struct oxygen *chip, struct snd_info_buffer *buffer) { struct dg *data = chip->model_data; unsigned int i; snd_iprintf(buffer, "\nCS4245:"); for (i = 1; i <= 0x10; ++i) snd_iprintf(buffer, " %02x", data->cs4245_regs[i]); snd_iprintf(buffer, "\n"); } struct oxygen_model model_xonar_dg = { .shortname = "Xonar DG", .longname = "C-Media Oxygen HD Audio", .chip = "CMI8786", .init = dg_init, .control_filter = dg_control_filter, .mixer_init = dg_mixer_init, .cleanup = dg_cleanup, .suspend = dg_suspend, .resume = dg_resume, .set_dac_params = set_cs4245_dac_params, .set_adc_params = set_cs4245_adc_params, .adjust_dac_routing = adjust_dg_dac_routing, .dump_registers = dump_cs4245_registers, .model_data_size = sizeof(struct dg), .device_config = PLAYBACK_0_TO_I2S | PLAYBACK_1_TO_SPDIF | CAPTURE_0_FROM_I2S_2, .dac_channels_pcm = 6, .dac_channels_mixer = 0, .function_flags = OXYGEN_FUNCTION_SPI, .dac_mclks = OXYGEN_MCLKS(256, 128, 128), .adc_mclks = OXYGEN_MCLKS(256, 128, 128), .dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST, .adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST, };
gpl-2.0
SolidRun/linux-imx6-3.14
drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
3697
3725
/* * pmi backend for the cbe_cpufreq driver * * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007 * * Author: Christian Krafft <krafft@de.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/timer.h> #include <linux/module.h> #include <linux/of_platform.h> #include <asm/processor.h> #include <asm/prom.h> #include <asm/pmi.h> #include <asm/cell-regs.h> #ifdef DEBUG #include <asm/time.h> #endif #include "ppc_cbe_cpufreq.h" static u8 pmi_slow_mode_limit[MAX_CBE]; bool cbe_cpufreq_has_pmi = false; EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi); /* * hardware specific functions */ int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode) { int ret; pmi_message_t pmi_msg; #ifdef DEBUG long time; #endif pmi_msg.type = PMI_TYPE_FREQ_CHANGE; pmi_msg.data1 = cbe_cpu_to_node(cpu); pmi_msg.data2 = pmode; #ifdef DEBUG time = jiffies; #endif pmi_send_message(pmi_msg); #ifdef DEBUG time = jiffies - time; time = jiffies_to_msecs(time); pr_debug("had to wait %lu ms for a transition using " \ "PMI\n", time); #endif ret = pmi_msg.data2; pr_debug("PMI returned slow mode %d\n", ret); return ret; } EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi); static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg) { u8 node, slow_mode; BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE); node = pmi_msg.data1; slow_mode = 
pmi_msg.data2; pmi_slow_mode_limit[node] = slow_mode; pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode); } static int pmi_notifier(struct notifier_block *nb, unsigned long event, void *data) { struct cpufreq_policy *policy = data; struct cpufreq_frequency_table *cbe_freqs; u8 node; /* Should this really be called for CPUFREQ_ADJUST, CPUFREQ_INCOMPATIBLE * and CPUFREQ_NOTIFY policy events?) */ if (event == CPUFREQ_START) return 0; cbe_freqs = cpufreq_frequency_get_table(policy->cpu); node = cbe_cpu_to_node(policy->cpu); pr_debug("got notified, event=%lu, node=%u\n", event, node); if (pmi_slow_mode_limit[node] != 0) { pr_debug("limiting node %d to slow mode %d\n", node, pmi_slow_mode_limit[node]); cpufreq_verify_within_limits(policy, 0, cbe_freqs[pmi_slow_mode_limit[node]].frequency); } return 0; } static struct notifier_block pmi_notifier_block = { .notifier_call = pmi_notifier, }; static struct pmi_handler cbe_pmi_handler = { .type = PMI_TYPE_FREQ_CHANGE, .handle_pmi_message = cbe_cpufreq_handle_pmi, }; static int __init cbe_cpufreq_pmi_init(void) { cbe_cpufreq_has_pmi = pmi_register_handler(&cbe_pmi_handler) == 0; if (!cbe_cpufreq_has_pmi) return -ENODEV; cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER); return 0; } static void __exit cbe_cpufreq_pmi_exit(void) { cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER); pmi_unregister_handler(&cbe_pmi_handler); } module_init(cbe_cpufreq_pmi_init); module_exit(cbe_cpufreq_pmi_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
gpl-2.0
coreentin/android_kernel_nvidia_s8515
arch/m68k/apollo/config.c
4465
6133
#include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/tty.h> #include <linux/console.h> #include <linux/rtc.h> #include <linux/vt_kern.h> #include <linux/interrupt.h> #include <asm/setup.h> #include <asm/bootinfo.h> #include <asm/pgtable.h> #include <asm/apollohw.h> #include <asm/irq.h> #include <asm/rtc.h> #include <asm/machdep.h> u_long sio01_physaddr; u_long sio23_physaddr; u_long rtc_physaddr; u_long pica_physaddr; u_long picb_physaddr; u_long cpuctrl_physaddr; u_long timer_physaddr; u_long apollo_model; extern void dn_sched_init(irq_handler_t handler); extern void dn_init_IRQ(void); extern unsigned long dn_gettimeoffset(void); extern int dn_dummy_hwclk(int, struct rtc_time *); extern int dn_dummy_set_clock_mmss(unsigned long); extern void dn_dummy_reset(void); #ifdef CONFIG_HEARTBEAT static void dn_heartbeat(int on); #endif static irqreturn_t dn_timer_int(int irq,void *); static void dn_get_model(char *model); static const char *apollo_models[] = { [APOLLO_DN3000-APOLLO_DN3000] = "DN3000 (Otter)", [APOLLO_DN3010-APOLLO_DN3000] = "DN3010 (Otter)", [APOLLO_DN3500-APOLLO_DN3000] = "DN3500 (Cougar II)", [APOLLO_DN4000-APOLLO_DN3000] = "DN4000 (Mink)", [APOLLO_DN4500-APOLLO_DN3000] = "DN4500 (Roadrunner)" }; int apollo_parse_bootinfo(const struct bi_record *record) { int unknown = 0; const unsigned long *data = record->data; switch(record->tag) { case BI_APOLLO_MODEL: apollo_model=*data; break; default: unknown=1; } return unknown; } void dn_setup_model(void) { printk("Apollo hardware found: "); printk("[%s]\n", apollo_models[apollo_model - APOLLO_DN3000]); switch(apollo_model) { case APOLLO_UNKNOWN: panic("Unknown apollo model"); break; case APOLLO_DN3000: case APOLLO_DN3010: sio01_physaddr=SAU8_SIO01_PHYSADDR; rtc_physaddr=SAU8_RTC_PHYSADDR; pica_physaddr=SAU8_PICA; picb_physaddr=SAU8_PICB; cpuctrl_physaddr=SAU8_CPUCTRL; timer_physaddr=SAU8_TIMER; break; case APOLLO_DN4000: sio01_physaddr=SAU7_SIO01_PHYSADDR; 
sio23_physaddr=SAU7_SIO23_PHYSADDR; rtc_physaddr=SAU7_RTC_PHYSADDR; pica_physaddr=SAU7_PICA; picb_physaddr=SAU7_PICB; cpuctrl_physaddr=SAU7_CPUCTRL; timer_physaddr=SAU7_TIMER; break; case APOLLO_DN4500: panic("Apollo model not yet supported"); break; case APOLLO_DN3500: sio01_physaddr=SAU7_SIO01_PHYSADDR; sio23_physaddr=SAU7_SIO23_PHYSADDR; rtc_physaddr=SAU7_RTC_PHYSADDR; pica_physaddr=SAU7_PICA; picb_physaddr=SAU7_PICB; cpuctrl_physaddr=SAU7_CPUCTRL; timer_physaddr=SAU7_TIMER; break; default: panic("Undefined apollo model"); break; } } int dn_serial_console_wait_key(struct console *co) { while(!(sio01.srb_csrb & 1)) barrier(); return sio01.rhrb_thrb; } void dn_serial_console_write (struct console *co, const char *str,unsigned int count) { while(count--) { if (*str == '\n') { sio01.rhrb_thrb = (unsigned char)'\r'; while (!(sio01.srb_csrb & 0x4)) ; } sio01.rhrb_thrb = (unsigned char)*str++; while (!(sio01.srb_csrb & 0x4)) ; } } void dn_serial_print (const char *str) { while (*str) { if (*str == '\n') { sio01.rhrb_thrb = (unsigned char)'\r'; while (!(sio01.srb_csrb & 0x4)) ; } sio01.rhrb_thrb = (unsigned char)*str++; while (!(sio01.srb_csrb & 0x4)) ; } } void __init config_apollo(void) { int i; dn_setup_model(); mach_sched_init=dn_sched_init; /* */ mach_init_IRQ=dn_init_IRQ; mach_gettimeoffset = dn_gettimeoffset; mach_max_dma_address = 0xffffffff; mach_hwclk = dn_dummy_hwclk; /* */ mach_set_clock_mmss = dn_dummy_set_clock_mmss; /* */ mach_reset = dn_dummy_reset; /* */ #ifdef CONFIG_HEARTBEAT mach_heartbeat = dn_heartbeat; #endif mach_get_model = dn_get_model; cpuctrl=0xaa00; /* clear DMA translation table */ for(i=0;i<0x400;i++) addr_xlat_map[i]=0; } irqreturn_t dn_timer_int(int irq, void *dev_id) { irq_handler_t timer_handler = dev_id; volatile unsigned char x; timer_handler(irq, dev_id); x=*(volatile unsigned char *)(timer+3); x=*(volatile unsigned char *)(timer+5); return IRQ_HANDLED; } void dn_sched_init(irq_handler_t timer_routine) { /* program timer 1 */ 
*(volatile unsigned char *)(timer+3)=0x01; *(volatile unsigned char *)(timer+1)=0x40; *(volatile unsigned char *)(timer+5)=0x09; *(volatile unsigned char *)(timer+7)=0xc4; /* enable IRQ of PIC B */ *(volatile unsigned char *)(pica+1)&=(~8); #if 0 printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3)); printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3)); #endif if (request_irq(IRQ_APOLLO, dn_timer_int, 0, "time", timer_routine)) pr_err("Couldn't register timer interrupt\n"); } unsigned long dn_gettimeoffset(void) { return 0xdeadbeef; } int dn_dummy_hwclk(int op, struct rtc_time *t) { if(!op) { /* read */ t->tm_sec=rtc->second; t->tm_min=rtc->minute; t->tm_hour=rtc->hours; t->tm_mday=rtc->day_of_month; t->tm_wday=rtc->day_of_week; t->tm_mon=rtc->month; t->tm_year=rtc->year; } else { rtc->second=t->tm_sec; rtc->minute=t->tm_min; rtc->hours=t->tm_hour; rtc->day_of_month=t->tm_mday; if(t->tm_wday!=-1) rtc->day_of_week=t->tm_wday; rtc->month=t->tm_mon; rtc->year=t->tm_year; } return 0; } int dn_dummy_set_clock_mmss(unsigned long nowtime) { printk("set_clock_mmss\n"); return 0; } void dn_dummy_reset(void) { dn_serial_print("The end !\n"); for(;;); } void dn_dummy_waitbut(void) { dn_serial_print("waitbut\n"); } static void dn_get_model(char *model) { strcpy(model, "Apollo "); if (apollo_model >= APOLLO_DN3000 && apollo_model <= APOLLO_DN4500) strcat(model, apollo_models[apollo_model - APOLLO_DN3000]); } #ifdef CONFIG_HEARTBEAT static int dn_cpuctrl=0xff00; static void dn_heartbeat(int on) { if(on) { dn_cpuctrl&=~0x100; cpuctrl=dn_cpuctrl; } else { dn_cpuctrl&=~0x100; dn_cpuctrl|=0x100; cpuctrl=dn_cpuctrl; } } #endif
gpl-2.0
puskyer/android_kernel_motorola_olympus
arch/powerpc/mm/slice.c
4465
20490
/* * address space "slices" (meta-segments) support * * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation. * * Based on hugetlb implementation * * Copyright (C) 2003 David Gibson, IBM Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #undef DEBUG #include <linux/kernel.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/err.h> #include <linux/spinlock.h> #include <linux/module.h> #include <asm/mman.h> #include <asm/mmu.h> #include <asm/spu.h> static DEFINE_SPINLOCK(slice_convert_lock); #ifdef DEBUG int _slice_debug = 1; static void slice_print_mask(const char *label, struct slice_mask mask) { char *p, buf[16 + 3 + 16 + 1]; int i; if (!_slice_debug) return; p = buf; for (i = 0; i < SLICE_NUM_LOW; i++) *(p++) = (mask.low_slices & (1 << i)) ? '1' : '0'; *(p++) = ' '; *(p++) = '-'; *(p++) = ' '; for (i = 0; i < SLICE_NUM_HIGH; i++) *(p++) = (mask.high_slices & (1 << i)) ? '1' : '0'; *(p++) = 0; printk(KERN_DEBUG "%s:%s\n", label, buf); } #define slice_dbg(fmt...) do { if (_slice_debug) pr_debug(fmt); } while(0) #else static void slice_print_mask(const char *label, struct slice_mask mask) {} #define slice_dbg(fmt...) 
#endif static struct slice_mask slice_range_to_mask(unsigned long start, unsigned long len) { unsigned long end = start + len - 1; struct slice_mask ret = { 0, 0 }; if (start < SLICE_LOW_TOP) { unsigned long mend = min(end, SLICE_LOW_TOP); unsigned long mstart = min(start, SLICE_LOW_TOP); ret.low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1)) - (1u << GET_LOW_SLICE_INDEX(mstart)); } if ((start + len) > SLICE_LOW_TOP) ret.high_slices = (1u << (GET_HIGH_SLICE_INDEX(end) + 1)) - (1u << GET_HIGH_SLICE_INDEX(start)); return ret; } static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, unsigned long len) { struct vm_area_struct *vma; if ((mm->task_size - len) < addr) return 0; vma = find_vma(mm, addr); return (!vma || (addr + len) <= vma->vm_start); } static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) { return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT, 1ul << SLICE_LOW_SHIFT); } static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice) { unsigned long start = slice << SLICE_HIGH_SHIFT; unsigned long end = start + (1ul << SLICE_HIGH_SHIFT); /* Hack, so that each addresses is controlled by exactly one * of the high or low area bitmaps, the first high area starts * at 4GB, not 0 */ if (start == 0) start = SLICE_LOW_TOP; return !slice_area_is_free(mm, start, end - start); } static struct slice_mask slice_mask_for_free(struct mm_struct *mm) { struct slice_mask ret = { 0, 0 }; unsigned long i; for (i = 0; i < SLICE_NUM_LOW; i++) if (!slice_low_has_vma(mm, i)) ret.low_slices |= 1u << i; if (mm->task_size <= SLICE_LOW_TOP) return ret; for (i = 0; i < SLICE_NUM_HIGH; i++) if (!slice_high_has_vma(mm, i)) ret.high_slices |= 1u << i; return ret; } static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize) { struct slice_mask ret = { 0, 0 }; unsigned long i; u64 psizes; psizes = mm->context.low_slices_psize; for (i = 0; i < SLICE_NUM_LOW; i++) if (((psizes >> (i * 4)) & 0xf) == psize) ret.low_slices |= 
1u << i; psizes = mm->context.high_slices_psize; for (i = 0; i < SLICE_NUM_HIGH; i++) if (((psizes >> (i * 4)) & 0xf) == psize) ret.high_slices |= 1u << i; return ret; } static int slice_check_fit(struct slice_mask mask, struct slice_mask available) { return (mask.low_slices & available.low_slices) == mask.low_slices && (mask.high_slices & available.high_slices) == mask.high_slices; } static void slice_flush_segments(void *parm) { struct mm_struct *mm = parm; unsigned long flags; if (mm != current->active_mm) return; /* update the paca copy of the context struct */ get_paca()->context = current->active_mm->context; local_irq_save(flags); slb_flush_and_rebolt(); local_irq_restore(flags); } static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize) { /* Write the new slice psize bits */ u64 lpsizes, hpsizes; unsigned long i, flags; slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize); slice_print_mask(" mask", mask); /* We need to use a spinlock here to protect against * concurrent 64k -> 4k demotion ... 
*/ spin_lock_irqsave(&slice_convert_lock, flags); lpsizes = mm->context.low_slices_psize; for (i = 0; i < SLICE_NUM_LOW; i++) if (mask.low_slices & (1u << i)) lpsizes = (lpsizes & ~(0xful << (i * 4))) | (((unsigned long)psize) << (i * 4)); hpsizes = mm->context.high_slices_psize; for (i = 0; i < SLICE_NUM_HIGH; i++) if (mask.high_slices & (1u << i)) hpsizes = (hpsizes & ~(0xful << (i * 4))) | (((unsigned long)psize) << (i * 4)); mm->context.low_slices_psize = lpsizes; mm->context.high_slices_psize = hpsizes; slice_dbg(" lsps=%lx, hsps=%lx\n", mm->context.low_slices_psize, mm->context.high_slices_psize); spin_unlock_irqrestore(&slice_convert_lock, flags); #ifdef CONFIG_SPU_BASE spu_flush_all_slbs(mm); #endif } static unsigned long slice_find_area_bottomup(struct mm_struct *mm, unsigned long len, struct slice_mask available, int psize, int use_cache) { struct vm_area_struct *vma; unsigned long start_addr, addr; struct slice_mask mask; int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT); if (use_cache) { if (len <= mm->cached_hole_size) { start_addr = addr = TASK_UNMAPPED_BASE; mm->cached_hole_size = 0; } else start_addr = addr = mm->free_area_cache; } else start_addr = addr = TASK_UNMAPPED_BASE; full_search: for (;;) { addr = _ALIGN_UP(addr, 1ul << pshift); if ((TASK_SIZE - len) < addr) break; vma = find_vma(mm, addr); BUG_ON(vma && (addr >= vma->vm_end)); mask = slice_range_to_mask(addr, len); if (!slice_check_fit(mask, available)) { if (addr < SLICE_LOW_TOP) addr = _ALIGN_UP(addr + 1, 1ul << SLICE_LOW_SHIFT); else addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT); continue; } if (!vma || addr + len <= vma->vm_start) { /* * Remember the place where we stopped the search: */ if (use_cache) mm->free_area_cache = addr + len; return addr; } if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start) mm->cached_hole_size = vma->vm_start - addr; addr = vma->vm_end; } /* Make sure we didn't miss any holes */ if (use_cache && start_addr != 
TASK_UNMAPPED_BASE) { start_addr = addr = TASK_UNMAPPED_BASE; mm->cached_hole_size = 0; goto full_search; } return -ENOMEM; } static unsigned long slice_find_area_topdown(struct mm_struct *mm, unsigned long len, struct slice_mask available, int psize, int use_cache) { struct vm_area_struct *vma; unsigned long addr; struct slice_mask mask; int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT); /* check if free_area_cache is useful for us */ if (use_cache) { if (len <= mm->cached_hole_size) { mm->cached_hole_size = 0; mm->free_area_cache = mm->mmap_base; } /* either no address requested or can't fit in requested * address hole */ addr = mm->free_area_cache; /* make sure it can fit in the remaining address space */ if (addr > len) { addr = _ALIGN_DOWN(addr - len, 1ul << pshift); mask = slice_range_to_mask(addr, len); if (slice_check_fit(mask, available) && slice_area_is_free(mm, addr, len)) /* remember the address as a hint for * next time */ return (mm->free_area_cache = addr); } } addr = mm->mmap_base; while (addr > len) { /* Go down by chunk size */ addr = _ALIGN_DOWN(addr - len, 1ul << pshift); /* Check for hit with different page size */ mask = slice_range_to_mask(addr, len); if (!slice_check_fit(mask, available)) { if (addr < SLICE_LOW_TOP) addr = _ALIGN_DOWN(addr, 1ul << SLICE_LOW_SHIFT); else if (addr < (1ul << SLICE_HIGH_SHIFT)) addr = SLICE_LOW_TOP; else addr = _ALIGN_DOWN(addr, 1ul << SLICE_HIGH_SHIFT); continue; } /* * Lookup failure means no vma is above this address, * else if new region fits below vma->vm_start, * return with success: */ vma = find_vma(mm, addr); if (!vma || (addr + len) <= vma->vm_start) { /* remember the address as a hint for next time */ if (use_cache) mm->free_area_cache = addr; return addr; } /* remember the largest hole we saw so far */ if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start) mm->cached_hole_size = vma->vm_start - addr; /* try just below the current vma->vm_start */ addr = vma->vm_start; } /* * A 
failed mmap() very likely causes application failure, * so fall back to the bottom-up function here. This scenario * can happen with large stack limits and large mmap() * allocations. */ addr = slice_find_area_bottomup(mm, len, available, psize, 0); /* * Restore the topdown base: */ if (use_cache) { mm->free_area_cache = mm->mmap_base; mm->cached_hole_size = ~0UL; } return addr; } static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len, struct slice_mask mask, int psize, int topdown, int use_cache) { if (topdown) return slice_find_area_topdown(mm, len, mask, psize, use_cache); else return slice_find_area_bottomup(mm, len, mask, psize, use_cache); } #define or_mask(dst, src) do { \ (dst).low_slices |= (src).low_slices; \ (dst).high_slices |= (src).high_slices; \ } while (0) #define andnot_mask(dst, src) do { \ (dst).low_slices &= ~(src).low_slices; \ (dst).high_slices &= ~(src).high_slices; \ } while (0) #ifdef CONFIG_PPC_64K_PAGES #define MMU_PAGE_BASE MMU_PAGE_64K #else #define MMU_PAGE_BASE MMU_PAGE_4K #endif unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, unsigned long flags, unsigned int psize, int topdown, int use_cache) { struct slice_mask mask = {0, 0}; struct slice_mask good_mask; struct slice_mask potential_mask = {0,0} /* silence stupid warning */; struct slice_mask compat_mask = {0, 0}; int fixed = (flags & MAP_FIXED); int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT); struct mm_struct *mm = current->mm; unsigned long newaddr; /* Sanity checks */ BUG_ON(mm->task_size == 0); slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize); slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d, use_cache=%d\n", addr, len, flags, topdown, use_cache); if (len > mm->task_size) return -ENOMEM; if (len & ((1ul << pshift) - 1)) return -EINVAL; if (fixed && (addr & ((1ul << pshift) - 1))) return -EINVAL; if (fixed && addr > (mm->task_size - len)) return -EINVAL; /* If hint, make sure it matches 
our alignment restrictions */ if (!fixed && addr) { addr = _ALIGN_UP(addr, 1ul << pshift); slice_dbg(" aligned addr=%lx\n", addr); /* Ignore hint if it's too large or overlaps a VMA */ if (addr > mm->task_size - len || !slice_area_is_free(mm, addr, len)) addr = 0; } /* First make up a "good" mask of slices that have the right size * already */ good_mask = slice_mask_for_size(mm, psize); slice_print_mask(" good_mask", good_mask); /* * Here "good" means slices that are already the right page size, * "compat" means slices that have a compatible page size (i.e. * 4k in a 64k pagesize kernel), and "free" means slices without * any VMAs. * * If MAP_FIXED: * check if fits in good | compat => OK * check if fits in good | compat | free => convert free * else bad * If have hint: * check if hint fits in good => OK * check if hint fits in good | free => convert free * Otherwise: * search in good, found => OK * search in good | free, found => convert free * search in good | compat | free, found => convert free. */ #ifdef CONFIG_PPC_64K_PAGES /* If we support combo pages, we can allow 64k pages in 4k slices */ if (psize == MMU_PAGE_64K) { compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K); if (fixed) or_mask(good_mask, compat_mask); } #endif /* First check hint if it's valid or if we have MAP_FIXED */ if (addr != 0 || fixed) { /* Build a mask for the requested range */ mask = slice_range_to_mask(addr, len); slice_print_mask(" mask", mask); /* Check if we fit in the good mask. 
If we do, we just return, * nothing else to do */ if (slice_check_fit(mask, good_mask)) { slice_dbg(" fits good !\n"); return addr; } } else { /* Now let's see if we can find something in the existing * slices for that size */ newaddr = slice_find_area(mm, len, good_mask, psize, topdown, use_cache); if (newaddr != -ENOMEM) { /* Found within the good mask, we don't have to setup, * we thus return directly */ slice_dbg(" found area at 0x%lx\n", newaddr); return newaddr; } } /* We don't fit in the good mask, check what other slices are * empty and thus can be converted */ potential_mask = slice_mask_for_free(mm); or_mask(potential_mask, good_mask); slice_print_mask(" potential", potential_mask); if ((addr != 0 || fixed) && slice_check_fit(mask, potential_mask)) { slice_dbg(" fits potential !\n"); goto convert; } /* If we have MAP_FIXED and failed the above steps, then error out */ if (fixed) return -EBUSY; slice_dbg(" search...\n"); /* If we had a hint that didn't work out, see if we can fit * anywhere in the good area. 
*/
	if (addr) {
		addr = slice_find_area(mm, len, good_mask, psize,
				       topdown, use_cache);
		if (addr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", addr);
			return addr;
		}
	}

	/* Now let's see if we can find something in the existing slices
	 * for that size plus free slices
	 */
	addr = slice_find_area(mm, len, potential_mask, psize, topdown,
			       use_cache);

#ifdef CONFIG_PPC_64K_PAGES
	if (addr == -ENOMEM && psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		or_mask(potential_mask, compat_mask);
		addr = slice_find_area(mm, len, potential_mask, psize,
				       topdown, use_cache);
	}
#endif

	if (addr == -ENOMEM)
		return -ENOMEM;

	mask = slice_range_to_mask(addr, len);
	slice_dbg(" found potential area at 0x%lx\n", addr);
	slice_print_mask(" mask", mask);

 convert:
	/* Only convert slices that are neither already at the wanted
	 * page size (good_mask) nor acceptable as they are (compat_mask).
	 */
	andnot_mask(mask, good_mask);
	andnot_mask(mask, compat_mask);
	if (mask.low_slices || mask.high_slices) {
		slice_convert(mm, mask, psize);
		/* flush stale segment translations on all CPUs after a
		 * conversion away from the base page size */
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return addr;

}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

/* Bottom-up unmapped-area search using the context's base user page size. */
unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       current->mm->context.user_psize,
				       0, 1);
}

/* Top-down variant of the above (topdown argument set to 1). */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	return slice_get_unmapped_area(addr0, len, flags,
				       current->mm->context.user_psize,
				       1, 1);
}

/* Return the 4-bit page-size index stored for the slice containing addr,
 * read from the low- or high-slice psize word depending on which side of
 * SLICE_LOW_TOP the address falls.
 */
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	u64 psizes;
	int index;

	if (addr < SLICE_LOW_TOP) {
		psizes = mm->context.low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
	} else {
		psizes = mm->context.high_slices_psize;
		index = GET_HIGH_SLICE_INDEX(addr);
	}
	/* each slice owns one 4-bit nibble in the psize word */
	return (psizes >> (index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);

/*
 * This is called by hash_page when it needs to do a lazy conversion of
 * an address space from real 64K pages to combo 4K pages (typically
 * when hitting a non cacheable mapping on a processor or hypervisor
 * that won't allow them for 64K pages).
 *
 * This is also called in init_new_context() to change back the user
 * psize from whatever the parent context had it set to.
 * N.B. This may be called before mm->context.id has been set.
 *
 * This function will only change the content of the {low,high}_slice_psize
 * masks, it will not flush SLBs as this shall be handled lazily by the
 * caller.
 */
void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
{
	unsigned long flags, lpsizes, hpsizes;
	unsigned int old_psize;
	int i;

	slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize);

	spin_lock_irqsave(&slice_convert_lock, flags);

	old_psize = mm->context.user_psize;
	slice_dbg(" old_psize=%d\n", old_psize);
	if (old_psize == psize)
		goto bail;

	mm->context.user_psize = psize;
	/* order the user_psize store before the per-slice nibble rewrites */
	wmb();

	/* rewrite every low-slice nibble still holding the old psize */
	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	/* same for the high slices */
	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++)
		if (((hpsizes >> (i * 4)) & 0xf) == old_psize)
			hpsizes = (hpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	mm->context.low_slices_psize = lpsizes;
	mm->context.high_slices_psize = hpsizes;

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  mm->context.low_slices_psize,
		  mm->context.high_slices_psize);

 bail:
	spin_unlock_irqrestore(&slice_convert_lock, flags);
}

/* Set the psize nibble for the single slice containing address, under
 * slice_convert_lock, then flush any SPU SLBs that may cache the old
 * translation size.
 */
void slice_set_psize(struct mm_struct *mm, unsigned long address,
		     unsigned int psize)
{
	unsigned long i, flags;
	u64 *p;

	spin_lock_irqsave(&slice_convert_lock, flags);
	if (address < SLICE_LOW_TOP) {
		i = GET_LOW_SLICE_INDEX(address);
		p = &mm->context.low_slices_psize;
	} else {
		i = GET_HIGH_SLICE_INDEX(address);
		p = &mm->context.high_slices_psize;
	}
	*p = (*p & ~(0xful << (i * 4))) |
		((unsigned long) psize << (i * 4));
	spin_unlock_irqrestore(&slice_convert_lock, flags);

#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
}

/* Convert every slice overlapping [start, start + len) to psize. */
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	struct slice_mask mask = slice_range_to_mask(start, len);

	slice_convert(mm, mask, psize);
}

/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * because of that generic code limitation, MAP_FIXED mapping cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is ok
 * for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len)
{
	struct slice_mask mask, available;
	unsigned int psize = mm->context.user_psize;

	mask = slice_range_to_mask(addr, len);
	available = slice_mask_for_size(mm, psize);
#ifdef CONFIG_PPC_64K_PAGES
	/* We need to account for 4k slices too */
	if (psize == MMU_PAGE_64K) {
		struct slice_mask compat_mask;
		compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
		or_mask(available, compat_mask);
	}
#endif

#if 0 /* too verbose */
	slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
		 mm, addr, len);
	slice_print_mask(" mask", mask);
	slice_print_mask(" available", available);
#endif
	/* range is hugepage-only iff it does NOT fit the available
	 * normal-psize slices */
	return !slice_check_fit(mask, available);
}
gpl-2.0
AndroPlus-org/kernel
drivers/video/console/font_mini_4x6.c
4977
55915
/* Hand composed "Minuscule" 4x6 font, with binary data generated using * Perl stub. * * Use 'perl -x mini_4x6.c < mini_4x6.c > new_version.c' to regenerate * binary data. * * Created by Kenneth Albanowski. * No rights reserved, released to the public domain. * * Version 1.0 */ /* #!/usr/bin/perl -pn s{((0x)?[0-9a-fA-F]+)(.*\[([\*\ ]{4})\])}{ ($num,$pat,$bits) = ($1,$3,$4); $bits =~ s/([^\s0])|(.)/ defined($1) + 0 /ge; $num = ord(pack("B8", $bits)); $num |= $num >> 4; $num = sprintf("0x%.2x", $num); #print "$num,$pat,$bits\n"; $num . $pat; }ge; __END__; */ /* Note: binary data consists of one byte for each row of each character top to bottom, character 0 to character 255, six bytes per character. Each byte contains the same four character bits in both nybbles. MSBit to LSBit = left to right. */ #include <linux/font.h> #define FONTDATAMAX 1536 static const unsigned char fontdata_mini_4x6[FONTDATAMAX] = { /*{*/ /* Char 0: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 1: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 2: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 3: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 4: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 5: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 6: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 7: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= 
[*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 8: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 9: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 10: '' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 11: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 12: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 13: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 14: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 15: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 16: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 17: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 18: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 19: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 20: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, 
/*= [ ] */ /*}*/ /*{*/ /* Char 21: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 22: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 23: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 24: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 25: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 26: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 27: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 28: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 29: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 30: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 31: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 32: ' ' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 33: '!' 
*/ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 34: '"' */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 35: '#' */ 0xaa, /*= [* * ] */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 36: '$' */ 0x44, /*= [ * ] */ 0x66, /*= [ ** ] */ 0xee, /*= [*** ] */ 0xcc, /*= [** ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 37: '%' */ 0xaa, /*= [* * ] */ 0x22, /*= [ * ] */ 0x44, /*= [ * ] */ 0x88, /*= [* ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 38: '&' */ 0x66, /*= [ ** ] */ 0x99, /*= [* *] */ 0x66, /*= [ ** ] */ 0xaa, /*= [* * ] */ 0xdd, /*= [** *] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 39: ''' */ 0x22, /*= [ * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 40: '(' */ 0x22, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x22, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 41: ')' */ 0x44, /*= [ * ] */ 0x22, /*= [ * ] */ 0x22, /*= [ * ] */ 0x22, /*= [ * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 42: '*' */ 0x00, /*= [ ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 43: '+' */ 0x00, /*= [ ] */ 0x44, /*= [ * ] */ 0xee, /*= [*** ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 44: ',' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x44, /*= [ * ] */ 0x88, /*= [* ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 45: '-' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 46: '.' 
*/ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 47: '/' */ 0x00, /*= [ ] */ 0x22, /*= [ * ] */ 0x44, /*= [ * ] */ 0x88, /*= [* ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 48: '0' */ 0x44, /*= [ * ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 49: '1' */ 0x44, /*= [ * ] */ 0xcc, /*= [** ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 50: '2' */ 0xcc, /*= [** ] */ 0x22, /*= [ * ] */ 0x44, /*= [ * ] */ 0x88, /*= [* ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 51: '3' */ 0xee, /*= [*** ] */ 0x22, /*= [ * ] */ 0x66, /*= [ ** ] */ 0x22, /*= [ * ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 52: '4' */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0x22, /*= [ * ] */ 0x22, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 53: '5' */ 0xee, /*= [*** ] */ 0x88, /*= [* ] */ 0xee, /*= [*** ] */ 0x22, /*= [ * ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 54: '6' */ 0xee, /*= [*** ] */ 0x88, /*= [* ] */ 0xee, /*= [*** ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 55: '7' */ 0xee, /*= [*** ] */ 0x22, /*= [ * ] */ 0x22, /*= [ * ] */ 0x22, /*= [ * ] */ 0x22, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 56: '8' */ 0xee, /*= [*** ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 57: '9' */ 0xee, /*= [*** ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0x22, /*= [ * ] */ 0x22, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 58: ':' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 59: ';' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ 0x44, /*= [ * ] */ 0x88, /*= [* ] */ /*}*/ /*{*/ /* Char 60: 
'<' */ 0x22, /*= [ * ] */ 0x44, /*= [ * ] */ 0x88, /*= [* ] */ 0x44, /*= [ * ] */ 0x22, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 61: '=' */ 0x00, /*= [ ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 62: '>' */ 0x88, /*= [* ] */ 0x44, /*= [ * ] */ 0x22, /*= [ * ] */ 0x44, /*= [ * ] */ 0x88, /*= [* ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 63: '?' */ 0xee, /*= [*** ] */ 0x22, /*= [ * ] */ 0x66, /*= [ ** ] */ 0x00, /*= [ ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 64: '@' */ 0x44, /*= [ * ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x88, /*= [* ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 65: 'A' */ 0x44, /*= [ * ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 66: 'B' */ 0xcc, /*= [** ] */ 0xaa, /*= [* * ] */ 0xcc, /*= [** ] */ 0xaa, /*= [* * ] */ 0xcc, /*= [** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 67: 'C' */ 0x66, /*= [ ** ] */ 0x88, /*= [* ] */ 0x88, /*= [* ] */ 0x88, /*= [* ] */ 0x66, /*= [ ** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 68: 'D' */ 0xcc, /*= [** ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0xcc, /*= [** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 69: 'E' */ 0xee, /*= [*** ] */ 0x88, /*= [* ] */ 0xee, /*= [*** ] */ 0x88, /*= [* ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 70: 'F' */ 0xee, /*= [*** ] */ 0x88, /*= [* ] */ 0xee, /*= [*** ] */ 0x88, /*= [* ] */ 0x88, /*= [* ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 71: 'G' */ 0x66, /*= [ ** ] */ 0x88, /*= [* ] */ 0xee, /*= [*** ] */ 0xaa, /*= [* * ] */ 0x66, /*= [ ** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 72: 'H' */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 73: 'I' */ 0xee, /*= [*** ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ 
/*{*/ /* Char 74: 'J' */ 0x22, /*= [ * ] */ 0x22, /*= [ * ] */ 0x22, /*= [ * ] */ 0xaa, /*= [* * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 75: 'K' */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0xcc, /*= [** ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 76: 'L' */ 0x88, /*= [* ] */ 0x88, /*= [* ] */ 0x88, /*= [* ] */ 0x88, /*= [* ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 77: 'M' */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 78: 'N' */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 79: 'O' */ 0x44, /*= [ * ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 80: 'P' */ 0xcc, /*= [** ] */ 0xaa, /*= [* * ] */ 0xcc, /*= [** ] */ 0x88, /*= [* ] */ 0x88, /*= [* ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 81: 'Q' */ 0x44, /*= [ * ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0x66, /*= [ ** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 82: 'R' */ 0xcc, /*= [** ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0xcc, /*= [** ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 83: 'S' */ 0x66, /*= [ ** ] */ 0x88, /*= [* ] */ 0x44, /*= [ * ] */ 0x22, /*= [ * ] */ 0xcc, /*= [** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 84: 'T' */ 0xee, /*= [*** ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 85: 'U' */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x66, /*= [ ** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 86: 'V' */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 87: 'W' */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xaa, 
/*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 88: 'X' */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x44, /*= [ * ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 89: 'Y' */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 90: 'Z' */ 0xee, /*= [*** ] */ 0x22, /*= [ * ] */ 0x44, /*= [ * ] */ 0x88, /*= [* ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 91: '[' */ 0x66, /*= [ ** ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x66, /*= [ ** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 92: '\' */ 0x00, /*= [ ] */ 0x88, /*= [* ] */ 0x44, /*= [ * ] */ 0x22, /*= [ * ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 93: ']' */ 0x66, /*= [ ** ] */ 0x22, /*= [ * ] */ 0x22, /*= [ * ] */ 0x22, /*= [ * ] */ 0x66, /*= [ ** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 94: '^' */ 0x44, /*= [ * ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 95: '_' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xff, /*= [****] */ /*}*/ /*{*/ /* Char 96: '`' */ 0x88, /*= [* ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 97: 'a' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x66, /*= [ ** ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 98: 'b' */ 0x88, /*= [* ] */ 0x88, /*= [* ] */ 0xcc, /*= [** ] */ 0xaa, /*= [* * ] */ 0xcc, /*= [** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 99: 'c' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x66, /*= [ ** ] */ 0x88, /*= [* ] */ 0x66, /*= [ ** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 100: 'd' */ 0x22, /*= [ * ] */ 0x22, /*= [ * ] */ 0x66, /*= [ ** ] */ 0xaa, /*= [* * ] */ 0x66, /*= [ ** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 101: 'e' */ 0x00, /*= [ ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x88, /*= [* ] */ 0x66, /*= [ ** ] 
*/ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 102: 'f' */ 0x22, /*= [ * ] */ 0x44, /*= [ * ] */ 0xee, /*= [*** ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 103: 'g' */ 0x00, /*= [ ] */ 0x66, /*= [ ** ] */ 0xaa, /*= [* * ] */ 0x66, /*= [ ** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 104: 'h' */ 0x88, /*= [* ] */ 0x88, /*= [* ] */ 0xcc, /*= [** ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 105: 'i' */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 106: 'j' */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x88, /*= [* ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 107: 'k' */ 0x00, /*= [ ] */ 0x88, /*= [* ] */ 0xaa, /*= [* * ] */ 0xcc, /*= [** ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 108: 'l' */ 0x00, /*= [ ] */ 0xcc, /*= [** ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 109: 'm' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 110: 'n' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xcc, /*= [** ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 111: 'o' */ 0x00, /*= [ ] */ 0x44, /*= [ * ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 112: 'p' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xcc, /*= [** ] */ 0xaa, /*= [* * ] */ 0xcc, /*= [** ] */ 0x88, /*= [* ] */ /*}*/ /*{*/ /* Char 113: 'q' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x66, /*= [ ** ] */ 0xaa, /*= [* * ] */ 0x66, /*= [ ** ] */ 0x22, /*= [ * ] */ /*}*/ /*{*/ /* Char 114: 'r' */ 0x00, /*= [ ] */ 0xcc, /*= [** ] */ 0xaa, /*= [* * ] */ 0x88, /*= [* ] */ 0x88, /*= [* ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 115: 's' */ 0x00, /*= [ ] */ 0x66, /*= [ ** ] */ 0xcc, /*= [** ] */ 0x22, /*= [ * ] */ 0xcc, /*= 
[** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 116: 't' */ 0x00, /*= [ ] */ 0x44, /*= [ * ] */ 0xee, /*= [*** ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 117: 'u' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x66, /*= [ ** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 118: 'v' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 119: 'w' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 120: 'x' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xaa, /*= [* * ] */ 0x44, /*= [ * ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 121: 'y' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0x22, /*= [ * ] */ 0xcc, /*= [** ] */ /*}*/ /*{*/ /* Char 122: 'z' */ 0x00, /*= [ ] */ 0xee, /*= [*** ] */ 0x66, /*= [ ** ] */ 0xcc, /*= [** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 123: '{' */ 0x22, /*= [ * ] */ 0x44, /*= [ * ] */ 0xcc, /*= [** ] */ 0x44, /*= [ * ] */ 0x22, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 124: '|' */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 125: '}' */ 0x88, /*= [* ] */ 0x44, /*= [ * ] */ 0x66, /*= [ ** ] */ 0x44, /*= [ * ] */ 0x88, /*= [* ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 126: '~' */ 0x55, /*= [ * *] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 127: '' */ 0x44, /*= [ * ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 128: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 129: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 
0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 130: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 131: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 132: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 133: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 134: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 135: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 136: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 137: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 138: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 139: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 140: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 141: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 142: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 143: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= 
[*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 144: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 145: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 146: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 147: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 148: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 149: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 150: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 151: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 152: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 153: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 154: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 155: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 156: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 157: */ 0xee, /*= [*** ] */ 
0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 158: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 159: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 160: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 161: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 162: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 163: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 164: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 165: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 166: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 167: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 168: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 169: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 170: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* 
Char 171: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 172: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 173: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 174: */ 0x00, /*= [ ] */ 0x66, /*= [ ** ] */ 0xcc, /*= [** ] */ 0x66, /*= [ ** ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 175: */ 0x00, /*= [ ] */ 0xcc, /*= [** ] */ 0x66, /*= [ ** ] */ 0xcc, /*= [** ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 176: */ 0x88, /*= [* ] */ 0x22, /*= [ * ] */ 0x88, /*= [* ] */ 0x22, /*= [ * ] */ 0x88, /*= [* ] */ 0x22, /*= [ * ] */ /*}*/ /*{*/ /* Char 177: */ 0xaa, /*= [* * ] */ 0x55, /*= [ * *] */ 0xaa, /*= [* * ] */ 0x55, /*= [ * *] */ 0xaa, /*= [* * ] */ 0x55, /*= [ * *] */ /*}*/ /*{*/ /* Char 178: */ 0xdd, /*= [** *] */ 0xbb, /*= [* **] */ 0xdd, /*= [** *] */ 0xbb, /*= [* **] */ 0xdd, /*= [** *] */ 0xbb, /*= [* **] */ /*}*/ /*{*/ /* Char 179: */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ /*}*/ /*{*/ /* Char 180: */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0xcc, /*= [** ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ /*}*/ /*{*/ /* Char 181: */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0xcc, /*= [** ] */ 0xcc, /*= [** ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ /*}*/ /*{*/ /* Char 182: */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0xee, /*= [*** ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ /*}*/ /*{*/ /* Char 183: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xee, /*= [*** ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ /*}*/ /*{*/ /* Char 184: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xcc, /*= [** ] */ 0xcc, /*= [** ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ /*}*/ /*{*/ /* 
Char 185: */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ /*}*/ /*{*/ /* Char 186: */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ /*}*/ /*{*/ /* Char 187: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ /*}*/ /*{*/ /* Char 188: */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 189: */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 190: */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0xcc, /*= [** ] */ 0xcc, /*= [** ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 191: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xcc, /*= [** ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ /*}*/ /*{*/ /* Char 192: */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x77, /*= [ ***] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 193: */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0xff, /*= [****] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 194: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xff, /*= [****] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ /*}*/ /*{*/ /* Char 195: */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x77, /*= [ ***] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ /*}*/ /*{*/ /* Char 196: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xff, /*= [****] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 197: */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0xff, /*= [****] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ /*}*/ /*{*/ /* Char 198: */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x77, /*= [ ***] */ 0x77, /*= [ ***] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ /*}*/ /*{*/ /* Char 199: */ 0x66, /*= [ ** 
] */ 0x66, /*= [ ** ] */ 0x77, /*= [ ***] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ /*}*/ /*{*/ /* Char 200: */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0x77, /*= [ ***] */ 0x77, /*= [ ***] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 201: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x77, /*= [ ***] */ 0x77, /*= [ ***] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ /*}*/ /*{*/ /* Char 202: */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 203: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ /*}*/ /*{*/ /* Char 204: */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0x77, /*= [ ***] */ 0x77, /*= [ ***] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ /*}*/ /*{*/ /* Char 205: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 206: */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ /*}*/ /*{*/ /* Char 207: */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 208: */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0xff, /*= [****] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 209: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ /*}*/ /*{*/ /* Char 210: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xff, /*= [****] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ /*}*/ /*{*/ /* Char 211: */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0x77, /*= [ ***] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 212: */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x77, /*= [ ***] */ 0x77, /*= [ ***] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 213: */ 0x00, /*= [ ] */ 0x00, /*= 
[ ] */ 0x77, /*= [ ***] */ 0x77, /*= [ ***] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ /*}*/ /*{*/ /* Char 214: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x77, /*= [ ***] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ /*}*/ /*{*/ /* Char 215: */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0xff, /*= [****] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ /*}*/ /*{*/ /* Char 216: */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ /*}*/ /*{*/ /* Char 217: */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0xcc, /*= [** ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 218: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x77, /*= [ ***] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ /*}*/ /*{*/ /* Char 219: */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0xff, /*= [****] */ /*}*/ /*{*/ /* Char 220: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0xff, /*= [****] */ /*}*/ /*{*/ /* Char 221: */ 0xcc, /*= [** ] */ 0xcc, /*= [** ] */ 0xcc, /*= [** ] */ 0xcc, /*= [** ] */ 0xcc, /*= [** ] */ 0xcc, /*= [** ] */ /*}*/ /*{*/ /* Char 222: */ 0x33, /*= [ **] */ 0x33, /*= [ **] */ 0x33, /*= [ **] */ 0x33, /*= [ **] */ 0x33, /*= [ **] */ 0x33, /*= [ **] */ /*}*/ /*{*/ /* Char 223: */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 224: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 225: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 226: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 227: */ 0xee, /*= [*** ] */ 0xee, /*= 
[*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 228: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 229: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 230: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 231: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 232: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 233: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 234: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 235: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 236: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 237: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 238: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 239: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 240: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 241: */ 
0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 242: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 243: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 244: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 245: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 246: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 247: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 248: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 249: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 250: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 251: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 252: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 253: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 254: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ 
/*{*/ /* Char 255: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ }; const struct font_desc font_mini_4x6 = { .idx = MINI4x6_IDX, .name = "MINI4x6", .width = 4, .height = 6, .data = fontdata_mini_4x6, .pref = 3, };
gpl-2.0
Split-Screen/android_kernel_lge_gee
drivers/atm/zatm.c
4977
44403
/* drivers/atm/zatm.c - ZeitNet ZN122x device driver */ /* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/pci.h> #include <linux/errno.h> #include <linux/atm.h> #include <linux/atmdev.h> #include <linux/sonet.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/uio.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/atm_zatm.h> #include <linux/capability.h> #include <linux/bitops.h> #include <linux/wait.h> #include <linux/slab.h> #include <asm/byteorder.h> #include <asm/string.h> #include <asm/io.h> #include <linux/atomic.h> #include <asm/uaccess.h> #include "uPD98401.h" #include "uPD98402.h" #include "zeprom.h" #include "zatm.h" /* * TODO: * * Minor features * - support 64 kB SDUs (will have to use multibuffer batches then :-( ) * - proper use of CDV, credit = max(1,CDVT*PCR) * - AAL0 * - better receive timestamps * - OAM */ #define ZATM_COPPER 1 #if 0 #define DPRINTK(format,args...) printk(KERN_DEBUG format,##args) #else #define DPRINTK(format,args...) #endif #ifndef CONFIG_ATM_ZATM_DEBUG #define NULLCHECK(x) #define EVENT(s,a,b) static void event_dump(void) { } #else /* * NULL pointer checking */ #define NULLCHECK(x) \ if ((unsigned long) (x) < 0x30) printk(KERN_CRIT #x "==0x%x\n", (int) (x)) /* * Very extensive activity logging. Greatly improves bug detection speed but * costs a few Mbps if enabled. */ #define EV 64 static const char *ev[EV]; static unsigned long ev_a[EV],ev_b[EV]; static int ec = 0; static void EVENT(const char *s,unsigned long a,unsigned long b) { ev[ec] = s; ev_a[ec] = a; ev_b[ec] = b; ec = (ec+1) % EV; } static void event_dump(void) { int n,i; printk(KERN_NOTICE "----- event dump follows -----\n"); for (n = 0; n < EV; n++) { i = (ec+n) % EV; printk(KERN_NOTICE); printk(ev[i] ? 
ev[i] : "(null)",ev_a[i],ev_b[i]); } printk(KERN_NOTICE "----- event dump ends here -----\n"); } #endif /* CONFIG_ATM_ZATM_DEBUG */ #define RING_BUSY 1 /* indication from do_tx that PDU has to be backlogged */ static struct atm_dev *zatm_boards = NULL; static unsigned long dummy[2] = {0,0}; #define zin_n(r) inl(zatm_dev->base+r*4) #define zin(r) inl(zatm_dev->base+uPD98401_##r*4) #define zout(v,r) outl(v,zatm_dev->base+uPD98401_##r*4) #define zwait while (zin(CMR) & uPD98401_BUSY) /* RX0, RX1, TX0, TX1 */ static const int mbx_entries[NR_MBX] = { 1024,1024,1024,1024 }; static const int mbx_esize[NR_MBX] = { 16,16,4,4 }; /* entry size in bytes */ #define MBX_SIZE(i) (mbx_entries[i]*mbx_esize[i]) /*-------------------------------- utilities --------------------------------*/ static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr) { zwait; zout(value,CER); zout(uPD98401_IND_ACC | uPD98401_IA_BALL | (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR); } static u32 zpeekl(struct zatm_dev *zatm_dev,u32 addr) { zwait; zout(uPD98401_IND_ACC | uPD98401_IA_BALL | uPD98401_IA_RW | (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR); zwait; return zin(CER); } /*------------------------------- free lists --------------------------------*/ /* * Free buffer head structure: * [0] pointer to buffer (for SAR) * [1] buffer descr link pointer (for SAR) * [2] back pointer to skb (for poll_rx) * [3] data * ... */ struct rx_buffer_head { u32 buffer; /* pointer to buffer (for SAR) */ u32 link; /* buffer descriptor link pointer (for SAR) */ struct sk_buff *skb; /* back pointer to skb (for poll_rx) */ }; static void refill_pool(struct atm_dev *dev,int pool) { struct zatm_dev *zatm_dev; struct sk_buff *skb; struct rx_buffer_head *first; unsigned long flags; int align,offset,free,count,size; EVENT("refill_pool\n",0,0); zatm_dev = ZATM_DEV(dev); size = (64 << (pool <= ZATM_AAL5_POOL_BASE ? 
0 : pool-ZATM_AAL5_POOL_BASE))+sizeof(struct rx_buffer_head); if (size < PAGE_SIZE) { align = 32; /* for 32 byte alignment */ offset = sizeof(struct rx_buffer_head); } else { align = 4096; offset = zatm_dev->pool_info[pool].offset+ sizeof(struct rx_buffer_head); } size += align; spin_lock_irqsave(&zatm_dev->lock, flags); free = zpeekl(zatm_dev,zatm_dev->pool_base+2*pool) & uPD98401_RXFP_REMAIN; spin_unlock_irqrestore(&zatm_dev->lock, flags); if (free >= zatm_dev->pool_info[pool].low_water) return; EVENT("starting ... POOL: 0x%x, 0x%x\n", zpeekl(zatm_dev,zatm_dev->pool_base+2*pool), zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1)); EVENT("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); count = 0; first = NULL; while (free < zatm_dev->pool_info[pool].high_water) { struct rx_buffer_head *head; skb = alloc_skb(size,GFP_ATOMIC); if (!skb) { printk(KERN_WARNING DEV_LABEL "(Itf %d): got no new " "skb (%d) with %d free\n",dev->number,size,free); break; } skb_reserve(skb,(unsigned char *) ((((unsigned long) skb->data+ align+offset-1) & ~(unsigned long) (align-1))-offset)- skb->data); head = (struct rx_buffer_head *) skb->data; skb_reserve(skb,sizeof(struct rx_buffer_head)); if (!first) first = head; count++; head->buffer = virt_to_bus(skb->data); head->link = 0; head->skb = skb; EVENT("enq skb 0x%08lx/0x%08lx\n",(unsigned long) skb, (unsigned long) head); spin_lock_irqsave(&zatm_dev->lock, flags); if (zatm_dev->last_free[pool]) ((struct rx_buffer_head *) (zatm_dev->last_free[pool]-> data))[-1].link = virt_to_bus(head); zatm_dev->last_free[pool] = skb; skb_queue_tail(&zatm_dev->pool[pool],skb); spin_unlock_irqrestore(&zatm_dev->lock, flags); free++; } if (first) { spin_lock_irqsave(&zatm_dev->lock, flags); zwait; zout(virt_to_bus(first),CER); zout(uPD98401_ADD_BAT | (pool << uPD98401_POOL_SHIFT) | count, CMR); spin_unlock_irqrestore(&zatm_dev->lock, flags); EVENT ("POOL: 0x%x, 0x%x\n", zpeekl(zatm_dev,zatm_dev->pool_base+2*pool), zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1)); 
EVENT("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); } } static void drain_free(struct atm_dev *dev,int pool) { skb_queue_purge(&ZATM_DEV(dev)->pool[pool]); } static int pool_index(int max_pdu) { int i; if (max_pdu % ATM_CELL_PAYLOAD) printk(KERN_ERR DEV_LABEL ": driver error in pool_index: " "max_pdu is %d\n",max_pdu); if (max_pdu > 65536) return -1; for (i = 0; (64 << i) < max_pdu; i++); return i+ZATM_AAL5_POOL_BASE; } /* use_pool isn't reentrant */ static void use_pool(struct atm_dev *dev,int pool) { struct zatm_dev *zatm_dev; unsigned long flags; int size; zatm_dev = ZATM_DEV(dev); if (!(zatm_dev->pool_info[pool].ref_count++)) { skb_queue_head_init(&zatm_dev->pool[pool]); size = pool-ZATM_AAL5_POOL_BASE; if (size < 0) size = 0; /* 64B... */ else if (size > 10) size = 10; /* ... 64kB */ spin_lock_irqsave(&zatm_dev->lock, flags); zpokel(zatm_dev,((zatm_dev->pool_info[pool].low_water/4) << uPD98401_RXFP_ALERT_SHIFT) | (1 << uPD98401_RXFP_BTSZ_SHIFT) | (size << uPD98401_RXFP_BFSZ_SHIFT), zatm_dev->pool_base+pool*2); zpokel(zatm_dev,(unsigned long) dummy,zatm_dev->pool_base+ pool*2+1); spin_unlock_irqrestore(&zatm_dev->lock, flags); zatm_dev->last_free[pool] = NULL; refill_pool(dev,pool); } DPRINTK("pool %d: %d\n",pool,zatm_dev->pool_info[pool].ref_count); } static void unuse_pool(struct atm_dev *dev,int pool) { if (!(--ZATM_DEV(dev)->pool_info[pool].ref_count)) drain_free(dev,pool); } /*----------------------------------- RX ------------------------------------*/ #if 0 static void exception(struct atm_vcc *vcc) { static int count = 0; struct zatm_dev *zatm_dev = ZATM_DEV(vcc->dev); struct zatm_vcc *zatm_vcc = ZATM_VCC(vcc); unsigned long *qrp; int i; if (count++ > 2) return; for (i = 0; i < 8; i++) printk("TX%d: 0x%08lx\n",i, zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+i)); for (i = 0; i < 5; i++) printk("SH%d: 0x%08lx\n",i, zpeekl(zatm_dev,uPD98401_IM(zatm_vcc->shaper)+16*i)); qrp = (unsigned long *) zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+ 
uPD98401_TXVC_QRP); printk("qrp=0x%08lx\n",(unsigned long) qrp); for (i = 0; i < 4; i++) printk("QRP[%d]: 0x%08lx",i,qrp[i]); } #endif static const char *err_txt[] = { "No error", "RX buf underflow", "RX FIFO overrun", "Maximum len violation", "CRC error", "User abort", "Length violation", "T1 error", "Deactivated", "???", "???", "???", "???", "???", "???", "???" }; static void poll_rx(struct atm_dev *dev,int mbx) { struct zatm_dev *zatm_dev; unsigned long pos; u32 x; int error; EVENT("poll_rx\n",0,0); zatm_dev = ZATM_DEV(dev); pos = (zatm_dev->mbx_start[mbx] & ~0xffffUL) | zin(MTA(mbx)); while (x = zin(MWA(mbx)), (pos & 0xffff) != x) { u32 *here; struct sk_buff *skb; struct atm_vcc *vcc; int cells,size,chan; EVENT("MBX: host 0x%lx, nic 0x%x\n",pos,x); here = (u32 *) pos; if (((pos += 16) & 0xffff) == zatm_dev->mbx_end[mbx]) pos = zatm_dev->mbx_start[mbx]; cells = here[0] & uPD98401_AAL5_SIZE; #if 0 printk("RX IND: 0x%x, 0x%x, 0x%x, 0x%x\n",here[0],here[1],here[2],here[3]); { unsigned long *x; printk("POOL: 0x%08x, 0x%08x\n",zpeekl(zatm_dev, zatm_dev->pool_base), zpeekl(zatm_dev,zatm_dev->pool_base+1)); x = (unsigned long *) here[2]; printk("[0..3] = 0x%08lx, 0x%08lx, 0x%08lx, 0x%08lx\n", x[0],x[1],x[2],x[3]); } #endif error = 0; if (here[3] & uPD98401_AAL5_ERR) { error = (here[3] & uPD98401_AAL5_ES) >> uPD98401_AAL5_ES_SHIFT; if (error == uPD98401_AAL5_ES_DEACT || error == uPD98401_AAL5_ES_FREE) continue; } EVENT("error code 0x%x/0x%x\n",(here[3] & uPD98401_AAL5_ES) >> uPD98401_AAL5_ES_SHIFT,error); skb = ((struct rx_buffer_head *) bus_to_virt(here[2]))->skb; __net_timestamp(skb); #if 0 printk("[-3..0] 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",((unsigned *) skb->data)[-3], ((unsigned *) skb->data)[-2],((unsigned *) skb->data)[-1], ((unsigned *) skb->data)[0]); #endif EVENT("skb 0x%lx, here 0x%lx\n",(unsigned long) skb, (unsigned long) here); #if 0 printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); #endif size = error ? 
0 : ntohs(((__be16 *) skb->data)[cells* ATM_CELL_PAYLOAD/sizeof(u16)-3]); EVENT("got skb 0x%lx, size %d\n",(unsigned long) skb,size); chan = (here[3] & uPD98401_AAL5_CHAN) >> uPD98401_AAL5_CHAN_SHIFT; if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) { int pos; vcc = zatm_dev->rx_map[chan]; pos = ZATM_VCC(vcc)->pool; if (skb == zatm_dev->last_free[pos]) zatm_dev->last_free[pos] = NULL; skb_unlink(skb, zatm_dev->pool + pos); } else { printk(KERN_ERR DEV_LABEL "(itf %d): RX indication " "for non-existing channel\n",dev->number); size = 0; vcc = NULL; event_dump(); } if (error) { static unsigned long silence = 0; static int last_error = 0; if (error != last_error || time_after(jiffies, silence) || silence == 0){ printk(KERN_WARNING DEV_LABEL "(itf %d): " "chan %d error %s\n",dev->number,chan, err_txt[error]); last_error = error; silence = (jiffies+2*HZ)|1; } size = 0; } if (size && (size > cells*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER || size <= (cells-1)*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER)) { printk(KERN_ERR DEV_LABEL "(itf %d): size %d with %d " "cells\n",dev->number,size,cells); size = 0; event_dump(); } if (size > ATM_MAX_AAL5_PDU) { printk(KERN_ERR DEV_LABEL "(itf %d): size too big " "(%d)\n",dev->number,size); size = 0; event_dump(); } if (!size) { dev_kfree_skb_irq(skb); if (vcc) atomic_inc(&vcc->stats->rx_err); continue; } if (!atm_charge(vcc,skb->truesize)) { dev_kfree_skb_irq(skb); continue; } skb->len = size; ATM_SKB(skb)->vcc = vcc; vcc->push(vcc,skb); atomic_inc(&vcc->stats->rx); } zout(pos & 0xffff,MTA(mbx)); #if 0 /* probably a stupid idea */ refill_pool(dev,zatm_vcc->pool); /* maybe this saves us a few interrupts */ #endif } static int open_rx_first(struct atm_vcc *vcc) { struct zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; unsigned long flags; unsigned short chan; int cells; DPRINTK("open_rx_first (0x%x)\n",inb_p(0xc053)); zatm_dev = ZATM_DEV(vcc->dev); zatm_vcc = ZATM_VCC(vcc); zatm_vcc->rx_chan = 0; if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 
0; if (vcc->qos.aal == ATM_AAL5) { if (vcc->qos.rxtp.max_sdu > 65464) vcc->qos.rxtp.max_sdu = 65464; /* fix this - we may want to receive 64kB SDUs later */ cells = DIV_ROUND_UP(vcc->qos.rxtp.max_sdu + ATM_AAL5_TRAILER, ATM_CELL_PAYLOAD); zatm_vcc->pool = pool_index(cells*ATM_CELL_PAYLOAD); } else { cells = 1; zatm_vcc->pool = ZATM_AAL0_POOL; } if (zatm_vcc->pool < 0) return -EMSGSIZE; spin_lock_irqsave(&zatm_dev->lock, flags); zwait; zout(uPD98401_OPEN_CHAN,CMR); zwait; DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER)); chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT; spin_unlock_irqrestore(&zatm_dev->lock, flags); DPRINTK("chan is %d\n",chan); if (!chan) return -EAGAIN; use_pool(vcc->dev,zatm_vcc->pool); DPRINTK("pool %d\n",zatm_vcc->pool); /* set up VC descriptor */ spin_lock_irqsave(&zatm_dev->lock, flags); zpokel(zatm_dev,zatm_vcc->pool << uPD98401_RXVC_POOL_SHIFT, chan*VC_SIZE/4); zpokel(zatm_dev,uPD98401_RXVC_OD | (vcc->qos.aal == ATM_AAL5 ? uPD98401_RXVC_AR : 0) | cells,chan*VC_SIZE/4+1); zpokel(zatm_dev,0,chan*VC_SIZE/4+2); zatm_vcc->rx_chan = chan; zatm_dev->rx_map[chan] = vcc; spin_unlock_irqrestore(&zatm_dev->lock, flags); return 0; } static int open_rx_second(struct atm_vcc *vcc) { struct zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; unsigned long flags; int pos,shift; DPRINTK("open_rx_second (0x%x)\n",inb_p(0xc053)); zatm_dev = ZATM_DEV(vcc->dev); zatm_vcc = ZATM_VCC(vcc); if (!zatm_vcc->rx_chan) return 0; spin_lock_irqsave(&zatm_dev->lock, flags); /* should also handle VPI @@@ */ pos = vcc->vci >> 1; shift = (1-(vcc->vci & 1)) << 4; zpokel(zatm_dev,(zpeekl(zatm_dev,pos) & ~(0xffff << shift)) | ((zatm_vcc->rx_chan | uPD98401_RXLT_ENBL) << shift),pos); spin_unlock_irqrestore(&zatm_dev->lock, flags); return 0; } static void close_rx(struct atm_vcc *vcc) { struct zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; unsigned long flags; int pos,shift; zatm_vcc = ZATM_VCC(vcc); zatm_dev = ZATM_DEV(vcc->dev); if (!zatm_vcc->rx_chan) return; 
DPRINTK("close_rx\n"); /* disable receiver */ if (vcc->vpi != ATM_VPI_UNSPEC && vcc->vci != ATM_VCI_UNSPEC) { spin_lock_irqsave(&zatm_dev->lock, flags); pos = vcc->vci >> 1; shift = (1-(vcc->vci & 1)) << 4; zpokel(zatm_dev,zpeekl(zatm_dev,pos) & ~(0xffff << shift),pos); zwait; zout(uPD98401_NOP,CMR); zwait; zout(uPD98401_NOP,CMR); spin_unlock_irqrestore(&zatm_dev->lock, flags); } spin_lock_irqsave(&zatm_dev->lock, flags); zwait; zout(uPD98401_DEACT_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan << uPD98401_CHAN_ADDR_SHIFT),CMR); zwait; udelay(10); /* why oh why ... ? */ zout(uPD98401_CLOSE_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan << uPD98401_CHAN_ADDR_SHIFT),CMR); zwait; if (!(zin(CMR) & uPD98401_CHAN_ADDR)) printk(KERN_CRIT DEV_LABEL "(itf %d): can't close RX channel " "%d\n",vcc->dev->number,zatm_vcc->rx_chan); spin_unlock_irqrestore(&zatm_dev->lock, flags); zatm_dev->rx_map[zatm_vcc->rx_chan] = NULL; zatm_vcc->rx_chan = 0; unuse_pool(vcc->dev,zatm_vcc->pool); } static int start_rx(struct atm_dev *dev) { struct zatm_dev *zatm_dev; int size,i; DPRINTK("start_rx\n"); zatm_dev = ZATM_DEV(dev); size = sizeof(struct atm_vcc *)*zatm_dev->chans; zatm_dev->rx_map = kzalloc(size,GFP_KERNEL); if (!zatm_dev->rx_map) return -ENOMEM; /* set VPI/VCI split (use all VCIs and give what's left to VPIs) */ zpokel(zatm_dev,(1 << dev->ci_range.vci_bits)-1,uPD98401_VRR); /* prepare free buffer pools */ for (i = 0; i <= ZATM_LAST_POOL; i++) { zatm_dev->pool_info[i].ref_count = 0; zatm_dev->pool_info[i].rqa_count = 0; zatm_dev->pool_info[i].rqu_count = 0; zatm_dev->pool_info[i].low_water = LOW_MARK; zatm_dev->pool_info[i].high_water = HIGH_MARK; zatm_dev->pool_info[i].offset = 0; zatm_dev->pool_info[i].next_off = 0; zatm_dev->pool_info[i].next_cnt = 0; zatm_dev->pool_info[i].next_thres = OFF_CNG_THRES; } return 0; } /*----------------------------------- TX ------------------------------------*/ static int do_tx(struct sk_buff *skb) { struct atm_vcc *vcc; struct zatm_dev *zatm_dev; 
struct zatm_vcc *zatm_vcc; u32 *dsc; unsigned long flags; EVENT("do_tx\n",0,0); DPRINTK("sending skb %p\n",skb); vcc = ATM_SKB(skb)->vcc; zatm_dev = ZATM_DEV(vcc->dev); zatm_vcc = ZATM_VCC(vcc); EVENT("iovcnt=%d\n",skb_shinfo(skb)->nr_frags,0); spin_lock_irqsave(&zatm_dev->lock, flags); if (!skb_shinfo(skb)->nr_frags) { if (zatm_vcc->txing == RING_ENTRIES-1) { spin_unlock_irqrestore(&zatm_dev->lock, flags); return RING_BUSY; } zatm_vcc->txing++; dsc = zatm_vcc->ring+zatm_vcc->ring_curr; zatm_vcc->ring_curr = (zatm_vcc->ring_curr+RING_WORDS) & (RING_ENTRIES*RING_WORDS-1); dsc[1] = 0; dsc[2] = skb->len; dsc[3] = virt_to_bus(skb->data); mb(); dsc[0] = uPD98401_TXPD_V | uPD98401_TXPD_DP | uPD98401_TXPD_SM | (vcc->qos.aal == ATM_AAL5 ? uPD98401_TXPD_AAL5 : 0 | (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? uPD98401_CLPM_1 : uPD98401_CLPM_0)); EVENT("dsc (0x%lx)\n",(unsigned long) dsc,0); } else { printk("NONONONOO!!!!\n"); dsc = NULL; #if 0 u32 *put; int i; dsc = kmalloc(uPD98401_TXPD_SIZE * 2 + uPD98401_TXBD_SIZE * ATM_SKB(skb)->iovcnt, GFP_ATOMIC); if (!dsc) { if (vcc->pop) vcc->pop(vcc, skb); else dev_kfree_skb_irq(skb); return -EAGAIN; } /* @@@ should check alignment */ put = dsc+8; dsc[0] = uPD98401_TXPD_V | uPD98401_TXPD_DP | (vcc->aal == ATM_AAL5 ? uPD98401_TXPD_AAL5 : 0 | (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? 
uPD98401_CLPM_1 : uPD98401_CLPM_0)); dsc[1] = 0; dsc[2] = ATM_SKB(skb)->iovcnt * uPD98401_TXBD_SIZE; dsc[3] = virt_to_bus(put); for (i = 0; i < ATM_SKB(skb)->iovcnt; i++) { *put++ = ((struct iovec *) skb->data)[i].iov_len; *put++ = virt_to_bus(((struct iovec *) skb->data)[i].iov_base); } put[-2] |= uPD98401_TXBD_LAST; #endif } ZATM_PRV_DSC(skb) = dsc; skb_queue_tail(&zatm_vcc->tx_queue,skb); DPRINTK("QRP=0x%08lx\n",zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+ uPD98401_TXVC_QRP)); zwait; zout(uPD98401_TX_READY | (zatm_vcc->tx_chan << uPD98401_CHAN_ADDR_SHIFT),CMR); spin_unlock_irqrestore(&zatm_dev->lock, flags); EVENT("done\n",0,0); return 0; } static inline void dequeue_tx(struct atm_vcc *vcc) { struct zatm_vcc *zatm_vcc; struct sk_buff *skb; EVENT("dequeue_tx\n",0,0); zatm_vcc = ZATM_VCC(vcc); skb = skb_dequeue(&zatm_vcc->tx_queue); if (!skb) { printk(KERN_CRIT DEV_LABEL "(itf %d): dequeue_tx but not " "txing\n",vcc->dev->number); return; } #if 0 /* @@@ would fail on CLP */ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP | uPD98401_TXPD_SM | uPD98401_TXPD_AAL5)) printk("@#*$!!!! 
(%08x)\n", *ZATM_PRV_DSC(skb)); #endif *ZATM_PRV_DSC(skb) = 0; /* mark as invalid */ zatm_vcc->txing--; if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb_irq(skb); while ((skb = skb_dequeue(&zatm_vcc->backlog))) if (do_tx(skb) == RING_BUSY) { skb_queue_head(&zatm_vcc->backlog,skb); break; } atomic_inc(&vcc->stats->tx); wake_up(&zatm_vcc->tx_wait); } static void poll_tx(struct atm_dev *dev,int mbx) { struct zatm_dev *zatm_dev; unsigned long pos; u32 x; EVENT("poll_tx\n",0,0); zatm_dev = ZATM_DEV(dev); pos = (zatm_dev->mbx_start[mbx] & ~0xffffUL) | zin(MTA(mbx)); while (x = zin(MWA(mbx)), (pos & 0xffff) != x) { int chan; #if 1 u32 data,*addr; EVENT("MBX: host 0x%lx, nic 0x%x\n",pos,x); addr = (u32 *) pos; data = *addr; chan = (data & uPD98401_TXI_CONN) >> uPD98401_TXI_CONN_SHIFT; EVENT("addr = 0x%lx, data = 0x%08x,",(unsigned long) addr, data); EVENT("chan = %d\n",chan,0); #else NO ! chan = (zatm_dev->mbx_start[mbx][pos >> 2] & uPD98401_TXI_CONN) >> uPD98401_TXI_CONN_SHIFT; #endif if (chan < zatm_dev->chans && zatm_dev->tx_map[chan]) dequeue_tx(zatm_dev->tx_map[chan]); else { printk(KERN_CRIT DEV_LABEL "(itf %d): TX indication " "for non-existing channel %d\n",dev->number,chan); event_dump(); } if (((pos += 4) & 0xffff) == zatm_dev->mbx_end[mbx]) pos = zatm_dev->mbx_start[mbx]; } zout(pos & 0xffff,MTA(mbx)); } /* * BUG BUG BUG: Doesn't handle "new-style" rate specification yet. 
*/ static int alloc_shaper(struct atm_dev *dev,int *pcr,int min,int max,int ubr) { struct zatm_dev *zatm_dev; unsigned long flags; unsigned long i,m,c; int shaper; DPRINTK("alloc_shaper (min = %d, max = %d)\n",min,max); zatm_dev = ZATM_DEV(dev); if (!zatm_dev->free_shapers) return -EAGAIN; for (shaper = 0; !((zatm_dev->free_shapers >> shaper) & 1); shaper++); zatm_dev->free_shapers &= ~1 << shaper; if (ubr) { c = 5; i = m = 1; zatm_dev->ubr_ref_cnt++; zatm_dev->ubr = shaper; *pcr = 0; } else { if (min) { if (min <= 255) { i = min; m = ATM_OC3_PCR; } else { i = 255; m = ATM_OC3_PCR*255/min; } } else { if (max > zatm_dev->tx_bw) max = zatm_dev->tx_bw; if (max <= 255) { i = max; m = ATM_OC3_PCR; } else { i = 255; m = DIV_ROUND_UP(ATM_OC3_PCR*255, max); } } if (i > m) { printk(KERN_CRIT DEV_LABEL "shaper algorithm botched " "[%d,%d] -> i=%ld,m=%ld\n",min,max,i,m); m = i; } *pcr = i*ATM_OC3_PCR/m; c = 20; /* @@@ should use max_cdv ! */ if ((min && *pcr < min) || (max && *pcr > max)) return -EINVAL; if (zatm_dev->tx_bw < *pcr) return -EAGAIN; zatm_dev->tx_bw -= *pcr; } spin_lock_irqsave(&zatm_dev->lock, flags); DPRINTK("i = %d, m = %d, PCR = %d\n",i,m,*pcr); zpokel(zatm_dev,(i << uPD98401_IM_I_SHIFT) | m,uPD98401_IM(shaper)); zpokel(zatm_dev,c << uPD98401_PC_C_SHIFT,uPD98401_PC(shaper)); zpokel(zatm_dev,0,uPD98401_X(shaper)); zpokel(zatm_dev,0,uPD98401_Y(shaper)); zpokel(zatm_dev,uPD98401_PS_E,uPD98401_PS(shaper)); spin_unlock_irqrestore(&zatm_dev->lock, flags); return shaper; } static void dealloc_shaper(struct atm_dev *dev,int shaper) { struct zatm_dev *zatm_dev; unsigned long flags; zatm_dev = ZATM_DEV(dev); if (shaper == zatm_dev->ubr) { if (--zatm_dev->ubr_ref_cnt) return; zatm_dev->ubr = -1; } spin_lock_irqsave(&zatm_dev->lock, flags); zpokel(zatm_dev,zpeekl(zatm_dev,uPD98401_PS(shaper)) & ~uPD98401_PS_E, uPD98401_PS(shaper)); spin_unlock_irqrestore(&zatm_dev->lock, flags); zatm_dev->free_shapers |= 1 << shaper; } static void close_tx(struct atm_vcc *vcc) { struct 
zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; unsigned long flags; int chan; zatm_vcc = ZATM_VCC(vcc); zatm_dev = ZATM_DEV(vcc->dev); chan = zatm_vcc->tx_chan; if (!chan) return; DPRINTK("close_tx\n"); if (skb_peek(&zatm_vcc->backlog)) { printk("waiting for backlog to drain ...\n"); event_dump(); wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->backlog)); } if (skb_peek(&zatm_vcc->tx_queue)) { printk("waiting for TX queue to drain ...\n"); event_dump(); wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->tx_queue)); } spin_lock_irqsave(&zatm_dev->lock, flags); #if 0 zwait; zout(uPD98401_DEACT_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR); #endif zwait; zout(uPD98401_CLOSE_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR); zwait; if (!(zin(CMR) & uPD98401_CHAN_ADDR)) printk(KERN_CRIT DEV_LABEL "(itf %d): can't close TX channel " "%d\n",vcc->dev->number,chan); spin_unlock_irqrestore(&zatm_dev->lock, flags); zatm_vcc->tx_chan = 0; zatm_dev->tx_map[chan] = NULL; if (zatm_vcc->shaper != zatm_dev->ubr) { zatm_dev->tx_bw += vcc->qos.txtp.min_pcr; dealloc_shaper(vcc->dev,zatm_vcc->shaper); } kfree(zatm_vcc->ring); } static int open_tx_first(struct atm_vcc *vcc) { struct zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; unsigned long flags; u32 *loop; unsigned short chan; int unlimited; DPRINTK("open_tx_first\n"); zatm_dev = ZATM_DEV(vcc->dev); zatm_vcc = ZATM_VCC(vcc); zatm_vcc->tx_chan = 0; if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0; spin_lock_irqsave(&zatm_dev->lock, flags); zwait; zout(uPD98401_OPEN_CHAN,CMR); zwait; DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER)); chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT; spin_unlock_irqrestore(&zatm_dev->lock, flags); DPRINTK("chan is %d\n",chan); if (!chan) return -EAGAIN; unlimited = vcc->qos.txtp.traffic_class == ATM_UBR && (!vcc->qos.txtp.max_pcr || vcc->qos.txtp.max_pcr == ATM_MAX_PCR || vcc->qos.txtp.max_pcr >= ATM_OC3_PCR); if (unlimited && zatm_dev->ubr != -1) zatm_vcc->shaper = zatm_dev->ubr; else { 
int uninitialized_var(pcr); if (unlimited) vcc->qos.txtp.max_sdu = ATM_MAX_AAL5_PDU; if ((zatm_vcc->shaper = alloc_shaper(vcc->dev,&pcr, vcc->qos.txtp.min_pcr,vcc->qos.txtp.max_pcr,unlimited)) < 0) { close_tx(vcc); return zatm_vcc->shaper; } if (pcr > ATM_OC3_PCR) pcr = ATM_OC3_PCR; vcc->qos.txtp.min_pcr = vcc->qos.txtp.max_pcr = pcr; } zatm_vcc->tx_chan = chan; skb_queue_head_init(&zatm_vcc->tx_queue); init_waitqueue_head(&zatm_vcc->tx_wait); /* initialize ring */ zatm_vcc->ring = kzalloc(RING_SIZE,GFP_KERNEL); if (!zatm_vcc->ring) return -ENOMEM; loop = zatm_vcc->ring+RING_ENTRIES*RING_WORDS; loop[0] = uPD98401_TXPD_V; loop[1] = loop[2] = 0; loop[3] = virt_to_bus(zatm_vcc->ring); zatm_vcc->ring_curr = 0; zatm_vcc->txing = 0; skb_queue_head_init(&zatm_vcc->backlog); zpokel(zatm_dev,virt_to_bus(zatm_vcc->ring), chan*VC_SIZE/4+uPD98401_TXVC_QRP); return 0; } static int open_tx_second(struct atm_vcc *vcc) { struct zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; unsigned long flags; DPRINTK("open_tx_second\n"); zatm_dev = ZATM_DEV(vcc->dev); zatm_vcc = ZATM_VCC(vcc); if (!zatm_vcc->tx_chan) return 0; /* set up VC descriptor */ spin_lock_irqsave(&zatm_dev->lock, flags); zpokel(zatm_dev,0,zatm_vcc->tx_chan*VC_SIZE/4); zpokel(zatm_dev,uPD98401_TXVC_L | (zatm_vcc->shaper << uPD98401_TXVC_SHP_SHIFT) | (vcc->vpi << uPD98401_TXVC_VPI_SHIFT) | vcc->vci,zatm_vcc->tx_chan*VC_SIZE/4+1); zpokel(zatm_dev,0,zatm_vcc->tx_chan*VC_SIZE/4+2); spin_unlock_irqrestore(&zatm_dev->lock, flags); zatm_dev->tx_map[zatm_vcc->tx_chan] = vcc; return 0; } static int start_tx(struct atm_dev *dev) { struct zatm_dev *zatm_dev; int i; DPRINTK("start_tx\n"); zatm_dev = ZATM_DEV(dev); zatm_dev->tx_map = kmalloc(sizeof(struct atm_vcc *)* zatm_dev->chans,GFP_KERNEL); if (!zatm_dev->tx_map) return -ENOMEM; zatm_dev->tx_bw = ATM_OC3_PCR; zatm_dev->free_shapers = (1 << NR_SHAPERS)-1; zatm_dev->ubr = -1; zatm_dev->ubr_ref_cnt = 0; /* initialize shapers */ for (i = 0; i < NR_SHAPERS; i++) 
zpokel(zatm_dev,0,uPD98401_PS(i)); return 0; } /*------------------------------- interrupts --------------------------------*/ static irqreturn_t zatm_int(int irq,void *dev_id) { struct atm_dev *dev; struct zatm_dev *zatm_dev; u32 reason; int handled = 0; dev = dev_id; zatm_dev = ZATM_DEV(dev); while ((reason = zin(GSR))) { handled = 1; EVENT("reason 0x%x\n",reason,0); if (reason & uPD98401_INT_PI) { EVENT("PHY int\n",0,0); dev->phy->interrupt(dev); } if (reason & uPD98401_INT_RQA) { unsigned long pools; int i; pools = zin(RQA); EVENT("RQA (0x%08x)\n",pools,0); for (i = 0; pools; i++) { if (pools & 1) { refill_pool(dev,i); zatm_dev->pool_info[i].rqa_count++; } pools >>= 1; } } if (reason & uPD98401_INT_RQU) { unsigned long pools; int i; pools = zin(RQU); printk(KERN_WARNING DEV_LABEL "(itf %d): RQU 0x%08lx\n", dev->number,pools); event_dump(); for (i = 0; pools; i++) { if (pools & 1) { refill_pool(dev,i); zatm_dev->pool_info[i].rqu_count++; } pools >>= 1; } } /* don't handle RD */ if (reason & uPD98401_INT_SPE) printk(KERN_ALERT DEV_LABEL "(itf %d): system parity " "error at 0x%08x\n",dev->number,zin(ADDR)); if (reason & uPD98401_INT_CPE) printk(KERN_ALERT DEV_LABEL "(itf %d): control memory " "parity error at 0x%08x\n",dev->number,zin(ADDR)); if (reason & uPD98401_INT_SBE) { printk(KERN_ALERT DEV_LABEL "(itf %d): system bus " "error at 0x%08x\n",dev->number,zin(ADDR)); event_dump(); } /* don't handle IND */ if (reason & uPD98401_INT_MF) { printk(KERN_CRIT DEV_LABEL "(itf %d): mailbox full " "(0x%x)\n",dev->number,(reason & uPD98401_INT_MF) >> uPD98401_INT_MF_SHIFT); event_dump(); /* @@@ should try to recover */ } if (reason & uPD98401_INT_MM) { if (reason & 1) poll_rx(dev,0); if (reason & 2) poll_rx(dev,1); if (reason & 4) poll_tx(dev,2); if (reason & 8) poll_tx(dev,3); } /* @@@ handle RCRn */ } return IRQ_RETVAL(handled); } /*----------------------------- (E)EPROM access -----------------------------*/ static void __devinit eprom_set(struct zatm_dev 
*zatm_dev,unsigned long value, unsigned short cmd) { int error; if ((error = pci_write_config_dword(zatm_dev->pci_dev,cmd,value))) printk(KERN_ERR DEV_LABEL ": PCI write failed (0x%02x)\n", error); } static unsigned long __devinit eprom_get(struct zatm_dev *zatm_dev, unsigned short cmd) { unsigned int value; int error; if ((error = pci_read_config_dword(zatm_dev->pci_dev,cmd,&value))) printk(KERN_ERR DEV_LABEL ": PCI read failed (0x%02x)\n", error); return value; } static void __devinit eprom_put_bits(struct zatm_dev *zatm_dev, unsigned long data,int bits,unsigned short cmd) { unsigned long value; int i; for (i = bits-1; i >= 0; i--) { value = ZEPROM_CS | (((data >> i) & 1) ? ZEPROM_DI : 0); eprom_set(zatm_dev,value,cmd); eprom_set(zatm_dev,value | ZEPROM_SK,cmd); eprom_set(zatm_dev,value,cmd); } } static void __devinit eprom_get_byte(struct zatm_dev *zatm_dev, unsigned char *byte,unsigned short cmd) { int i; *byte = 0; for (i = 8; i; i--) { eprom_set(zatm_dev,ZEPROM_CS,cmd); eprom_set(zatm_dev,ZEPROM_CS | ZEPROM_SK,cmd); *byte <<= 1; if (eprom_get(zatm_dev,cmd) & ZEPROM_DO) *byte |= 1; eprom_set(zatm_dev,ZEPROM_CS,cmd); } } static unsigned char __devinit eprom_try_esi(struct atm_dev *dev, unsigned short cmd,int offset,int swap) { unsigned char buf[ZEPROM_SIZE]; struct zatm_dev *zatm_dev; int i; zatm_dev = ZATM_DEV(dev); for (i = 0; i < ZEPROM_SIZE; i += 2) { eprom_set(zatm_dev,ZEPROM_CS,cmd); /* select EPROM */ eprom_put_bits(zatm_dev,ZEPROM_CMD_READ,ZEPROM_CMD_LEN,cmd); eprom_put_bits(zatm_dev,i >> 1,ZEPROM_ADDR_LEN,cmd); eprom_get_byte(zatm_dev,buf+i+swap,cmd); eprom_get_byte(zatm_dev,buf+i+1-swap,cmd); eprom_set(zatm_dev,0,cmd); /* deselect EPROM */ } memcpy(dev->esi,buf+offset,ESI_LEN); return memcmp(dev->esi,"\0\0\0\0\0",ESI_LEN); /* assumes ESI_LEN == 6 */ } static void __devinit eprom_get_esi(struct atm_dev *dev) { if (eprom_try_esi(dev,ZEPROM_V1_REG,ZEPROM_V1_ESI_OFF,1)) return; (void) eprom_try_esi(dev,ZEPROM_V2_REG,ZEPROM_V2_ESI_OFF,0); } 
/*--------------------------------- entries ---------------------------------*/ static int __devinit zatm_init(struct atm_dev *dev) { struct zatm_dev *zatm_dev; struct pci_dev *pci_dev; unsigned short command; int error,i,last; unsigned long t0,t1,t2; DPRINTK(">zatm_init\n"); zatm_dev = ZATM_DEV(dev); spin_lock_init(&zatm_dev->lock); pci_dev = zatm_dev->pci_dev; zatm_dev->base = pci_resource_start(pci_dev, 0); zatm_dev->irq = pci_dev->irq; if ((error = pci_read_config_word(pci_dev,PCI_COMMAND,&command))) { printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%02x\n", dev->number,error); return -EINVAL; } if ((error = pci_write_config_word(pci_dev,PCI_COMMAND, command | PCI_COMMAND_IO | PCI_COMMAND_MASTER))) { printk(KERN_ERR DEV_LABEL "(itf %d): can't enable IO (0x%02x)" "\n",dev->number,error); return -EIO; } eprom_get_esi(dev); printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d,base=0x%x,irq=%d,", dev->number,pci_dev->revision,zatm_dev->base,zatm_dev->irq); /* reset uPD98401 */ zout(0,SWR); while (!(zin(GSR) & uPD98401_INT_IND)); zout(uPD98401_GMR_ONE /*uPD98401_BURST4*/,GMR); last = MAX_CRAM_SIZE; for (i = last-RAM_INCREMENT; i >= 0; i -= RAM_INCREMENT) { zpokel(zatm_dev,0x55555555,i); if (zpeekl(zatm_dev,i) != 0x55555555) last = i; else { zpokel(zatm_dev,0xAAAAAAAA,i); if (zpeekl(zatm_dev,i) != 0xAAAAAAAA) last = i; else zpokel(zatm_dev,i,i); } } for (i = 0; i < last; i += RAM_INCREMENT) if (zpeekl(zatm_dev,i) != i) break; zatm_dev->mem = i << 2; while (i) zpokel(zatm_dev,0,--i); /* reset again to rebuild memory pointers */ zout(0,SWR); while (!(zin(GSR) & uPD98401_INT_IND)); zout(uPD98401_GMR_ONE | uPD98401_BURST8 | uPD98401_BURST4 | uPD98401_BURST2 | uPD98401_GMR_PM | uPD98401_GMR_DR,GMR); /* TODO: should shrink allocation now */ printk("mem=%dkB,%s (",zatm_dev->mem >> 10,zatm_dev->copper ? "UTP" : "MMF"); for (i = 0; i < ESI_LEN; i++) printk("%02X%s",dev->esi[i],i == ESI_LEN-1 ? 
")\n" : "-"); do { unsigned long flags; spin_lock_irqsave(&zatm_dev->lock, flags); t0 = zpeekl(zatm_dev,uPD98401_TSR); udelay(10); t1 = zpeekl(zatm_dev,uPD98401_TSR); udelay(1010); t2 = zpeekl(zatm_dev,uPD98401_TSR); spin_unlock_irqrestore(&zatm_dev->lock, flags); } while (t0 > t1 || t1 > t2); /* loop if wrapping ... */ zatm_dev->khz = t2-2*t1+t0; printk(KERN_NOTICE DEV_LABEL "(itf %d): uPD98401 %d.%d at %d.%03d " "MHz\n",dev->number, (zin(VER) & uPD98401_MAJOR) >> uPD98401_MAJOR_SHIFT, zin(VER) & uPD98401_MINOR,zatm_dev->khz/1000,zatm_dev->khz % 1000); return uPD98402_init(dev); } static int __devinit zatm_start(struct atm_dev *dev) { struct zatm_dev *zatm_dev = ZATM_DEV(dev); struct pci_dev *pdev = zatm_dev->pci_dev; unsigned long curr; int pools,vccs,rx; int error, i, ld; DPRINTK("zatm_start\n"); zatm_dev->rx_map = zatm_dev->tx_map = NULL; for (i = 0; i < NR_MBX; i++) zatm_dev->mbx_start[i] = 0; error = request_irq(zatm_dev->irq, zatm_int, IRQF_SHARED, DEV_LABEL, dev); if (error < 0) { printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n", dev->number,zatm_dev->irq); goto done; } /* define memory regions */ pools = NR_POOLS; if (NR_SHAPERS*SHAPER_SIZE > pools*POOL_SIZE) pools = NR_SHAPERS*SHAPER_SIZE/POOL_SIZE; vccs = (zatm_dev->mem-NR_SHAPERS*SHAPER_SIZE-pools*POOL_SIZE)/ (2*VC_SIZE+RX_SIZE); ld = -1; for (rx = 1; rx < vccs; rx <<= 1) ld++; dev->ci_range.vpi_bits = 0; /* @@@ no VPI for now */ dev->ci_range.vci_bits = ld; dev->link_rate = ATM_OC3_PCR; zatm_dev->chans = vccs; /* ??? 
*/ curr = rx*RX_SIZE/4; DPRINTK("RX pool 0x%08lx\n",curr); zpokel(zatm_dev,curr,uPD98401_PMA); /* receive pool */ zatm_dev->pool_base = curr; curr += pools*POOL_SIZE/4; DPRINTK("Shapers 0x%08lx\n",curr); zpokel(zatm_dev,curr,uPD98401_SMA); /* shapers */ curr += NR_SHAPERS*SHAPER_SIZE/4; DPRINTK("Free 0x%08lx\n",curr); zpokel(zatm_dev,curr,uPD98401_TOS); /* free pool */ printk(KERN_INFO DEV_LABEL "(itf %d): %d shapers, %d pools, %d RX, " "%ld VCs\n",dev->number,NR_SHAPERS,pools,rx, (zatm_dev->mem-curr*4)/VC_SIZE); /* create mailboxes */ for (i = 0; i < NR_MBX; i++) { void *mbx; dma_addr_t mbx_dma; if (!mbx_entries[i]) continue; mbx = pci_alloc_consistent(pdev, 2*MBX_SIZE(i), &mbx_dma); if (!mbx) { error = -ENOMEM; goto out; } /* * Alignment provided by pci_alloc_consistent() isn't enough * for this device. */ if (((unsigned long)mbx ^ mbx_dma) & 0xffff) { printk(KERN_ERR DEV_LABEL "(itf %d): system " "bus incompatible with driver\n", dev->number); pci_free_consistent(pdev, 2*MBX_SIZE(i), mbx, mbx_dma); error = -ENODEV; goto out; } DPRINTK("mbx@0x%08lx-0x%08lx\n", mbx, mbx + MBX_SIZE(i)); zatm_dev->mbx_start[i] = (unsigned long)mbx; zatm_dev->mbx_dma[i] = mbx_dma; zatm_dev->mbx_end[i] = (zatm_dev->mbx_start[i] + MBX_SIZE(i)) & 0xffff; zout(mbx_dma >> 16, MSH(i)); zout(mbx_dma, MSL(i)); zout(zatm_dev->mbx_end[i], MBA(i)); zout((unsigned long)mbx & 0xffff, MTA(i)); zout((unsigned long)mbx & 0xffff, MWA(i)); } error = start_tx(dev); if (error) goto out; error = start_rx(dev); if (error) goto out_tx; error = dev->phy->start(dev); if (error) goto out_rx; zout(0xffffffff,IMR); /* enable interrupts */ /* enable TX & RX */ zout(zin(GMR) | uPD98401_GMR_SE | uPD98401_GMR_RE,GMR); done: return error; out_rx: kfree(zatm_dev->rx_map); out_tx: kfree(zatm_dev->tx_map); out: while (i-- > 0) { pci_free_consistent(pdev, 2*MBX_SIZE(i), (void *)zatm_dev->mbx_start[i], zatm_dev->mbx_dma[i]); } free_irq(zatm_dev->irq, dev); goto done; } static void zatm_close(struct atm_vcc *vcc) { 
DPRINTK(">zatm_close\n"); if (!ZATM_VCC(vcc)) return; clear_bit(ATM_VF_READY,&vcc->flags); close_rx(vcc); EVENT("close_tx\n",0,0); close_tx(vcc); DPRINTK("zatm_close: done waiting\n"); /* deallocate memory */ kfree(ZATM_VCC(vcc)); vcc->dev_data = NULL; clear_bit(ATM_VF_ADDR,&vcc->flags); } static int zatm_open(struct atm_vcc *vcc) { struct zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; short vpi = vcc->vpi; int vci = vcc->vci; int error; DPRINTK(">zatm_open\n"); zatm_dev = ZATM_DEV(vcc->dev); if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) vcc->dev_data = NULL; if (vci != ATM_VPI_UNSPEC && vpi != ATM_VCI_UNSPEC) set_bit(ATM_VF_ADDR,&vcc->flags); if (vcc->qos.aal != ATM_AAL5) return -EINVAL; /* @@@ AAL0 */ DPRINTK(DEV_LABEL "(itf %d): open %d.%d\n",vcc->dev->number,vcc->vpi, vcc->vci); if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) { zatm_vcc = kmalloc(sizeof(struct zatm_vcc),GFP_KERNEL); if (!zatm_vcc) { clear_bit(ATM_VF_ADDR,&vcc->flags); return -ENOMEM; } vcc->dev_data = zatm_vcc; ZATM_VCC(vcc)->tx_chan = 0; /* for zatm_close after open_rx */ if ((error = open_rx_first(vcc))) { zatm_close(vcc); return error; } if ((error = open_tx_first(vcc))) { zatm_close(vcc); return error; } } if (vci == ATM_VPI_UNSPEC || vpi == ATM_VCI_UNSPEC) return 0; if ((error = open_rx_second(vcc))) { zatm_close(vcc); return error; } if ((error = open_tx_second(vcc))) { zatm_close(vcc); return error; } set_bit(ATM_VF_READY,&vcc->flags); return 0; } static int zatm_change_qos(struct atm_vcc *vcc,struct atm_qos *qos,int flags) { printk("Not yet implemented\n"); return -ENOSYS; /* @@@ */ } static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) { struct zatm_dev *zatm_dev; unsigned long flags; zatm_dev = ZATM_DEV(dev); switch (cmd) { case ZATM_GETPOOLZ: if (!capable(CAP_NET_ADMIN)) return -EPERM; /* fall through */ case ZATM_GETPOOL: { struct zatm_pool_info info; int pool; if (get_user(pool, &((struct zatm_pool_req __user *) arg)->pool_num)) return -EFAULT; if (pool < 0 || pool 
> ZATM_LAST_POOL) return -EINVAL; spin_lock_irqsave(&zatm_dev->lock, flags); info = zatm_dev->pool_info[pool]; if (cmd == ZATM_GETPOOLZ) { zatm_dev->pool_info[pool].rqa_count = 0; zatm_dev->pool_info[pool].rqu_count = 0; } spin_unlock_irqrestore(&zatm_dev->lock, flags); return copy_to_user( &((struct zatm_pool_req __user *) arg)->info, &info,sizeof(info)) ? -EFAULT : 0; } case ZATM_SETPOOL: { struct zatm_pool_info info; int pool; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (get_user(pool, &((struct zatm_pool_req __user *) arg)->pool_num)) return -EFAULT; if (pool < 0 || pool > ZATM_LAST_POOL) return -EINVAL; if (copy_from_user(&info, &((struct zatm_pool_req __user *) arg)->info, sizeof(info))) return -EFAULT; if (!info.low_water) info.low_water = zatm_dev-> pool_info[pool].low_water; if (!info.high_water) info.high_water = zatm_dev-> pool_info[pool].high_water; if (!info.next_thres) info.next_thres = zatm_dev-> pool_info[pool].next_thres; if (info.low_water >= info.high_water || info.low_water < 0) return -EINVAL; spin_lock_irqsave(&zatm_dev->lock, flags); zatm_dev->pool_info[pool].low_water = info.low_water; zatm_dev->pool_info[pool].high_water = info.high_water; zatm_dev->pool_info[pool].next_thres = info.next_thres; spin_unlock_irqrestore(&zatm_dev->lock, flags); return 0; } default: if (!dev->phy->ioctl) return -ENOIOCTLCMD; return dev->phy->ioctl(dev,cmd,arg); } } static int zatm_getsockopt(struct atm_vcc *vcc,int level,int optname, void __user *optval,int optlen) { return -EINVAL; } static int zatm_setsockopt(struct atm_vcc *vcc,int level,int optname, void __user *optval,unsigned int optlen) { return -EINVAL; } static int zatm_send(struct atm_vcc *vcc,struct sk_buff *skb) { int error; EVENT(">zatm_send 0x%lx\n",(unsigned long) skb,0); if (!ZATM_VCC(vcc)->tx_chan || !test_bit(ATM_VF_READY,&vcc->flags)) { if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb(skb); return -EINVAL; } if (!skb) { printk(KERN_CRIT "!skb in zatm_send ?\n"); if (vcc->pop) 
vcc->pop(vcc,skb); return -EINVAL; } ATM_SKB(skb)->vcc = vcc; error = do_tx(skb); if (error != RING_BUSY) return error; skb_queue_tail(&ZATM_VCC(vcc)->backlog,skb); return 0; } static void zatm_phy_put(struct atm_dev *dev,unsigned char value, unsigned long addr) { struct zatm_dev *zatm_dev; zatm_dev = ZATM_DEV(dev); zwait; zout(value,CER); zout(uPD98401_IND_ACC | uPD98401_IA_B0 | (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR); } static unsigned char zatm_phy_get(struct atm_dev *dev,unsigned long addr) { struct zatm_dev *zatm_dev; zatm_dev = ZATM_DEV(dev); zwait; zout(uPD98401_IND_ACC | uPD98401_IA_B0 | uPD98401_IA_RW | (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR); zwait; return zin(CER) & 0xff; } static const struct atmdev_ops ops = { .open = zatm_open, .close = zatm_close, .ioctl = zatm_ioctl, .getsockopt = zatm_getsockopt, .setsockopt = zatm_setsockopt, .send = zatm_send, .phy_put = zatm_phy_put, .phy_get = zatm_phy_get, .change_qos = zatm_change_qos, }; static int __devinit zatm_init_one(struct pci_dev *pci_dev, const struct pci_device_id *ent) { struct atm_dev *dev; struct zatm_dev *zatm_dev; int ret = -ENOMEM; zatm_dev = kmalloc(sizeof(*zatm_dev), GFP_KERNEL); if (!zatm_dev) { printk(KERN_EMERG "%s: memory shortage\n", DEV_LABEL); goto out; } dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &ops, -1, NULL); if (!dev) goto out_free; ret = pci_enable_device(pci_dev); if (ret < 0) goto out_deregister; ret = pci_request_regions(pci_dev, DEV_LABEL); if (ret < 0) goto out_disable; zatm_dev->pci_dev = pci_dev; dev->dev_data = zatm_dev; zatm_dev->copper = (int)ent->driver_data; if ((ret = zatm_init(dev)) || (ret = zatm_start(dev))) goto out_release; pci_set_drvdata(pci_dev, dev); zatm_dev->more = zatm_boards; zatm_boards = dev; ret = 0; out: return ret; out_release: pci_release_regions(pci_dev); out_disable: pci_disable_device(pci_dev); out_deregister: atm_dev_deregister(dev); out_free: kfree(zatm_dev); goto out; } MODULE_LICENSE("GPL"); 
static struct pci_device_id zatm_pci_tbl[] __devinitdata = { { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1221), ZATM_COPPER }, { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1225), 0 }, { 0, } }; MODULE_DEVICE_TABLE(pci, zatm_pci_tbl); static struct pci_driver zatm_driver = { .name = DEV_LABEL, .id_table = zatm_pci_tbl, .probe = zatm_init_one, }; static int __init zatm_init_module(void) { return pci_register_driver(&zatm_driver); } module_init(zatm_init_module); /* module_exit not defined so not unloadable */
gpl-2.0
YaDev/kernel_samsung_gardaltetmo
drivers/infiniband/hw/ipath/ipath_init_chip.c
5233
33427
/* * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/stat.h> #include <linux/vmalloc.h> #include "ipath_kernel.h" #include "ipath_common.h" /* * min buffers we want to have per port, after driver */ #define IPATH_MIN_USER_PORT_BUFCNT 7 /* * Number of ports we are configured to use (to allow for more pio * buffers per port, etc.) Zero means use chip value. 
*/ static ushort ipath_cfgports; module_param_named(cfgports, ipath_cfgports, ushort, S_IRUGO); MODULE_PARM_DESC(cfgports, "Set max number of ports to use"); /* * Number of buffers reserved for driver (verbs and layered drivers.) * Initialized based on number of PIO buffers if not set via module interface. * The problem with this is that it's global, but we'll use different * numbers for different chip types. */ static ushort ipath_kpiobufs; static int ipath_set_kpiobufs(const char *val, struct kernel_param *kp); module_param_call(kpiobufs, ipath_set_kpiobufs, param_get_ushort, &ipath_kpiobufs, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver"); /** * create_port0_egr - allocate the eager TID buffers * @dd: the infinipath device * * This code is now quite different for user and kernel, because * the kernel uses skb's, for the accelerated network performance. * This is the kernel (port0) version. * * Allocate the eager TID buffers and program them into infinipath. * We use the network layer alloc_skb() allocator to allocate the * memory, and either use the buffers as is for things like verbs * packets, or pass the buffers up to the ipath layered driver and * thence the network layer, replacing them as we do so (see * ipath_rcv_layer()). */ static int create_port0_egr(struct ipath_devdata *dd) { unsigned e, egrcnt; struct ipath_skbinfo *skbinfo; int ret; egrcnt = dd->ipath_p0_rcvegrcnt; skbinfo = vmalloc(sizeof(*dd->ipath_port0_skbinfo) * egrcnt); if (skbinfo == NULL) { ipath_dev_err(dd, "allocation error for eager TID " "skb array\n"); ret = -ENOMEM; goto bail; } for (e = 0; e < egrcnt; e++) { /* * This is a bit tricky in that we allocate extra * space for 2 bytes of the 14 byte ethernet header. * These two bytes are passed in the ipath header so * the rest of the data is word aligned. We allocate * 4 bytes so that the data buffer stays word aligned. * See ipath_kreceive() for more details. 
*/ skbinfo[e].skb = ipath_alloc_skb(dd, GFP_KERNEL); if (!skbinfo[e].skb) { ipath_dev_err(dd, "SKB allocation error for " "eager TID %u\n", e); while (e != 0) dev_kfree_skb(skbinfo[--e].skb); vfree(skbinfo); ret = -ENOMEM; goto bail; } } /* * After loop above, so we can test non-NULL to see if ready * to use at receive, etc. */ dd->ipath_port0_skbinfo = skbinfo; for (e = 0; e < egrcnt; e++) { dd->ipath_port0_skbinfo[e].phys = ipath_map_single(dd->pcidev, dd->ipath_port0_skbinfo[e].skb->data, dd->ipath_ibmaxlen, PCI_DMA_FROMDEVICE); dd->ipath_f_put_tid(dd, e + (u64 __iomem *) ((char __iomem *) dd->ipath_kregbase + dd->ipath_rcvegrbase), RCVHQ_RCV_TYPE_EAGER, dd->ipath_port0_skbinfo[e].phys); } ret = 0; bail: return ret; } static int bringup_link(struct ipath_devdata *dd) { u64 val, ibc; int ret = 0; /* hold IBC in reset */ dd->ipath_control &= ~INFINIPATH_C_LINKENABLE; ipath_write_kreg(dd, dd->ipath_kregs->kr_control, dd->ipath_control); /* * set initial max size pkt IBC will send, including ICRC; it's the * PIO buffer size in dwords, less 1; also see ipath_set_mtu() */ val = (dd->ipath_ibmaxlen >> 2) + 1; ibc = val << dd->ibcc_mpl_shift; /* flowcontrolwatermark is in units of KBytes */ ibc |= 0x5ULL << INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT; /* * How often flowctrl sent. More or less in usecs; balance against * watermark value, so that in theory senders always get a flow * control update in time to not let the IB link go idle. */ ibc |= 0x3ULL << INFINIPATH_IBCC_FLOWCTRLPERIOD_SHIFT; /* max error tolerance */ ibc |= 0xfULL << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT; /* use "real" buffer space for */ ibc |= 4ULL << INFINIPATH_IBCC_CREDITSCALE_SHIFT; /* IB credit flow control. */ ibc |= 0xfULL << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT; /* initially come up waiting for TS1, without sending anything. */ dd->ipath_ibcctrl = ibc; /* * Want to start out with both LINKCMD and LINKINITCMD in NOP * (0 and 0). Don't put linkinitcmd in ipath_ibcctrl, want that * to stay a NOP. 
Flag that we are disabled, for the (unlikely) * case that some recovery path is trying to bring the link up * before we are ready. */ ibc |= INFINIPATH_IBCC_LINKINITCMD_DISABLE << INFINIPATH_IBCC_LINKINITCMD_SHIFT; dd->ipath_flags |= IPATH_IB_LINK_DISABLED; ipath_cdbg(VERBOSE, "Writing 0x%llx to ibcctrl\n", (unsigned long long) ibc); ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, ibc); // be sure chip saw it val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); ret = dd->ipath_f_bringup_serdes(dd); if (ret) dev_info(&dd->pcidev->dev, "Could not initialize SerDes, " "not usable\n"); else { /* enable IBC */ dd->ipath_control |= INFINIPATH_C_LINKENABLE; ipath_write_kreg(dd, dd->ipath_kregs->kr_control, dd->ipath_control); } return ret; } static struct ipath_portdata *create_portdata0(struct ipath_devdata *dd) { struct ipath_portdata *pd = NULL; pd = kzalloc(sizeof(*pd), GFP_KERNEL); if (pd) { pd->port_dd = dd; pd->port_cnt = 1; /* The port 0 pkey table is used by the layer interface. */ pd->port_pkeys[0] = IPATH_DEFAULT_P_KEY; pd->port_seq_cnt = 1; } return pd; } static int init_chip_first(struct ipath_devdata *dd) { struct ipath_portdata *pd; int ret = 0; u64 val; spin_lock_init(&dd->ipath_kernel_tid_lock); spin_lock_init(&dd->ipath_user_tid_lock); spin_lock_init(&dd->ipath_sendctrl_lock); spin_lock_init(&dd->ipath_uctxt_lock); spin_lock_init(&dd->ipath_sdma_lock); spin_lock_init(&dd->ipath_gpio_lock); spin_lock_init(&dd->ipath_eep_st_lock); spin_lock_init(&dd->ipath_sdepb_lock); mutex_init(&dd->ipath_eep_lock); /* * skip cfgports stuff because we are not allocating memory, * and we don't want problems if the portcnt changed due to * cfgports. We do still check and report a difference, if * not same (should be impossible). 
*/ dd->ipath_f_config_ports(dd, ipath_cfgports); if (!ipath_cfgports) dd->ipath_cfgports = dd->ipath_portcnt; else if (ipath_cfgports <= dd->ipath_portcnt) { dd->ipath_cfgports = ipath_cfgports; ipath_dbg("Configured to use %u ports out of %u in chip\n", dd->ipath_cfgports, ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt)); } else { dd->ipath_cfgports = dd->ipath_portcnt; ipath_dbg("Tried to configured to use %u ports; chip " "only supports %u\n", ipath_cfgports, ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt)); } /* * Allocate full portcnt array, rather than just cfgports, because * cleanup iterates across all possible ports. */ dd->ipath_pd = kzalloc(sizeof(*dd->ipath_pd) * dd->ipath_portcnt, GFP_KERNEL); if (!dd->ipath_pd) { ipath_dev_err(dd, "Unable to allocate portdata array, " "failing\n"); ret = -ENOMEM; goto done; } pd = create_portdata0(dd); if (!pd) { ipath_dev_err(dd, "Unable to allocate portdata for port " "0, failing\n"); ret = -ENOMEM; goto done; } dd->ipath_pd[0] = pd; dd->ipath_rcvtidcnt = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt); dd->ipath_rcvtidbase = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidbase); dd->ipath_rcvegrcnt = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt); dd->ipath_rcvegrbase = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrbase); dd->ipath_palign = ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign); dd->ipath_piobufbase = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufbase); val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiosize); dd->ipath_piosize2k = val & ~0U; dd->ipath_piosize4k = val >> 32; if (dd->ipath_piosize4k == 0 && ipath_mtu4096) ipath_mtu4096 = 0; /* 4KB not supported by this chip */ dd->ipath_ibmtu = ipath_mtu4096 ? 
4096 : 2048; val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufcnt); dd->ipath_piobcnt2k = val & ~0U; dd->ipath_piobcnt4k = val >> 32; dd->ipath_pio2kbase = (u32 __iomem *) (((char __iomem *) dd->ipath_kregbase) + (dd->ipath_piobufbase & 0xffffffff)); if (dd->ipath_piobcnt4k) { dd->ipath_pio4kbase = (u32 __iomem *) (((char __iomem *) dd->ipath_kregbase) + (dd->ipath_piobufbase >> 32)); /* * 4K buffers take 2 pages; we use roundup just to be * paranoid; we calculate it once here, rather than on * ever buf allocate */ dd->ipath_4kalign = ALIGN(dd->ipath_piosize4k, dd->ipath_palign); ipath_dbg("%u 2k(%x) piobufs @ %p, %u 4k(%x) @ %p " "(%x aligned)\n", dd->ipath_piobcnt2k, dd->ipath_piosize2k, dd->ipath_pio2kbase, dd->ipath_piobcnt4k, dd->ipath_piosize4k, dd->ipath_pio4kbase, dd->ipath_4kalign); } else ipath_dbg("%u 2k piobufs @ %p\n", dd->ipath_piobcnt2k, dd->ipath_pio2kbase); done: return ret; } /** * init_chip_reset - re-initialize after a reset, or enable * @dd: the infinipath device * * sanity check at least some of the values after reset, and * ensure no receive or transmit (explicitly, in case reset * failed */ static int init_chip_reset(struct ipath_devdata *dd) { u32 rtmp; int i; unsigned long flags; /* * ensure chip does no sends or receives, tail updates, or * pioavail updates while we re-initialize */ dd->ipath_rcvctrl &= ~(1ULL << dd->ipath_r_tailupd_shift); for (i = 0; i < dd->ipath_portcnt; i++) { clear_bit(dd->ipath_r_portenable_shift + i, &dd->ipath_rcvctrl); clear_bit(dd->ipath_r_intravail_shift + i, &dd->ipath_rcvctrl); } ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl); spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); dd->ipath_sendctrl = 0U; /* no sdma, etc */ ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl); ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL); rtmp = 
ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt); if (rtmp != dd->ipath_rcvtidcnt) dev_info(&dd->pcidev->dev, "tidcnt was %u before " "reset, now %u, using original\n", dd->ipath_rcvtidcnt, rtmp); rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidbase); if (rtmp != dd->ipath_rcvtidbase) dev_info(&dd->pcidev->dev, "tidbase was %u before " "reset, now %u, using original\n", dd->ipath_rcvtidbase, rtmp); rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt); if (rtmp != dd->ipath_rcvegrcnt) dev_info(&dd->pcidev->dev, "egrcnt was %u before " "reset, now %u, using original\n", dd->ipath_rcvegrcnt, rtmp); rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrbase); if (rtmp != dd->ipath_rcvegrbase) dev_info(&dd->pcidev->dev, "egrbase was %u before " "reset, now %u, using original\n", dd->ipath_rcvegrbase, rtmp); return 0; } static int init_pioavailregs(struct ipath_devdata *dd) { int ret; dd->ipath_pioavailregs_dma = dma_alloc_coherent( &dd->pcidev->dev, PAGE_SIZE, &dd->ipath_pioavailregs_phys, GFP_KERNEL); if (!dd->ipath_pioavailregs_dma) { ipath_dev_err(dd, "failed to allocate PIOavail reg area " "in memory\n"); ret = -ENOMEM; goto done; } /* * we really want L2 cache aligned, but for current CPUs of * interest, they are the same. */ dd->ipath_statusp = (u64 *) ((char *)dd->ipath_pioavailregs_dma + ((2 * L1_CACHE_BYTES + dd->ipath_pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES)); /* copy the current value now that it's really allocated */ *dd->ipath_statusp = dd->_ipath_status; /* * setup buffer to hold freeze msg, accessible to apps, * following statusp */ dd->ipath_freezemsg = (char *)&dd->ipath_statusp[1]; /* and its length */ dd->ipath_freezelen = L1_CACHE_BYTES - sizeof(dd->ipath_statusp[0]); ret = 0; done: return ret; } /** * init_shadow_tids - allocate the shadow TID array * @dd: the infinipath device * * allocate the shadow TID array, so we can ipath_munlock previous * entries. 
It may make more sense to move the pageshadow to the * port data structure, so we only allocate memory for ports actually * in use, since we at 8k per port, now. */ static void init_shadow_tids(struct ipath_devdata *dd) { struct page **pages; dma_addr_t *addrs; pages = vzalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt * sizeof(struct page *)); if (!pages) { ipath_dev_err(dd, "failed to allocate shadow page * " "array, no expected sends!\n"); dd->ipath_pageshadow = NULL; return; } addrs = vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt * sizeof(dma_addr_t)); if (!addrs) { ipath_dev_err(dd, "failed to allocate shadow dma handle " "array, no expected sends!\n"); vfree(pages); dd->ipath_pageshadow = NULL; return; } dd->ipath_pageshadow = pages; dd->ipath_physshadow = addrs; } static void enable_chip(struct ipath_devdata *dd, int reinit) { u32 val; u64 rcvmask; unsigned long flags; int i; if (!reinit) init_waitqueue_head(&ipath_state_wait); ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl); spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); /* Enable PIO send, and update of PIOavail regs to memory. */ dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE | INFINIPATH_S_PIOBUFAVAILUPD; /* * Set the PIO avail update threshold to host memory * on chips that support it. */ if (dd->ipath_pioupd_thresh) dd->ipath_sendctrl |= dd->ipath_pioupd_thresh << INFINIPATH_S_UPDTHRESH_SHIFT; ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl); ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); /* * Enable kernel ports' receive and receive interrupt. * Other ports done as user opens and inits them. 
*/ rcvmask = 1ULL; dd->ipath_rcvctrl |= (rcvmask << dd->ipath_r_portenable_shift) | (rcvmask << dd->ipath_r_intravail_shift); if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) dd->ipath_rcvctrl |= (1ULL << dd->ipath_r_tailupd_shift); ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl); /* * now ready for use. this should be cleared whenever we * detect a reset, or initiate one. */ dd->ipath_flags |= IPATH_INITTED; /* * Init our shadow copies of head from tail values, * and write head values to match. */ val = ipath_read_ureg32(dd, ur_rcvegrindextail, 0); ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0); /* Initialize so we interrupt on next packet received */ ipath_write_ureg(dd, ur_rcvhdrhead, dd->ipath_rhdrhead_intr_off | dd->ipath_pd[0]->port_head, 0); /* * by now pioavail updates to memory should have occurred, so * copy them into our working/shadow registers; this is in * case something went wrong with abort, but mostly to get the * initial values of the generation bit correct. */ for (i = 0; i < dd->ipath_pioavregs; i++) { __le64 pioavail; /* * Chip Errata bug 6641; even and odd qwords>3 are swapped. */ if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) pioavail = dd->ipath_pioavailregs_dma[i ^ 1]; else pioavail = dd->ipath_pioavailregs_dma[i]; /* * don't need to worry about ipath_pioavailkernel here * because we will call ipath_chg_pioavailkernel() later * in initialization, to busy out buffers as needed */ dd->ipath_pioavailshadow[i] = le64_to_cpu(pioavail); } /* can get counters, stats, etc. */ dd->ipath_flags |= IPATH_PRESENT; } static int init_housekeeping(struct ipath_devdata *dd, int reinit) { char boardn[40]; int ret = 0; /* * have to clear shadow copies of registers at init that are * not otherwise set here, or all kinds of bizarre things * happen with driver on chip reset */ dd->ipath_rcvhdrsize = 0; /* * Don't clear ipath_flags as 8bit mode was set before * entering this func. 
However, we do set the linkstate to * unknown, so we can watch for a transition. * PRESENT is set because we want register reads to work, * and the kernel infrastructure saw it in config space; * We clear it if we have failures. */ dd->ipath_flags |= IPATH_LINKUNK | IPATH_PRESENT; dd->ipath_flags &= ~(IPATH_LINKACTIVE | IPATH_LINKARMED | IPATH_LINKDOWN | IPATH_LINKINIT); ipath_cdbg(VERBOSE, "Try to read spc chip revision\n"); dd->ipath_revision = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision); /* * set up fundamental info we need to use the chip; we assume * if the revision reg and these regs are OK, we don't need to * special case the rest */ dd->ipath_sregbase = ipath_read_kreg32(dd, dd->ipath_kregs->kr_sendregbase); dd->ipath_cregbase = ipath_read_kreg32(dd, dd->ipath_kregs->kr_counterregbase); dd->ipath_uregbase = ipath_read_kreg32(dd, dd->ipath_kregs->kr_userregbase); ipath_cdbg(VERBOSE, "ipath_kregbase %p, sendbase %x usrbase %x, " "cntrbase %x\n", dd->ipath_kregbase, dd->ipath_sregbase, dd->ipath_uregbase, dd->ipath_cregbase); if ((dd->ipath_revision & 0xffffffff) == 0xffffffff || (dd->ipath_sregbase & 0xffffffff) == 0xffffffff || (dd->ipath_cregbase & 0xffffffff) == 0xffffffff || (dd->ipath_uregbase & 0xffffffff) == 0xffffffff) { ipath_dev_err(dd, "Register read failures from chip, " "giving up initialization\n"); dd->ipath_flags &= ~IPATH_PRESENT; ret = -ENODEV; goto done; } /* clear diagctrl register, in case diags were running and crashed */ ipath_write_kreg (dd, dd->ipath_kregs->kr_hwdiagctrl, 0); /* clear the initial reset flag, in case first driver load */ ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, INFINIPATH_E_RESET); ipath_cdbg(VERBOSE, "Revision %llx (PCI %x)\n", (unsigned long long) dd->ipath_revision, dd->ipath_pcirev); if (((dd->ipath_revision >> INFINIPATH_R_SOFTWARE_SHIFT) & INFINIPATH_R_SOFTWARE_MASK) != IPATH_CHIP_SWVERSION) { ipath_dev_err(dd, "Driver only handles version %d, " "chip swversion is %d (%llx), failng\n", 
IPATH_CHIP_SWVERSION, (int)(dd->ipath_revision >> INFINIPATH_R_SOFTWARE_SHIFT) & INFINIPATH_R_SOFTWARE_MASK, (unsigned long long) dd->ipath_revision); ret = -ENOSYS; goto done; } dd->ipath_majrev = (u8) ((dd->ipath_revision >> INFINIPATH_R_CHIPREVMAJOR_SHIFT) & INFINIPATH_R_CHIPREVMAJOR_MASK); dd->ipath_minrev = (u8) ((dd->ipath_revision >> INFINIPATH_R_CHIPREVMINOR_SHIFT) & INFINIPATH_R_CHIPREVMINOR_MASK); dd->ipath_boardrev = (u8) ((dd->ipath_revision >> INFINIPATH_R_BOARDID_SHIFT) & INFINIPATH_R_BOARDID_MASK); ret = dd->ipath_f_get_boardname(dd, boardn, sizeof boardn); snprintf(dd->ipath_boardversion, sizeof(dd->ipath_boardversion), "ChipABI %u.%u, %s, InfiniPath%u %u.%u, PCI %u, " "SW Compat %u\n", IPATH_CHIP_VERS_MAJ, IPATH_CHIP_VERS_MIN, boardn, (unsigned)(dd->ipath_revision >> INFINIPATH_R_ARCH_SHIFT) & INFINIPATH_R_ARCH_MASK, dd->ipath_majrev, dd->ipath_minrev, dd->ipath_pcirev, (unsigned)(dd->ipath_revision >> INFINIPATH_R_SOFTWARE_SHIFT) & INFINIPATH_R_SOFTWARE_MASK); ipath_dbg("%s", dd->ipath_boardversion); if (ret) goto done; if (reinit) ret = init_chip_reset(dd); else ret = init_chip_first(dd); done: return ret; } static void verify_interrupt(unsigned long opaque) { struct ipath_devdata *dd = (struct ipath_devdata *) opaque; if (!dd) return; /* being torn down */ /* * If we don't have any interrupts, let the user know and * don't bother checking again. */ if (dd->ipath_int_counter == 0) { if (!dd->ipath_f_intr_fallback(dd)) dev_err(&dd->pcidev->dev, "No interrupts detected, " "not usable.\n"); else /* re-arm the timer to see if fallback works */ mod_timer(&dd->ipath_intrchk_timer, jiffies + HZ/2); } else ipath_cdbg(VERBOSE, "%u interrupts at timer check\n", dd->ipath_int_counter); } /** * ipath_init_chip - do the actual initialization sequence on the chip * @dd: the infinipath device * @reinit: reinitializing, so don't allocate new memory * * Do the actual initialization sequence on the chip. 
This is done * both from the init routine called from the PCI infrastructure, and * when we reset the chip, or detect that it was reset internally, * or it's administratively re-enabled. * * Memory allocation here and in called routines is only done in * the first case (reinit == 0). We have to be careful, because even * without memory allocation, we need to re-write all the chip registers * TIDs, etc. after the reset or enable has completed. */ int ipath_init_chip(struct ipath_devdata *dd, int reinit) { int ret = 0; u32 kpiobufs, defkbufs; u32 piobufs, uports; u64 val; struct ipath_portdata *pd; gfp_t gfp_flags = GFP_USER | __GFP_COMP; ret = init_housekeeping(dd, reinit); if (ret) goto done; /* * we ignore most issues after reporting them, but have to specially * handle hardware-disabled chips. */ if (ret == 2) { /* unique error, known to ipath_init_one */ ret = -EPERM; goto done; } /* * We could bump this to allow for full rcvegrcnt + rcvtidcnt, * but then it no longer nicely fits power of two, and since * we now use routines that backend onto __get_free_pages, the * rest would be wasted. */ dd->ipath_rcvhdrcnt = max(dd->ipath_p0_rcvegrcnt, dd->ipath_rcvegrcnt); ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrcnt, dd->ipath_rcvhdrcnt); /* * Set up the shadow copies of the piobufavail registers, * which we compare against the chip registers for now, and * the in memory DMA'ed copies of the registers. This has to * be done early, before we calculate lastport, etc. */ piobufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k; /* * calc number of pioavail registers, and save it; we have 2 * bits per buffer. */ dd->ipath_pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) / (sizeof(u64) * BITS_PER_BYTE / 2); uports = dd->ipath_cfgports ? 
dd->ipath_cfgports - 1 : 0; if (piobufs > 144) defkbufs = 32 + dd->ipath_pioreserved; else defkbufs = 16 + dd->ipath_pioreserved; if (ipath_kpiobufs && (ipath_kpiobufs + (uports * IPATH_MIN_USER_PORT_BUFCNT)) > piobufs) { int i = (int) piobufs - (int) (uports * IPATH_MIN_USER_PORT_BUFCNT); if (i < 1) i = 1; dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs of " "%d for kernel leaves too few for %d user ports " "(%d each); using %u\n", ipath_kpiobufs, piobufs, uports, IPATH_MIN_USER_PORT_BUFCNT, i); /* * shouldn't change ipath_kpiobufs, because could be * different for different devices... */ kpiobufs = i; } else if (ipath_kpiobufs) kpiobufs = ipath_kpiobufs; else kpiobufs = defkbufs; dd->ipath_lastport_piobuf = piobufs - kpiobufs; dd->ipath_pbufsport = uports ? dd->ipath_lastport_piobuf / uports : 0; /* if not an even divisor, some user ports get extra buffers */ dd->ipath_ports_extrabuf = dd->ipath_lastport_piobuf - (dd->ipath_pbufsport * uports); if (dd->ipath_ports_extrabuf) ipath_dbg("%u pbufs/port leaves some unused, add 1 buffer to " "ports <= %u\n", dd->ipath_pbufsport, dd->ipath_ports_extrabuf); dd->ipath_lastpioindex = 0; dd->ipath_lastpioindexl = dd->ipath_piobcnt2k; /* ipath_pioavailshadow initialized earlier */ ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u " "each for %u user ports\n", kpiobufs, piobufs, dd->ipath_pbufsport, uports); ret = dd->ipath_f_early_init(dd); if (ret) { ipath_dev_err(dd, "Early initialization failure\n"); goto done; } /* * Early_init sets rcvhdrentsize and rcvhdrsize, so this must be * done after early_init. 
*/ dd->ipath_hdrqlast = dd->ipath_rcvhdrentsize * (dd->ipath_rcvhdrcnt - 1); ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrentsize, dd->ipath_rcvhdrentsize); ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize, dd->ipath_rcvhdrsize); if (!reinit) { ret = init_pioavailregs(dd); init_shadow_tids(dd); if (ret) goto done; } ipath_write_kreg(dd, dd->ipath_kregs->kr_sendpioavailaddr, dd->ipath_pioavailregs_phys); /* * this is to detect s/w errors, which the h/w works around by * ignoring the low 6 bits of address, if it wasn't aligned. */ val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpioavailaddr); if (val != dd->ipath_pioavailregs_phys) { ipath_dev_err(dd, "Catastrophic software error, " "SendPIOAvailAddr written as %lx, " "read back as %llx\n", (unsigned long) dd->ipath_pioavailregs_phys, (unsigned long long) val); ret = -EINVAL; goto done; } ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvbthqp, IPATH_KD_QP); /* * make sure we are not in freeze, and PIO send enabled, so * writes to pbc happen */ ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, 0ULL); ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED); ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL); /* * before error clears, since we expect serdes pll errors during * this, the first time after reset */ if (bringup_link(dd)) { dev_info(&dd->pcidev->dev, "Failed to bringup IB link\n"); ret = -ENETDOWN; goto done; } /* * clear any "expected" hwerrs from reset and/or initialization * clear any that aren't enabled (at least this once), and then * set the enable mask */ dd->ipath_f_init_hwerrors(dd); ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED); ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, dd->ipath_hwerrmask); /* clear all */ ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL); /* enable errors that are masked, at least this first time. 
*/ ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, ~dd->ipath_maskederrs); dd->ipath_maskederrs = 0; /* don't re-enable ignored in timer */ dd->ipath_errormask = ipath_read_kreg64(dd, dd->ipath_kregs->kr_errormask); /* clear any interrupts up to this point (ints still not enabled) */ ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL); dd->ipath_f_tidtemplate(dd); /* * Set up the port 0 (kernel) rcvhdr q and egr TIDs. If doing * re-init, the simplest way to handle this is to free * existing, and re-allocate. * Need to re-create rest of port 0 portdata as well. */ pd = dd->ipath_pd[0]; if (reinit) { struct ipath_portdata *npd; /* * Alloc and init new ipath_portdata for port0, * Then free old pd. Could lead to fragmentation, but also * makes later support for hot-swap easier. */ npd = create_portdata0(dd); if (npd) { ipath_free_pddata(dd, pd); dd->ipath_pd[0] = npd; pd = npd; } else { ipath_dev_err(dd, "Unable to allocate portdata" " for port 0, failing\n"); ret = -ENOMEM; goto done; } } ret = ipath_create_rcvhdrq(dd, pd); if (!ret) ret = create_port0_egr(dd); if (ret) { ipath_dev_err(dd, "failed to allocate kernel port's " "rcvhdrq and/or egr bufs\n"); goto done; } else enable_chip(dd, reinit); /* after enable_chip, so pioavailshadow setup */ ipath_chg_pioavailkernel(dd, 0, piobufs, 1); /* * Cancel any possible active sends from early driver load. * Follows early_init because some chips have to initialize * PIO buffers in early_init to avoid false parity errors. * After enable and ipath_chg_pioavailkernel so we can safely * enable pioavail updates and PIOENABLE; packets are now * ready to go out. */ ipath_cancel_sends(dd, 1); if (!reinit) { /* * Used when we close a port, for DMA already in flight * at close. 
*/ dd->ipath_dummy_hdrq = dma_alloc_coherent( &dd->pcidev->dev, dd->ipath_pd[0]->port_rcvhdrq_size, &dd->ipath_dummy_hdrq_phys, gfp_flags); if (!dd->ipath_dummy_hdrq) { dev_info(&dd->pcidev->dev, "Couldn't allocate 0x%lx bytes for dummy hdrq\n", dd->ipath_pd[0]->port_rcvhdrq_size); /* fallback to just 0'ing */ dd->ipath_dummy_hdrq_phys = 0UL; } } /* * cause retrigger of pending interrupts ignored during init, * even if we had errors */ ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL); if (!dd->ipath_stats_timer_active) { /* * first init, or after an admin disable/enable * set up stats retrieval timer, even if we had errors * in last portion of setup */ init_timer(&dd->ipath_stats_timer); dd->ipath_stats_timer.function = ipath_get_faststats; dd->ipath_stats_timer.data = (unsigned long) dd; /* every 5 seconds; */ dd->ipath_stats_timer.expires = jiffies + 5 * HZ; /* takes ~16 seconds to overflow at full IB 4x bandwdith */ add_timer(&dd->ipath_stats_timer); dd->ipath_stats_timer_active = 1; } /* Set up SendDMA if chip supports it */ if (dd->ipath_flags & IPATH_HAS_SEND_DMA) ret = setup_sdma(dd); /* Set up HoL state */ init_timer(&dd->ipath_hol_timer); dd->ipath_hol_timer.function = ipath_hol_event; dd->ipath_hol_timer.data = (unsigned long)dd; dd->ipath_hol_state = IPATH_HOL_UP; done: if (!ret) { *dd->ipath_statusp |= IPATH_STATUS_CHIP_PRESENT; if (!dd->ipath_f_intrsetup(dd)) { /* now we can enable all interrupts from the chip */ ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, -1LL); /* force re-interrupt of any pending interrupts. 
*/ ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL); /* chip is usable; mark it as initialized */ *dd->ipath_statusp |= IPATH_STATUS_INITTED; /* * setup to verify we get an interrupt, and fallback * to an alternate if necessary and possible */ if (!reinit) { init_timer(&dd->ipath_intrchk_timer); dd->ipath_intrchk_timer.function = verify_interrupt; dd->ipath_intrchk_timer.data = (unsigned long) dd; } dd->ipath_intrchk_timer.expires = jiffies + HZ/2; add_timer(&dd->ipath_intrchk_timer); } else ipath_dev_err(dd, "No interrupts enabled, couldn't " "setup interrupt address\n"); if (dd->ipath_cfgports > ipath_stats.sps_nports) /* * sps_nports is a global, so, we set it to * the highest number of ports of any of the * chips we find; we never decrement it, at * least for now. Since this might have changed * over disable/enable or prior to reset, always * do the check and potentially adjust. */ ipath_stats.sps_nports = dd->ipath_cfgports; } else ipath_dbg("Failed (%d) to initialize chip\n", ret); /* if ret is non-zero, we probably should do some cleanup here... */ return ret; } static int ipath_set_kpiobufs(const char *str, struct kernel_param *kp) { struct ipath_devdata *dd; unsigned long flags; unsigned short val; int ret; ret = ipath_parse_ushort(str, &val); spin_lock_irqsave(&ipath_devs_lock, flags); if (ret < 0) goto bail; if (val == 0) { ret = -EINVAL; goto bail; } list_for_each_entry(dd, &ipath_dev_list, ipath_list) { if (dd->ipath_kregbase) continue; if (val > (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - (dd->ipath_cfgports * IPATH_MIN_USER_PORT_BUFCNT))) { ipath_dev_err( dd, "Allocating %d PIO bufs for kernel leaves " "too few for %d user ports (%d each)\n", val, dd->ipath_cfgports - 1, IPATH_MIN_USER_PORT_BUFCNT); ret = -EINVAL; goto bail; } dd->ipath_lastport_piobuf = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - val; } ipath_kpiobufs = val; ret = 0; bail: spin_unlock_irqrestore(&ipath_devs_lock, flags); return ret; }
gpl-2.0
RolanDroid/android_kernel_motorola_msm8226
drivers/input/mouse/touchkit_ps2.c
13169
3147
/* ----------------------------------------------------------------------------
 * touchkit_ps2.c -- Driver for eGalax TouchKit PS/2 Touchscreens
 *
 * Copyright (C) 2005 by Stefan Lucke
 * Copyright (C) 2004 by Daniel Ritz
 * Copyright (C) by Todd E. Johnson (mtouchusb.c)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Based upon touchkitusb.c
 *
 * Vendor documentation is available at:
 * http://home.eeti.com.tw/web20/drivers/Software%20Programming%20Guide_v2.0.pdf
 */

#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/serio.h>
#include <linux/libps2.h>

#include "psmouse.h"
#include "touchkit_ps2.h"

/* Maximum reported absolute coordinates (11-bit X/Y range). */
#define TOUCHKIT_MAX_XC 0x07ff
#define TOUCHKIT_MAX_YC 0x07ff

/* Controller command byte and known sub-commands. */
#define TOUCHKIT_CMD 0x0a
#define TOUCHKIT_CMD_LENGTH 1

#define TOUCHKIT_CMD_ACTIVE 'A'
#define TOUCHKIT_CMD_FIRMWARE_VERSION 'D'
#define TOUCHKIT_CMD_CONTROLLER_TYPE 'E'

/*
 * Pack a ps2_command() command word: s = bytes to send, r = bytes to
 * receive, c = command code.
 */
#define TOUCHKIT_SEND_PARMS(s, r, c) ((s) << 12 | (r) << 8 | (c))

/*
 * Field accessors for the 5-byte report packet: touch bit in byte 0,
 * X/Y each split into two 7-bit halves in bytes 1-2 and 3-4.
 */
#define TOUCHKIT_GET_TOUCHED(packet) (((packet)[0]) & 0x01)
#define TOUCHKIT_GET_X(packet) (((packet)[1] << 7) | (packet)[2])
#define TOUCHKIT_GET_Y(packet) (((packet)[3] << 7) | (packet)[4])

/*
 * Per-byte protocol handler: accumulate bytes until a full 5-byte
 * packet is present, then report position and touch state to the
 * input layer.
 */
static psmouse_ret_t touchkit_ps2_process_byte(struct psmouse *psmouse)
{
	unsigned char *packet = psmouse->packet;
	struct input_dev *dev = psmouse->dev;

	/* Keep collecting bytes until the packet is complete. */
	if (psmouse->pktcnt != 5)
		return PSMOUSE_GOOD_DATA;

	input_report_abs(dev, ABS_X, TOUCHKIT_GET_X(packet));
	input_report_abs(dev, ABS_Y, TOUCHKIT_GET_Y(packet));
	input_report_key(dev, BTN_TOUCH, TOUCHKIT_GET_TOUCHED(packet));
	input_sync(dev);

	return PSMOUSE_FULL_PACKET;
}

/*
 * Probe for a TouchKit touchscreen on the PS/2 port.
 *
 * Sends the "active" command and checks the 3-byte echo; on match,
 * and when @set_properties is set, configures the input device for
 * absolute touch events and installs the protocol handler.
 *
 * Returns 0 on success, -ENODEV if no TouchKit device responds.
 */
int touchkit_ps2_detect(struct psmouse *psmouse, bool set_properties)
{
	struct input_dev *dev = psmouse->dev;
	unsigned char param[3];
	int command;

	param[0] = TOUCHKIT_CMD_LENGTH;
	param[1] = TOUCHKIT_CMD_ACTIVE;
	command = TOUCHKIT_SEND_PARMS(2, 3, TOUCHKIT_CMD);

	if (ps2_command(&psmouse->ps2dev, param, command))
		return -ENODEV;

	/* Expect the command echoed back with status 0x01 and 'A'. */
	if (param[0] != TOUCHKIT_CMD || param[1] != 0x01 ||
	    param[2] != TOUCHKIT_CMD_ACTIVE)
		return -ENODEV;

	if (set_properties) {
		/* Touchscreen: absolute axes plus a single touch key. */
		dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
		dev->keybit[BIT_WORD(BTN_MOUSE)] = 0;
		dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
		input_set_abs_params(dev, ABS_X, 0, TOUCHKIT_MAX_XC, 0, 0);
		input_set_abs_params(dev, ABS_Y, 0, TOUCHKIT_MAX_YC, 0, 0);

		psmouse->vendor = "eGalax";
		psmouse->name = "Touchscreen";
		psmouse->protocol_handler = touchkit_ps2_process_byte;
		psmouse->pktsize = 5;
	}

	return 0;
}
gpl-2.0
XileForce/Vindicator-S6-Unified
arch/mn10300/lib/negdi2.c
13937
1821
/* More subroutines needed by GCC output code on some machines. */ /* Compile this one with gcc. */ /* Copyright (C) 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001 Free Software Foundation, Inc. This file is part of GNU CC. GNU CC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public Licence as published by the Free Software Foundation; either version 2, or (at your option) any later version. In addition to the permissions in the GNU General Public Licence, the Free Software Foundation gives you unlimited permission to link the compiled version of this file into combinations with other programs, and to distribute those combinations without any restriction coming from the use of this file. (The General Public Licence restrictions do apply in other respects; for example, they cover modification of the file, and distribution when not linked into a combine executable.) GNU CC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public Licence for more details. You should have received a copy of the GNU General Public Licence along with GNU CC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* It is incorrect to include config.h here, because this file is being compiled for the target, and hence definitions concerning only the host do not apply. */ #include <linux/types.h> union DWunion { s64 ll; struct { s32 low; s32 high; } s; }; s64 __negdi2(s64 u) { union DWunion w; union DWunion uu; uu.ll = u; w.s.low = -uu.s.low; w.s.high = -uu.s.high - ((u32) w.s.low > 0); return w.ll; }
gpl-2.0
goodhanrry/updaten_915s_to_lollipop
drivers/macintosh/windfarm_pid.c
14961
3751
/*
 * Windfarm PowerMac thermal control. Generic PID helpers
 *
 * (c) Copyright 2005 Benjamin Herrenschmidt, IBM Corp.
 *                    <benh@kernel.crashing.org>
 *
 * Released under the term of the GNU GPL v2.
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/module.h>

#include "windfarm_pid.h"

#undef DEBUG

#ifdef DEBUG
#define DBG(args...)	printk(args)
#else
#define DBG(args...)	do { } while(0)
#endif

/*
 * wf_pid_init - reset a generic PID state block
 * @st: state to initialize
 * @param: parameters to copy into the state
 *
 * Clears all history and marks the state as "first run" so the next
 * wf_pid_run() call pre-fills the history buffers with its sample.
 */
void wf_pid_init(struct wf_pid_state *st, struct wf_pid_param *param)
{
	memset(st, 0, sizeof(struct wf_pid_state));
	st->param = *param;
	st->first = 1;
}
EXPORT_SYMBOL_GPL(wf_pid_init);

/*
 * wf_pid_run - run one step of the generic PID loop
 * @st: PID state (history, index, target)
 * @new_sample: latest sensor sample
 *
 * Computes integral/derivative/proportional terms from the error
 * history and returns the new target, clamped to [param.min,
 * param.max].  The >> 36 suggests the gains (gp/gr/gd) are fixed
 * point with a 2^-36 scale — NOTE(review): confirm against
 * windfarm_pid.h.
 */
s32 wf_pid_run(struct wf_pid_state *st, s32 new_sample)
{
	s64	error, integ, deriv;
	s32	target;
	int	i, hlen = st->param.history_len;

	/* Calculate error term */
	error = new_sample - st->param.itarget;

	/* Get samples into our history buffer */
	if (st->first) {
		/* First run: flood history so integral/derivative start flat. */
		for (i = 0; i < hlen; i++) {
			st->samples[i] = new_sample;
			st->errors[i] = error;
		}
		st->first = 0;
		st->index = 0;
	} else {
		/* Circular buffer advance. */
		st->index = (st->index + 1) % hlen;
		st->samples[st->index] = new_sample;
		st->errors[st->index] = error;
	}

	/* Calculate integral term: sum of the last hlen errors, scaled by
	 * the sampling interval. */
	for (i = 0, integ = 0; i < hlen; i++)
		integ += st->errors[(st->index + hlen - i) % hlen];
	integ *= st->param.interval;

	/* Calculate derivative term: newest minus previous error over one
	 * interval. */
	deriv = st->errors[st->index] -
		st->errors[(st->index + hlen - 1) % hlen];
	deriv /= st->param.interval;

	/* Calculate target: combine the three terms with their gains and
	 * rescale from fixed point. */
	target = (s32)((integ * (s64)st->param.gr + deriv * (s64)st->param.gd +
		  error * (s64)st->param.gp) >> 36);
	/* Additive mode adjusts the previous target instead of replacing it. */
	if (st->param.additive)
		target += st->target;
	target = max(target, st->param.min);
	target = min(target, st->param.max);
	st->target = target;

	return st->target;
}
EXPORT_SYMBOL_GPL(wf_pid_run);

/*
 * wf_cpu_pid_init - reset a CPU power/temperature PID state block
 * @st: state to initialize
 * @param: parameters to copy into the state
 */
void wf_cpu_pid_init(struct wf_cpu_pid_state *st,
		     struct wf_cpu_pid_param *param)
{
	memset(st, 0, sizeof(struct wf_cpu_pid_state));
	st->param = *param;
	st->first = 1;
}
EXPORT_SYMBOL_GPL(wf_cpu_pid_init);

/*
 * wf_cpu_pid_run - run one step of the CPU PID loop
 * @st: CPU PID state
 * @new_power: latest CPU power reading
 * @new_temp: latest CPU temperature reading
 *
 * The integral runs on the power error (pmaxadj - power) and lowers
 * the effective temperature target; the derivative and proportional
 * terms run on temperature.  Returns the new target clamped to
 * [param.min, param.max].
 *
 * NOTE(review): the DBG() calls print s64/s32 values with %lx, which
 * does not match the argument types on all architectures — harmless
 * while DEBUG is #undef'd above, but should be fixed (e.g. %llx with
 * casts) if debugging is enabled.
 */
s32 wf_cpu_pid_run(struct wf_cpu_pid_state *st, s32 new_power,
		   s32 new_temp)
{
	s64	integ, deriv, prop;
	s32	error, target, sval, adj;
	int	i, hlen = st->param.history_len;

	/* Calculate error term: distance below the adjusted max power. */
	error = st->param.pmaxadj - new_power;

	/* Get samples into our history buffer */
	if (st->first) {
		/* First run: flood both power-error and temperature history. */
		for (i = 0; i < hlen; i++) {
			st->powers[i] = new_power;
			st->errors[i] = error;
		}
		st->temps[0] = st->temps[1] = new_temp;
		st->first = 0;
		st->index = st->tindex = 0;
	} else {
		st->index = (st->index + 1) % hlen;
		st->powers[st->index] = new_power;
		st->errors[st->index] = error;
		/* Temperature history is only two samples deep. */
		st->tindex = (st->tindex + 1) % 2;
		st->temps[st->tindex] = new_temp;
	}

	/* Calculate integral term of power errors, then derive an adjusted
	 * temperature target (sval), capped at param.ttarget. */
	for (i = 0, integ = 0; i < hlen; i++)
		integ += st->errors[(st->index + hlen - i) % hlen];
	integ *= st->param.interval;
	integ *= st->param.gr;
	sval = st->param.tmax - (s32)(integ >> 20);
	adj = min(st->param.ttarget, sval);

	DBG("integ: %lx, sval: %lx, adj: %lx\n", integ, sval, adj);

	/* Calculate derivative term from the two temperature samples. */
	deriv = st->temps[st->tindex] -
		st->temps[(st->tindex + 2 - 1) % 2];
	deriv /= st->param.interval;
	deriv *= st->param.gd;

	/* Calculate proportional term: temperature above the adjusted
	 * target; also recorded in last_delta for callers. */
	prop = st->last_delta = (new_temp - adj);
	prop *= st->param.gp;

	DBG("deriv: %lx, prop: %lx\n", deriv, prop);

	/* Calculate target: always additive to the previous target. */
	target = st->target + (s32)((deriv + prop) >> 36);
	target = max(target, st->param.min);
	target = min(target, st->param.max);
	st->target = target;

	return st->target;
}
EXPORT_SYMBOL_GPL(wf_cpu_pid_run);

MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
MODULE_DESCRIPTION("PID algorithm for PowerMacs thermal control");
MODULE_LICENSE("GPL");
gpl-2.0
RafaelRMachado/qtwebkit
Source/WebKit2/UIProcess/Downloads/DownloadProxyMap.cpp
114
2877
/*
 * Copyright (C) 2012 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DownloadProxyMap.h"

#include "ChildProcessProxy.h"
#include "DownloadProxy.h"
#include "DownloadProxyMessages.h"
#include "MessageReceiverMap.h"
#include <wtf/StdLibExtras.h>

namespace WebKit {

// Tracks live DownloadProxy objects for one child process, keyed by
// download ID, and wires each proxy into that process's message
// receiver map.
DownloadProxyMap::DownloadProxyMap(ChildProcessProxy* process)
    : m_process(process)
{
}

DownloadProxyMap::~DownloadProxyMap()
{
    // All downloads must have finished (or the process closed) before
    // the map is destroyed.
    ASSERT(m_downloads.isEmpty());
}

// Creates a new DownloadProxy, stores it by its download ID, and
// registers it to receive DownloadProxy messages for that ID.
// Ownership stays with the map (RefPtr in m_downloads); the raw
// pointer returned is valid until downloadFinished()/processDidClose().
DownloadProxy* DownloadProxyMap::createDownloadProxy(WebContext* webContext)
{
    RefPtr<DownloadProxy> downloadProxy = DownloadProxy::create(*this, webContext);

    m_downloads.set(downloadProxy->downloadID(), downloadProxy);

    m_process->addMessageReceiver(Messages::DownloadProxy::messageReceiverName(), downloadProxy->downloadID(), downloadProxy.get());

    return downloadProxy.get();
}

// Invalidates a finished download, unregisters its message receiver,
// and drops the map's reference (which may destroy the proxy).
void DownloadProxyMap::downloadFinished(DownloadProxy* downloadProxy)
{
    ASSERT(m_downloads.contains(downloadProxy->downloadID()));

    downloadProxy->invalidate();
    m_downloads.remove(downloadProxy->downloadID());

    m_process->removeMessageReceiver(Messages::DownloadProxy::messageReceiverName(), downloadProxy->downloadID());
}

// Called when the child process goes away: notify and invalidate every
// outstanding download, then clear the map and the (now dead) process
// pointer.  Note the message receivers are not individually removed
// here — presumably the process teardown discards them; verify against
// ChildProcessProxy.
void DownloadProxyMap::processDidClose()
{
    // Invalidate all outstanding downloads.
    for (HashMap<uint64_t, RefPtr<DownloadProxy> >::iterator::Values it = m_downloads.begin().values(), end = m_downloads.end().values(); it != end; ++it) {
        (*it)->processDidClose();
        (*it)->invalidate();
    }

    m_downloads.clear();
    m_process = 0;
}

} // namespace WebKit
gpl-2.0
GuoqingJiang/linux
drivers/net/wan/hdlc_fr.c
114
30075
/* * Generic HDLC support routines for Linux * Frame Relay support * * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License * as published by the Free Software Foundation. * Theory of PVC state DCE mode: (exist,new) -> 0,0 when "PVC create" or if "link unreliable" 0,x -> 1,1 if "link reliable" when sending FULL STATUS 1,1 -> 1,0 if received FULL STATUS ACK (active) -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create" -> 1 when "PVC up" and (exist,new) = 1,0 DTE mode: (exist,new,active) = FULL STATUS if "link reliable" = 0, 0, 0 if "link unreliable" No LMI: active = open and "link reliable" exist = new = not used CCITT LMI: ITU-T Q.933 Annex A ANSI LMI: ANSI T1.617 Annex D CISCO LMI: the original, aka "Gang of Four" LMI */ #include <linux/errno.h> #include <linux/etherdevice.h> #include <linux/hdlc.h> #include <linux/if_arp.h> #include <linux/inetdevice.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pkt_sched.h> #include <linux/poll.h> #include <linux/rtnetlink.h> #include <linux/skbuff.h> #include <linux/slab.h> #undef DEBUG_PKT #undef DEBUG_ECN #undef DEBUG_LINK #undef DEBUG_PROTO #undef DEBUG_PVC #define FR_UI 0x03 #define FR_PAD 0x00 #define NLPID_IP 0xCC #define NLPID_IPV6 0x8E #define NLPID_SNAP 0x80 #define NLPID_PAD 0x00 #define NLPID_CCITT_ANSI_LMI 0x08 #define NLPID_CISCO_LMI 0x09 #define LMI_CCITT_ANSI_DLCI 0 /* LMI DLCI */ #define LMI_CISCO_DLCI 1023 #define LMI_CALLREF 0x00 /* Call Reference */ #define LMI_ANSI_LOCKSHIFT 0x95 /* ANSI locking shift */ #define LMI_ANSI_CISCO_REPTYPE 0x01 /* report type */ #define LMI_CCITT_REPTYPE 0x51 #define LMI_ANSI_CISCO_ALIVE 0x03 /* keep alive */ #define LMI_CCITT_ALIVE 0x53 #define LMI_ANSI_CISCO_PVCSTAT 0x07 /* PVC status */ #define LMI_CCITT_PVCSTAT 0x57 #define LMI_FULLREP 0x00 /* full report */ #define 
LMI_INTEGRITY 0x01 /* link integrity report */ #define LMI_SINGLE 0x02 /* single PVC report */ #define LMI_STATUS_ENQUIRY 0x75 #define LMI_STATUS 0x7D /* reply */ #define LMI_REPT_LEN 1 /* report type element length */ #define LMI_INTEG_LEN 2 /* link integrity element length */ #define LMI_CCITT_CISCO_LENGTH 13 /* LMI frame lengths */ #define LMI_ANSI_LENGTH 14 struct fr_hdr { #if defined(__LITTLE_ENDIAN_BITFIELD) unsigned ea1: 1; unsigned cr: 1; unsigned dlcih: 6; unsigned ea2: 1; unsigned de: 1; unsigned becn: 1; unsigned fecn: 1; unsigned dlcil: 4; #else unsigned dlcih: 6; unsigned cr: 1; unsigned ea1: 1; unsigned dlcil: 4; unsigned fecn: 1; unsigned becn: 1; unsigned de: 1; unsigned ea2: 1; #endif } __packed; struct pvc_device { struct net_device *frad; struct net_device *main; struct net_device *ether; /* bridged Ethernet interface */ struct pvc_device *next; /* Sorted in ascending DLCI order */ int dlci; int open_count; struct { unsigned int new: 1; unsigned int active: 1; unsigned int exist: 1; unsigned int deleted: 1; unsigned int fecn: 1; unsigned int becn: 1; unsigned int bandwidth; /* Cisco LMI reporting only */ }state; }; struct frad_state { fr_proto settings; struct pvc_device *first_pvc; int dce_pvc_count; struct timer_list timer; unsigned long last_poll; int reliable; int dce_changed; int request; int fullrep_sent; u32 last_errors; /* last errors bit list */ u8 n391cnt; u8 txseq; /* TX sequence number */ u8 rxseq; /* RX sequence number */ }; static int fr_ioctl(struct net_device *dev, struct ifreq *ifr); static inline u16 q922_to_dlci(u8 *hdr) { return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4); } static inline void dlci_to_q922(u8 *hdr, u16 dlci) { hdr[0] = (dlci >> 2) & 0xFC; hdr[1] = ((dlci << 4) & 0xF0) | 0x01; } static inline struct frad_state* state(hdlc_device *hdlc) { return(struct frad_state *)(hdlc->state); } static inline struct pvc_device *find_pvc(hdlc_device *hdlc, u16 dlci) { struct pvc_device *pvc = state(hdlc)->first_pvc; while 
(pvc) { if (pvc->dlci == dlci) return pvc; if (pvc->dlci > dlci) return NULL; /* the list is sorted */ pvc = pvc->next; } return NULL; } static struct pvc_device *add_pvc(struct net_device *dev, u16 dlci) { hdlc_device *hdlc = dev_to_hdlc(dev); struct pvc_device *pvc, **pvc_p = &state(hdlc)->first_pvc; while (*pvc_p) { if ((*pvc_p)->dlci == dlci) return *pvc_p; if ((*pvc_p)->dlci > dlci) break; /* the list is sorted */ pvc_p = &(*pvc_p)->next; } pvc = kzalloc(sizeof(*pvc), GFP_ATOMIC); #ifdef DEBUG_PVC printk(KERN_DEBUG "add_pvc: allocated pvc %p, frad %p\n", pvc, dev); #endif if (!pvc) return NULL; pvc->dlci = dlci; pvc->frad = dev; pvc->next = *pvc_p; /* Put it in the chain */ *pvc_p = pvc; return pvc; } static inline int pvc_is_used(struct pvc_device *pvc) { return pvc->main || pvc->ether; } static inline void pvc_carrier(int on, struct pvc_device *pvc) { if (on) { if (pvc->main) if (!netif_carrier_ok(pvc->main)) netif_carrier_on(pvc->main); if (pvc->ether) if (!netif_carrier_ok(pvc->ether)) netif_carrier_on(pvc->ether); } else { if (pvc->main) if (netif_carrier_ok(pvc->main)) netif_carrier_off(pvc->main); if (pvc->ether) if (netif_carrier_ok(pvc->ether)) netif_carrier_off(pvc->ether); } } static inline void delete_unused_pvcs(hdlc_device *hdlc) { struct pvc_device **pvc_p = &state(hdlc)->first_pvc; while (*pvc_p) { if (!pvc_is_used(*pvc_p)) { struct pvc_device *pvc = *pvc_p; #ifdef DEBUG_PVC printk(KERN_DEBUG "freeing unused pvc: %p\n", pvc); #endif *pvc_p = pvc->next; kfree(pvc); continue; } pvc_p = &(*pvc_p)->next; } } static inline struct net_device **get_dev_p(struct pvc_device *pvc, int type) { if (type == ARPHRD_ETHER) return &pvc->ether; else return &pvc->main; } static int fr_hard_header(struct sk_buff **skb_p, u16 dlci) { u16 head_len; struct sk_buff *skb = *skb_p; switch (skb->protocol) { case cpu_to_be16(NLPID_CCITT_ANSI_LMI): head_len = 4; skb_push(skb, head_len); skb->data[3] = NLPID_CCITT_ANSI_LMI; break; case cpu_to_be16(NLPID_CISCO_LMI): 
head_len = 4; skb_push(skb, head_len); skb->data[3] = NLPID_CISCO_LMI; break; case cpu_to_be16(ETH_P_IP): head_len = 4; skb_push(skb, head_len); skb->data[3] = NLPID_IP; break; case cpu_to_be16(ETH_P_IPV6): head_len = 4; skb_push(skb, head_len); skb->data[3] = NLPID_IPV6; break; case cpu_to_be16(ETH_P_802_3): head_len = 10; if (skb_headroom(skb) < head_len) { struct sk_buff *skb2 = skb_realloc_headroom(skb, head_len); if (!skb2) return -ENOBUFS; dev_kfree_skb(skb); skb = *skb_p = skb2; } skb_push(skb, head_len); skb->data[3] = FR_PAD; skb->data[4] = NLPID_SNAP; skb->data[5] = FR_PAD; skb->data[6] = 0x80; skb->data[7] = 0xC2; skb->data[8] = 0x00; skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */ break; default: head_len = 10; skb_push(skb, head_len); skb->data[3] = FR_PAD; skb->data[4] = NLPID_SNAP; skb->data[5] = FR_PAD; skb->data[6] = FR_PAD; skb->data[7] = FR_PAD; *(__be16*)(skb->data + 8) = skb->protocol; } dlci_to_q922(skb->data, dlci); skb->data[2] = FR_UI; return 0; } static int pvc_open(struct net_device *dev) { struct pvc_device *pvc = dev->ml_priv; if ((pvc->frad->flags & IFF_UP) == 0) return -EIO; /* Frad must be UP in order to activate PVC */ if (pvc->open_count++ == 0) { hdlc_device *hdlc = dev_to_hdlc(pvc->frad); if (state(hdlc)->settings.lmi == LMI_NONE) pvc->state.active = netif_carrier_ok(pvc->frad); pvc_carrier(pvc->state.active, pvc); state(hdlc)->dce_changed = 1; } return 0; } static int pvc_close(struct net_device *dev) { struct pvc_device *pvc = dev->ml_priv; if (--pvc->open_count == 0) { hdlc_device *hdlc = dev_to_hdlc(pvc->frad); if (state(hdlc)->settings.lmi == LMI_NONE) pvc->state.active = 0; if (state(hdlc)->settings.dce) { state(hdlc)->dce_changed = 1; pvc->state.active = 0; } } return 0; } static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct pvc_device *pvc = dev->ml_priv; fr_proto_pvc_info info; if (ifr->ifr_settings.type == IF_GET_PROTO) { if (dev->type == ARPHRD_ETHER) ifr->ifr_settings.type = 
IF_PROTO_FR_ETH_PVC; else ifr->ifr_settings.type = IF_PROTO_FR_PVC; if (ifr->ifr_settings.size < sizeof(info)) { /* data size wanted */ ifr->ifr_settings.size = sizeof(info); return -ENOBUFS; } info.dlci = pvc->dlci; memcpy(info.master, pvc->frad->name, IFNAMSIZ); if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info, &info, sizeof(info))) return -EFAULT; return 0; } return -EINVAL; } static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev) { struct pvc_device *pvc = dev->ml_priv; if (pvc->state.active) { if (dev->type == ARPHRD_ETHER) { int pad = ETH_ZLEN - skb->len; if (pad > 0) { /* Pad the frame with zeros */ int len = skb->len; if (skb_tailroom(skb) < pad) if (pskb_expand_head(skb, 0, pad, GFP_ATOMIC)) { dev->stats.tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; } skb_put(skb, pad); memset(skb->data + len, 0, pad); } skb->protocol = cpu_to_be16(ETH_P_802_3); } if (!fr_hard_header(&skb, pvc->dlci)) { dev->stats.tx_bytes += skb->len; dev->stats.tx_packets++; if (pvc->state.fecn) /* TX Congestion counter */ dev->stats.tx_compressed++; skb->dev = pvc->frad; dev_queue_xmit(skb); return NETDEV_TX_OK; } } dev->stats.tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; } static inline void fr_log_dlci_active(struct pvc_device *pvc) { netdev_info(pvc->frad, "DLCI %d [%s%s%s]%s %s\n", pvc->dlci, pvc->main ? pvc->main->name : "", pvc->main && pvc->ether ? " " : "", pvc->ether ? pvc->ether->name : "", pvc->state.new ? " new" : "", !pvc->state.exist ? "deleted" : pvc->state.active ? "active" : "inactive"); } static inline u8 fr_lmi_nextseq(u8 x) { x++; return x ? x : 1; } static void fr_lmi_send(struct net_device *dev, int fullrep) { hdlc_device *hdlc = dev_to_hdlc(dev); struct sk_buff *skb; struct pvc_device *pvc = state(hdlc)->first_pvc; int lmi = state(hdlc)->settings.lmi; int dce = state(hdlc)->settings.dce; int len = lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH; int stat_len = (lmi == LMI_CISCO) ? 
6 : 3; u8 *data; int i = 0; if (dce && fullrep) { len += state(hdlc)->dce_pvc_count * (2 + stat_len); if (len > HDLC_MAX_MRU) { netdev_warn(dev, "Too many PVCs while sending LMI full report\n"); return; } } skb = dev_alloc_skb(len); if (!skb) { netdev_warn(dev, "Memory squeeze on fr_lmi_send()\n"); return; } memset(skb->data, 0, len); skb_reserve(skb, 4); if (lmi == LMI_CISCO) { skb->protocol = cpu_to_be16(NLPID_CISCO_LMI); fr_hard_header(&skb, LMI_CISCO_DLCI); } else { skb->protocol = cpu_to_be16(NLPID_CCITT_ANSI_LMI); fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI); } data = skb_tail_pointer(skb); data[i++] = LMI_CALLREF; data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY; if (lmi == LMI_ANSI) data[i++] = LMI_ANSI_LOCKSHIFT; data[i++] = lmi == LMI_CCITT ? LMI_CCITT_REPTYPE : LMI_ANSI_CISCO_REPTYPE; data[i++] = LMI_REPT_LEN; data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY; data[i++] = lmi == LMI_CCITT ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE; data[i++] = LMI_INTEG_LEN; data[i++] = state(hdlc)->txseq = fr_lmi_nextseq(state(hdlc)->txseq); data[i++] = state(hdlc)->rxseq; if (dce && fullrep) { while (pvc) { data[i++] = lmi == LMI_CCITT ? 
LMI_CCITT_PVCSTAT : LMI_ANSI_CISCO_PVCSTAT; data[i++] = stat_len; /* LMI start/restart */ if (state(hdlc)->reliable && !pvc->state.exist) { pvc->state.exist = pvc->state.new = 1; fr_log_dlci_active(pvc); } /* ifconfig PVC up */ if (pvc->open_count && !pvc->state.active && pvc->state.exist && !pvc->state.new) { pvc_carrier(1, pvc); pvc->state.active = 1; fr_log_dlci_active(pvc); } if (lmi == LMI_CISCO) { data[i] = pvc->dlci >> 8; data[i + 1] = pvc->dlci & 0xFF; } else { data[i] = (pvc->dlci >> 4) & 0x3F; data[i + 1] = ((pvc->dlci << 3) & 0x78) | 0x80; data[i + 2] = 0x80; } if (pvc->state.new) data[i + 2] |= 0x08; else if (pvc->state.active) data[i + 2] |= 0x02; i += stat_len; pvc = pvc->next; } } skb_put(skb, i); skb->priority = TC_PRIO_CONTROL; skb->dev = dev; skb_reset_network_header(skb); dev_queue_xmit(skb); } static void fr_set_link_state(int reliable, struct net_device *dev) { hdlc_device *hdlc = dev_to_hdlc(dev); struct pvc_device *pvc = state(hdlc)->first_pvc; state(hdlc)->reliable = reliable; if (reliable) { netif_dormant_off(dev); state(hdlc)->n391cnt = 0; /* Request full status */ state(hdlc)->dce_changed = 1; if (state(hdlc)->settings.lmi == LMI_NONE) { while (pvc) { /* Activate all PVCs */ pvc_carrier(1, pvc); pvc->state.exist = pvc->state.active = 1; pvc->state.new = 0; pvc = pvc->next; } } } else { netif_dormant_on(dev); while (pvc) { /* Deactivate all PVCs */ pvc_carrier(0, pvc); pvc->state.exist = pvc->state.active = 0; pvc->state.new = 0; if (!state(hdlc)->settings.dce) pvc->state.bandwidth = 0; pvc = pvc->next; } } } static void fr_timer(unsigned long arg) { struct net_device *dev = (struct net_device *)arg; hdlc_device *hdlc = dev_to_hdlc(dev); int i, cnt = 0, reliable; u32 list; if (state(hdlc)->settings.dce) { reliable = state(hdlc)->request && time_before(jiffies, state(hdlc)->last_poll + state(hdlc)->settings.t392 * HZ); state(hdlc)->request = 0; } else { state(hdlc)->last_errors <<= 1; /* Shift the list */ if (state(hdlc)->request) { if 
(state(hdlc)->reliable) netdev_info(dev, "No LMI status reply received\n"); state(hdlc)->last_errors |= 1; } list = state(hdlc)->last_errors; for (i = 0; i < state(hdlc)->settings.n393; i++, list >>= 1) cnt += (list & 1); /* errors count */ reliable = (cnt < state(hdlc)->settings.n392); } if (state(hdlc)->reliable != reliable) { netdev_info(dev, "Link %sreliable\n", reliable ? "" : "un"); fr_set_link_state(reliable, dev); } if (state(hdlc)->settings.dce) state(hdlc)->timer.expires = jiffies + state(hdlc)->settings.t392 * HZ; else { if (state(hdlc)->n391cnt) state(hdlc)->n391cnt--; fr_lmi_send(dev, state(hdlc)->n391cnt == 0); state(hdlc)->last_poll = jiffies; state(hdlc)->request = 1; state(hdlc)->timer.expires = jiffies + state(hdlc)->settings.t391 * HZ; } state(hdlc)->timer.function = fr_timer; state(hdlc)->timer.data = arg; add_timer(&state(hdlc)->timer); } static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb) { hdlc_device *hdlc = dev_to_hdlc(dev); struct pvc_device *pvc; u8 rxseq, txseq; int lmi = state(hdlc)->settings.lmi; int dce = state(hdlc)->settings.dce; int stat_len = (lmi == LMI_CISCO) ? 6 : 3, reptype, error, no_ram, i; if (skb->len < (lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH)) { netdev_info(dev, "Short LMI frame\n"); return 1; } if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI : NLPID_CCITT_ANSI_LMI)) { netdev_info(dev, "Received non-LMI frame with LMI DLCI\n"); return 1; } if (skb->data[4] != LMI_CALLREF) { netdev_info(dev, "Invalid LMI Call reference (0x%02X)\n", skb->data[4]); return 1; } if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) { netdev_info(dev, "Invalid LMI Message type (0x%02X)\n", skb->data[5]); return 1; } if (lmi == LMI_ANSI) { if (skb->data[6] != LMI_ANSI_LOCKSHIFT) { netdev_info(dev, "Not ANSI locking shift in LMI message (0x%02X)\n", skb->data[6]); return 1; } i = 7; } else i = 6; if (skb->data[i] != (lmi == LMI_CCITT ? 
LMI_CCITT_REPTYPE : LMI_ANSI_CISCO_REPTYPE)) { netdev_info(dev, "Not an LMI Report type IE (0x%02X)\n", skb->data[i]); return 1; } if (skb->data[++i] != LMI_REPT_LEN) { netdev_info(dev, "Invalid LMI Report type IE length (%u)\n", skb->data[i]); return 1; } reptype = skb->data[++i]; if (reptype != LMI_INTEGRITY && reptype != LMI_FULLREP) { netdev_info(dev, "Unsupported LMI Report type (0x%02X)\n", reptype); return 1; } if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE)) { netdev_info(dev, "Not an LMI Link integrity verification IE (0x%02X)\n", skb->data[i]); return 1; } if (skb->data[++i] != LMI_INTEG_LEN) { netdev_info(dev, "Invalid LMI Link integrity verification IE length (%u)\n", skb->data[i]); return 1; } i++; state(hdlc)->rxseq = skb->data[i++]; /* TX sequence from peer */ rxseq = skb->data[i++]; /* Should confirm our sequence */ txseq = state(hdlc)->txseq; if (dce) state(hdlc)->last_poll = jiffies; error = 0; if (!state(hdlc)->reliable) error = 1; if (rxseq == 0 || rxseq != txseq) { /* Ask for full report next time */ state(hdlc)->n391cnt = 0; error = 1; } if (dce) { if (state(hdlc)->fullrep_sent && !error) { /* Stop sending full report - the last one has been confirmed by DTE */ state(hdlc)->fullrep_sent = 0; pvc = state(hdlc)->first_pvc; while (pvc) { if (pvc->state.new) { pvc->state.new = 0; /* Tell DTE that new PVC is now active */ state(hdlc)->dce_changed = 1; } pvc = pvc->next; } } if (state(hdlc)->dce_changed) { reptype = LMI_FULLREP; state(hdlc)->fullrep_sent = 1; state(hdlc)->dce_changed = 0; } state(hdlc)->request = 1; /* got request */ fr_lmi_send(dev, reptype == LMI_FULLREP ? 
1 : 0); return 0; } /* DTE */ state(hdlc)->request = 0; /* got response, no request pending */ if (error) return 0; if (reptype != LMI_FULLREP) return 0; pvc = state(hdlc)->first_pvc; while (pvc) { pvc->state.deleted = 1; pvc = pvc->next; } no_ram = 0; while (skb->len >= i + 2 + stat_len) { u16 dlci; u32 bw; unsigned int active, new; if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT : LMI_ANSI_CISCO_PVCSTAT)) { netdev_info(dev, "Not an LMI PVC status IE (0x%02X)\n", skb->data[i]); return 1; } if (skb->data[++i] != stat_len) { netdev_info(dev, "Invalid LMI PVC status IE length (%u)\n", skb->data[i]); return 1; } i++; new = !! (skb->data[i + 2] & 0x08); active = !! (skb->data[i + 2] & 0x02); if (lmi == LMI_CISCO) { dlci = (skb->data[i] << 8) | skb->data[i + 1]; bw = (skb->data[i + 3] << 16) | (skb->data[i + 4] << 8) | (skb->data[i + 5]); } else { dlci = ((skb->data[i] & 0x3F) << 4) | ((skb->data[i + 1] & 0x78) >> 3); bw = 0; } pvc = add_pvc(dev, dlci); if (!pvc && !no_ram) { netdev_warn(dev, "Memory squeeze on fr_lmi_recv()\n"); no_ram = 1; } if (pvc) { pvc->state.exist = 1; pvc->state.deleted = 0; if (active != pvc->state.active || new != pvc->state.new || bw != pvc->state.bandwidth || !pvc->state.exist) { pvc->state.new = new; pvc->state.active = active; pvc->state.bandwidth = bw; pvc_carrier(active, pvc); fr_log_dlci_active(pvc); } } i += stat_len; } pvc = state(hdlc)->first_pvc; while (pvc) { if (pvc->state.deleted && pvc->state.exist) { pvc_carrier(0, pvc); pvc->state.active = pvc->state.new = 0; pvc->state.exist = 0; pvc->state.bandwidth = 0; fr_log_dlci_active(pvc); } pvc = pvc->next; } /* Next full report after N391 polls */ state(hdlc)->n391cnt = state(hdlc)->settings.n391; return 0; } static int fr_rx(struct sk_buff *skb) { struct net_device *frad = skb->dev; hdlc_device *hdlc = dev_to_hdlc(frad); struct fr_hdr *fh = (struct fr_hdr *)skb->data; u8 *data = skb->data; u16 dlci; struct pvc_device *pvc; struct net_device *dev = NULL; if (skb->len <= 4 
|| fh->ea1 || data[2] != FR_UI) goto rx_error; dlci = q922_to_dlci(skb->data); if ((dlci == LMI_CCITT_ANSI_DLCI && (state(hdlc)->settings.lmi == LMI_ANSI || state(hdlc)->settings.lmi == LMI_CCITT)) || (dlci == LMI_CISCO_DLCI && state(hdlc)->settings.lmi == LMI_CISCO)) { if (fr_lmi_recv(frad, skb)) goto rx_error; dev_kfree_skb_any(skb); return NET_RX_SUCCESS; } pvc = find_pvc(hdlc, dlci); if (!pvc) { #ifdef DEBUG_PKT netdev_info(frad, "No PVC for received frame's DLCI %d\n", dlci); #endif dev_kfree_skb_any(skb); return NET_RX_DROP; } if (pvc->state.fecn != fh->fecn) { #ifdef DEBUG_ECN printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", frad->name, dlci, fh->fecn ? "N" : "FF"); #endif pvc->state.fecn ^= 1; } if (pvc->state.becn != fh->becn) { #ifdef DEBUG_ECN printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", frad->name, dlci, fh->becn ? "N" : "FF"); #endif pvc->state.becn ^= 1; } if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { frad->stats.rx_dropped++; return NET_RX_DROP; } if (data[3] == NLPID_IP) { skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */ dev = pvc->main; skb->protocol = htons(ETH_P_IP); } else if (data[3] == NLPID_IPV6) { skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */ dev = pvc->main; skb->protocol = htons(ETH_P_IPV6); } else if (skb->len > 10 && data[3] == FR_PAD && data[4] == NLPID_SNAP && data[5] == FR_PAD) { u16 oui = ntohs(*(__be16*)(data + 6)); u16 pid = ntohs(*(__be16*)(data + 8)); skb_pull(skb, 10); switch ((((u32)oui) << 16) | pid) { case ETH_P_ARP: /* routed frame with SNAP */ case ETH_P_IPX: case ETH_P_IP: /* a long variant */ case ETH_P_IPV6: dev = pvc->main; skb->protocol = htons(pid); break; case 0x80C20007: /* bridged Ethernet frame */ if ((dev = pvc->ether) != NULL) skb->protocol = eth_type_trans(skb, dev); break; default: netdev_info(frad, "Unsupported protocol, OUI=%x PID=%x\n", oui, pid); dev_kfree_skb_any(skb); return NET_RX_DROP; } } else { netdev_info(frad, "Unsupported protocol, NLPID=%x length=%i\n", data[3], 
skb->len); dev_kfree_skb_any(skb); return NET_RX_DROP; } if (dev) { dev->stats.rx_packets++; /* PVC traffic */ dev->stats.rx_bytes += skb->len; if (pvc->state.becn) dev->stats.rx_compressed++; skb->dev = dev; netif_rx(skb); return NET_RX_SUCCESS; } else { dev_kfree_skb_any(skb); return NET_RX_DROP; } rx_error: frad->stats.rx_errors++; /* Mark error */ dev_kfree_skb_any(skb); return NET_RX_DROP; } static void fr_start(struct net_device *dev) { hdlc_device *hdlc = dev_to_hdlc(dev); #ifdef DEBUG_LINK printk(KERN_DEBUG "fr_start\n"); #endif if (state(hdlc)->settings.lmi != LMI_NONE) { state(hdlc)->reliable = 0; state(hdlc)->dce_changed = 1; state(hdlc)->request = 0; state(hdlc)->fullrep_sent = 0; state(hdlc)->last_errors = 0xFFFFFFFF; state(hdlc)->n391cnt = 0; state(hdlc)->txseq = state(hdlc)->rxseq = 0; init_timer(&state(hdlc)->timer); /* First poll after 1 s */ state(hdlc)->timer.expires = jiffies + HZ; state(hdlc)->timer.function = fr_timer; state(hdlc)->timer.data = (unsigned long)dev; add_timer(&state(hdlc)->timer); } else fr_set_link_state(1, dev); } static void fr_stop(struct net_device *dev) { hdlc_device *hdlc = dev_to_hdlc(dev); #ifdef DEBUG_LINK printk(KERN_DEBUG "fr_stop\n"); #endif if (state(hdlc)->settings.lmi != LMI_NONE) del_timer_sync(&state(hdlc)->timer); fr_set_link_state(0, dev); } static void fr_close(struct net_device *dev) { hdlc_device *hdlc = dev_to_hdlc(dev); struct pvc_device *pvc = state(hdlc)->first_pvc; while (pvc) { /* Shutdown all PVCs for this FRAD */ if (pvc->main) dev_close(pvc->main); if (pvc->ether) dev_close(pvc->ether); pvc = pvc->next; } } static void pvc_setup(struct net_device *dev) { dev->type = ARPHRD_DLCI; dev->flags = IFF_POINTOPOINT; dev->hard_header_len = 10; dev->addr_len = 2; netif_keep_dst(dev); } static const struct net_device_ops pvc_ops = { .ndo_open = pvc_open, .ndo_stop = pvc_close, .ndo_change_mtu = hdlc_change_mtu, .ndo_start_xmit = pvc_xmit, .ndo_do_ioctl = pvc_ioctl, }; static int fr_add_pvc(struct net_device 
*frad, unsigned int dlci, int type) { hdlc_device *hdlc = dev_to_hdlc(frad); struct pvc_device *pvc; struct net_device *dev; int used; if ((pvc = add_pvc(frad, dlci)) == NULL) { netdev_warn(frad, "Memory squeeze on fr_add_pvc()\n"); return -ENOBUFS; } if (*get_dev_p(pvc, type)) return -EEXIST; used = pvc_is_used(pvc); if (type == ARPHRD_ETHER) dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN, ether_setup); else dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup); if (!dev) { netdev_warn(frad, "Memory squeeze on fr_pvc()\n"); delete_unused_pvcs(hdlc); return -ENOBUFS; } if (type == ARPHRD_ETHER) { dev->priv_flags &= ~IFF_TX_SKB_SHARING; eth_hw_addr_random(dev); } else { *(__be16*)dev->dev_addr = htons(dlci); dlci_to_q922(dev->broadcast, dlci); } dev->netdev_ops = &pvc_ops; dev->mtu = HDLC_MAX_MTU; dev->priv_flags |= IFF_NO_QUEUE; dev->ml_priv = pvc; if (register_netdevice(dev) != 0) { free_netdev(dev); delete_unused_pvcs(hdlc); return -EIO; } dev->destructor = free_netdev; *get_dev_p(pvc, type) = dev; if (!used) { state(hdlc)->dce_changed = 1; state(hdlc)->dce_pvc_count++; } return 0; } static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type) { struct pvc_device *pvc; struct net_device *dev; if ((pvc = find_pvc(hdlc, dlci)) == NULL) return -ENOENT; if ((dev = *get_dev_p(pvc, type)) == NULL) return -ENOENT; if (dev->flags & IFF_UP) return -EBUSY; /* PVC in use */ unregister_netdevice(dev); /* the destructor will free_netdev(dev) */ *get_dev_p(pvc, type) = NULL; if (!pvc_is_used(pvc)) { state(hdlc)->dce_pvc_count--; state(hdlc)->dce_changed = 1; } delete_unused_pvcs(hdlc); return 0; } static void fr_destroy(struct net_device *frad) { hdlc_device *hdlc = dev_to_hdlc(frad); struct pvc_device *pvc = state(hdlc)->first_pvc; state(hdlc)->first_pvc = NULL; /* All PVCs destroyed */ state(hdlc)->dce_pvc_count = 0; state(hdlc)->dce_changed = 1; while (pvc) { struct pvc_device *next = pvc->next; /* destructors will free_netdev() main and ether */ if 
(pvc->main) unregister_netdevice(pvc->main); if (pvc->ether) unregister_netdevice(pvc->ether); kfree(pvc); pvc = next; } } static struct hdlc_proto proto = { .close = fr_close, .start = fr_start, .stop = fr_stop, .detach = fr_destroy, .ioctl = fr_ioctl, .netif_rx = fr_rx, .module = THIS_MODULE, }; static int fr_ioctl(struct net_device *dev, struct ifreq *ifr) { fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr; const size_t size = sizeof(fr_proto); fr_proto new_settings; hdlc_device *hdlc = dev_to_hdlc(dev); fr_proto_pvc pvc; int result; switch (ifr->ifr_settings.type) { case IF_GET_PROTO: if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */ return -EINVAL; ifr->ifr_settings.type = IF_PROTO_FR; if (ifr->ifr_settings.size < size) { ifr->ifr_settings.size = size; /* data size wanted */ return -ENOBUFS; } if (copy_to_user(fr_s, &state(hdlc)->settings, size)) return -EFAULT; return 0; case IF_PROTO_FR: if (!capable(CAP_NET_ADMIN)) return -EPERM; if (dev->flags & IFF_UP) return -EBUSY; if (copy_from_user(&new_settings, fr_s, size)) return -EFAULT; if (new_settings.lmi == LMI_DEFAULT) new_settings.lmi = LMI_ANSI; if ((new_settings.lmi != LMI_NONE && new_settings.lmi != LMI_ANSI && new_settings.lmi != LMI_CCITT && new_settings.lmi != LMI_CISCO) || new_settings.t391 < 1 || new_settings.t392 < 2 || new_settings.n391 < 1 || new_settings.n392 < 1 || new_settings.n393 < new_settings.n392 || new_settings.n393 > 32 || (new_settings.dce != 0 && new_settings.dce != 1)) return -EINVAL; result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT); if (result) return result; if (dev_to_hdlc(dev)->proto != &proto) { /* Different proto */ result = attach_hdlc_protocol(dev, &proto, sizeof(struct frad_state)); if (result) return result; state(hdlc)->first_pvc = NULL; state(hdlc)->dce_pvc_count = 0; } memcpy(&state(hdlc)->settings, &new_settings, size); dev->type = ARPHRD_FRAD; return 0; case IF_PROTO_FR_ADD_PVC: case IF_PROTO_FR_DEL_PVC: case IF_PROTO_FR_ADD_ETH_PVC: case 
IF_PROTO_FR_DEL_ETH_PVC: if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */ return -EINVAL; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc, sizeof(fr_proto_pvc))) return -EFAULT; if (pvc.dlci <= 0 || pvc.dlci >= 1024) return -EINVAL; /* Only 10 bits, DLCI 0 reserved */ if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC || ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC) result = ARPHRD_ETHER; /* bridged Ethernet device */ else result = ARPHRD_DLCI; if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC || ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC) return fr_add_pvc(dev, pvc.dlci, result); else return fr_del_pvc(hdlc, pvc.dlci, result); } return -EINVAL; } static int __init mod_init(void) { register_hdlc_protocol(&proto); return 0; } static void __exit mod_exit(void) { unregister_hdlc_protocol(&proto); } module_init(mod_init); module_exit(mod_exit); MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC"); MODULE_LICENSE("GPL v2");
gpl-2.0
hgl888/linux
drivers/i2c/busses/i2c-bcm-iproc.c
114
15674
/* * Copyright (C) 2014 Broadcom Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/delay.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #define CFG_OFFSET 0x00 #define CFG_RESET_SHIFT 31 #define CFG_EN_SHIFT 30 #define CFG_M_RETRY_CNT_SHIFT 16 #define CFG_M_RETRY_CNT_MASK 0x0f #define TIM_CFG_OFFSET 0x04 #define TIM_CFG_MODE_400_SHIFT 31 #define M_FIFO_CTRL_OFFSET 0x0c #define M_FIFO_RX_FLUSH_SHIFT 31 #define M_FIFO_TX_FLUSH_SHIFT 30 #define M_FIFO_RX_CNT_SHIFT 16 #define M_FIFO_RX_CNT_MASK 0x7f #define M_FIFO_RX_THLD_SHIFT 8 #define M_FIFO_RX_THLD_MASK 0x3f #define M_CMD_OFFSET 0x30 #define M_CMD_START_BUSY_SHIFT 31 #define M_CMD_STATUS_SHIFT 25 #define M_CMD_STATUS_MASK 0x07 #define M_CMD_STATUS_SUCCESS 0x0 #define M_CMD_STATUS_LOST_ARB 0x1 #define M_CMD_STATUS_NACK_ADDR 0x2 #define M_CMD_STATUS_NACK_DATA 0x3 #define M_CMD_STATUS_TIMEOUT 0x4 #define M_CMD_PROTOCOL_SHIFT 9 #define M_CMD_PROTOCOL_MASK 0xf #define M_CMD_PROTOCOL_BLK_WR 0x7 #define M_CMD_PROTOCOL_BLK_RD 0x8 #define M_CMD_PEC_SHIFT 8 #define M_CMD_RD_CNT_SHIFT 0 #define M_CMD_RD_CNT_MASK 0xff #define IE_OFFSET 0x38 #define IE_M_RX_FIFO_FULL_SHIFT 31 #define IE_M_RX_THLD_SHIFT 30 #define IE_M_START_BUSY_SHIFT 28 #define IE_M_TX_UNDERRUN_SHIFT 27 #define IS_OFFSET 0x3c #define IS_M_RX_FIFO_FULL_SHIFT 31 #define IS_M_RX_THLD_SHIFT 30 #define IS_M_START_BUSY_SHIFT 28 #define IS_M_TX_UNDERRUN_SHIFT 27 #define M_TX_OFFSET 0x40 #define M_TX_WR_STATUS_SHIFT 31 
#define M_TX_DATA_SHIFT 0 #define M_TX_DATA_MASK 0xff #define M_RX_OFFSET 0x44 #define M_RX_STATUS_SHIFT 30 #define M_RX_STATUS_MASK 0x03 #define M_RX_PEC_ERR_SHIFT 29 #define M_RX_DATA_SHIFT 0 #define M_RX_DATA_MASK 0xff #define I2C_TIMEOUT_MSEC 50000 #define M_TX_RX_FIFO_SIZE 64 enum bus_speed_index { I2C_SPD_100K = 0, I2C_SPD_400K, }; struct bcm_iproc_i2c_dev { struct device *device; int irq; void __iomem *base; struct i2c_adapter adapter; unsigned int bus_speed; struct completion done; int xfer_is_done; struct i2c_msg *msg; /* bytes that have been transferred */ unsigned int tx_bytes; }; /* * Can be expanded in the future if more interrupt status bits are utilized */ #define ISR_MASK (BIT(IS_M_START_BUSY_SHIFT) | BIT(IS_M_TX_UNDERRUN_SHIFT)) static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data) { struct bcm_iproc_i2c_dev *iproc_i2c = data; u32 status = readl(iproc_i2c->base + IS_OFFSET); status &= ISR_MASK; if (!status) return IRQ_NONE; /* TX FIFO is empty and we have more data to send */ if (status & BIT(IS_M_TX_UNDERRUN_SHIFT)) { struct i2c_msg *msg = iproc_i2c->msg; unsigned int tx_bytes = msg->len - iproc_i2c->tx_bytes; unsigned int i; u32 val; /* can only fill up to the FIFO size */ tx_bytes = min_t(unsigned int, tx_bytes, M_TX_RX_FIFO_SIZE); for (i = 0; i < tx_bytes; i++) { /* start from where we left over */ unsigned int idx = iproc_i2c->tx_bytes + i; val = msg->buf[idx]; /* mark the last byte */ if (idx == msg->len - 1) { u32 tmp; val |= BIT(M_TX_WR_STATUS_SHIFT); /* * Since this is the last byte, we should * now disable TX FIFO underrun interrupt */ tmp = readl(iproc_i2c->base + IE_OFFSET); tmp &= ~BIT(IE_M_TX_UNDERRUN_SHIFT); writel(tmp, iproc_i2c->base + IE_OFFSET); } /* load data into TX FIFO */ writel(val, iproc_i2c->base + M_TX_OFFSET); } /* update number of transferred bytes */ iproc_i2c->tx_bytes += tx_bytes; } if (status & BIT(IS_M_START_BUSY_SHIFT)) { iproc_i2c->xfer_is_done = 1; complete(&iproc_i2c->done); } writel(status, iproc_i2c->base 
+ IS_OFFSET); return IRQ_HANDLED; } static int bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c) { u32 val; /* put controller in reset */ val = readl(iproc_i2c->base + CFG_OFFSET); val |= 1 << CFG_RESET_SHIFT; val &= ~(1 << CFG_EN_SHIFT); writel(val, iproc_i2c->base + CFG_OFFSET); /* wait 100 usec per spec */ udelay(100); /* bring controller out of reset */ val &= ~(1 << CFG_RESET_SHIFT); writel(val, iproc_i2c->base + CFG_OFFSET); /* flush TX/RX FIFOs and set RX FIFO threshold to zero */ val = (1 << M_FIFO_RX_FLUSH_SHIFT) | (1 << M_FIFO_TX_FLUSH_SHIFT); writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET); /* disable all interrupts */ writel(0, iproc_i2c->base + IE_OFFSET); /* clear all pending interrupts */ writel(0xffffffff, iproc_i2c->base + IS_OFFSET); return 0; } static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c, bool enable) { u32 val; val = readl(iproc_i2c->base + CFG_OFFSET); if (enable) val |= BIT(CFG_EN_SHIFT); else val &= ~BIT(CFG_EN_SHIFT); writel(val, iproc_i2c->base + CFG_OFFSET); } static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c, struct i2c_msg *msg) { u32 val; val = readl(iproc_i2c->base + M_CMD_OFFSET); val = (val >> M_CMD_STATUS_SHIFT) & M_CMD_STATUS_MASK; switch (val) { case M_CMD_STATUS_SUCCESS: return 0; case M_CMD_STATUS_LOST_ARB: dev_dbg(iproc_i2c->device, "lost bus arbitration\n"); return -EAGAIN; case M_CMD_STATUS_NACK_ADDR: dev_dbg(iproc_i2c->device, "NAK addr:0x%02x\n", msg->addr); return -ENXIO; case M_CMD_STATUS_NACK_DATA: dev_dbg(iproc_i2c->device, "NAK data\n"); return -ENXIO; case M_CMD_STATUS_TIMEOUT: dev_dbg(iproc_i2c->device, "bus timeout\n"); return -ETIMEDOUT; default: dev_dbg(iproc_i2c->device, "unknown error code=%d\n", val); /* re-initialize i2c for recovery */ bcm_iproc_i2c_enable_disable(iproc_i2c, false); bcm_iproc_i2c_init(iproc_i2c); bcm_iproc_i2c_enable_disable(iproc_i2c, true); return -EIO; } } static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev 
*iproc_i2c, struct i2c_msg *msg) { int ret, i; u8 addr; u32 val; unsigned int tx_bytes; unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT_MSEC); /* check if bus is busy */ if (!!(readl(iproc_i2c->base + M_CMD_OFFSET) & BIT(M_CMD_START_BUSY_SHIFT))) { dev_warn(iproc_i2c->device, "bus is busy\n"); return -EBUSY; } iproc_i2c->msg = msg; /* format and load slave address into the TX FIFO */ addr = i2c_8bit_addr_from_msg(msg); writel(addr, iproc_i2c->base + M_TX_OFFSET); /* * For a write transaction, load data into the TX FIFO. Only allow * loading up to TX FIFO size - 1 bytes of data since the first byte * has been used up by the slave address */ tx_bytes = min_t(unsigned int, msg->len, M_TX_RX_FIFO_SIZE - 1); if (!(msg->flags & I2C_M_RD)) { for (i = 0; i < tx_bytes; i++) { val = msg->buf[i]; /* mark the last byte */ if (i == msg->len - 1) val |= 1 << M_TX_WR_STATUS_SHIFT; writel(val, iproc_i2c->base + M_TX_OFFSET); } iproc_i2c->tx_bytes = tx_bytes; } /* mark as incomplete before starting the transaction */ reinit_completion(&iproc_i2c->done); iproc_i2c->xfer_is_done = 0; /* * Enable the "start busy" interrupt, which will be triggered after the * transaction is done, i.e., the internal start_busy bit, transitions * from 1 to 0. */ val = BIT(IE_M_START_BUSY_SHIFT); /* * If TX data size is larger than the TX FIFO, need to enable TX * underrun interrupt, which will be triggerred when the TX FIFO is * empty. When that happens we can then pump more data into the FIFO */ if (!(msg->flags & I2C_M_RD) && msg->len > iproc_i2c->tx_bytes) val |= BIT(IE_M_TX_UNDERRUN_SHIFT); writel(val, iproc_i2c->base + IE_OFFSET); /* * Now we can activate the transfer. 
For a read operation, specify the * number of bytes to read */ val = BIT(M_CMD_START_BUSY_SHIFT); if (msg->flags & I2C_M_RD) { val |= (M_CMD_PROTOCOL_BLK_RD << M_CMD_PROTOCOL_SHIFT) | (msg->len << M_CMD_RD_CNT_SHIFT); } else { val |= (M_CMD_PROTOCOL_BLK_WR << M_CMD_PROTOCOL_SHIFT); } writel(val, iproc_i2c->base + M_CMD_OFFSET); time_left = wait_for_completion_timeout(&iproc_i2c->done, time_left); /* disable all interrupts */ writel(0, iproc_i2c->base + IE_OFFSET); /* read it back to flush the write */ readl(iproc_i2c->base + IE_OFFSET); /* make sure the interrupt handler isn't running */ synchronize_irq(iproc_i2c->irq); if (!time_left && !iproc_i2c->xfer_is_done) { dev_err(iproc_i2c->device, "transaction timed out\n"); /* flush FIFOs */ val = (1 << M_FIFO_RX_FLUSH_SHIFT) | (1 << M_FIFO_TX_FLUSH_SHIFT); writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET); return -ETIMEDOUT; } ret = bcm_iproc_i2c_check_status(iproc_i2c, msg); if (ret) { /* flush both TX/RX FIFOs */ val = (1 << M_FIFO_RX_FLUSH_SHIFT) | (1 << M_FIFO_TX_FLUSH_SHIFT); writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET); return ret; } /* * For a read operation, we now need to load the data from FIFO * into the memory buffer */ if (msg->flags & I2C_M_RD) { for (i = 0; i < msg->len; i++) { msg->buf[i] = (readl(iproc_i2c->base + M_RX_OFFSET) >> M_RX_DATA_SHIFT) & M_RX_DATA_MASK; } } return 0; } static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg msgs[], int num) { struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(adapter); int ret, i; /* go through all messages */ for (i = 0; i < num; i++) { ret = bcm_iproc_i2c_xfer_single_msg(iproc_i2c, &msgs[i]); if (ret) { dev_dbg(iproc_i2c->device, "xfer failed\n"); return ret; } } return num; } static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm bcm_iproc_algo = { .master_xfer = bcm_iproc_i2c_xfer, .functionality = bcm_iproc_i2c_functionality, }; 
static const struct i2c_adapter_quirks bcm_iproc_i2c_quirks = { /* need to reserve one byte in the FIFO for the slave address */ .max_read_len = M_TX_RX_FIFO_SIZE - 1, }; static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c) { unsigned int bus_speed; u32 val; int ret = of_property_read_u32(iproc_i2c->device->of_node, "clock-frequency", &bus_speed); if (ret < 0) { dev_info(iproc_i2c->device, "unable to interpret clock-frequency DT property\n"); bus_speed = 100000; } if (bus_speed < 100000) { dev_err(iproc_i2c->device, "%d Hz bus speed not supported\n", bus_speed); dev_err(iproc_i2c->device, "valid speeds are 100khz and 400khz\n"); return -EINVAL; } else if (bus_speed < 400000) { bus_speed = 100000; } else { bus_speed = 400000; } iproc_i2c->bus_speed = bus_speed; val = readl(iproc_i2c->base + TIM_CFG_OFFSET); val &= ~(1 << TIM_CFG_MODE_400_SHIFT); val |= (bus_speed == 400000) << TIM_CFG_MODE_400_SHIFT; writel(val, iproc_i2c->base + TIM_CFG_OFFSET); dev_info(iproc_i2c->device, "bus set to %u Hz\n", bus_speed); return 0; } static int bcm_iproc_i2c_probe(struct platform_device *pdev) { int irq, ret = 0; struct bcm_iproc_i2c_dev *iproc_i2c; struct i2c_adapter *adap; struct resource *res; iproc_i2c = devm_kzalloc(&pdev->dev, sizeof(*iproc_i2c), GFP_KERNEL); if (!iproc_i2c) return -ENOMEM; platform_set_drvdata(pdev, iproc_i2c); iproc_i2c->device = &pdev->dev; init_completion(&iproc_i2c->done); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); iproc_i2c->base = devm_ioremap_resource(iproc_i2c->device, res); if (IS_ERR(iproc_i2c->base)) return PTR_ERR(iproc_i2c->base); ret = bcm_iproc_i2c_init(iproc_i2c); if (ret) return ret; ret = bcm_iproc_i2c_cfg_speed(iproc_i2c); if (ret) return ret; irq = platform_get_irq(pdev, 0); if (irq <= 0) { dev_err(iproc_i2c->device, "no irq resource\n"); return irq; } iproc_i2c->irq = irq; ret = devm_request_irq(iproc_i2c->device, irq, bcm_iproc_i2c_isr, 0, pdev->name, iproc_i2c); if (ret < 0) { dev_err(iproc_i2c->device, 
"unable to request irq %i\n", irq); return ret; } bcm_iproc_i2c_enable_disable(iproc_i2c, true); adap = &iproc_i2c->adapter; i2c_set_adapdata(adap, iproc_i2c); strlcpy(adap->name, "Broadcom iProc I2C adapter", sizeof(adap->name)); adap->algo = &bcm_iproc_algo; adap->quirks = &bcm_iproc_i2c_quirks; adap->dev.parent = &pdev->dev; adap->dev.of_node = pdev->dev.of_node; return i2c_add_adapter(adap); } static int bcm_iproc_i2c_remove(struct platform_device *pdev) { struct bcm_iproc_i2c_dev *iproc_i2c = platform_get_drvdata(pdev); /* make sure there's no pending interrupt when we remove the adapter */ writel(0, iproc_i2c->base + IE_OFFSET); readl(iproc_i2c->base + IE_OFFSET); synchronize_irq(iproc_i2c->irq); i2c_del_adapter(&iproc_i2c->adapter); bcm_iproc_i2c_enable_disable(iproc_i2c, false); return 0; } #ifdef CONFIG_PM_SLEEP static int bcm_iproc_i2c_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct bcm_iproc_i2c_dev *iproc_i2c = platform_get_drvdata(pdev); /* make sure there's no pending interrupt when we go into suspend */ writel(0, iproc_i2c->base + IE_OFFSET); readl(iproc_i2c->base + IE_OFFSET); synchronize_irq(iproc_i2c->irq); /* now disable the controller */ bcm_iproc_i2c_enable_disable(iproc_i2c, false); return 0; } static int bcm_iproc_i2c_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct bcm_iproc_i2c_dev *iproc_i2c = platform_get_drvdata(pdev); int ret; u32 val; /* * Power domain could have been shut off completely in system deep * sleep, so re-initialize the block here */ ret = bcm_iproc_i2c_init(iproc_i2c); if (ret) return ret; /* configure to the desired bus speed */ val = readl(iproc_i2c->base + TIM_CFG_OFFSET); val &= ~(1 << TIM_CFG_MODE_400_SHIFT); val |= (iproc_i2c->bus_speed == 400000) << TIM_CFG_MODE_400_SHIFT; writel(val, iproc_i2c->base + TIM_CFG_OFFSET); bcm_iproc_i2c_enable_disable(iproc_i2c, true); return 0; } static const struct dev_pm_ops bcm_iproc_i2c_pm_ops = 
{ .suspend_late = &bcm_iproc_i2c_suspend, .resume_early = &bcm_iproc_i2c_resume }; #define BCM_IPROC_I2C_PM_OPS (&bcm_iproc_i2c_pm_ops) #else #define BCM_IPROC_I2C_PM_OPS NULL #endif /* CONFIG_PM_SLEEP */ static const struct of_device_id bcm_iproc_i2c_of_match[] = { { .compatible = "brcm,iproc-i2c" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, bcm_iproc_i2c_of_match); static struct platform_driver bcm_iproc_i2c_driver = { .driver = { .name = "bcm-iproc-i2c", .of_match_table = bcm_iproc_i2c_of_match, .pm = BCM_IPROC_I2C_PM_OPS, }, .probe = bcm_iproc_i2c_probe, .remove = bcm_iproc_i2c_remove, }; module_platform_driver(bcm_iproc_i2c_driver); MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>"); MODULE_DESCRIPTION("Broadcom iProc I2C Driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
KylinUI/android_kernel_htc_m7
drivers/media/rc/keymaps/rc-videomate-m1f.c
370
2216
/* videomate-k100.h - Keytable for videomate_k100 Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Pavel Osnova <pvosnova@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

/*
 * NOTE(review): the table/symbols are named videomate_k100 while the file
 * lives as rc-videomate-m1f.c in this tree — presumably the same keymap is
 * shared by both remotes; confirm against RC_MAP_VIDEOMATE_K100 users.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/* Scancode -> Linux input keycode mapping for the remote. */
static struct rc_map_table videomate_k100[] = {
	/* power / source selection */
	{ 0x01, KEY_POWER },
	{ 0x31, KEY_TUNER },
	{ 0x33, KEY_VIDEO },
	{ 0x2f, KEY_RADIO },
	{ 0x30, KEY_CAMERA },
	{ 0x2d, KEY_NEW },
	{ 0x17, KEY_CYCLEWINDOWS },
	{ 0x2c, KEY_ANGLE },
	{ 0x2b, KEY_LANGUAGE },
	{ 0x32, KEY_SEARCH },
	/* navigation cluster */
	{ 0x11, KEY_UP },
	{ 0x13, KEY_LEFT },
	{ 0x15, KEY_OK },
	{ 0x14, KEY_RIGHT },
	{ 0x12, KEY_DOWN },
	{ 0x16, KEY_BACKSPACE },
	{ 0x02, KEY_ZOOM },
	{ 0x04, KEY_INFO },
	/* volume / channel */
	{ 0x05, KEY_VOLUMEUP },
	{ 0x03, KEY_MUTE },
	{ 0x07, KEY_CHANNELUP },
	{ 0x06, KEY_VOLUMEDOWN },
	{ 0x08, KEY_CHANNELDOWN },
	/* transport controls */
	{ 0x0c, KEY_RECORD },
	{ 0x0e, KEY_STOP },
	{ 0x0a, KEY_BACK },
	{ 0x0b, KEY_PLAY },
	{ 0x09, KEY_FORWARD },
	{ 0x10, KEY_PREVIOUS },
	{ 0x0d, KEY_PAUSE },
	{ 0x0f, KEY_NEXT },
	/* numeric keypad */
	{ 0x1e, KEY_1 },
	{ 0x1f, KEY_2 },
	{ 0x20, KEY_3 },
	{ 0x21, KEY_4 },
	{ 0x22, KEY_5 },
	{ 0x23, KEY_6 },
	{ 0x24, KEY_7 },
	{ 0x25, KEY_8 },
	{ 0x26, KEY_9 },
	{ 0x2a, KEY_NUMERIC_STAR },
	{ 0x1d, KEY_0 },
	{ 0x29, KEY_SUBTITLE },
	{ 0x27, KEY_CLEAR },
	{ 0x34, KEY_SCREEN },
	{ 0x28, KEY_ENTER },
	/* colored (teletext) keys */
	{ 0x19, KEY_RED },
	{ 0x1a, KEY_GREEN },
	{ 0x1b, KEY_YELLOW },
	{ 0x1c, KEY_BLUE },
	{ 0x18, KEY_TEXT },
};

/* rc-core registration wrapper; protocol left as RC_TYPE_UNKNOWN. */
static struct rc_map_list videomate_k100_map = {
	.map = {
		.scan = videomate_k100,
		.size = ARRAY_SIZE(videomate_k100),
		.rc_type = RC_TYPE_UNKNOWN,
		.name = RC_MAP_VIDEOMATE_K100,
	}
};

/* Register the keymap with rc-core at module load. */
static int __init init_rc_map_videomate_k100(void)
{
	return rc_map_register(&videomate_k100_map);
}

/* Unregister the keymap at module unload. */
static void __exit exit_rc_map_videomate_k100(void)
{
	rc_map_unregister(&videomate_k100_map);
}

module_init(init_rc_map_videomate_k100)
module_exit(exit_rc_map_videomate_k100)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pavel Osnova <pvosnova@gmail.com>");
gpl-2.0
flar2/m7-GPE-4.4.3
drivers/media/rc/ir-rc6-decoder.c
370
6343
/* ir-rc6-decoder.c - A decoder for the RC6 IR protocol
 *
 * Copyright (C) 2010 by David Härdeman <david@hardeman.nu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include "rc-core-priv.h"
#include <linux/module.h>

/*
 * Timing constants.  Durations appear to be in nanoseconds (the debug
 * output converts with TO_US); one RC6 unit is 444444 ns, i.e. 444.4 us.
 */
#define RC6_UNIT		444444
#define RC6_HEADER_NBITS	4	/* start bit + 3 mode bits */
#define RC6_0_NBITS		16	/* body length, mode 0 */
#define RC6_6A_32_NBITS		32	/* 32-bit mode 6A variant (MCE) */
#define RC6_6A_NBITS		128	/* maximum body length, mode 6A */
#define RC6_PREFIX_PULSE	(6 * RC6_UNIT)
#define RC6_PREFIX_SPACE	(2 * RC6_UNIT)
#define RC6_BIT_START		(1 * RC6_UNIT)
#define RC6_BIT_END		(1 * RC6_UNIT)
#define RC6_TOGGLE_START	(2 * RC6_UNIT)	/* toggle bit is double-width */
#define RC6_TOGGLE_END		(2 * RC6_UNIT)
#define RC6_SUFFIX_SPACE	(6 * RC6_UNIT)
#define RC6_MODE_MASK		0x07	/* low 3 header bits select the mode */
#define RC6_STARTBIT_MASK	0x08	/* header bit 3 is the start bit (must be 1) */
#define RC6_6A_MCE_TOGGLE_MASK	0x8000
#define RC6_6A_LCC_MASK		0xffff0000	/* RC6-6A customer code bits */
#define RC6_6A_MCE_CC		0x800f0000	/* MCE customer code */
#ifndef CHAR_BIT
#define CHAR_BIT 8	/* Normally in <limits.h> */
#endif

enum rc6_mode {
	RC6_MODE_0,
	RC6_MODE_6A,
	RC6_MODE_UNKNOWN,
};

/* Decoder state machine states, in the order a valid frame traverses them. */
enum rc6_state {
	STATE_INACTIVE,
	STATE_PREFIX_SPACE,
	STATE_HEADER_BIT_START,
	STATE_HEADER_BIT_END,
	STATE_TOGGLE_START,
	STATE_TOGGLE_END,
	STATE_BODY_BIT_START,
	STATE_BODY_BIT_END,
	STATE_FINISHED,
};

/*
 * Classify the frame's RC6 mode from the collected header bits.
 * Mode 6 with toggle == 0 is treated as 6A; mode 6 with toggle set falls
 * through to unknown.
 */
static enum rc6_mode rc6_mode(struct rc6_dec *data)
{
	switch (data->header & RC6_MODE_MASK) {
	case 0:
		return RC6_MODE_0;
	case 6:
		if (!data->toggle)
			return RC6_MODE_6A;
		/* fall through */
	default:
		return RC6_MODE_UNKNOWN;
	}
}

/*
 * ir_rc6_decode() - decode one pulse/space event against the RC6 state
 * machine kept in dev->raw->rc6.
 *
 * @dev: the struct rc_dev descriptor of the device
 * @ev:  the struct ir_raw_event descriptor of the pulse/space
 *
 * Because RC6 is Manchester coded, a single event can span two logical
 * half-bits; the "again" label re-runs the state machine on the remainder
 * of an event after decrease_duration() consumes the first half.
 *
 * Returns 0 (event consumed or ignored) or -EINVAL on a protocol error,
 * which also resets the state machine to STATE_INACTIVE.
 */
static int ir_rc6_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
	struct rc6_dec *data = &dev->raw->rc6;
	u32 scancode;
	u8 toggle;

	/* bail out early if RC6 decoding is not enabled for this device */
	if (!(dev->raw->enabled_protocols & RC_TYPE_RC6))
		return 0;

	/* non-timing events only matter if they reset the decoder */
	if (!is_timing_event(ev)) {
		if (ev.reset)
			data->state = STATE_INACTIVE;
		return 0;
	}

	/* anything shorter than half a unit is noise */
	if (!geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2))
		goto out;

again:
	IR_dprintk(2, "RC6 decode started at state %i (%uus %s)\n",
		   data->state, TO_US(ev.duration), TO_STR(ev.pulse));

	/* leftover duration after a goto-again may be too small to matter */
	if (!geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2))
		return 0;

	switch (data->state) {

	case STATE_INACTIVE:
		/* wait for the 6-unit leader pulse */
		if (!ev.pulse)
			break;

		if (!eq_margin(ev.duration, RC6_PREFIX_PULSE, RC6_UNIT))
			break;

		data->state = STATE_PREFIX_SPACE;
		data->count = 0;
		return 0;

	case STATE_PREFIX_SPACE:
		/* the leader pulse must be followed by a 2-unit space */
		if (ev.pulse)
			break;

		if (!eq_margin(ev.duration, RC6_PREFIX_SPACE, RC6_UNIT / 2))
			break;

		data->state = STATE_HEADER_BIT_START;
		data->header = 0;
		return 0;

	case STATE_HEADER_BIT_START:
		/*
		 * First half of a Manchester header bit: pulse = 1,
		 * space = 0 (start bit + 3 mode bits).
		 */
		if (!eq_margin(ev.duration, RC6_BIT_START, RC6_UNIT / 2))
			break;

		data->header <<= 1;
		if (ev.pulse)
			data->header |= 1;
		data->count++;
		data->state = STATE_HEADER_BIT_END;
		return 0;

	case STATE_HEADER_BIT_END:
		/* second half must be the opposite level of the first */
		if (!is_transition(&ev, &dev->raw->prev_ev))
			break;

		if (data->count == RC6_HEADER_NBITS)
			data->state = STATE_TOGGLE_START;
		else
			data->state = STATE_HEADER_BIT_START;

		/* consume this half-bit, re-process any remainder */
		decrease_duration(&ev, RC6_BIT_END);
		goto again;

	case STATE_TOGGLE_START:
		/* the toggle bit uses double-width halves */
		if (!eq_margin(ev.duration, RC6_TOGGLE_START, RC6_UNIT / 2))
			break;

		data->toggle = ev.pulse;
		data->state = STATE_TOGGLE_END;
		return 0;

	case STATE_TOGGLE_END:
		if (!is_transition(&ev, &dev->raw->prev_ev) ||
		    !geq_margin(ev.duration, RC6_TOGGLE_END, RC6_UNIT / 2))
			break;

		/* a valid RC6 header always carries start bit == 1 */
		if (!(data->header & RC6_STARTBIT_MASK)) {
			IR_dprintk(1, "RC6 invalid start bit\n");
			break;
		}

		data->state = STATE_BODY_BIT_START;
		decrease_duration(&ev, RC6_TOGGLE_END);
		data->count = 0;
		data->body = 0;

		/* the mode bits decide how many body bits to expect */
		switch (rc6_mode(data)) {
		case RC6_MODE_0:
			data->wanted_bits = RC6_0_NBITS;
			break;
		case RC6_MODE_6A:
			data->wanted_bits = RC6_6A_NBITS;
			break;
		default:
			IR_dprintk(1, "RC6 unknown mode\n");
			goto out;
		}
		goto again;

	case STATE_BODY_BIT_START:
		if (eq_margin(ev.duration, RC6_BIT_START, RC6_UNIT / 2)) {
			/* only the bits that fit in data->body are kept */
			if (data->count++ < CHAR_BIT * sizeof data->body) {
				data->body <<= 1;
				if (ev.pulse)
					data->body |= 1;
			}
			data->state = STATE_BODY_BIT_END;
			return 0;
		} else if (RC6_MODE_6A == rc6_mode(data) && !ev.pulse &&
				geq_margin(ev.duration, RC6_SUFFIX_SPACE, RC6_UNIT / 2)) {
			/* mode 6A frames may end early on the suffix space */
			data->state = STATE_FINISHED;
			goto again;
		}
		break;

	case STATE_BODY_BIT_END:
		if (!is_transition(&ev, &dev->raw->prev_ev))
			break;

		if (data->count == data->wanted_bits)
			data->state = STATE_FINISHED;
		else
			data->state = STATE_BODY_BIT_START;

		decrease_duration(&ev, RC6_BIT_END);
		goto again;

	case STATE_FINISHED:
		/* the frame must end in a space */
		if (ev.pulse)
			break;

		switch (rc6_mode(data)) {
		case RC6_MODE_0:
			scancode = data->body;
			toggle = data->toggle;
			IR_dprintk(1, "RC6(0) scancode 0x%04x (toggle: %u)\n",
				   scancode, toggle);
			break;
		case RC6_MODE_6A:
			if (data->count > CHAR_BIT * sizeof data->body) {
				IR_dprintk(1, "RC6 too many (%u) data bits\n",
					   data->count);
				goto out;
			}

			scancode = data->body;
			/*
			 * 32-bit 6A frames with the MCE customer code carry
			 * the toggle inside the scancode; strip it out.
			 */
			if (data->count == RC6_6A_32_NBITS &&
			    (scancode & RC6_6A_LCC_MASK) == RC6_6A_MCE_CC) {
				toggle = (scancode & RC6_6A_MCE_TOGGLE_MASK) ? 1 : 0;
				scancode &= ~RC6_6A_MCE_TOGGLE_MASK;
			} else {
				toggle = 0;
			}
			IR_dprintk(1, "RC6(6A) scancode 0x%08x (toggle: %u)\n",
				   scancode, toggle);
			break;
		default:
			IR_dprintk(1, "RC6 unknown mode\n");
			goto out;
		}

		rc_keydown(dev, scancode, toggle);
		data->state = STATE_INACTIVE;
		return 0;
	}

out:
	IR_dprintk(1, "RC6 decode failed at state %i (%uus %s)\n",
		   data->state, TO_US(ev.duration), TO_STR(ev.pulse));
	data->state = STATE_INACTIVE;
	return -EINVAL;
}

/* rc-core raw-event handler registration for the RC6 protocol. */
static struct ir_raw_handler rc6_handler = {
	.protocols	= RC_TYPE_RC6,
	.decode		= ir_rc6_decode,
};

static int __init ir_rc6_decode_init(void)
{
	ir_raw_handler_register(&rc6_handler);

	printk(KERN_INFO "IR RC6 protocol handler initialized\n");
	return 0;
}

static void __exit ir_rc6_decode_exit(void)
{
	ir_raw_handler_unregister(&rc6_handler);
}

module_init(ir_rc6_decode_init);
module_exit(ir_rc6_decode_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Härdeman <david@hardeman.nu>");
MODULE_DESCRIPTION("RC6 IR protocol decoder");
gpl-2.0
zhmz90/linux
net/ieee802154/nl-mac.c
626
35369
/* * Netlink interface for IEEE 802.15.4 stack * * Copyright 2007, 2008 Siemens AG * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Written by: * Sergey Lapin <slapin@ossfans.org> * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> * Maxim Osipov <maxim.osipov@siemens.com> */ #include <linux/gfp.h> #include <linux/kernel.h> #include <linux/if_arp.h> #include <linux/netdevice.h> #include <linux/ieee802154.h> #include <net/netlink.h> #include <net/genetlink.h> #include <net/sock.h> #include <linux/nl802154.h> #include <linux/export.h> #include <net/af_ieee802154.h> #include <net/ieee802154_netdev.h> #include <net/cfg802154.h> #include "ieee802154.h" static int nla_put_hwaddr(struct sk_buff *msg, int type, __le64 hwaddr) { return nla_put_u64(msg, type, swab64((__force u64)hwaddr)); } static __le64 nla_get_hwaddr(const struct nlattr *nla) { return ieee802154_devaddr_from_raw(nla_data(nla)); } static int nla_put_shortaddr(struct sk_buff *msg, int type, __le16 addr) { return nla_put_u16(msg, type, le16_to_cpu(addr)); } static __le16 nla_get_shortaddr(const struct nlattr *nla) { return cpu_to_le16(nla_get_u16(nla)); } static int ieee802154_nl_start_confirm(struct net_device *dev, u8 status) { struct sk_buff *msg; pr_debug("%s\n", __func__); msg = ieee802154_nl_create(0, IEEE802154_START_CONF); if (!msg) return -ENOBUFS; if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, dev->dev_addr) || nla_put_u8(msg, IEEE802154_ATTR_STATUS, status)) goto nla_put_failure; return 
ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP); nla_put_failure: nlmsg_free(msg); return -ENOBUFS; } static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags, struct net_device *dev) { void *hdr; struct wpan_phy *phy; struct ieee802154_mlme_ops *ops; __le16 short_addr, pan_id; pr_debug("%s\n", __func__); hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags, IEEE802154_LIST_IFACE); if (!hdr) goto out; ops = ieee802154_mlme_ops(dev); phy = dev->ieee802154_ptr->wpan_phy; BUG_ON(!phy); get_device(&phy->dev); rtnl_lock(); short_addr = dev->ieee802154_ptr->short_addr; pan_id = dev->ieee802154_ptr->pan_id; rtnl_unlock(); if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) || nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, dev->dev_addr) || nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr) || nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID, pan_id)) goto nla_put_failure; if (ops->get_mac_params) { struct ieee802154_mac_params params; rtnl_lock(); ops->get_mac_params(dev, &params); rtnl_unlock(); if (nla_put_s8(msg, IEEE802154_ATTR_TXPOWER, params.transmit_power / 100) || nla_put_u8(msg, IEEE802154_ATTR_LBT_ENABLED, params.lbt) || nla_put_u8(msg, IEEE802154_ATTR_CCA_MODE, params.cca.mode) || nla_put_s32(msg, IEEE802154_ATTR_CCA_ED_LEVEL, params.cca_ed_level / 100) || nla_put_u8(msg, IEEE802154_ATTR_CSMA_RETRIES, params.csma_retries) || nla_put_u8(msg, IEEE802154_ATTR_CSMA_MIN_BE, params.min_be) || nla_put_u8(msg, IEEE802154_ATTR_CSMA_MAX_BE, params.max_be) || nla_put_s8(msg, IEEE802154_ATTR_FRAME_RETRIES, params.frame_retries)) goto nla_put_failure; } wpan_phy_put(phy); genlmsg_end(msg, hdr); return 0; nla_put_failure: wpan_phy_put(phy); genlmsg_cancel(msg, hdr); out: return -EMSGSIZE; } /* Requests from userspace */ static struct net_device *ieee802154_nl_get_dev(struct genl_info 
*info) { struct net_device *dev; if (info->attrs[IEEE802154_ATTR_DEV_NAME]) { char name[IFNAMSIZ + 1]; nla_strlcpy(name, info->attrs[IEEE802154_ATTR_DEV_NAME], sizeof(name)); dev = dev_get_by_name(&init_net, name); } else if (info->attrs[IEEE802154_ATTR_DEV_INDEX]) { dev = dev_get_by_index(&init_net, nla_get_u32(info->attrs[IEEE802154_ATTR_DEV_INDEX])); } else { return NULL; } if (!dev) return NULL; if (dev->type != ARPHRD_IEEE802154) { dev_put(dev); return NULL; } return dev; } int ieee802154_associate_req(struct sk_buff *skb, struct genl_info *info) { struct net_device *dev; struct ieee802154_addr addr; u8 page; int ret = -EOPNOTSUPP; if (!info->attrs[IEEE802154_ATTR_CHANNEL] || !info->attrs[IEEE802154_ATTR_COORD_PAN_ID] || (!info->attrs[IEEE802154_ATTR_COORD_HW_ADDR] && !info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]) || !info->attrs[IEEE802154_ATTR_CAPABILITY]) return -EINVAL; dev = ieee802154_nl_get_dev(info); if (!dev) return -ENODEV; if (!ieee802154_mlme_ops(dev)->assoc_req) goto out; if (info->attrs[IEEE802154_ATTR_COORD_HW_ADDR]) { addr.mode = IEEE802154_ADDR_LONG; addr.extended_addr = nla_get_hwaddr( info->attrs[IEEE802154_ATTR_COORD_HW_ADDR]); } else { addr.mode = IEEE802154_ADDR_SHORT; addr.short_addr = nla_get_shortaddr( info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]); } addr.pan_id = nla_get_shortaddr( info->attrs[IEEE802154_ATTR_COORD_PAN_ID]); if (info->attrs[IEEE802154_ATTR_PAGE]) page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]); else page = 0; ret = ieee802154_mlme_ops(dev)->assoc_req(dev, &addr, nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]), page, nla_get_u8(info->attrs[IEEE802154_ATTR_CAPABILITY])); out: dev_put(dev); return ret; } int ieee802154_associate_resp(struct sk_buff *skb, struct genl_info *info) { struct net_device *dev; struct ieee802154_addr addr; int ret = -EOPNOTSUPP; if (!info->attrs[IEEE802154_ATTR_STATUS] || !info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] || !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]) return -EINVAL; dev = 
ieee802154_nl_get_dev(info); if (!dev) return -ENODEV; if (!ieee802154_mlme_ops(dev)->assoc_resp) goto out; addr.mode = IEEE802154_ADDR_LONG; addr.extended_addr = nla_get_hwaddr( info->attrs[IEEE802154_ATTR_DEST_HW_ADDR]); rtnl_lock(); addr.pan_id = dev->ieee802154_ptr->pan_id; rtnl_unlock(); ret = ieee802154_mlme_ops(dev)->assoc_resp(dev, &addr, nla_get_shortaddr(info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]), nla_get_u8(info->attrs[IEEE802154_ATTR_STATUS])); out: dev_put(dev); return ret; } int ieee802154_disassociate_req(struct sk_buff *skb, struct genl_info *info) { struct net_device *dev; struct ieee802154_addr addr; int ret = -EOPNOTSUPP; if ((!info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] && !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]) || !info->attrs[IEEE802154_ATTR_REASON]) return -EINVAL; dev = ieee802154_nl_get_dev(info); if (!dev) return -ENODEV; if (!ieee802154_mlme_ops(dev)->disassoc_req) goto out; if (info->attrs[IEEE802154_ATTR_DEST_HW_ADDR]) { addr.mode = IEEE802154_ADDR_LONG; addr.extended_addr = nla_get_hwaddr( info->attrs[IEEE802154_ATTR_DEST_HW_ADDR]); } else { addr.mode = IEEE802154_ADDR_SHORT; addr.short_addr = nla_get_shortaddr( info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]); } rtnl_lock(); addr.pan_id = dev->ieee802154_ptr->pan_id; rtnl_unlock(); ret = ieee802154_mlme_ops(dev)->disassoc_req(dev, &addr, nla_get_u8(info->attrs[IEEE802154_ATTR_REASON])); out: dev_put(dev); return ret; } /* PANid, channel, beacon_order = 15, superframe_order = 15, * PAN_coordinator, battery_life_extension = 0, * coord_realignment = 0, security_enable = 0 */ int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info) { struct net_device *dev; struct ieee802154_addr addr; u8 channel, bcn_ord, sf_ord; u8 page; int pan_coord, blx, coord_realign; int ret = -EBUSY; if (!info->attrs[IEEE802154_ATTR_COORD_PAN_ID] || !info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR] || !info->attrs[IEEE802154_ATTR_CHANNEL] || !info->attrs[IEEE802154_ATTR_BCN_ORD] || 
!info->attrs[IEEE802154_ATTR_SF_ORD] || !info->attrs[IEEE802154_ATTR_PAN_COORD] || !info->attrs[IEEE802154_ATTR_BAT_EXT] || !info->attrs[IEEE802154_ATTR_COORD_REALIGN] ) return -EINVAL; dev = ieee802154_nl_get_dev(info); if (!dev) return -ENODEV; if (netif_running(dev)) goto out; if (!ieee802154_mlme_ops(dev)->start_req) { ret = -EOPNOTSUPP; goto out; } addr.mode = IEEE802154_ADDR_SHORT; addr.short_addr = nla_get_shortaddr( info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]); addr.pan_id = nla_get_shortaddr( info->attrs[IEEE802154_ATTR_COORD_PAN_ID]); channel = nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]); bcn_ord = nla_get_u8(info->attrs[IEEE802154_ATTR_BCN_ORD]); sf_ord = nla_get_u8(info->attrs[IEEE802154_ATTR_SF_ORD]); pan_coord = nla_get_u8(info->attrs[IEEE802154_ATTR_PAN_COORD]); blx = nla_get_u8(info->attrs[IEEE802154_ATTR_BAT_EXT]); coord_realign = nla_get_u8(info->attrs[IEEE802154_ATTR_COORD_REALIGN]); if (info->attrs[IEEE802154_ATTR_PAGE]) page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]); else page = 0; if (addr.short_addr == cpu_to_le16(IEEE802154_ADDR_BROADCAST)) { ieee802154_nl_start_confirm(dev, IEEE802154_NO_SHORT_ADDRESS); dev_put(dev); return -EINVAL; } rtnl_lock(); ret = ieee802154_mlme_ops(dev)->start_req(dev, &addr, channel, page, bcn_ord, sf_ord, pan_coord, blx, coord_realign); rtnl_unlock(); /* FIXME: add validation for unused parameters to be sane * for SoftMAC */ ieee802154_nl_start_confirm(dev, IEEE802154_SUCCESS); out: dev_put(dev); return ret; } int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info) { struct net_device *dev; int ret = -EOPNOTSUPP; u8 type; u32 channels; u8 duration; u8 page; if (!info->attrs[IEEE802154_ATTR_SCAN_TYPE] || !info->attrs[IEEE802154_ATTR_CHANNELS] || !info->attrs[IEEE802154_ATTR_DURATION]) return -EINVAL; dev = ieee802154_nl_get_dev(info); if (!dev) return -ENODEV; if (!ieee802154_mlme_ops(dev)->scan_req) goto out; type = nla_get_u8(info->attrs[IEEE802154_ATTR_SCAN_TYPE]); channels = 
nla_get_u32(info->attrs[IEEE802154_ATTR_CHANNELS]); duration = nla_get_u8(info->attrs[IEEE802154_ATTR_DURATION]); if (info->attrs[IEEE802154_ATTR_PAGE]) page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]); else page = 0; ret = ieee802154_mlme_ops(dev)->scan_req(dev, type, channels, page, duration); out: dev_put(dev); return ret; } int ieee802154_list_iface(struct sk_buff *skb, struct genl_info *info) { /* Request for interface name, index, type, IEEE address, * PAN Id, short address */ struct sk_buff *msg; struct net_device *dev = NULL; int rc = -ENOBUFS; pr_debug("%s\n", __func__); dev = ieee802154_nl_get_dev(info); if (!dev) return -ENODEV; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) goto out_dev; rc = ieee802154_nl_fill_iface(msg, info->snd_portid, info->snd_seq, 0, dev); if (rc < 0) goto out_free; dev_put(dev); return genlmsg_reply(msg, info); out_free: nlmsg_free(msg); out_dev: dev_put(dev); return rc; } int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); struct net_device *dev; int idx; int s_idx = cb->args[0]; pr_debug("%s\n", __func__); idx = 0; for_each_netdev(net, dev) { if (idx < s_idx || dev->type != ARPHRD_IEEE802154) goto cont; if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, dev) < 0) break; cont: idx++; } cb->args[0] = idx; return skb->len; } int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info) { struct net_device *dev = NULL; struct ieee802154_mlme_ops *ops; struct ieee802154_mac_params params; struct wpan_phy *phy; int rc = -EINVAL; pr_debug("%s\n", __func__); dev = ieee802154_nl_get_dev(info); if (!dev) return -ENODEV; ops = ieee802154_mlme_ops(dev); if (!ops->get_mac_params || !ops->set_mac_params) { rc = -EOPNOTSUPP; goto out; } if (netif_running(dev)) { rc = -EBUSY; goto out; } if (!info->attrs[IEEE802154_ATTR_LBT_ENABLED] && !info->attrs[IEEE802154_ATTR_CCA_MODE] && 
!info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL] && !info->attrs[IEEE802154_ATTR_CSMA_RETRIES] && !info->attrs[IEEE802154_ATTR_CSMA_MIN_BE] && !info->attrs[IEEE802154_ATTR_CSMA_MAX_BE] && !info->attrs[IEEE802154_ATTR_FRAME_RETRIES]) goto out; phy = dev->ieee802154_ptr->wpan_phy; get_device(&phy->dev); rtnl_lock(); ops->get_mac_params(dev, &params); if (info->attrs[IEEE802154_ATTR_TXPOWER]) params.transmit_power = nla_get_s8(info->attrs[IEEE802154_ATTR_TXPOWER]) * 100; if (info->attrs[IEEE802154_ATTR_LBT_ENABLED]) params.lbt = nla_get_u8(info->attrs[IEEE802154_ATTR_LBT_ENABLED]); if (info->attrs[IEEE802154_ATTR_CCA_MODE]) params.cca.mode = nla_get_u8(info->attrs[IEEE802154_ATTR_CCA_MODE]); if (info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]) params.cca_ed_level = nla_get_s32(info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]) * 100; if (info->attrs[IEEE802154_ATTR_CSMA_RETRIES]) params.csma_retries = nla_get_u8(info->attrs[IEEE802154_ATTR_CSMA_RETRIES]); if (info->attrs[IEEE802154_ATTR_CSMA_MIN_BE]) params.min_be = nla_get_u8(info->attrs[IEEE802154_ATTR_CSMA_MIN_BE]); if (info->attrs[IEEE802154_ATTR_CSMA_MAX_BE]) params.max_be = nla_get_u8(info->attrs[IEEE802154_ATTR_CSMA_MAX_BE]); if (info->attrs[IEEE802154_ATTR_FRAME_RETRIES]) params.frame_retries = nla_get_s8(info->attrs[IEEE802154_ATTR_FRAME_RETRIES]); rc = ops->set_mac_params(dev, &params); rtnl_unlock(); wpan_phy_put(phy); dev_put(dev); return 0; out: dev_put(dev); return rc; } static int ieee802154_llsec_parse_key_id(struct genl_info *info, struct ieee802154_llsec_key_id *desc) { memset(desc, 0, sizeof(*desc)); if (!info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE]) return -EINVAL; desc->mode = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE]); if (desc->mode == IEEE802154_SCF_KEY_IMPLICIT) { if (!info->attrs[IEEE802154_ATTR_PAN_ID] && !(info->attrs[IEEE802154_ATTR_SHORT_ADDR] || info->attrs[IEEE802154_ATTR_HW_ADDR])) return -EINVAL; desc->device_addr.pan_id = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_PAN_ID]); if 
(info->attrs[IEEE802154_ATTR_SHORT_ADDR]) { desc->device_addr.mode = IEEE802154_ADDR_SHORT; desc->device_addr.short_addr = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_SHORT_ADDR]); } else { desc->device_addr.mode = IEEE802154_ADDR_LONG; desc->device_addr.extended_addr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]); } } if (desc->mode != IEEE802154_SCF_KEY_IMPLICIT && !info->attrs[IEEE802154_ATTR_LLSEC_KEY_ID]) return -EINVAL; if (desc->mode == IEEE802154_SCF_KEY_SHORT_INDEX && !info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT]) return -EINVAL; if (desc->mode == IEEE802154_SCF_KEY_HW_INDEX && !info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED]) return -EINVAL; if (desc->mode != IEEE802154_SCF_KEY_IMPLICIT) desc->id = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_ID]); switch (desc->mode) { case IEEE802154_SCF_KEY_SHORT_INDEX: { u32 source = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT]); desc->short_source = cpu_to_le32(source); break; } case IEEE802154_SCF_KEY_HW_INDEX: desc->extended_source = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED]); break; } return 0; } static int ieee802154_llsec_fill_key_id(struct sk_buff *msg, const struct ieee802154_llsec_key_id *desc) { if (nla_put_u8(msg, IEEE802154_ATTR_LLSEC_KEY_MODE, desc->mode)) return -EMSGSIZE; if (desc->mode == IEEE802154_SCF_KEY_IMPLICIT) { if (nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID, desc->device_addr.pan_id)) return -EMSGSIZE; if (desc->device_addr.mode == IEEE802154_ADDR_SHORT && nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR, desc->device_addr.short_addr)) return -EMSGSIZE; if (desc->device_addr.mode == IEEE802154_ADDR_LONG && nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, desc->device_addr.extended_addr)) return -EMSGSIZE; } if (desc->mode != IEEE802154_SCF_KEY_IMPLICIT && nla_put_u8(msg, IEEE802154_ATTR_LLSEC_KEY_ID, desc->id)) return -EMSGSIZE; if (desc->mode == IEEE802154_SCF_KEY_SHORT_INDEX && nla_put_u32(msg, 
IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT, le32_to_cpu(desc->short_source))) return -EMSGSIZE; if (desc->mode == IEEE802154_SCF_KEY_HW_INDEX && nla_put_hwaddr(msg, IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED, desc->extended_source)) return -EMSGSIZE; return 0; } int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *msg; struct net_device *dev = NULL; int rc = -ENOBUFS; struct ieee802154_mlme_ops *ops; void *hdr; struct ieee802154_llsec_params params; pr_debug("%s\n", __func__); dev = ieee802154_nl_get_dev(info); if (!dev) return -ENODEV; ops = ieee802154_mlme_ops(dev); if (!ops->llsec) { rc = -EOPNOTSUPP; goto out_dev; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) goto out_dev; hdr = genlmsg_put(msg, 0, info->snd_seq, &nl802154_family, 0, IEEE802154_LLSEC_GETPARAMS); if (!hdr) goto out_free; rc = ops->llsec->get_params(dev, &params); if (rc < 0) goto out_free; if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || nla_put_u8(msg, IEEE802154_ATTR_LLSEC_ENABLED, params.enabled) || nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVEL, params.out_level) || nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER, be32_to_cpu(params.frame_counter)) || ieee802154_llsec_fill_key_id(msg, &params.out_key)) goto out_free; dev_put(dev); return ieee802154_nl_reply(msg, info); out_free: nlmsg_free(msg); out_dev: dev_put(dev); return rc; } int ieee802154_llsec_setparams(struct sk_buff *skb, struct genl_info *info) { struct net_device *dev = NULL; int rc = -EINVAL; struct ieee802154_mlme_ops *ops; struct ieee802154_llsec_params params; int changed = 0; pr_debug("%s\n", __func__); dev = ieee802154_nl_get_dev(info); if (!dev) return -ENODEV; if (!info->attrs[IEEE802154_ATTR_LLSEC_ENABLED] && !info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE] && !info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL]) goto out; ops = ieee802154_mlme_ops(dev); if (!ops->llsec) { rc = -EOPNOTSUPP; goto out; } 
if (info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL] && nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL]) > 7) goto out; if (info->attrs[IEEE802154_ATTR_LLSEC_ENABLED]) { params.enabled = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_ENABLED]); changed |= IEEE802154_LLSEC_PARAM_ENABLED; } if (info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE]) { if (ieee802154_llsec_parse_key_id(info, &params.out_key)) goto out; changed |= IEEE802154_LLSEC_PARAM_OUT_KEY; } if (info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL]) { params.out_level = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL]); changed |= IEEE802154_LLSEC_PARAM_OUT_LEVEL; } if (info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]) { u32 fc = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]); params.frame_counter = cpu_to_be32(fc); changed |= IEEE802154_LLSEC_PARAM_FRAME_COUNTER; } rc = ops->llsec->set_params(dev, &params, changed); dev_put(dev); return rc; out: dev_put(dev); return rc; } struct llsec_dump_data { struct sk_buff *skb; int s_idx, s_idx2; int portid; int nlmsg_seq; struct net_device *dev; struct ieee802154_mlme_ops *ops; struct ieee802154_llsec_table *table; }; static int ieee802154_llsec_dump_table(struct sk_buff *skb, struct netlink_callback *cb, int (*step)(struct llsec_dump_data *)) { struct net *net = sock_net(skb->sk); struct net_device *dev; struct llsec_dump_data data; int idx = 0; int first_dev = cb->args[0]; int rc; for_each_netdev(net, dev) { if (idx < first_dev || dev->type != ARPHRD_IEEE802154) goto skip; data.ops = ieee802154_mlme_ops(dev); if (!data.ops->llsec) goto skip; data.skb = skb; data.s_idx = cb->args[1]; data.s_idx2 = cb->args[2]; data.dev = dev; data.portid = NETLINK_CB(cb->skb).portid; data.nlmsg_seq = cb->nlh->nlmsg_seq; data.ops->llsec->lock_table(dev); data.ops->llsec->get_table(data.dev, &data.table); rc = step(&data); data.ops->llsec->unlock_table(dev); if (rc < 0) break; skip: idx++; } cb->args[0] = idx; return skb->len; } static int ieee802154_nl_llsec_change(struct 
sk_buff *skb, struct genl_info *info, int (*fn)(struct net_device*, struct genl_info*)) { struct net_device *dev = NULL; int rc = -EINVAL; dev = ieee802154_nl_get_dev(info); if (!dev) return -ENODEV; if (!ieee802154_mlme_ops(dev)->llsec) rc = -EOPNOTSUPP; else rc = fn(dev, info); dev_put(dev); return rc; } static int ieee802154_llsec_parse_key(struct genl_info *info, struct ieee802154_llsec_key *key) { u8 frames; u32 commands[256 / 32]; memset(key, 0, sizeof(*key)); if (!info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES] || !info->attrs[IEEE802154_ATTR_LLSEC_KEY_BYTES]) return -EINVAL; frames = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES]); if ((frames & BIT(IEEE802154_FC_TYPE_MAC_CMD)) && !info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS]) return -EINVAL; if (info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS]) { nla_memcpy(commands, info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS], 256 / 8); if (commands[0] || commands[1] || commands[2] || commands[3] || commands[4] || commands[5] || commands[6] || commands[7] >= BIT(IEEE802154_CMD_GTS_REQ + 1)) return -EINVAL; key->cmd_frame_ids = commands[7]; } key->frame_types = frames; nla_memcpy(key->key, info->attrs[IEEE802154_ATTR_LLSEC_KEY_BYTES], IEEE802154_LLSEC_KEY_SIZE); return 0; } static int llsec_add_key(struct net_device *dev, struct genl_info *info) { struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev); struct ieee802154_llsec_key key; struct ieee802154_llsec_key_id id; if (ieee802154_llsec_parse_key(info, &key) || ieee802154_llsec_parse_key_id(info, &id)) return -EINVAL; return ops->llsec->add_key(dev, &id, &key); } int ieee802154_llsec_add_key(struct sk_buff *skb, struct genl_info *info) { if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) != (NLM_F_CREATE | NLM_F_EXCL)) return -EINVAL; return ieee802154_nl_llsec_change(skb, info, llsec_add_key); } static int llsec_remove_key(struct net_device *dev, struct genl_info *info) { struct ieee802154_mlme_ops 
*ops = ieee802154_mlme_ops(dev); struct ieee802154_llsec_key_id id; if (ieee802154_llsec_parse_key_id(info, &id)) return -EINVAL; return ops->llsec->del_key(dev, &id); } int ieee802154_llsec_del_key(struct sk_buff *skb, struct genl_info *info) { return ieee802154_nl_llsec_change(skb, info, llsec_remove_key); } static int ieee802154_nl_fill_key(struct sk_buff *msg, u32 portid, u32 seq, const struct ieee802154_llsec_key_entry *key, const struct net_device *dev) { void *hdr; u32 commands[256 / 32]; hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI, IEEE802154_LLSEC_LIST_KEY); if (!hdr) goto out; if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || ieee802154_llsec_fill_key_id(msg, &key->id) || nla_put_u8(msg, IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES, key->key->frame_types)) goto nla_put_failure; if (key->key->frame_types & BIT(IEEE802154_FC_TYPE_MAC_CMD)) { memset(commands, 0, sizeof(commands)); commands[7] = key->key->cmd_frame_ids; if (nla_put(msg, IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS, sizeof(commands), commands)) goto nla_put_failure; } if (nla_put(msg, IEEE802154_ATTR_LLSEC_KEY_BYTES, IEEE802154_LLSEC_KEY_SIZE, key->key->key)) goto nla_put_failure; genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); out: return -EMSGSIZE; } static int llsec_iter_keys(struct llsec_dump_data *data) { struct ieee802154_llsec_key_entry *pos; int rc = 0, idx = 0; list_for_each_entry(pos, &data->table->keys, list) { if (idx++ < data->s_idx) continue; if (ieee802154_nl_fill_key(data->skb, data->portid, data->nlmsg_seq, pos, data->dev)) { rc = -EMSGSIZE; break; } data->s_idx++; } return rc; } int ieee802154_llsec_dump_keys(struct sk_buff *skb, struct netlink_callback *cb) { return ieee802154_llsec_dump_table(skb, cb, llsec_iter_keys); } static int llsec_parse_dev(struct genl_info *info, struct ieee802154_llsec_device *dev) { memset(dev, 0, sizeof(*dev)); if 
(!info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER] || !info->attrs[IEEE802154_ATTR_HW_ADDR] || !info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE] || !info->attrs[IEEE802154_ATTR_LLSEC_DEV_KEY_MODE] || (!!info->attrs[IEEE802154_ATTR_PAN_ID] != !!info->attrs[IEEE802154_ATTR_SHORT_ADDR])) return -EINVAL; if (info->attrs[IEEE802154_ATTR_PAN_ID]) { dev->pan_id = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_PAN_ID]); dev->short_addr = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_SHORT_ADDR]); } else { dev->short_addr = cpu_to_le16(IEEE802154_ADDR_UNDEF); } dev->hwaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]); dev->frame_counter = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]); dev->seclevel_exempt = !!nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE]); dev->key_mode = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_DEV_KEY_MODE]); if (dev->key_mode >= __IEEE802154_LLSEC_DEVKEY_MAX) return -EINVAL; return 0; } static int llsec_add_dev(struct net_device *dev, struct genl_info *info) { struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev); struct ieee802154_llsec_device desc; if (llsec_parse_dev(info, &desc)) return -EINVAL; return ops->llsec->add_dev(dev, &desc); } int ieee802154_llsec_add_dev(struct sk_buff *skb, struct genl_info *info) { if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) != (NLM_F_CREATE | NLM_F_EXCL)) return -EINVAL; return ieee802154_nl_llsec_change(skb, info, llsec_add_dev); } static int llsec_del_dev(struct net_device *dev, struct genl_info *info) { struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev); __le64 devaddr; if (!info->attrs[IEEE802154_ATTR_HW_ADDR]) return -EINVAL; devaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]); return ops->llsec->del_dev(dev, devaddr); } int ieee802154_llsec_del_dev(struct sk_buff *skb, struct genl_info *info) { return ieee802154_nl_llsec_change(skb, info, llsec_del_dev); } static int ieee802154_nl_fill_dev(struct sk_buff *msg, u32 portid, u32 seq, 
const struct ieee802154_llsec_device *desc, const struct net_device *dev) { void *hdr; hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI, IEEE802154_LLSEC_LIST_DEV); if (!hdr) goto out; if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID, desc->pan_id) || nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR, desc->short_addr) || nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, desc->hwaddr) || nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER, desc->frame_counter) || nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_OVERRIDE, desc->seclevel_exempt) || nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_KEY_MODE, desc->key_mode)) goto nla_put_failure; genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); out: return -EMSGSIZE; } static int llsec_iter_devs(struct llsec_dump_data *data) { struct ieee802154_llsec_device *pos; int rc = 0, idx = 0; list_for_each_entry(pos, &data->table->devices, list) { if (idx++ < data->s_idx) continue; if (ieee802154_nl_fill_dev(data->skb, data->portid, data->nlmsg_seq, pos, data->dev)) { rc = -EMSGSIZE; break; } data->s_idx++; } return rc; } int ieee802154_llsec_dump_devs(struct sk_buff *skb, struct netlink_callback *cb) { return ieee802154_llsec_dump_table(skb, cb, llsec_iter_devs); } static int llsec_add_devkey(struct net_device *dev, struct genl_info *info) { struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev); struct ieee802154_llsec_device_key key; __le64 devaddr; if (!info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER] || !info->attrs[IEEE802154_ATTR_HW_ADDR] || ieee802154_llsec_parse_key_id(info, &key.key_id)) return -EINVAL; devaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]); key.frame_counter = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]); return ops->llsec->add_devkey(dev, devaddr, &key); } int ieee802154_llsec_add_devkey(struct sk_buff *skb, struct genl_info 
*info) { if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) != (NLM_F_CREATE | NLM_F_EXCL)) return -EINVAL; return ieee802154_nl_llsec_change(skb, info, llsec_add_devkey); } static int llsec_del_devkey(struct net_device *dev, struct genl_info *info) { struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev); struct ieee802154_llsec_device_key key; __le64 devaddr; if (!info->attrs[IEEE802154_ATTR_HW_ADDR] || ieee802154_llsec_parse_key_id(info, &key.key_id)) return -EINVAL; devaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]); return ops->llsec->del_devkey(dev, devaddr, &key); } int ieee802154_llsec_del_devkey(struct sk_buff *skb, struct genl_info *info) { return ieee802154_nl_llsec_change(skb, info, llsec_del_devkey); } static int ieee802154_nl_fill_devkey(struct sk_buff *msg, u32 portid, u32 seq, __le64 devaddr, const struct ieee802154_llsec_device_key *devkey, const struct net_device *dev) { void *hdr; hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI, IEEE802154_LLSEC_LIST_DEVKEY); if (!hdr) goto out; if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, devaddr) || nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER, devkey->frame_counter) || ieee802154_llsec_fill_key_id(msg, &devkey->key_id)) goto nla_put_failure; genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); out: return -EMSGSIZE; } static int llsec_iter_devkeys(struct llsec_dump_data *data) { struct ieee802154_llsec_device *dpos; struct ieee802154_llsec_device_key *kpos; int rc = 0, idx = 0, idx2; list_for_each_entry(dpos, &data->table->devices, list) { if (idx++ < data->s_idx) continue; idx2 = 0; list_for_each_entry(kpos, &dpos->keys, list) { if (idx2++ < data->s_idx2) continue; if (ieee802154_nl_fill_devkey(data->skb, data->portid, data->nlmsg_seq, dpos->hwaddr, kpos, data->dev)) { return rc = -EMSGSIZE; } data->s_idx2++; } 
data->s_idx++; } return rc; } int ieee802154_llsec_dump_devkeys(struct sk_buff *skb, struct netlink_callback *cb) { return ieee802154_llsec_dump_table(skb, cb, llsec_iter_devkeys); } static int llsec_parse_seclevel(struct genl_info *info, struct ieee802154_llsec_seclevel *sl) { memset(sl, 0, sizeof(*sl)); if (!info->attrs[IEEE802154_ATTR_LLSEC_FRAME_TYPE] || !info->attrs[IEEE802154_ATTR_LLSEC_SECLEVELS] || !info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE]) return -EINVAL; sl->frame_type = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_TYPE]); if (sl->frame_type == IEEE802154_FC_TYPE_MAC_CMD) { if (!info->attrs[IEEE802154_ATTR_LLSEC_CMD_FRAME_ID]) return -EINVAL; sl->cmd_frame_id = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_CMD_FRAME_ID]); } sl->sec_levels = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_SECLEVELS]); sl->device_override = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE]); return 0; } static int llsec_add_seclevel(struct net_device *dev, struct genl_info *info) { struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev); struct ieee802154_llsec_seclevel sl; if (llsec_parse_seclevel(info, &sl)) return -EINVAL; return ops->llsec->add_seclevel(dev, &sl); } int ieee802154_llsec_add_seclevel(struct sk_buff *skb, struct genl_info *info) { if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) != (NLM_F_CREATE | NLM_F_EXCL)) return -EINVAL; return ieee802154_nl_llsec_change(skb, info, llsec_add_seclevel); } static int llsec_del_seclevel(struct net_device *dev, struct genl_info *info) { struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev); struct ieee802154_llsec_seclevel sl; if (llsec_parse_seclevel(info, &sl)) return -EINVAL; return ops->llsec->del_seclevel(dev, &sl); } int ieee802154_llsec_del_seclevel(struct sk_buff *skb, struct genl_info *info) { return ieee802154_nl_llsec_change(skb, info, llsec_del_seclevel); } static int ieee802154_nl_fill_seclevel(struct sk_buff *msg, u32 portid, u32 seq, const struct 
ieee802154_llsec_seclevel *sl, const struct net_device *dev) { void *hdr; hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI, IEEE802154_LLSEC_LIST_SECLEVEL); if (!hdr) goto out; if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || nla_put_u8(msg, IEEE802154_ATTR_LLSEC_FRAME_TYPE, sl->frame_type) || nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVELS, sl->sec_levels) || nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_OVERRIDE, sl->device_override)) goto nla_put_failure; if (sl->frame_type == IEEE802154_FC_TYPE_MAC_CMD && nla_put_u8(msg, IEEE802154_ATTR_LLSEC_CMD_FRAME_ID, sl->cmd_frame_id)) goto nla_put_failure; genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); out: return -EMSGSIZE; } static int llsec_iter_seclevels(struct llsec_dump_data *data) { struct ieee802154_llsec_seclevel *pos; int rc = 0, idx = 0; list_for_each_entry(pos, &data->table->security_levels, list) { if (idx++ < data->s_idx) continue; if (ieee802154_nl_fill_seclevel(data->skb, data->portid, data->nlmsg_seq, pos, data->dev)) { rc = -EMSGSIZE; break; } data->s_idx++; } return rc; } int ieee802154_llsec_dump_seclevels(struct sk_buff *skb, struct netlink_callback *cb) { return ieee802154_llsec_dump_table(skb, cb, llsec_iter_seclevels); }
gpl-2.0
Perferom/android_kernel_lge_msm7x27
fs/lockd/clntproc.c
882
21321
/* * linux/fs/lockd/clntproc.c * * RPC procedures for the client side NLM implementation * * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> */ #include <linux/module.h> #include <linux/smp_lock.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/nfs_fs.h> #include <linux/utsname.h> #include <linux/freezer.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/svc.h> #include <linux/lockd/lockd.h> #define NLMDBG_FACILITY NLMDBG_CLIENT #define NLMCLNT_GRACE_WAIT (5*HZ) #define NLMCLNT_POLL_TIMEOUT (30*HZ) #define NLMCLNT_MAX_RETRIES 3 static int nlmclnt_test(struct nlm_rqst *, struct file_lock *); static int nlmclnt_lock(struct nlm_rqst *, struct file_lock *); static int nlmclnt_unlock(struct nlm_rqst *, struct file_lock *); static int nlm_stat_to_errno(__be32 stat); static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host); static int nlmclnt_cancel(struct nlm_host *, int , struct file_lock *); static const struct rpc_call_ops nlmclnt_unlock_ops; static const struct rpc_call_ops nlmclnt_cancel_ops; /* * Cookie counter for NLM requests */ static atomic_t nlm_cookie = ATOMIC_INIT(0x1234); void nlmclnt_next_cookie(struct nlm_cookie *c) { u32 cookie = atomic_inc_return(&nlm_cookie); memcpy(c->data, &cookie, 4); c->len=4; } static struct nlm_lockowner *nlm_get_lockowner(struct nlm_lockowner *lockowner) { atomic_inc(&lockowner->count); return lockowner; } static void nlm_put_lockowner(struct nlm_lockowner *lockowner) { if (!atomic_dec_and_lock(&lockowner->count, &lockowner->host->h_lock)) return; list_del(&lockowner->list); spin_unlock(&lockowner->host->h_lock); nlm_release_host(lockowner->host); kfree(lockowner); } static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid) { struct nlm_lockowner *lockowner; list_for_each_entry(lockowner, &host->h_lockowners, list) { if (lockowner->pid == pid) return -EBUSY; } return 0; } static inline uint32_t __nlm_alloc_pid(struct 
nlm_host *host) { uint32_t res; do { res = host->h_pidcount++; } while (nlm_pidbusy(host, res) < 0); return res; } static struct nlm_lockowner *__nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner) { struct nlm_lockowner *lockowner; list_for_each_entry(lockowner, &host->h_lockowners, list) { if (lockowner->owner != owner) continue; return nlm_get_lockowner(lockowner); } return NULL; } static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner) { struct nlm_lockowner *res, *new = NULL; spin_lock(&host->h_lock); res = __nlm_find_lockowner(host, owner); if (res == NULL) { spin_unlock(&host->h_lock); new = kmalloc(sizeof(*new), GFP_KERNEL); spin_lock(&host->h_lock); res = __nlm_find_lockowner(host, owner); if (res == NULL && new != NULL) { res = new; atomic_set(&new->count, 1); new->owner = owner; new->pid = __nlm_alloc_pid(host); new->host = nlm_get_host(host); list_add(&new->list, &host->h_lockowners); new = NULL; } } spin_unlock(&host->h_lock); kfree(new); return res; } /* * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls */ static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl) { struct nlm_args *argp = &req->a_args; struct nlm_lock *lock = &argp->lock; nlmclnt_next_cookie(&argp->cookie); memcpy(&lock->fh, NFS_FH(fl->fl_file->f_path.dentry->d_inode), sizeof(struct nfs_fh)); lock->caller = utsname()->nodename; lock->oh.data = req->a_owner; lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s", (unsigned int)fl->fl_u.nfs_fl.owner->pid, utsname()->nodename); lock->svid = fl->fl_u.nfs_fl.owner->pid; lock->fl.fl_start = fl->fl_start; lock->fl.fl_end = fl->fl_end; lock->fl.fl_type = fl->fl_type; } static void nlmclnt_release_lockargs(struct nlm_rqst *req) { BUG_ON(req->a_args.lock.fl.fl_ops != NULL); } /** * nlmclnt_proc - Perform a single client-side lock request * @host: address of a valid nlm_host context representing the NLM server * @cmd: fcntl-style file lock operation to perform * @fl: 
address of arguments for the lock operation * */ int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl) { struct nlm_rqst *call; int status; nlm_get_host(host); call = nlm_alloc_call(host); if (call == NULL) return -ENOMEM; nlmclnt_locks_init_private(fl, host); /* Set up the argument struct */ nlmclnt_setlockargs(call, fl); lock_kernel(); if (IS_SETLK(cmd) || IS_SETLKW(cmd)) { if (fl->fl_type != F_UNLCK) { call->a_args.block = IS_SETLKW(cmd) ? 1 : 0; status = nlmclnt_lock(call, fl); } else status = nlmclnt_unlock(call, fl); } else if (IS_GETLK(cmd)) status = nlmclnt_test(call, fl); else status = -EINVAL; fl->fl_ops->fl_release_private(fl); fl->fl_ops = NULL; unlock_kernel(); dprintk("lockd: clnt proc returns %d\n", status); return status; } EXPORT_SYMBOL_GPL(nlmclnt_proc); /* * Allocate an NLM RPC call struct * * Note: the caller must hold a reference to host. In case of failure, * this reference will be released. */ struct nlm_rqst *nlm_alloc_call(struct nlm_host *host) { struct nlm_rqst *call; for(;;) { call = kzalloc(sizeof(*call), GFP_KERNEL); if (call != NULL) { atomic_set(&call->a_count, 1); locks_init_lock(&call->a_args.lock.fl); locks_init_lock(&call->a_res.lock.fl); call->a_host = host; return call; } if (signalled()) break; printk("nlm_alloc_call: failed, waiting for memory\n"); schedule_timeout_interruptible(5*HZ); } nlm_release_host(host); return NULL; } void nlm_release_call(struct nlm_rqst *call) { if (!atomic_dec_and_test(&call->a_count)) return; nlm_release_host(call->a_host); nlmclnt_release_lockargs(call); kfree(call); } static void nlmclnt_rpc_release(void *data) { lock_kernel(); nlm_release_call(data); unlock_kernel(); } static int nlm_wait_on_grace(wait_queue_head_t *queue) { DEFINE_WAIT(wait); int status = -EINTR; prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE); if (!signalled ()) { schedule_timeout(NLMCLNT_GRACE_WAIT); try_to_freeze(); if (!signalled ()) status = 0; } finish_wait(queue, &wait); return status; } /* * Generic 
NLM call */ static int nlmclnt_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc) { struct nlm_host *host = req->a_host; struct rpc_clnt *clnt; struct nlm_args *argp = &req->a_args; struct nlm_res *resp = &req->a_res; struct rpc_message msg = { .rpc_argp = argp, .rpc_resp = resp, .rpc_cred = cred, }; int status; dprintk("lockd: call procedure %d on %s\n", (int)proc, host->h_name); do { if (host->h_reclaiming && !argp->reclaim) goto in_grace_period; /* If we have no RPC client yet, create one. */ if ((clnt = nlm_bind_host(host)) == NULL) return -ENOLCK; msg.rpc_proc = &clnt->cl_procinfo[proc]; /* Perform the RPC call. If an error occurs, try again */ if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) { dprintk("lockd: rpc_call returned error %d\n", -status); switch (status) { case -EPROTONOSUPPORT: status = -EINVAL; break; case -ECONNREFUSED: case -ETIMEDOUT: case -ENOTCONN: nlm_rebind_host(host); status = -EAGAIN; break; case -ERESTARTSYS: return signalled () ? -EINTR : status; default: break; } break; } else if (resp->status == nlm_lck_denied_grace_period) { dprintk("lockd: server in grace period\n"); if (argp->reclaim) { printk(KERN_WARNING "lockd: spurious grace period reject?!\n"); return -ENOLCK; } } else { if (!argp->reclaim) { /* We appear to be out of the grace period */ wake_up_all(&host->h_gracewait); } dprintk("lockd: server returns status %d\n", resp->status); return 0; /* Okay, call complete */ } in_grace_period: /* * The server has rebooted and appears to be in the grace * period during which locks are only allowed to be * reclaimed. * We can only back off and try again later. */ status = nlm_wait_on_grace(&host->h_gracewait); } while (status == 0); return status; } /* * Generic NLM call, async version. 
*/ static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops) { struct nlm_host *host = req->a_host; struct rpc_clnt *clnt; struct rpc_task_setup task_setup_data = { .rpc_message = msg, .callback_ops = tk_ops, .callback_data = req, .flags = RPC_TASK_ASYNC, }; dprintk("lockd: call procedure %d on %s (async)\n", (int)proc, host->h_name); /* If we have no RPC client yet, create one. */ clnt = nlm_bind_host(host); if (clnt == NULL) goto out_err; msg->rpc_proc = &clnt->cl_procinfo[proc]; task_setup_data.rpc_client = clnt; /* bootstrap and kick off the async RPC call */ return rpc_run_task(&task_setup_data); out_err: tk_ops->rpc_release(req); return ERR_PTR(-ENOLCK); } static int nlm_do_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops) { struct rpc_task *task; task = __nlm_async_call(req, proc, msg, tk_ops); if (IS_ERR(task)) return PTR_ERR(task); rpc_put_task(task); return 0; } /* * NLM asynchronous call. */ int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops) { struct rpc_message msg = { .rpc_argp = &req->a_args, .rpc_resp = &req->a_res, }; return nlm_do_async_call(req, proc, &msg, tk_ops); } int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops) { struct rpc_message msg = { .rpc_argp = &req->a_res, }; return nlm_do_async_call(req, proc, &msg, tk_ops); } /* * NLM client asynchronous call. * * Note that although the calls are asynchronous, and are therefore * guaranteed to complete, we still always attempt to wait for * completion in order to be able to correctly track the lock * state. 
*/ static int nlmclnt_async_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops) { struct rpc_message msg = { .rpc_argp = &req->a_args, .rpc_resp = &req->a_res, .rpc_cred = cred, }; struct rpc_task *task; int err; task = __nlm_async_call(req, proc, &msg, tk_ops); if (IS_ERR(task)) return PTR_ERR(task); err = rpc_wait_for_completion_task(task); rpc_put_task(task); return err; } /* * TEST for the presence of a conflicting lock */ static int nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl) { int status; status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_TEST); if (status < 0) goto out; switch (req->a_res.status) { case nlm_granted: fl->fl_type = F_UNLCK; break; case nlm_lck_denied: /* * Report the conflicting lock back to the application. */ fl->fl_start = req->a_res.lock.fl.fl_start; fl->fl_end = req->a_res.lock.fl.fl_end; fl->fl_type = req->a_res.lock.fl.fl_type; fl->fl_pid = 0; break; default: status = nlm_stat_to_errno(req->a_res.status); } out: nlm_release_call(req); return status; } static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl) { new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state; new->fl_u.nfs_fl.owner = nlm_get_lockowner(fl->fl_u.nfs_fl.owner); list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted); } static void nlmclnt_locks_release_private(struct file_lock *fl) { list_del(&fl->fl_u.nfs_fl.list); nlm_put_lockowner(fl->fl_u.nfs_fl.owner); } static const struct file_lock_operations nlmclnt_lock_ops = { .fl_copy_lock = nlmclnt_locks_copy_lock, .fl_release_private = nlmclnt_locks_release_private, }; static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host) { BUG_ON(fl->fl_ops != NULL); fl->fl_u.nfs_fl.state = 0; fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner); INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list); fl->fl_ops = &nlmclnt_lock_ops; } static int do_vfs_lock(struct file_lock *fl) { int res = 0; switch 
(fl->fl_flags & (FL_POSIX|FL_FLOCK)) { case FL_POSIX: res = posix_lock_file_wait(fl->fl_file, fl); break; case FL_FLOCK: res = flock_lock_file_wait(fl->fl_file, fl); break; default: BUG(); } return res; } /* * LOCK: Try to create a lock * * Programmer Harassment Alert * * When given a blocking lock request in a sync RPC call, the HPUX lockd * will faithfully return LCK_BLOCKED but never cares to notify us when * the lock could be granted. This way, our local process could hang * around forever waiting for the callback. * * Solution A: Implement busy-waiting * Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES}) * * For now I am implementing solution A, because I hate the idea of * re-implementing lockd for a third time in two months. The async * calls shouldn't be too hard to do, however. * * This is one of the lovely things about standards in the NFS area: * they're so soft and squishy you can't really blame HP for doing this. */ static int nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl) { struct rpc_cred *cred = nfs_file_cred(fl->fl_file); struct nlm_host *host = req->a_host; struct nlm_res *resp = &req->a_res; struct nlm_wait *block = NULL; unsigned char fl_flags = fl->fl_flags; unsigned char fl_type; int status = -ENOLCK; if (nsm_monitor(host) < 0) goto out; req->a_args.state = nsm_local_state; fl->fl_flags |= FL_ACCESS; status = do_vfs_lock(fl); fl->fl_flags = fl_flags; if (status < 0) goto out; block = nlmclnt_prepare_block(host, fl); again: /* * Initialise resp->status to a valid non-zero value, * since 0 == nlm_lck_granted */ resp->status = nlm_lck_blocked; for(;;) { /* Reboot protection */ fl->fl_u.nfs_fl.state = host->h_state; status = nlmclnt_call(cred, req, NLMPROC_LOCK); if (status < 0) break; /* Did a reclaimer thread notify us of a server reboot? 
*/ if (resp->status == nlm_lck_denied_grace_period) continue; if (resp->status != nlm_lck_blocked) break; /* Wait on an NLM blocking lock */ status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT); if (status < 0) break; if (resp->status != nlm_lck_blocked) break; } /* if we were interrupted while blocking, then cancel the lock request * and exit */ if (resp->status == nlm_lck_blocked) { if (!req->a_args.block) goto out_unlock; if (nlmclnt_cancel(host, req->a_args.block, fl) == 0) goto out_unblock; } if (resp->status == nlm_granted) { down_read(&host->h_rwsem); /* Check whether or not the server has rebooted */ if (fl->fl_u.nfs_fl.state != host->h_state) { up_read(&host->h_rwsem); goto again; } /* Ensure the resulting lock will get added to granted list */ fl->fl_flags |= FL_SLEEP; if (do_vfs_lock(fl) < 0) printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__); up_read(&host->h_rwsem); fl->fl_flags = fl_flags; status = 0; } if (status < 0) goto out_unlock; /* * EAGAIN doesn't make sense for sleeping locks, and in some * cases NLM_LCK_DENIED is returned for a permanent error. So * turn it into an ENOLCK. 
*/ if (resp->status == nlm_lck_denied && (fl_flags & FL_SLEEP)) status = -ENOLCK; else status = nlm_stat_to_errno(resp->status); out_unblock: nlmclnt_finish_block(block); out: nlm_release_call(req); return status; out_unlock: /* Fatal error: ensure that we remove the lock altogether */ dprintk("lockd: lock attempt ended in fatal error.\n" " Attempting to unlock.\n"); nlmclnt_finish_block(block); fl_type = fl->fl_type; fl->fl_type = F_UNLCK; down_read(&host->h_rwsem); do_vfs_lock(fl); up_read(&host->h_rwsem); fl->fl_type = fl_type; fl->fl_flags = fl_flags; nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops); return status; } /* * RECLAIM: Try to reclaim a lock */ int nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl) { struct nlm_rqst reqst, *req; int status; req = &reqst; memset(req, 0, sizeof(*req)); locks_init_lock(&req->a_args.lock.fl); locks_init_lock(&req->a_res.lock.fl); req->a_host = host; req->a_flags = 0; /* Set up the argument struct */ nlmclnt_setlockargs(req, fl); req->a_args.reclaim = 1; status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_LOCK); if (status >= 0 && req->a_res.status == nlm_granted) return 0; printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d " "(errno %d, status %d)\n", fl->fl_pid, status, ntohl(req->a_res.status)); /* * FIXME: This is a serious failure. We can * * a. Ignore the problem * b. Send the owning process some signal (Linux doesn't have * SIGLOST, though...) * c. Retry the operation * * Until someone comes up with a simple implementation * for b or c, I'll choose option a. */ return -ENOLCK; } /* * UNLOCK: remove an existing lock */ static int nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl) { struct nlm_host *host = req->a_host; struct nlm_res *resp = &req->a_res; int status; unsigned char fl_flags = fl->fl_flags; /* * Note: the server is supposed to either grant us the unlock * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. 
In either * case, we want to unlock. */ fl->fl_flags |= FL_EXISTS; down_read(&host->h_rwsem); status = do_vfs_lock(fl); up_read(&host->h_rwsem); fl->fl_flags = fl_flags; if (status == -ENOENT) { status = 0; goto out; } atomic_inc(&req->a_count); status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops); if (status < 0) goto out; if (resp->status == nlm_granted) goto out; if (resp->status != nlm_lck_denied_nolocks) printk("lockd: unexpected unlock status: %d\n", resp->status); /* What to do now? I'm out of my depth... */ status = -ENOLCK; out: nlm_release_call(req); return status; } static void nlmclnt_unlock_callback(struct rpc_task *task, void *data) { struct nlm_rqst *req = data; u32 status = ntohl(req->a_res.status); if (RPC_ASSASSINATED(task)) goto die; if (task->tk_status < 0) { dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status); goto retry_rebind; } if (status == NLM_LCK_DENIED_GRACE_PERIOD) { rpc_delay(task, NLMCLNT_GRACE_WAIT); goto retry_unlock; } if (status != NLM_LCK_GRANTED) printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status); die: return; retry_rebind: lock_kernel(); nlm_rebind_host(req->a_host); unlock_kernel(); retry_unlock: rpc_restart_call(task); } static const struct rpc_call_ops nlmclnt_unlock_ops = { .rpc_call_done = nlmclnt_unlock_callback, .rpc_release = nlmclnt_rpc_release, }; /* * Cancel a blocked lock request. * We always use an async RPC call for this in order not to hang a * process that has been Ctrl-C'ed. 
*/ static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl) { struct nlm_rqst *req; int status; dprintk("lockd: blocking lock attempt was interrupted by a signal.\n" " Attempting to cancel lock.\n"); req = nlm_alloc_call(nlm_get_host(host)); if (!req) return -ENOMEM; req->a_flags = RPC_TASK_ASYNC; nlmclnt_setlockargs(req, fl); req->a_args.block = block; atomic_inc(&req->a_count); status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req, NLMPROC_CANCEL, &nlmclnt_cancel_ops); if (status == 0 && req->a_res.status == nlm_lck_denied) status = -ENOLCK; nlm_release_call(req); return status; } static void nlmclnt_cancel_callback(struct rpc_task *task, void *data) { struct nlm_rqst *req = data; u32 status = ntohl(req->a_res.status); if (RPC_ASSASSINATED(task)) goto die; if (task->tk_status < 0) { dprintk("lockd: CANCEL call error %d, retrying.\n", task->tk_status); goto retry_cancel; } dprintk("lockd: cancel status %u (task %u)\n", status, task->tk_pid); switch (status) { case NLM_LCK_GRANTED: case NLM_LCK_DENIED_GRACE_PERIOD: case NLM_LCK_DENIED: /* Everything's good */ break; case NLM_LCK_DENIED_NOLOCKS: dprintk("lockd: CANCEL failed (server has no locks)\n"); goto retry_cancel; default: printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n", status); } die: return; retry_cancel: /* Don't ever retry more than 3 times */ if (req->a_retries++ >= NLMCLNT_MAX_RETRIES) goto die; lock_kernel(); nlm_rebind_host(req->a_host); unlock_kernel(); rpc_restart_call(task); rpc_delay(task, 30 * HZ); } static const struct rpc_call_ops nlmclnt_cancel_ops = { .rpc_call_done = nlmclnt_cancel_callback, .rpc_release = nlmclnt_rpc_release, }; /* * Convert an NLM status code to a generic kernel errno */ static int nlm_stat_to_errno(__be32 status) { switch(ntohl(status)) { case NLM_LCK_GRANTED: return 0; case NLM_LCK_DENIED: return -EAGAIN; case NLM_LCK_DENIED_NOLOCKS: case NLM_LCK_DENIED_GRACE_PERIOD: return -ENOLCK; case NLM_LCK_BLOCKED: printk(KERN_NOTICE 
"lockd: unexpected status NLM_BLOCKED\n"); return -ENOLCK; #ifdef CONFIG_LOCKD_V4 case NLM_DEADLCK: return -EDEADLK; case NLM_ROFS: return -EROFS; case NLM_STALE_FH: return -ESTALE; case NLM_FBIG: return -EOVERFLOW; case NLM_FAILED: return -ENOLCK; #endif } printk(KERN_NOTICE "lockd: unexpected server status %d\n", status); return -ENOLCK; }
gpl-2.0
psyke83/android_kernel_samsung_msm-codeaurora
sound/atmel/ac97c.c
882
30839
/* * Driver for Atmel AC97C * * Copyright (C) 2005-2009 Atmel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/bitmap.h> #include <linux/device.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/atmel_pdc.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/mutex.h> #include <linux/gpio.h> #include <linux/io.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/ac97_codec.h> #include <sound/atmel-ac97c.h> #include <sound/memalloc.h> #include <linux/dw_dmac.h> #include <mach/cpu.h> #include <mach/hardware.h> #include <mach/gpio.h> #include "ac97c.h" enum { DMA_TX_READY = 0, DMA_RX_READY, DMA_TX_CHAN_PRESENT, DMA_RX_CHAN_PRESENT, }; /* Serialize access to opened variable */ static DEFINE_MUTEX(opened_mutex); struct atmel_ac97c_dma { struct dma_chan *rx_chan; struct dma_chan *tx_chan; }; struct atmel_ac97c { struct clk *pclk; struct platform_device *pdev; struct atmel_ac97c_dma dma; struct snd_pcm_substream *playback_substream; struct snd_pcm_substream *capture_substream; struct snd_card *card; struct snd_pcm *pcm; struct snd_ac97 *ac97; struct snd_ac97_bus *ac97_bus; u64 cur_format; unsigned int cur_rate; unsigned long flags; int playback_period, capture_period; /* Serialize access to opened variable */ spinlock_t lock; void __iomem *regs; int irq; int opened; int reset_pin; }; #define get_chip(card) ((struct atmel_ac97c *)(card)->private_data) #define ac97c_writel(chip, reg, val) \ __raw_writel((val), (chip)->regs + AC97C_##reg) #define ac97c_readl(chip, reg) \ __raw_readl((chip)->regs + AC97C_##reg) /* This function is called by the DMA driver. 
*/ static void atmel_ac97c_dma_playback_period_done(void *arg) { struct atmel_ac97c *chip = arg; snd_pcm_period_elapsed(chip->playback_substream); } static void atmel_ac97c_dma_capture_period_done(void *arg) { struct atmel_ac97c *chip = arg; snd_pcm_period_elapsed(chip->capture_substream); } static int atmel_ac97c_prepare_dma(struct atmel_ac97c *chip, struct snd_pcm_substream *substream, enum dma_data_direction direction) { struct dma_chan *chan; struct dw_cyclic_desc *cdesc; struct snd_pcm_runtime *runtime = substream->runtime; unsigned long buffer_len, period_len; /* * We don't do DMA on "complex" transfers, i.e. with * non-halfword-aligned buffers or lengths. */ if (runtime->dma_addr & 1 || runtime->buffer_size & 1) { dev_dbg(&chip->pdev->dev, "too complex transfer\n"); return -EINVAL; } if (direction == DMA_TO_DEVICE) chan = chip->dma.tx_chan; else chan = chip->dma.rx_chan; buffer_len = frames_to_bytes(runtime, runtime->buffer_size); period_len = frames_to_bytes(runtime, runtime->period_size); cdesc = dw_dma_cyclic_prep(chan, runtime->dma_addr, buffer_len, period_len, direction); if (IS_ERR(cdesc)) { dev_dbg(&chip->pdev->dev, "could not prepare cyclic DMA\n"); return PTR_ERR(cdesc); } if (direction == DMA_TO_DEVICE) { cdesc->period_callback = atmel_ac97c_dma_playback_period_done; set_bit(DMA_TX_READY, &chip->flags); } else { cdesc->period_callback = atmel_ac97c_dma_capture_period_done; set_bit(DMA_RX_READY, &chip->flags); } cdesc->period_callback_param = chip; return 0; } static struct snd_pcm_hardware atmel_ac97c_hw = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_JOINT_DUPLEX | SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_PAUSE), .formats = (SNDRV_PCM_FMTBIT_S16_BE | SNDRV_PCM_FMTBIT_S16_LE), .rates = (SNDRV_PCM_RATE_CONTINUOUS), .rate_min = 4000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = 2 * 2 * 64 * 2048, .period_bytes_min = 4096, 
.period_bytes_max = 4096, .periods_min = 6, .periods_max = 64, }; static int atmel_ac97c_playback_open(struct snd_pcm_substream *substream) { struct atmel_ac97c *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; mutex_lock(&opened_mutex); chip->opened++; runtime->hw = atmel_ac97c_hw; if (chip->cur_rate) { runtime->hw.rate_min = chip->cur_rate; runtime->hw.rate_max = chip->cur_rate; } if (chip->cur_format) runtime->hw.formats = (1ULL << chip->cur_format); mutex_unlock(&opened_mutex); chip->playback_substream = substream; return 0; } static int atmel_ac97c_capture_open(struct snd_pcm_substream *substream) { struct atmel_ac97c *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; mutex_lock(&opened_mutex); chip->opened++; runtime->hw = atmel_ac97c_hw; if (chip->cur_rate) { runtime->hw.rate_min = chip->cur_rate; runtime->hw.rate_max = chip->cur_rate; } if (chip->cur_format) runtime->hw.formats = (1ULL << chip->cur_format); mutex_unlock(&opened_mutex); chip->capture_substream = substream; return 0; } static int atmel_ac97c_playback_close(struct snd_pcm_substream *substream) { struct atmel_ac97c *chip = snd_pcm_substream_chip(substream); mutex_lock(&opened_mutex); chip->opened--; if (!chip->opened) { chip->cur_rate = 0; chip->cur_format = 0; } mutex_unlock(&opened_mutex); chip->playback_substream = NULL; return 0; } static int atmel_ac97c_capture_close(struct snd_pcm_substream *substream) { struct atmel_ac97c *chip = snd_pcm_substream_chip(substream); mutex_lock(&opened_mutex); chip->opened--; if (!chip->opened) { chip->cur_rate = 0; chip->cur_format = 0; } mutex_unlock(&opened_mutex); chip->capture_substream = NULL; return 0; } static int atmel_ac97c_playback_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct atmel_ac97c *chip = snd_pcm_substream_chip(substream); int retval; retval = snd_pcm_lib_malloc_pages(substream, 
params_buffer_bytes(hw_params)); if (retval < 0) return retval; /* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */ if (cpu_is_at32ap7000()) { /* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */ if (retval == 1) if (test_and_clear_bit(DMA_TX_READY, &chip->flags)) dw_dma_cyclic_free(chip->dma.tx_chan); } /* Set restrictions to params. */ mutex_lock(&opened_mutex); chip->cur_rate = params_rate(hw_params); chip->cur_format = params_format(hw_params); mutex_unlock(&opened_mutex); return retval; } static int atmel_ac97c_capture_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct atmel_ac97c *chip = snd_pcm_substream_chip(substream); int retval; retval = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); if (retval < 0) return retval; /* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */ if (cpu_is_at32ap7000()) { if (retval < 0) return retval; /* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */ if (retval == 1) if (test_and_clear_bit(DMA_RX_READY, &chip->flags)) dw_dma_cyclic_free(chip->dma.rx_chan); } /* Set restrictions to params. 
*/ mutex_lock(&opened_mutex); chip->cur_rate = params_rate(hw_params); chip->cur_format = params_format(hw_params); mutex_unlock(&opened_mutex); return retval; } static int atmel_ac97c_playback_hw_free(struct snd_pcm_substream *substream) { struct atmel_ac97c *chip = snd_pcm_substream_chip(substream); if (cpu_is_at32ap7000()) { if (test_and_clear_bit(DMA_TX_READY, &chip->flags)) dw_dma_cyclic_free(chip->dma.tx_chan); } return snd_pcm_lib_free_pages(substream); } static int atmel_ac97c_capture_hw_free(struct snd_pcm_substream *substream) { struct atmel_ac97c *chip = snd_pcm_substream_chip(substream); if (cpu_is_at32ap7000()) { if (test_and_clear_bit(DMA_RX_READY, &chip->flags)) dw_dma_cyclic_free(chip->dma.rx_chan); } return snd_pcm_lib_free_pages(substream); } static int atmel_ac97c_playback_prepare(struct snd_pcm_substream *substream) { struct atmel_ac97c *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; int block_size = frames_to_bytes(runtime, runtime->period_size); unsigned long word = ac97c_readl(chip, OCA); int retval; chip->playback_period = 0; word &= ~(AC97C_CH_MASK(PCM_LEFT) | AC97C_CH_MASK(PCM_RIGHT)); /* assign channels to AC97C channel A */ switch (runtime->channels) { case 1: word |= AC97C_CH_ASSIGN(PCM_LEFT, A); break; case 2: word |= AC97C_CH_ASSIGN(PCM_LEFT, A) | AC97C_CH_ASSIGN(PCM_RIGHT, A); break; default: /* TODO: support more than two channels */ return -EINVAL; } ac97c_writel(chip, OCA, word); /* configure sample format and size */ word = ac97c_readl(chip, CAMR); if (chip->opened <= 1) word = AC97C_CMR_DMAEN | AC97C_CMR_SIZE_16; else word |= AC97C_CMR_DMAEN | AC97C_CMR_SIZE_16; switch (runtime->format) { case SNDRV_PCM_FORMAT_S16_LE: if (cpu_is_at32ap7000()) word |= AC97C_CMR_CEM_LITTLE; break; case SNDRV_PCM_FORMAT_S16_BE: /* fall through */ word &= ~(AC97C_CMR_CEM_LITTLE); break; default: word = ac97c_readl(chip, OCA); word &= ~(AC97C_CH_MASK(PCM_LEFT) | AC97C_CH_MASK(PCM_RIGHT)); 
ac97c_writel(chip, OCA, word); return -EINVAL; } /* Enable underrun interrupt on channel A */ word |= AC97C_CSR_UNRUN; ac97c_writel(chip, CAMR, word); /* Enable channel A event interrupt */ word = ac97c_readl(chip, IMR); word |= AC97C_SR_CAEVT; ac97c_writel(chip, IER, word); /* set variable rate if needed */ if (runtime->rate != 48000) { word = ac97c_readl(chip, MR); word |= AC97C_MR_VRA; ac97c_writel(chip, MR, word); } else { word = ac97c_readl(chip, MR); word &= ~(AC97C_MR_VRA); ac97c_writel(chip, MR, word); } retval = snd_ac97_set_rate(chip->ac97, AC97_PCM_FRONT_DAC_RATE, runtime->rate); if (retval) dev_dbg(&chip->pdev->dev, "could not set rate %d Hz\n", runtime->rate); if (cpu_is_at32ap7000()) { if (!test_bit(DMA_TX_READY, &chip->flags)) retval = atmel_ac97c_prepare_dma(chip, substream, DMA_TO_DEVICE); } else { /* Initialize and start the PDC */ writel(runtime->dma_addr, chip->regs + ATMEL_PDC_TPR); writel(block_size / 2, chip->regs + ATMEL_PDC_TCR); writel(runtime->dma_addr + block_size, chip->regs + ATMEL_PDC_TNPR); writel(block_size / 2, chip->regs + ATMEL_PDC_TNCR); } return retval; } static int atmel_ac97c_capture_prepare(struct snd_pcm_substream *substream) { struct atmel_ac97c *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; int block_size = frames_to_bytes(runtime, runtime->period_size); unsigned long word = ac97c_readl(chip, ICA); int retval; chip->capture_period = 0; word &= ~(AC97C_CH_MASK(PCM_LEFT) | AC97C_CH_MASK(PCM_RIGHT)); /* assign channels to AC97C channel A */ switch (runtime->channels) { case 1: word |= AC97C_CH_ASSIGN(PCM_LEFT, A); break; case 2: word |= AC97C_CH_ASSIGN(PCM_LEFT, A) | AC97C_CH_ASSIGN(PCM_RIGHT, A); break; default: /* TODO: support more than two channels */ return -EINVAL; } ac97c_writel(chip, ICA, word); /* configure sample format and size */ word = ac97c_readl(chip, CAMR); if (chip->opened <= 1) word = AC97C_CMR_DMAEN | AC97C_CMR_SIZE_16; else word |= AC97C_CMR_DMAEN | 
AC97C_CMR_SIZE_16; switch (runtime->format) { case SNDRV_PCM_FORMAT_S16_LE: if (cpu_is_at32ap7000()) word |= AC97C_CMR_CEM_LITTLE; break; case SNDRV_PCM_FORMAT_S16_BE: /* fall through */ word &= ~(AC97C_CMR_CEM_LITTLE); break; default: word = ac97c_readl(chip, ICA); word &= ~(AC97C_CH_MASK(PCM_LEFT) | AC97C_CH_MASK(PCM_RIGHT)); ac97c_writel(chip, ICA, word); return -EINVAL; } /* Enable overrun interrupt on channel A */ word |= AC97C_CSR_OVRUN; ac97c_writel(chip, CAMR, word); /* Enable channel A event interrupt */ word = ac97c_readl(chip, IMR); word |= AC97C_SR_CAEVT; ac97c_writel(chip, IER, word); /* set variable rate if needed */ if (runtime->rate != 48000) { word = ac97c_readl(chip, MR); word |= AC97C_MR_VRA; ac97c_writel(chip, MR, word); } else { word = ac97c_readl(chip, MR); word &= ~(AC97C_MR_VRA); ac97c_writel(chip, MR, word); } retval = snd_ac97_set_rate(chip->ac97, AC97_PCM_LR_ADC_RATE, runtime->rate); if (retval) dev_dbg(&chip->pdev->dev, "could not set rate %d Hz\n", runtime->rate); if (cpu_is_at32ap7000()) { if (!test_bit(DMA_RX_READY, &chip->flags)) retval = atmel_ac97c_prepare_dma(chip, substream, DMA_FROM_DEVICE); } else { /* Initialize and start the PDC */ writel(runtime->dma_addr, chip->regs + ATMEL_PDC_RPR); writel(block_size / 2, chip->regs + ATMEL_PDC_RCR); writel(runtime->dma_addr + block_size, chip->regs + ATMEL_PDC_RNPR); writel(block_size / 2, chip->regs + ATMEL_PDC_RNCR); } return retval; } static int atmel_ac97c_playback_trigger(struct snd_pcm_substream *substream, int cmd) { struct atmel_ac97c *chip = snd_pcm_substream_chip(substream); unsigned long camr, ptcr = 0; int retval = 0; camr = ac97c_readl(chip, CAMR); switch (cmd) { case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: /* fall through */ case SNDRV_PCM_TRIGGER_RESUME: /* fall through */ case SNDRV_PCM_TRIGGER_START: if (cpu_is_at32ap7000()) { retval = dw_dma_cyclic_start(chip->dma.tx_chan); if (retval) goto out; } else { ptcr = ATMEL_PDC_TXTEN; } camr |= AC97C_CMR_CENA | AC97C_CSR_ENDTX; break; 
case SNDRV_PCM_TRIGGER_PAUSE_PUSH: /* fall through */ case SNDRV_PCM_TRIGGER_SUSPEND: /* fall through */ case SNDRV_PCM_TRIGGER_STOP: if (cpu_is_at32ap7000()) dw_dma_cyclic_stop(chip->dma.tx_chan); else ptcr |= ATMEL_PDC_TXTDIS; if (chip->opened <= 1) camr &= ~AC97C_CMR_CENA; break; default: retval = -EINVAL; goto out; } ac97c_writel(chip, CAMR, camr); if (!cpu_is_at32ap7000()) writel(ptcr, chip->regs + ATMEL_PDC_PTCR); out: return retval; } static int atmel_ac97c_capture_trigger(struct snd_pcm_substream *substream, int cmd) { struct atmel_ac97c *chip = snd_pcm_substream_chip(substream); unsigned long camr, ptcr = 0; int retval = 0; camr = ac97c_readl(chip, CAMR); ptcr = readl(chip->regs + ATMEL_PDC_PTSR); switch (cmd) { case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: /* fall through */ case SNDRV_PCM_TRIGGER_RESUME: /* fall through */ case SNDRV_PCM_TRIGGER_START: if (cpu_is_at32ap7000()) { retval = dw_dma_cyclic_start(chip->dma.rx_chan); if (retval) goto out; } else { ptcr = ATMEL_PDC_RXTEN; } camr |= AC97C_CMR_CENA | AC97C_CSR_ENDRX; break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: /* fall through */ case SNDRV_PCM_TRIGGER_SUSPEND: /* fall through */ case SNDRV_PCM_TRIGGER_STOP: if (cpu_is_at32ap7000()) dw_dma_cyclic_stop(chip->dma.rx_chan); else ptcr |= (ATMEL_PDC_RXTDIS); if (chip->opened <= 1) camr &= ~AC97C_CMR_CENA; break; default: retval = -EINVAL; break; } ac97c_writel(chip, CAMR, camr); if (!cpu_is_at32ap7000()) writel(ptcr, chip->regs + ATMEL_PDC_PTCR); out: return retval; } static snd_pcm_uframes_t atmel_ac97c_playback_pointer(struct snd_pcm_substream *substream) { struct atmel_ac97c *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_uframes_t frames; unsigned long bytes; if (cpu_is_at32ap7000()) bytes = dw_dma_get_src_addr(chip->dma.tx_chan); else bytes = readl(chip->regs + ATMEL_PDC_TPR); bytes -= runtime->dma_addr; frames = bytes_to_frames(runtime, bytes); if (frames >= runtime->buffer_size) frames -= 
runtime->buffer_size; return frames; } static snd_pcm_uframes_t atmel_ac97c_capture_pointer(struct snd_pcm_substream *substream) { struct atmel_ac97c *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_uframes_t frames; unsigned long bytes; if (cpu_is_at32ap7000()) bytes = dw_dma_get_dst_addr(chip->dma.rx_chan); else bytes = readl(chip->regs + ATMEL_PDC_RPR); bytes -= runtime->dma_addr; frames = bytes_to_frames(runtime, bytes); if (frames >= runtime->buffer_size) frames -= runtime->buffer_size; return frames; } static struct snd_pcm_ops atmel_ac97_playback_ops = { .open = atmel_ac97c_playback_open, .close = atmel_ac97c_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = atmel_ac97c_playback_hw_params, .hw_free = atmel_ac97c_playback_hw_free, .prepare = atmel_ac97c_playback_prepare, .trigger = atmel_ac97c_playback_trigger, .pointer = atmel_ac97c_playback_pointer, }; static struct snd_pcm_ops atmel_ac97_capture_ops = { .open = atmel_ac97c_capture_open, .close = atmel_ac97c_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = atmel_ac97c_capture_hw_params, .hw_free = atmel_ac97c_capture_hw_free, .prepare = atmel_ac97c_capture_prepare, .trigger = atmel_ac97c_capture_trigger, .pointer = atmel_ac97c_capture_pointer, }; static irqreturn_t atmel_ac97c_interrupt(int irq, void *dev) { struct atmel_ac97c *chip = (struct atmel_ac97c *)dev; irqreturn_t retval = IRQ_NONE; u32 sr = ac97c_readl(chip, SR); u32 casr = ac97c_readl(chip, CASR); u32 cosr = ac97c_readl(chip, COSR); u32 camr = ac97c_readl(chip, CAMR); if (sr & AC97C_SR_CAEVT) { struct snd_pcm_runtime *runtime; int offset, next_period, block_size; dev_dbg(&chip->pdev->dev, "channel A event%s%s%s%s%s%s\n", casr & AC97C_CSR_OVRUN ? " OVRUN" : "", casr & AC97C_CSR_RXRDY ? " RXRDY" : "", casr & AC97C_CSR_UNRUN ? " UNRUN" : "", casr & AC97C_CSR_TXEMPTY ? " TXEMPTY" : "", casr & AC97C_CSR_TXRDY ? " TXRDY" : "", !casr ? 
" NONE" : ""); if (!cpu_is_at32ap7000()) { if ((casr & camr) & AC97C_CSR_ENDTX) { runtime = chip->playback_substream->runtime; block_size = frames_to_bytes(runtime, runtime->period_size); chip->playback_period++; if (chip->playback_period == runtime->periods) chip->playback_period = 0; next_period = chip->playback_period + 1; if (next_period == runtime->periods) next_period = 0; offset = block_size * next_period; writel(runtime->dma_addr + offset, chip->regs + ATMEL_PDC_TNPR); writel(block_size / 2, chip->regs + ATMEL_PDC_TNCR); snd_pcm_period_elapsed( chip->playback_substream); } if ((casr & camr) & AC97C_CSR_ENDRX) { runtime = chip->capture_substream->runtime; block_size = frames_to_bytes(runtime, runtime->period_size); chip->capture_period++; if (chip->capture_period == runtime->periods) chip->capture_period = 0; next_period = chip->capture_period + 1; if (next_period == runtime->periods) next_period = 0; offset = block_size * next_period; writel(runtime->dma_addr + offset, chip->regs + ATMEL_PDC_RNPR); writel(block_size / 2, chip->regs + ATMEL_PDC_RNCR); snd_pcm_period_elapsed(chip->capture_substream); } } retval = IRQ_HANDLED; } if (sr & AC97C_SR_COEVT) { dev_info(&chip->pdev->dev, "codec channel event%s%s%s%s%s\n", cosr & AC97C_CSR_OVRUN ? " OVRUN" : "", cosr & AC97C_CSR_RXRDY ? " RXRDY" : "", cosr & AC97C_CSR_TXEMPTY ? " TXEMPTY" : "", cosr & AC97C_CSR_TXRDY ? " TXRDY" : "", !cosr ? 
" NONE" : ""); retval = IRQ_HANDLED; } if (retval == IRQ_NONE) { dev_err(&chip->pdev->dev, "spurious interrupt sr 0x%08x " "casr 0x%08x cosr 0x%08x\n", sr, casr, cosr); } return retval; } static struct ac97_pcm at91_ac97_pcm_defs[] __devinitdata = { /* Playback */ { .exclusive = 1, .r = { { .slots = ((1 << AC97_SLOT_PCM_LEFT) | (1 << AC97_SLOT_PCM_RIGHT)), } }, }, /* PCM in */ { .stream = 1, .exclusive = 1, .r = { { .slots = ((1 << AC97_SLOT_PCM_LEFT) | (1 << AC97_SLOT_PCM_RIGHT)), } } }, /* Mic in */ { .stream = 1, .exclusive = 1, .r = { { .slots = (1<<AC97_SLOT_MIC), } } }, }; static int __devinit atmel_ac97c_pcm_new(struct atmel_ac97c *chip) { struct snd_pcm *pcm; struct snd_pcm_hardware hw = atmel_ac97c_hw; int capture, playback, retval, err; capture = test_bit(DMA_RX_CHAN_PRESENT, &chip->flags); playback = test_bit(DMA_TX_CHAN_PRESENT, &chip->flags); if (!cpu_is_at32ap7000()) { err = snd_ac97_pcm_assign(chip->ac97_bus, ARRAY_SIZE(at91_ac97_pcm_defs), at91_ac97_pcm_defs); if (err) return err; } retval = snd_pcm_new(chip->card, chip->card->shortname, chip->pdev->id, playback, capture, &pcm); if (retval) return retval; if (capture) snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &atmel_ac97_capture_ops); if (playback) snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &atmel_ac97_playback_ops); retval = snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, &chip->pdev->dev, hw.periods_min * hw.period_bytes_min, hw.buffer_bytes_max); if (retval) return retval; pcm->private_data = chip; pcm->info_flags = 0; strcpy(pcm->name, chip->card->shortname); chip->pcm = pcm; return 0; } static int atmel_ac97c_mixer_new(struct atmel_ac97c *chip) { struct snd_ac97_template template; memset(&template, 0, sizeof(template)); template.private_data = chip; return snd_ac97_mixer(chip->ac97_bus, &template, &chip->ac97); } static void atmel_ac97c_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val) { struct atmel_ac97c *chip = get_chip(ac97); unsigned long word; 
int timeout = 40; word = (reg & 0x7f) << 16 | val; do { if (ac97c_readl(chip, COSR) & AC97C_CSR_TXRDY) { ac97c_writel(chip, COTHR, word); return; } udelay(1); } while (--timeout); dev_dbg(&chip->pdev->dev, "codec write timeout\n"); } static unsigned short atmel_ac97c_read(struct snd_ac97 *ac97, unsigned short reg) { struct atmel_ac97c *chip = get_chip(ac97); unsigned long word; int timeout = 40; int write = 10; word = (0x80 | (reg & 0x7f)) << 16; if ((ac97c_readl(chip, COSR) & AC97C_CSR_RXRDY) != 0) ac97c_readl(chip, CORHR); retry_write: timeout = 40; do { if ((ac97c_readl(chip, COSR) & AC97C_CSR_TXRDY) != 0) { ac97c_writel(chip, COTHR, word); goto read_reg; } udelay(10); } while (--timeout); if (!--write) goto timed_out; goto retry_write; read_reg: do { if ((ac97c_readl(chip, COSR) & AC97C_CSR_RXRDY) != 0) { unsigned short val = ac97c_readl(chip, CORHR); return val; } udelay(10); } while (--timeout); if (!--write) goto timed_out; goto retry_write; timed_out: dev_dbg(&chip->pdev->dev, "codec read timeout\n"); return 0xffff; } static bool filter(struct dma_chan *chan, void *slave) { struct dw_dma_slave *dws = slave; if (dws->dma_dev == chan->device->dev) { chan->private = dws; return true; } else return false; } static void atmel_ac97c_reset(struct atmel_ac97c *chip) { ac97c_writel(chip, MR, 0); ac97c_writel(chip, MR, AC97C_MR_ENA); ac97c_writel(chip, CAMR, 0); ac97c_writel(chip, COMR, 0); if (gpio_is_valid(chip->reset_pin)) { gpio_set_value(chip->reset_pin, 0); /* AC97 v2.2 specifications says minimum 1 us. 
*/ udelay(2); gpio_set_value(chip->reset_pin, 1); } } static int __devinit atmel_ac97c_probe(struct platform_device *pdev) { struct snd_card *card; struct atmel_ac97c *chip; struct resource *regs; struct ac97c_platform_data *pdata; struct clk *pclk; static struct snd_ac97_bus_ops ops = { .write = atmel_ac97c_write, .read = atmel_ac97c_read, }; int retval; int irq; regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!regs) { dev_dbg(&pdev->dev, "no memory resource\n"); return -ENXIO; } pdata = pdev->dev.platform_data; if (!pdata) { dev_dbg(&pdev->dev, "no platform data\n"); return -ENXIO; } irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_dbg(&pdev->dev, "could not get irq\n"); return -ENXIO; } if (cpu_is_at32ap7000()) { pclk = clk_get(&pdev->dev, "pclk"); } else { pclk = clk_get(&pdev->dev, "ac97_clk"); } if (IS_ERR(pclk)) { dev_dbg(&pdev->dev, "no peripheral clock\n"); return PTR_ERR(pclk); } clk_enable(pclk); retval = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1, THIS_MODULE, sizeof(struct atmel_ac97c), &card); if (retval) { dev_dbg(&pdev->dev, "could not create sound card device\n"); goto err_snd_card_new; } chip = get_chip(card); retval = request_irq(irq, atmel_ac97c_interrupt, 0, "AC97C", chip); if (retval) { dev_dbg(&pdev->dev, "unable to request irq %d\n", irq); goto err_request_irq; } chip->irq = irq; spin_lock_init(&chip->lock); strcpy(card->driver, "Atmel AC97C"); strcpy(card->shortname, "Atmel AC97C"); sprintf(card->longname, "Atmel AC97 controller"); chip->card = card; chip->pclk = pclk; chip->pdev = pdev; chip->regs = ioremap(regs->start, regs->end - regs->start + 1); if (!chip->regs) { dev_dbg(&pdev->dev, "could not remap register memory\n"); goto err_ioremap; } if (gpio_is_valid(pdata->reset_pin)) { if (gpio_request(pdata->reset_pin, "reset_pin")) { dev_dbg(&pdev->dev, "reset pin not available\n"); chip->reset_pin = -ENODEV; } else { gpio_direction_output(pdata->reset_pin, 1); chip->reset_pin = pdata->reset_pin; } } 
snd_card_set_dev(card, &pdev->dev); atmel_ac97c_reset(chip); /* Enable overrun interrupt from codec channel */ ac97c_writel(chip, COMR, AC97C_CSR_OVRUN); ac97c_writel(chip, IER, ac97c_readl(chip, IMR) | AC97C_SR_COEVT); retval = snd_ac97_bus(card, 0, &ops, chip, &chip->ac97_bus); if (retval) { dev_dbg(&pdev->dev, "could not register on ac97 bus\n"); goto err_ac97_bus; } retval = atmel_ac97c_mixer_new(chip); if (retval) { dev_dbg(&pdev->dev, "could not register ac97 mixer\n"); goto err_ac97_bus; } if (cpu_is_at32ap7000()) { if (pdata->rx_dws.dma_dev) { struct dw_dma_slave *dws = &pdata->rx_dws; dma_cap_mask_t mask; dws->rx_reg = regs->start + AC97C_CARHR + 2; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); chip->dma.rx_chan = dma_request_channel(mask, filter, dws); dev_info(&chip->pdev->dev, "using %s for DMA RX\n", dev_name(&chip->dma.rx_chan->dev->device)); set_bit(DMA_RX_CHAN_PRESENT, &chip->flags); } if (pdata->tx_dws.dma_dev) { struct dw_dma_slave *dws = &pdata->tx_dws; dma_cap_mask_t mask; dws->tx_reg = regs->start + AC97C_CATHR + 2; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); chip->dma.tx_chan = dma_request_channel(mask, filter, dws); dev_info(&chip->pdev->dev, "using %s for DMA TX\n", dev_name(&chip->dma.tx_chan->dev->device)); set_bit(DMA_TX_CHAN_PRESENT, &chip->flags); } if (!test_bit(DMA_RX_CHAN_PRESENT, &chip->flags) && !test_bit(DMA_TX_CHAN_PRESENT, &chip->flags)) { dev_dbg(&pdev->dev, "DMA not available\n"); retval = -ENODEV; goto err_dma; } } else { /* Just pretend that we have DMA channel(for at91 i is actually * the PDC) */ set_bit(DMA_RX_CHAN_PRESENT, &chip->flags); set_bit(DMA_TX_CHAN_PRESENT, &chip->flags); } retval = atmel_ac97c_pcm_new(chip); if (retval) { dev_dbg(&pdev->dev, "could not register ac97 pcm device\n"); goto err_dma; } retval = snd_card_register(card); if (retval) { dev_dbg(&pdev->dev, "could not register sound card\n"); goto err_dma; } platform_set_drvdata(pdev, card); dev_info(&pdev->dev, "Atmel AC97 controller at 0x%p, 
irq = %d\n", chip->regs, irq); return 0; err_dma: if (cpu_is_at32ap7000()) { if (test_bit(DMA_RX_CHAN_PRESENT, &chip->flags)) dma_release_channel(chip->dma.rx_chan); if (test_bit(DMA_TX_CHAN_PRESENT, &chip->flags)) dma_release_channel(chip->dma.tx_chan); clear_bit(DMA_RX_CHAN_PRESENT, &chip->flags); clear_bit(DMA_TX_CHAN_PRESENT, &chip->flags); chip->dma.rx_chan = NULL; chip->dma.tx_chan = NULL; } err_ac97_bus: snd_card_set_dev(card, NULL); if (gpio_is_valid(chip->reset_pin)) gpio_free(chip->reset_pin); iounmap(chip->regs); err_ioremap: free_irq(irq, chip); err_request_irq: snd_card_free(card); err_snd_card_new: clk_disable(pclk); clk_put(pclk); return retval; } #ifdef CONFIG_PM static int atmel_ac97c_suspend(struct platform_device *pdev, pm_message_t msg) { struct snd_card *card = platform_get_drvdata(pdev); struct atmel_ac97c *chip = card->private_data; if (cpu_is_at32ap7000()) { if (test_bit(DMA_RX_READY, &chip->flags)) dw_dma_cyclic_stop(chip->dma.rx_chan); if (test_bit(DMA_TX_READY, &chip->flags)) dw_dma_cyclic_stop(chip->dma.tx_chan); } clk_disable(chip->pclk); return 0; } static int atmel_ac97c_resume(struct platform_device *pdev) { struct snd_card *card = platform_get_drvdata(pdev); struct atmel_ac97c *chip = card->private_data; clk_enable(chip->pclk); if (cpu_is_at32ap7000()) { if (test_bit(DMA_RX_READY, &chip->flags)) dw_dma_cyclic_start(chip->dma.rx_chan); if (test_bit(DMA_TX_READY, &chip->flags)) dw_dma_cyclic_start(chip->dma.tx_chan); } return 0; } #else #define atmel_ac97c_suspend NULL #define atmel_ac97c_resume NULL #endif static int __devexit atmel_ac97c_remove(struct platform_device *pdev) { struct snd_card *card = platform_get_drvdata(pdev); struct atmel_ac97c *chip = get_chip(card); if (gpio_is_valid(chip->reset_pin)) gpio_free(chip->reset_pin); ac97c_writel(chip, CAMR, 0); ac97c_writel(chip, COMR, 0); ac97c_writel(chip, MR, 0); clk_disable(chip->pclk); clk_put(chip->pclk); iounmap(chip->regs); free_irq(chip->irq, chip); if (cpu_is_at32ap7000()) 
{ if (test_bit(DMA_RX_CHAN_PRESENT, &chip->flags)) dma_release_channel(chip->dma.rx_chan); if (test_bit(DMA_TX_CHAN_PRESENT, &chip->flags)) dma_release_channel(chip->dma.tx_chan); clear_bit(DMA_RX_CHAN_PRESENT, &chip->flags); clear_bit(DMA_TX_CHAN_PRESENT, &chip->flags); chip->dma.rx_chan = NULL; chip->dma.tx_chan = NULL; } snd_card_set_dev(card, NULL); snd_card_free(card); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver atmel_ac97c_driver = { .remove = __devexit_p(atmel_ac97c_remove), .driver = { .name = "atmel_ac97c", }, .suspend = atmel_ac97c_suspend, .resume = atmel_ac97c_resume, }; static int __init atmel_ac97c_init(void) { return platform_driver_probe(&atmel_ac97c_driver, atmel_ac97c_probe); } module_init(atmel_ac97c_init); static void __exit atmel_ac97c_exit(void) { platform_driver_unregister(&atmel_ac97c_driver); } module_exit(atmel_ac97c_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Driver for Atmel AC97 controller"); MODULE_AUTHOR("Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>");
gpl-2.0
aqua-project/Linux-Minimal-x86-Reimplementation
arch/sparc/kernel/unaligned_32.c
1650
9440
/* * unaligned.c: Unaligned load/store trap handling with special * cases for the kernel to do them more quickly. * * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> #include <asm/ptrace.h> #include <asm/processor.h> #include <asm/uaccess.h> #include <linux/smp.h> #include <linux/perf_event.h> #include <asm/setup.h> #include "kernel.h" enum direction { load, /* ld, ldd, ldh, ldsh */ store, /* st, std, sth, stsh */ both, /* Swap, ldstub, etc. */ fpload, fpstore, invalid, }; static inline enum direction decode_direction(unsigned int insn) { unsigned long tmp = (insn >> 21) & 1; if(!tmp) return load; else { if(((insn>>19)&0x3f) == 15) return both; else return store; } } /* 8 = double-word, 4 = word, 2 = half-word */ static inline int decode_access_size(unsigned int insn) { insn = (insn >> 19) & 3; if(!insn) return 4; else if(insn == 3) return 8; else if(insn == 2) return 2; else { printk("Impossible unaligned trap. insn=%08x\n", insn); die_if_kernel("Byte sized unaligned access?!?!", current->thread.kregs); return 4; /* just to keep gcc happy. */ } } /* 0x400000 = signed, 0 = unsigned */ static inline int decode_signedness(unsigned int insn) { return (insn & 0x400000); } static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2, unsigned int rd) { if(rs2 >= 16 || rs1 >= 16 || rd >= 16) { /* Wheee... */ __asm__ __volatile__("save %sp, -0x40, %sp\n\t" "save %sp, -0x40, %sp\n\t" "save %sp, -0x40, %sp\n\t" "save %sp, -0x40, %sp\n\t" "save %sp, -0x40, %sp\n\t" "save %sp, -0x40, %sp\n\t" "save %sp, -0x40, %sp\n\t" "restore; restore; restore; restore;\n\t" "restore; restore; restore;\n\t"); } } static inline int sign_extend_imm13(int imm) { return imm << 19 >> 19; } static inline unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) { struct reg_window32 *win; if(reg < 16) return (!reg ? 
0 : regs->u_regs[reg]); /* Ho hum, the slightly complicated case. */ win = (struct reg_window32 *) regs->u_regs[UREG_FP]; return win->locals[reg - 16]; /* yes, I know what this does... */ } static inline unsigned long safe_fetch_reg(unsigned int reg, struct pt_regs *regs) { struct reg_window32 __user *win; unsigned long ret; if (reg < 16) return (!reg ? 0 : regs->u_regs[reg]); /* Ho hum, the slightly complicated case. */ win = (struct reg_window32 __user *) regs->u_regs[UREG_FP]; if ((unsigned long)win & 3) return -1; if (get_user(ret, &win->locals[reg - 16])) return -1; return ret; } static inline unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs) { struct reg_window32 *win; if(reg < 16) return &regs->u_regs[reg]; win = (struct reg_window32 *) regs->u_regs[UREG_FP]; return &win->locals[reg - 16]; } static unsigned long compute_effective_address(struct pt_regs *regs, unsigned int insn) { unsigned int rs1 = (insn >> 14) & 0x1f; unsigned int rs2 = insn & 0x1f; unsigned int rd = (insn >> 25) & 0x1f; if(insn & 0x2000) { maybe_flush_windows(rs1, 0, rd); return (fetch_reg(rs1, regs) + sign_extend_imm13(insn)); } else { maybe_flush_windows(rs1, rs2, rd); return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs)); } } unsigned long safe_compute_effective_address(struct pt_regs *regs, unsigned int insn) { unsigned int rs1 = (insn >> 14) & 0x1f; unsigned int rs2 = insn & 0x1f; unsigned int rd = (insn >> 25) & 0x1f; if(insn & 0x2000) { maybe_flush_windows(rs1, 0, rd); return (safe_fetch_reg(rs1, regs) + sign_extend_imm13(insn)); } else { maybe_flush_windows(rs1, rs2, rd); return (safe_fetch_reg(rs1, regs) + safe_fetch_reg(rs2, regs)); } } /* This is just to make gcc think panic does return... 
*/ static void unaligned_panic(char *str) { panic("%s", str); } /* una_asm.S */ extern int do_int_load(unsigned long *dest_reg, int size, unsigned long *saddr, int is_signed); extern int __do_int_store(unsigned long *dst_addr, int size, unsigned long *src_val); static int do_int_store(int reg_num, int size, unsigned long *dst_addr, struct pt_regs *regs) { unsigned long zero[2] = { 0, 0 }; unsigned long *src_val; if (reg_num) src_val = fetch_reg_addr(reg_num, regs); else { src_val = &zero[0]; if (size == 8) zero[1] = fetch_reg(1, regs); } return __do_int_store(dst_addr, size, src_val); } extern void smp_capture(void); extern void smp_release(void); static inline void advance(struct pt_regs *regs) { regs->pc = regs->npc; regs->npc += 4; } static inline int floating_point_load_or_store_p(unsigned int insn) { return (insn >> 24) & 1; } static inline int ok_for_kernel(unsigned int insn) { return !floating_point_load_or_store_p(insn); } static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) { unsigned long g2 = regs->u_regs [UREG_G2]; unsigned long fixup = search_extables_range(regs->pc, &g2); if (!fixup) { unsigned long address = compute_effective_address(regs, insn); if(address < PAGE_SIZE) { printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler"); } else printk(KERN_ALERT "Unable to handle kernel paging request in mna handler"); printk(KERN_ALERT " at virtual address %08lx\n",address); printk(KERN_ALERT "current->{mm,active_mm}->context = %08lx\n", (current->mm ? current->mm->context : current->active_mm->context)); printk(KERN_ALERT "current->{mm,active_mm}->pgd = %08lx\n", (current->mm ? 
(unsigned long) current->mm->pgd : (unsigned long) current->active_mm->pgd)); die_if_kernel("Oops", regs); /* Not reached */ } regs->pc = fixup; regs->npc = regs->pc + 4; regs->u_regs [UREG_G2] = g2; } asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) { enum direction dir = decode_direction(insn); int size = decode_access_size(insn); if(!ok_for_kernel(insn) || dir == both) { printk("Unsupported unaligned load/store trap for kernel at <%08lx>.\n", regs->pc); unaligned_panic("Wheee. Kernel does fpu/atomic unaligned load/store."); } else { unsigned long addr = compute_effective_address(regs, insn); int err; perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); switch (dir) { case load: err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), regs), size, (unsigned long *) addr, decode_signedness(insn)); break; case store: err = do_int_store(((insn>>25)&0x1f), size, (unsigned long *) addr, regs); break; default: panic("Impossible kernel unaligned trap."); /* Not reached... */ } if (err) kernel_mna_trap_fault(regs, insn); else advance(regs); } } static inline int ok_for_user(struct pt_regs *regs, unsigned int insn, enum direction dir) { unsigned int reg; int check = (dir == load) ? VERIFY_READ : VERIFY_WRITE; int size = ((insn >> 19) & 3) == 3 ? 8 : 4; if ((regs->pc | regs->npc) & 3) return 0; /* Must access_ok() in all the necessary places. 
*/ #define WINREG_ADDR(regnum) \ ((void __user *)(((unsigned long *)regs->u_regs[UREG_FP])+(regnum))) reg = (insn >> 25) & 0x1f; if (reg >= 16) { if (!access_ok(check, WINREG_ADDR(reg - 16), size)) return -EFAULT; } reg = (insn >> 14) & 0x1f; if (reg >= 16) { if (!access_ok(check, WINREG_ADDR(reg - 16), size)) return -EFAULT; } if (!(insn & 0x2000)) { reg = (insn & 0x1f); if (reg >= 16) { if (!access_ok(check, WINREG_ADDR(reg - 16), size)) return -EFAULT; } } #undef WINREG_ADDR return 0; } static void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn) { siginfo_t info; info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRALN; info.si_addr = (void __user *)safe_compute_effective_address(regs, insn); info.si_trapno = 0; send_sig_info(SIGBUS, &info, current); } asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn) { enum direction dir; if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) || (((insn >> 30) & 3) != 3)) goto kill_user; dir = decode_direction(insn); if(!ok_for_user(regs, insn, dir)) { goto kill_user; } else { int err, size = decode_access_size(insn); unsigned long addr; if(floating_point_load_or_store_p(insn)) { printk("User FPU load/store unaligned unsupported.\n"); goto kill_user; } addr = compute_effective_address(regs, insn); perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); switch(dir) { case load: err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), regs), size, (unsigned long *) addr, decode_signedness(insn)); break; case store: err = do_int_store(((insn>>25)&0x1f), size, (unsigned long *) addr, regs); break; case both: /* * This was supported in 2.4. However, we question * the value of SWAP instruction across word boundaries. */ printk("Unaligned SWAP unsupported.\n"); err = -EFAULT; break; default: unaligned_panic("Impossible user unaligned trap."); goto out; } if (err) goto kill_user; else advance(regs); goto out; } kill_user: user_mna_trap_fault(regs, insn); out: ; }
gpl-2.0
goodwinos/linux-2.6
drivers/media/rc/keymaps/rc-fusionhdtv-mce.c
1906
2490
/* fusionhdtv-mce.h - Keytable for fusionhdtv_mce Remote Controller * * keymap imported from ir-keymaps.c * * Copyright (c) 2010 by Mauro Carvalho Chehab * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <media/rc-map.h> #include <linux/module.h> /* DViCO FUSION HDTV MCE remote */ static struct rc_map_table fusionhdtv_mce[] = { { 0x0b, KEY_1 }, { 0x17, KEY_2 }, { 0x1b, KEY_3 }, { 0x07, KEY_4 }, { 0x50, KEY_5 }, { 0x54, KEY_6 }, { 0x48, KEY_7 }, { 0x4c, KEY_8 }, { 0x58, KEY_9 }, { 0x03, KEY_0 }, { 0x5e, KEY_OK }, { 0x51, KEY_UP }, { 0x53, KEY_DOWN }, { 0x5b, KEY_LEFT }, { 0x5f, KEY_RIGHT }, { 0x02, KEY_TV }, /* Labeled DTV on remote */ { 0x0e, KEY_MP3 }, { 0x1a, KEY_DVD }, { 0x1e, KEY_FAVORITES }, /* Labeled CPF on remote */ { 0x16, KEY_SETUP }, { 0x46, KEY_POWER2 }, /* TV On/Off button on remote */ { 0x0a, KEY_EPG }, /* Labeled Guide on remote */ { 0x49, KEY_BACK }, { 0x59, KEY_INFO }, /* Labeled MORE on remote */ { 0x4d, KEY_MENU }, /* Labeled DVDMENU on remote */ { 0x55, KEY_CYCLEWINDOWS }, /* Labeled ALT-TAB on remote */ { 0x0f, KEY_PREVIOUSSONG }, /* Labeled |<< REPLAY on remote */ { 0x12, KEY_NEXTSONG }, /* Labeled >>| SKIP on remote */ { 0x42, KEY_ENTER }, /* Labeled START with a green MS windows logo on remote */ { 0x15, KEY_VOLUMEUP }, { 0x05, KEY_VOLUMEDOWN }, { 0x11, KEY_CHANNELUP }, { 0x09, KEY_CHANNELDOWN }, { 0x52, KEY_CAMERA }, { 0x5a, KEY_TUNER }, { 0x19, KEY_OPEN }, { 0x13, KEY_MODE }, /* 4:3 16:9 select */ { 0x1f, KEY_ZOOM }, { 0x43, KEY_REWIND }, { 0x47, KEY_PLAYPAUSE }, { 0x4f, KEY_FASTFORWARD }, { 0x57, KEY_MUTE }, { 0x0d, KEY_STOP }, { 0x01, KEY_RECORD }, { 0x4e, KEY_POWER }, }; static struct rc_map_list fusionhdtv_mce_map = { .map = { .scan = fusionhdtv_mce, .size = ARRAY_SIZE(fusionhdtv_mce), .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR 
type */ .name = RC_MAP_FUSIONHDTV_MCE, } }; static int __init init_rc_map_fusionhdtv_mce(void) { return rc_map_register(&fusionhdtv_mce_map); } static void __exit exit_rc_map_fusionhdtv_mce(void) { rc_map_unregister(&fusionhdtv_mce_map); } module_init(init_rc_map_fusionhdtv_mce) module_exit(exit_rc_map_fusionhdtv_mce) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mauro Carvalho Chehab");
gpl-2.0
ivecera/net-next
drivers/media/rc/keymaps/rc-pctv-sedna.c
1906
2094
/* pctv-sedna.h - Keytable for pctv_sedna Remote Controller * * keymap imported from ir-keymaps.c * * Copyright (c) 2010 by Mauro Carvalho Chehab * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <media/rc-map.h> #include <linux/module.h> /* Mapping for the 28 key remote control as seen at http://www.sednacomputer.com/photo/cardbus-tv.jpg Pavel Mihaylov <bin@bash.info> Also for the remote bundled with Kozumi KTV-01C card */ static struct rc_map_table pctv_sedna[] = { { 0x00, KEY_0 }, { 0x01, KEY_1 }, { 0x02, KEY_2 }, { 0x03, KEY_3 }, { 0x04, KEY_4 }, { 0x05, KEY_5 }, { 0x06, KEY_6 }, { 0x07, KEY_7 }, { 0x08, KEY_8 }, { 0x09, KEY_9 }, { 0x0a, KEY_AGAIN }, /* Recall */ { 0x0b, KEY_CHANNELUP }, { 0x0c, KEY_VOLUMEUP }, { 0x0d, KEY_MODE }, /* Stereo */ { 0x0e, KEY_STOP }, { 0x0f, KEY_PREVIOUSSONG }, { 0x10, KEY_ZOOM }, { 0x11, KEY_VIDEO }, /* Source */ { 0x12, KEY_POWER }, { 0x13, KEY_MUTE }, { 0x15, KEY_CHANNELDOWN }, { 0x18, KEY_VOLUMEDOWN }, { 0x19, KEY_CAMERA }, /* Snapshot */ { 0x1a, KEY_NEXTSONG }, { 0x1b, KEY_TIME }, /* Time Shift */ { 0x1c, KEY_RADIO }, /* FM Radio */ { 0x1d, KEY_RECORD }, { 0x1e, KEY_PAUSE }, /* additional codes for Kozumi's remote */ { 0x14, KEY_INFO }, /* OSD */ { 0x16, KEY_OK }, /* OK */ { 0x17, KEY_DIGITS }, /* Plus */ { 0x1f, KEY_PLAY }, /* Play */ }; static struct rc_map_list pctv_sedna_map = { .map = { .scan = pctv_sedna, .size = ARRAY_SIZE(pctv_sedna), .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ .name = RC_MAP_PCTV_SEDNA, } }; static int __init init_rc_map_pctv_sedna(void) { return rc_map_register(&pctv_sedna_map); } static void __exit exit_rc_map_pctv_sedna(void) { rc_map_unregister(&pctv_sedna_map); } module_init(init_rc_map_pctv_sedna) module_exit(exit_rc_map_pctv_sedna) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mauro 
Carvalho Chehab");
gpl-2.0
Strongmind91/N8013_JB_Kernel_TW
fs/squashfs/cache.c
2930
11319
/* * Squashfs - a compressed read only filesystem for Linux * * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 * Phillip Lougher <phillip@squashfs.org.uk> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2, * or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * cache.c */ /* * Blocks in Squashfs are compressed. To avoid repeatedly decompressing * recently accessed data Squashfs uses two small metadata and fragment caches. * * This file implements a generic cache implementation used for both caches, * plus functions layered ontop of the generic cache implementation to * access the metadata and fragment caches. * * To avoid out of memory and fragmentation issues with vmalloc the cache * uses sequences of kmalloced PAGE_CACHE_SIZE buffers. * * It should be noted that the cache is not used for file datablocks, these * are decompressed and cached in the page-cache in the normal way. The * cache is only used to temporarily cache fragment and metadata blocks * which have been read as as a result of a metadata (i.e. inode or * directory) or fragment access. Because metadata and fragments are packed * together into blocks (to gain greater compression) the read of a particular * piece of metadata or fragment will retrieve other metadata/fragments which * have been packed with it, these because of locality-of-reference may be read * in the near future. 
Temporarily caching them ensures they are available for * near future access without requiring an additional read and decompress. */ #include <linux/fs.h> #include <linux/vfs.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/wait.h> #include <linux/pagemap.h> #include "squashfs_fs.h" #include "squashfs_fs_sb.h" #include "squashfs.h" /* * Look-up block in cache, and increment usage count. If not in cache, read * and decompress it from disk. */ struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb, struct squashfs_cache *cache, u64 block, int length) { int i, n; struct squashfs_cache_entry *entry; spin_lock(&cache->lock); while (1) { for (i = 0; i < cache->entries; i++) if (cache->entry[i].block == block) break; if (i == cache->entries) { /* * Block not in cache, if all cache entries are used * go to sleep waiting for one to become available. */ if (cache->unused == 0) { cache->num_waiters++; spin_unlock(&cache->lock); wait_event(cache->wait_queue, cache->unused); spin_lock(&cache->lock); cache->num_waiters--; continue; } /* * At least one unused cache entry. A simple * round-robin strategy is used to choose the entry to * be evicted from the cache. */ i = cache->next_blk; for (n = 0; n < cache->entries; n++) { if (cache->entry[i].refcount == 0) break; i = (i + 1) % cache->entries; } cache->next_blk = (i + 1) % cache->entries; entry = &cache->entry[i]; /* * Initialise chosen cache entry, and fill it in from * disk. 
*/ cache->unused--; entry->block = block; entry->refcount = 1; entry->pending = 1; entry->num_waiters = 0; entry->error = 0; spin_unlock(&cache->lock); entry->length = squashfs_read_data(sb, entry->data, block, length, &entry->next_index, cache->block_size, cache->pages); spin_lock(&cache->lock); if (entry->length < 0) entry->error = entry->length; entry->pending = 0; /* * While filling this entry one or more other processes * have looked it up in the cache, and have slept * waiting for it to become available. */ if (entry->num_waiters) { spin_unlock(&cache->lock); wake_up_all(&entry->wait_queue); } else spin_unlock(&cache->lock); goto out; } /* * Block already in cache. Increment refcount so it doesn't * get reused until we're finished with it, if it was * previously unused there's one less cache entry available * for reuse. */ entry = &cache->entry[i]; if (entry->refcount == 0) cache->unused--; entry->refcount++; /* * If the entry is currently being filled in by another process * go to sleep waiting for it to become available. */ if (entry->pending) { entry->num_waiters++; spin_unlock(&cache->lock); wait_event(entry->wait_queue, !entry->pending); } else spin_unlock(&cache->lock); goto out; } out: TRACE("Got %s %d, start block %lld, refcount %d, error %d\n", cache->name, i, entry->block, entry->refcount, entry->error); if (entry->error) ERROR("Unable to read %s cache entry [%llx]\n", cache->name, block); return entry; } /* * Release cache entry, once usage count is zero it can be reused. */ void squashfs_cache_put(struct squashfs_cache_entry *entry) { struct squashfs_cache *cache = entry->cache; spin_lock(&cache->lock); entry->refcount--; if (entry->refcount == 0) { cache->unused++; /* * If there's any processes waiting for a block to become * available, wake one up. */ if (cache->num_waiters) { spin_unlock(&cache->lock); wake_up(&cache->wait_queue); return; } } spin_unlock(&cache->lock); } /* * Delete cache reclaiming all kmalloced buffers. 
*/ void squashfs_cache_delete(struct squashfs_cache *cache) { int i, j; if (cache == NULL) return; for (i = 0; i < cache->entries; i++) { if (cache->entry[i].data) { for (j = 0; j < cache->pages; j++) kfree(cache->entry[i].data[j]); kfree(cache->entry[i].data); } } kfree(cache->entry); kfree(cache); } /* * Initialise cache allocating the specified number of entries, each of * size block_size. To avoid vmalloc fragmentation issues each entry * is allocated as a sequence of kmalloced PAGE_CACHE_SIZE buffers. */ struct squashfs_cache *squashfs_cache_init(char *name, int entries, int block_size) { int i, j; struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL); if (cache == NULL) { ERROR("Failed to allocate %s cache\n", name); return NULL; } cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL); if (cache->entry == NULL) { ERROR("Failed to allocate %s cache\n", name); goto cleanup; } cache->next_blk = 0; cache->unused = entries; cache->entries = entries; cache->block_size = block_size; cache->pages = block_size >> PAGE_CACHE_SHIFT; cache->pages = cache->pages ? cache->pages : 1; cache->name = name; cache->num_waiters = 0; spin_lock_init(&cache->lock); init_waitqueue_head(&cache->wait_queue); for (i = 0; i < entries; i++) { struct squashfs_cache_entry *entry = &cache->entry[i]; init_waitqueue_head(&cache->entry[i].wait_queue); entry->cache = cache; entry->block = SQUASHFS_INVALID_BLK; entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL); if (entry->data == NULL) { ERROR("Failed to allocate %s cache entry\n", name); goto cleanup; } for (j = 0; j < cache->pages; j++) { entry->data[j] = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL); if (entry->data[j] == NULL) { ERROR("Failed to allocate %s buffer\n", name); goto cleanup; } } } return cache; cleanup: squashfs_cache_delete(cache); return NULL; } /* * Copy up to length bytes from cache entry to buffer starting at offset bytes * into the cache entry. 
If there's not length bytes then copy the number of * bytes available. In all cases return the number of bytes copied. */ int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry, int offset, int length) { int remaining = length; if (length == 0) return 0; else if (buffer == NULL) return min(length, entry->length - offset); while (offset < entry->length) { void *buff = entry->data[offset / PAGE_CACHE_SIZE] + (offset % PAGE_CACHE_SIZE); int bytes = min_t(int, entry->length - offset, PAGE_CACHE_SIZE - (offset % PAGE_CACHE_SIZE)); if (bytes >= remaining) { memcpy(buffer, buff, remaining); remaining = 0; break; } memcpy(buffer, buff, bytes); buffer += bytes; remaining -= bytes; offset += bytes; } return length - remaining; } /* * Read length bytes from metadata position <block, offset> (block is the * start of the compressed block on disk, and offset is the offset into * the block once decompressed). Data is packed into consecutive blocks, * and length bytes may require reading more than one block. */ int squashfs_read_metadata(struct super_block *sb, void *buffer, u64 *block, int *offset, int length) { struct squashfs_sb_info *msblk = sb->s_fs_info; int bytes, copied = length; struct squashfs_cache_entry *entry; TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset); while (length) { entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0); if (entry->error) return entry->error; else if (*offset >= entry->length) return -EIO; bytes = squashfs_copy_data(buffer, entry, *offset, length); if (buffer) buffer += bytes; length -= bytes; *offset += bytes; if (*offset == entry->length) { *block = entry->next_index; *offset = 0; } squashfs_cache_put(entry); } return copied; } /* * Look-up in the fragmment cache the fragment located at <start_block> in the * filesystem. If necessary read and decompress it from disk. 
*/ struct squashfs_cache_entry *squashfs_get_fragment(struct super_block *sb, u64 start_block, int length) { struct squashfs_sb_info *msblk = sb->s_fs_info; return squashfs_cache_get(sb, msblk->fragment_cache, start_block, length); } /* * Read and decompress the datablock located at <start_block> in the * filesystem. The cache is used here to avoid duplicating locking and * read/decompress code. */ struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb, u64 start_block, int length) { struct squashfs_sb_info *msblk = sb->s_fs_info; return squashfs_cache_get(sb, msblk->read_page, start_block, length); } /* * Read a filesystem table (uncompressed sequence of bytes) from disk */ void *squashfs_read_table(struct super_block *sb, u64 block, int length) { int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; int i, res; void *table, *buffer, **data; table = buffer = kmalloc(length, GFP_KERNEL); if (table == NULL) return ERR_PTR(-ENOMEM); data = kcalloc(pages, sizeof(void *), GFP_KERNEL); if (data == NULL) { res = -ENOMEM; goto failed; } for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE) data[i] = buffer; res = squashfs_read_data(sb, data, block, length | SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length, pages); kfree(data); if (res < 0) goto failed; return table; failed: kfree(table); return ERR_PTR(res); }
gpl-2.0
thesawolf/android_kernel_allwinner_a10
drivers/mtd/chips/cfi_util.c
3186
6868
/* * Common Flash Interface support: * Generic utility functions not dependent on command set * * Copyright (C) 2002 Red Hat * Copyright (C) 2003 STMicroelectronics Limited * * This code is covered by the GPL. */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <asm/io.h> #include <asm/byteorder.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/mtd/xip.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/cfi.h> int __xipram cfi_qry_present(struct map_info *map, __u32 base, struct cfi_private *cfi) { int osf = cfi->interleave * cfi->device_type; /* scale factor */ map_word val[3]; map_word qry[3]; qry[0] = cfi_build_cmd('Q', map, cfi); qry[1] = cfi_build_cmd('R', map, cfi); qry[2] = cfi_build_cmd('Y', map, cfi); val[0] = map_read(map, base + osf*0x10); val[1] = map_read(map, base + osf*0x11); val[2] = map_read(map, base + osf*0x12); if (!map_word_equal(map, qry[0], val[0])) return 0; if (!map_word_equal(map, qry[1], val[1])) return 0; if (!map_word_equal(map, qry[2], val[2])) return 0; return 1; /* "QRY" found */ } EXPORT_SYMBOL_GPL(cfi_qry_present); int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map, struct cfi_private *cfi) { cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); if (cfi_qry_present(map, base, cfi)) return 1; /* QRY not found probably we deal with some odd CFI chips */ /* Some revisions of some old Intel chips? 
*/ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); if (cfi_qry_present(map, base, cfi)) return 1; /* ST M29DW chips */ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL); if (cfi_qry_present(map, base, cfi)) return 1; /* some old SST chips, e.g. 39VF160x/39VF320x */ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL); if (cfi_qry_present(map, base, cfi)) return 1; /* SST 39VF640xB */ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0xAA, 0x555, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, 0x2AA, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL); if (cfi_qry_present(map, base, cfi)) return 1; /* QRY not found */ return 0; } EXPORT_SYMBOL_GPL(cfi_qry_mode_on); void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map, struct cfi_private *cfi) { cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); /* M29W128G flashes require an additional reset command when exit qry mode */ if ((cfi->mfr == CFI_MFR_ST) && (cfi->id == 0x227E || cfi->id == 0x7E)) cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); } EXPORT_SYMBOL_GPL(cfi_qry_mode_off); struct cfi_extquery * __xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* name) { struct cfi_private *cfi = map->fldrv_priv; __u32 base = 0; // cfi->chips[0].start; int ofs_factor = cfi->interleave * cfi->device_type; int i; struct 
cfi_extquery *extp = NULL; if (!adr) goto out; printk(KERN_INFO "%s Extended Query Table at 0x%4.4X\n", name, adr); extp = kmalloc(size, GFP_KERNEL); if (!extp) { printk(KERN_ERR "Failed to allocate memory\n"); goto out; } #ifdef CONFIG_MTD_XIP local_irq_disable(); #endif /* Switch it into Query Mode */ cfi_qry_mode_on(base, map, cfi); /* Read in the Extended Query Table */ for (i=0; i<size; i++) { ((unsigned char *)extp)[i] = cfi_read_query(map, base+((adr+i)*ofs_factor)); } /* Make sure it returns to read mode */ cfi_qry_mode_off(base, map, cfi); #ifdef CONFIG_MTD_XIP (void) map_read(map, base); xip_iprefetch(); local_irq_enable(); #endif out: return extp; } EXPORT_SYMBOL(cfi_read_pri); void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; struct cfi_fixup *f; for (f=fixups; f->fixup; f++) { if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) && ((f->id == CFI_ID_ANY) || (f->id == cfi->id))) { f->fixup(mtd); } } } EXPORT_SYMBOL(cfi_fixup); int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob, loff_t ofs, size_t len, void *thunk) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; unsigned long adr; int chipnum, ret = 0; int i, first; struct mtd_erase_region_info *regions = mtd->eraseregions; if (ofs > mtd->size) return -EINVAL; if ((len + ofs) > mtd->size) return -EINVAL; /* Check that both start and end of the requested erase are * aligned with the erasesize at the appropriate addresses. */ i = 0; /* Skip all erase regions which are ended before the start of the requested erase. Actually, to save on the calculations, we skip to the first erase region which starts after the start of the requested erase, and then go back one. */ while (i < mtd->numeraseregions && ofs >= regions[i].offset) i++; i--; /* OK, now i is pointing at the erase region in which this erase request starts. 
Check the start of the requested erase range is aligned with the erase size which is in effect here. */ if (ofs & (regions[i].erasesize-1)) return -EINVAL; /* Remember the erase region we start on */ first = i; /* Next, check that the end of the requested erase is aligned * with the erase region at that address. */ while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset) i++; /* As before, drop back one to point at the region in which the address actually falls */ i--; if ((ofs + len) & (regions[i].erasesize-1)) return -EINVAL; chipnum = ofs >> cfi->chipshift; adr = ofs - (chipnum << cfi->chipshift); i=first; while(len) { int size = regions[i].erasesize; ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk); if (ret) return ret; adr += size; ofs += size; len -= size; if (ofs == regions[i].offset + size * regions[i].numblocks) i++; if (adr >> cfi->chipshift) { adr = 0; chipnum++; if (chipnum >= cfi->numchips) break; } } return 0; } EXPORT_SYMBOL(cfi_varsize_frob); MODULE_LICENSE("GPL");
gpl-2.0
supersonicninja/L01FJBKERNEL
arch/sh/kernel/module.c
9842
3741
/* Kernel module help for SH. SHcompact version by Kaz Kojima and Paul Mundt. SHmedia bits: Copyright 2004 SuperH (UK) Ltd Author: Richard Curnow Based on the sh version, and on code from the sh64-specific parts of modutils, originally written by Richard Curnow and Ben Gaster. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/moduleloader.h> #include <linux/elf.h> #include <linux/vmalloc.h> #include <linux/bug.h> #include <linux/fs.h> #include <linux/string.h> #include <linux/kernel.h> #include <asm/unaligned.h> #include <asm/dwarf.h> int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, unsigned int relsec, struct module *me) { unsigned int i; Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr; Elf32_Sym *sym; Elf32_Addr relocation; uint32_t *location; uint32_t value; pr_debug("Applying relocate section %u to %u\n", relsec, sechdrs[relsec].sh_info); for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { /* This is where to make the change */ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset; /* This is the symbol it is referring to. Note that all undefined symbols have been resolved. 
*/ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr + ELF32_R_SYM(rel[i].r_info); relocation = sym->st_value + rel[i].r_addend; #ifdef CONFIG_SUPERH64 /* For text addresses, bit2 of the st_other field indicates * whether the symbol is SHmedia (1) or SHcompact (0). If * SHmedia, the LSB of the symbol needs to be asserted * for the CPU to be in SHmedia mode when it starts executing * the branch target. */ relocation |= !!(sym->st_other & 4); #endif switch (ELF32_R_TYPE(rel[i].r_info)) { case R_SH_NONE: break; case R_SH_DIR32: value = get_unaligned(location); value += relocation; put_unaligned(value, location); break; case R_SH_REL32: relocation = (relocation - (Elf32_Addr) location); value = get_unaligned(location); value += relocation; put_unaligned(value, location); break; case R_SH_IMM_LOW16: *location = (*location & ~0x3fffc00) | ((relocation & 0xffff) << 10); break; case R_SH_IMM_MEDLOW16: *location = (*location & ~0x3fffc00) | (((relocation >> 16) & 0xffff) << 10); break; case R_SH_IMM_LOW16_PCREL: relocation -= (Elf32_Addr) location; *location = (*location & ~0x3fffc00) | ((relocation & 0xffff) << 10); break; case R_SH_IMM_MEDLOW16_PCREL: relocation -= (Elf32_Addr) location; *location = (*location & ~0x3fffc00) | (((relocation >> 16) & 0xffff) << 10); break; default: printk(KERN_ERR "module %s: Unknown relocation: %u\n", me->name, ELF32_R_TYPE(rel[i].r_info)); return -ENOEXEC; } } return 0; } int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *me) { int ret = 0; ret |= module_dwarf_finalize(hdr, sechdrs, me); return ret; } void module_arch_cleanup(struct module *mod) { module_dwarf_cleanup(mod); }
gpl-2.0
iamroot9C-arm/linux
arch/x86/pci/early.c
13682
2383
#include <linux/kernel.h> #include <linux/pci.h> #include <asm/pci-direct.h> #include <asm/io.h> #include <asm/pci_x86.h> /* Direct PCI access. This is used for PCI accesses in early boot before the PCI subsystem works. */ u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset) { u32 v; outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); v = inl(0xcfc); return v; } u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset) { u8 v; outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); v = inb(0xcfc + (offset&3)); return v; } u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset) { u16 v; outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); v = inw(0xcfc + (offset&2)); return v; } void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, u32 val) { outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); outl(val, 0xcfc); } void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val) { outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); outb(val, 0xcfc + (offset&3)); } void write_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset, u16 val) { outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); outw(val, 0xcfc + (offset&2)); } int early_pci_allowed(void) { return (pci_probe & (PCI_PROBE_CONF1|PCI_PROBE_NOEARLY)) == PCI_PROBE_CONF1; } void early_dump_pci_device(u8 bus, u8 slot, u8 func) { int i; int j; u32 val; printk(KERN_INFO "pci 0000:%02x:%02x.%d config space:", bus, slot, func); for (i = 0; i < 256; i += 4) { if (!(i & 0x0f)) printk("\n %02x:",i); val = read_pci_config(bus, slot, func, i); for (j = 0; j < 4; j++) { printk(" %02x", val & 0xff); val >>= 8; } } printk("\n"); } void early_dump_pci_devices(void) { unsigned bus, slot, func; if (!early_pci_allowed()) return; for (bus = 0; bus < 256; bus++) { for (slot = 0; slot < 32; slot++) { for (func = 0; func < 8; func++) { u32 class; u8 type; class = read_pci_config(bus, slot, func, 
PCI_CLASS_REVISION); if (class == 0xffffffff) continue; early_dump_pci_device(bus, slot, func); if (func == 0) { type = read_pci_config_byte(bus, slot, func, PCI_HEADER_TYPE); if (!(type & 0x80)) break; } } } } }
gpl-2.0
rneugeba/linux-stable
net/dsa/tag_lan9303.c
115
4494
/*
 * Copyright (C) 2017 Pengutronix, Juergen Borleis <jbe@pengutronix.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/dsa/lan9303.h>
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/slab.h>

#include "dsa_priv.h"

/* To define the outgoing port and to discover the incoming port a regular
 * VLAN tag is used by the LAN9303. But its VID meaning is 'special':
 *
 *       Dest MAC       Src MAC        TAG    Type
 * ...| 1 2 3 4 5 6 | 1 2 3 4 5 6 | 1 2 3 4 | 1 2 |...
 *                                |<------->|
 * TAG:
 *    |<------------->|
 *    |  1  2 | 3  4  |
 *      TPID    VID
 *     0x8100
 *
 * VID bit 3 indicates a request for an ALR lookup.
 *
 * If VID bit 3 is zero, then bits 0 and 1 specify the destination port
 * (0, 1, 2) or broadcast (3) or the source port (1, 2).
 *
 * VID bit 4 is used to specify if the STP port state should be overridden.
 * Required when no forwarding between the external ports should happen.
 */

#define LAN9303_TAG_LEN 4
# define LAN9303_TAG_TX_USE_ALR BIT(3)
# define LAN9303_TAG_TX_STP_OVERRIDE BIT(4)
# define LAN9303_TAG_RX_IGMP BIT(3)
# define LAN9303_TAG_RX_STP BIT(4)
# define LAN9303_TAG_RX_TRAPPED_TO_CPU (LAN9303_TAG_RX_IGMP | \
					LAN9303_TAG_RX_STP)

/* Decide whether to transmit using ALR lookup, or transmit directly to
 * port using tag. ALR learning is performed only when using ALR lookup.
 * If the two external ports are bridged and the frame is unicast,
 * then use ALR lookup to allow ALR learning on CPU port.
 * Otherwise transmit directly to port with STP state override.
 * See also: lan9303_separate_ports() and lan9303.pdf 6.4.10.1
 */
static int lan9303_xmit_use_arl(struct dsa_port *dp, u8 *dest_addr)
{
	struct lan9303 *chip = dp->ds->priv;

	return chip->is_bridged && !is_multicast_ether_addr(dest_addr);
}

/* Prepend the LAN9303 ingress tag: a VLAN-shaped 4-byte tag between the
 * source MAC and the original EtherType.  Returns NULL if headroom
 * cannot be made available.
 */
static struct sk_buff *lan9303_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	u16 tx_vid;
	u16 *tag;

	/* Need LAN9303_TAG_LEN extra bytes of headroom for the tag. */
	if (skb_cow_head(skb, LAN9303_TAG_LEN) < 0) {
		dev_dbg(&dev->dev,
			"Cannot make room for the special tag. Dropping packet\n");
		return NULL;
	}

	skb_push(skb, LAN9303_TAG_LEN);

	/* Shift both MAC addresses down, opening a gap for the tag. */
	memmove(skb->data, skb->data + LAN9303_TAG_LEN, 2 * ETH_ALEN);

	if (lan9303_xmit_use_arl(dp, skb->data))
		tx_vid = LAN9303_TAG_TX_USE_ALR;
	else
		tx_vid = dp->index | LAN9303_TAG_TX_STP_OVERRIDE;

	tag = (u16 *)(skb->data + 2 * ETH_ALEN);
	tag[0] = htons(ETH_P_8021Q);
	tag[1] = htons(tx_vid);

	return skb;
}

/* Parse and strip the LAN9303 egress tag, steering the frame to the
 * slave netdev matching the source port encoded in the VID.
 */
static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev,
				   struct packet_type *pt)
{
	unsigned int source_port;
	u16 vid;
	u16 *tag;

	if (unlikely(!pskb_may_pull(skb, LAN9303_TAG_LEN))) {
		dev_warn_ratelimited(&dev->dev,
				     "Dropping packet, cannot pull\n");
		return NULL;
	}

	/* '->data' points into the middle of our special VLAN tag
	 * information:
	 *
	 * ~ MAC src   | 0x81 | 0x00 | 0xyy | 0xzz | ether type
	 *                           ^
	 *                        ->data
	 */
	tag = (u16 *)(skb->data - 2);

	if (tag[0] != htons(ETH_P_8021Q)) {
		dev_warn_ratelimited(&dev->dev,
				     "Dropping packet due to invalid VLAN marker\n");
		return NULL;
	}

	vid = ntohs(tag[1]);
	source_port = vid & 0x3;

	skb->dev = dsa_master_find_slave(dev, 0, source_port);
	if (!skb->dev) {
		dev_warn_ratelimited(&dev->dev,
				     "Dropping packet due to invalid source port\n");
		return NULL;
	}

	/* Remove the special VLAN tag between the MAC addresses and the
	 * current ethertype field, then close the gap.
	 */
	skb_pull_rcsum(skb, 2 + 2);
	memmove(skb->data - ETH_HLEN, skb->data - (ETH_HLEN + LAN9303_TAG_LEN),
		2 * ETH_ALEN);

	/* Forwarding was already handled in hardware unless trapped. */
	skb->offload_fwd_mark = !(vid & LAN9303_TAG_RX_TRAPPED_TO_CPU);

	return skb;
}

const struct dsa_device_ops lan9303_netdev_ops = {
	.xmit = lan9303_xmit,
	.rcv = lan9303_rcv,
};
gpl-2.0
wisniew/Harfix2_kernel_I9300
drivers/sensorhub/ssp_sensorhub.c
115
16370
/*
 * Copyright (C) 2012, Samsung Electronics Co. Ltd. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/uaccess.h>

#include "ssp.h"

/* Library payload prefixes that may replace a queued event when the
 * queue is full (see ssp_sensorhub_is_fast_data()).
 */
static const struct fast_data {
	char library_data[3];
} fast_data_table[] = {
	{ { 1, 1, 7 } },
};

/*
 * Userspace write handler: forwards a library command to the MCU.
 * Layout of the payload: byte 0 = instruction, byte 1 = sub-command,
 * bytes 2..count-1 = command data.
 *
 * FIX: the user buffer was previously dereferenced directly (buf[0],
 * buf+2); it is now copied with copy_from_user().  Also rejects
 * payloads shorter than 2 bytes, which previously made 'count - 2'
 * underflow as size_t.
 */
static ssize_t ssp_sensorhub_write(struct file *file, const char __user *buf,
				size_t count, loff_t *pos)
{
	struct ssp_sensorhub_data *hub_data
		= container_of(file->private_data,
			struct ssp_sensorhub_data, sensorhub_device);
	u8 *kbuf;
	u8 instruction;
	int ret = 0;
	int i;

	if (count < 2) {
		pr_err("%s: library command length err(%zu)", __func__, count);
		return -EINVAL;
	}

	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	if (copy_from_user(kbuf, buf, count)) {
		kfree(kbuf);
		return -EFAULT;
	}

	for (i = 0; i < count; i++)
		pr_info("%s[%d] = 0x%x", __func__, i, kbuf[i]);

	instruction = kbuf[0];
	if (kbuf[0] == MSG2SSP_INST_LIBRARY_REMOVE)
		instruction = REMOVE_LIBRARY;
	else if (kbuf[0] == MSG2SSP_INST_LIBRARY_ADD)
		instruction = ADD_LIBRARY;

	if (hub_data->ssp_data->bSspShutdown) {
		pr_err("%s: stop sending command(no ssp_data)", __func__);
		kfree(kbuf);
		return -ENOMEM;
	}

	ret = send_instruction(hub_data->ssp_data, instruction,
		kbuf[1], kbuf + 2, count - 2);
	kfree(kbuf);
	if (ret <= 0)
		pr_err("%s: send library command err(%d)", __func__, ret);

	/* i2c transfer fail */
	if (ret == ERROR)
		return -EIO;
	/* no ack from MCU */
	else if (ret == FAIL)
		return -EAGAIN;
	/* success */
	else
		return count;
}

/*
 * ioctl handler: hands the pending (or large) library data to userspace.
 *
 * FIX: copy_to_user() returns the number of bytes NOT copied (>= 0), so
 * the old 'ret < 0' error checks could never fire; a non-zero return is
 * now mapped to -EFAULT.
 */
static long ssp_sensorhub_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct ssp_sensorhub_data *hub_data
		= container_of(file->private_data,
			struct ssp_sensorhub_data, sensorhub_device);
	int ret = 0;
	int i = 0;

	switch (cmd) {
	case IOCTL_READ_CONTEXT_DATA:
		/* for receive_msg */
		if (!hub_data->large_library_length &&
		    hub_data->large_library_data == NULL) {
			ret = copy_to_user(argp,
				hub_data->first_event->library_data,
				hub_data->first_event->length);
			if (ret) {
				pr_err("%s: send library data err(%d)",
					__func__, ret);
				/* wake the relay thread even on failure */
				complete(&hub_data->transfer_done);
				ret = -EFAULT;
				goto exit;
			}

			for (i = 0; i < hub_data->first_event->length; i++) {
				pr_info("%s[%d] = 0x%x", __func__, i,
					hub_data->first_event->library_data[i]);
			}

			hub_data->transfer_try = 0;
			complete(&hub_data->transfer_done);

		/* for receive_large_msg */
		} else {
			pr_info("%s: receive_large_msg ioctl", __func__);
			ret = copy_to_user(argp,
				hub_data->large_library_data,
				hub_data->large_library_length);
			if (ret) {
				pr_err("%s: send large library data err(%d)",
					__func__, ret);
				ret = -EFAULT;
				goto exit;
			}

			kfree(hub_data->large_library_data);
			hub_data->large_library_length = 0;
		}
		break;

	default:
		pr_err("%s: ioctl cmd err(%d)", __func__, cmd);
		ret = -EINVAL;
	}

exit:
	return ret;
}

/* Not const: ssp_remove_sensorhub() NULLs two members at teardown. */
static struct file_operations ssp_sensorhub_fops = {
	.owner = THIS_MODULE,
	.open = nonseekable_open,
	.write = ssp_sensorhub_write,
	.unlocked_ioctl = ssp_sensorhub_ioctl,
};

/* Relay an AP power-state notice to userspace via the input device. */
void ssp_report_sensorhub_notice(struct ssp_data *ssp_data, char notice)
{
	struct ssp_sensorhub_data *hub_data = ssp_data->hub_data;

	input_report_rel(hub_data->sensorhub_input_dev, REL_RY, notice);
	input_sync(hub_data->sensorhub_input_dev);

	if (notice == MSG2SSP_AP_STATUS_WAKEUP)
		pr_info("%s: wake up", __func__);
	else if (notice == MSG2SSP_AP_STATUS_SLEEP)
		pr_info("%s: sleep", __func__);
	else if (notice == MSG2SSP_AP_STATUS_RESET)
		pr_info("%s: reset", __func__);
	else
		pr_err("%s: invalid notice", __func__);
}

/* Announce pending library data length to userspace (REL_RX event). */
static void ssp_report_sensorhub_length(struct ssp_sensorhub_data *hub_data,
					int length)
{
	input_report_rel(hub_data->sensorhub_input_dev, REL_RX, length);
	input_sync(hub_data->sensorhub_input_dev);
	pr_info("%s = %d", __func__, length);
}

/* Return the fast_data_table index matching dataframe[start..], or -EINVAL. */
static int ssp_sensorhub_is_fast_data(char *data, int start)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(fast_data_table); i++) {
		for (j = 0; j < sizeof(fast_data_table[0]); j++) {
			if (data[start + j] !=
			    fast_data_table[i].library_data[j])
				break;
		}
		if (j == sizeof(fast_data_table[0]))
			return i;
	}
	return -EINVAL;
}

/*
 * Copy dataframe[start..end) into the event queue.  When the queue is
 * full, only "fast data" payloads may replace the newest entry.
 * Returns the number of queued events or a negative errno.
 */
static int ssp_queue_sensorhub_events(struct ssp_sensorhub_data *hub_data,
				char *dataframe, int start, int end)
{
	struct sensorhub_event *event;
	int length = end - start;
	int event_number = hub_data->event_number;
	int events = 0;
	int ret = 0;
	int i = 0;

	if (length <= 0) {
		pr_err("%s: library length err(%d)", __func__, length);
		return -EINVAL;
	}

	/* how many events in the list? */
	spin_lock_bh(&hub_data->sensorhub_lock);
	list_for_each_entry(event, &hub_data->events_head.list, list)
		events++;
	spin_unlock_bh(&hub_data->sensorhub_lock);

	/* drop event if queue is full */
	if (events >= LIBRARY_MAX_NUM) {
		pr_info("%s: queue is full", __func__);
		hub_data->transfer_ready++;

		ret = ssp_sensorhub_is_fast_data(dataframe, start);
		if (ret >= 0)
			event_number = LIBRARY_MAX_NUM + ret;
		else
			return -ENOMEM;
	}

	/* allocate memory for new event (kfree(NULL) is a no-op) */
	kfree(hub_data->events[event_number].library_data);
	hub_data->events[event_number].library_data =
		kzalloc(length, GFP_KERNEL);
	if (hub_data->events[event_number].library_data == NULL) {
		pr_err("%s: allocate memory for library data err", __func__);
		return -ENOMEM;
	}

	/* copy sensorhub event into queue */
	while (start < end) {
		hub_data->events[event_number].library_data[i++]
			= dataframe[start++];
		pr_info("%s[%d] = 0x%x", __func__, i - 1,
			hub_data->events[event_number].library_data[i - 1]);
	}
	hub_data->events[event_number].length = length;

	if (events <= LIBRARY_MAX_NUM) {
		/* add new event at the end of queue */
		spin_lock_bh(&hub_data->sensorhub_lock);
		list_add_tail(&hub_data->events[event_number].list,
			&hub_data->events_head.list);
		if (events++ < LIBRARY_MAX_NUM)
			hub_data->transfer_ready = 0;
		spin_unlock_bh(&hub_data->sensorhub_lock);

		/* do not exceed max queue number */
		if (hub_data->event_number++ >= LIBRARY_MAX_NUM - 1)
			hub_data->event_number = 0;
	} else {
		/* replace the newest event when the queue is full */
		spin_lock_bh(&hub_data->sensorhub_lock);
		list_replace(hub_data->events_head.list.prev,
			&hub_data->events[event_number].list);
		spin_unlock_bh(&hub_data->sensorhub_lock);
	}

	pr_info("%s: total %d events", __func__, events);
	return events;
}

/*
 * Pull a multi-part "large" message from the MCU over i2c and assemble
 * it into hub_data->large_library_data.  Returns the assembled length
 * or a negative errno.
 *
 * First msg layout: [0] = total number of messages,
 * [1..2] = total length (big-endian), [3..] = first chunk of data.
 *
 * FIX: all kzalloc() results are now checked before use.
 */
static int ssp_receive_large_msg(struct ssp_sensorhub_data *hub_data,
				u8 sub_cmd)
{
	char send_data[2] = { 0, };
	char receive_data[2] = { 0, };
	char *large_msg_data;	/* Nth large msg data */
	int length = 0;		/* length of Nth large msg */
	int data_locater = 0;	/* large_library_data current position */
	int total_msg_number;	/* total number of large msg */
	int msg_number;		/* current number of large msg */
	int ret = 0;

	/* receive the first msg length */
	send_data[0] = MSG2SSP_STT;
	send_data[1] = sub_cmd;

	/* receive_data(msg length) is two byte because msg is large */
	ret = ssp_i2c_read(hub_data->ssp_data, send_data, 2,
			receive_data, 2, 0);
	if (ret < 0) {
		pr_err("%s: MSG2SSP_STT i2c err(%d)", __func__, ret);
		return ret;
	}

	/* get the first msg length */
	length = ((unsigned int)receive_data[0] << 8) +
		(unsigned int)receive_data[1];
	if (length < 3) {
		/* do not print err message with power-up */
		if (sub_cmd != SUBCMD_POWEREUP)
			pr_err("%s: 1st large msg data not ready(length=%d)",
				__func__, length);
		return -EINVAL;
	}

	/* receive the first msg data */
	send_data[0] = MSG2SSP_SRM;
	large_msg_data = kzalloc(length, GFP_KERNEL);
	if (!large_msg_data)
		return -ENOMEM;

	ret = ssp_i2c_read(hub_data->ssp_data, send_data, 1,
			large_msg_data, length, 0);
	if (ret < 0) {
		pr_err("%s: receive 1st large msg err(%d)", __func__, ret);
		kfree(large_msg_data);
		return ret;
	}

	/* empty the previous large library data */
	if (hub_data->large_library_length != 0)
		kfree(hub_data->large_library_data);

	total_msg_number = large_msg_data[0];
	hub_data->large_library_length =
		(int)((unsigned int)large_msg_data[1] << 8) +
		(unsigned int)large_msg_data[2];
	hub_data->large_library_data =
		kzalloc(hub_data->large_library_length, GFP_KERNEL);
	if (!hub_data->large_library_data) {
		hub_data->large_library_length = 0;
		kfree(large_msg_data);
		return -ENOMEM;
	}

	/* copy the first msg data into large_library_data */
	memcpy(hub_data->large_library_data, &large_msg_data[3], length - 3);
	kfree(large_msg_data);
	data_locater = length - 3;

	/* 2nd, 3rd,...Nth msg */
	for (msg_number = 0; msg_number < total_msg_number; msg_number++) {
		/* receive Nth msg length */
		send_data[0] = MSG2SSP_STT;
		send_data[1] = 0x81 + msg_number;

		ret = ssp_i2c_read(hub_data->ssp_data, send_data, 2,
				receive_data, 2, 0);
		if (ret < 0) {
			pr_err("%s: MSG2SSP_STT i2c err(%d)", __func__, ret);
			return ret;
		}

		/* get the Nth msg length */
		length = ((unsigned int)receive_data[0] << 8) +
			(unsigned int)receive_data[1];
		if (length <= 0) {
			pr_err("%s: %dth large msg data not ready(length=%d)",
				__func__, msg_number + 2, length);
			return -EINVAL;
		}

		large_msg_data = kzalloc(length, GFP_KERNEL);
		if (!large_msg_data)
			return -ENOMEM;

		/* receive Nth msg data */
		send_data[0] = MSG2SSP_SRM;
		ret = ssp_i2c_read(hub_data->ssp_data, send_data, 1,
				large_msg_data, length, 0);
		if (ret < 0) {
			pr_err("%s: receive %dth large msg err(%d)",
				__func__, msg_number + 2, ret);
			kfree(large_msg_data);
			return ret;
		}

		/* copy(append) Nth msg data into large_library_data */
		memcpy(&hub_data->large_library_data[data_locater],
			large_msg_data, length);
		data_locater += length;
		kfree(large_msg_data);
	}

	return hub_data->large_library_length;
}

/*
 * Relay thread: announces queued events to userspace and waits for the
 * ioctl to consume them.
 *
 * FIX: renamed from the misspelled ssp_senosrhub_thread_func (static,
 * only referenced below).  wait_for_completion_timeout() returns an
 * unsigned long, so the old 'ret < 0' branch was dead and is removed.
 */
static int ssp_sensorhub_thread_func(void *arg)
{
	struct ssp_sensorhub_data *hub_data = (struct ssp_sensorhub_data *)arg;
	struct sensorhub_event *event;
	unsigned long timeout;
	int events = 0;
	int ret = 0;

	while (!kthread_should_stop()) {
		/* run if only event queue is not empty */
		wait_event_interruptible(hub_data->sensorhub_waitqueue,
				kthread_should_stop() ||
				!list_empty(&hub_data->events_head.list));

		/* exit thread if kthread should stop */
		if (unlikely(kthread_should_stop())) {
			pr_info("%s: kthread_stop()", __func__);
			break;
		}

		/* exit thread
		 * if user does not get data with consecutive trials
		 */
		if (unlikely(hub_data->transfer_try++ >= LIBRARY_MAX_TRY)) {
			pr_err("%s: user does not get data", __func__);
			break;
		}

		/* report sensorhub event to user */
		if (hub_data->transfer_ready == 0) {
			/* first in first out */
			hub_data->first_event =
				list_first_entry(&hub_data->events_head.list,
						struct sensorhub_event, list);
			if (IS_ERR(hub_data->first_event)) {
				pr_err("%s: first event err(%ld)", __func__,
					PTR_ERR(hub_data->first_event));
				continue;
			}

			/* report sensorhub event to user */
			ssp_report_sensorhub_length(hub_data,
					hub_data->first_event->length);
			wake_lock_timeout(&hub_data->sensorhub_wake_lock,
					5*HZ);
			hub_data->transfer_ready++;
		}

		/* wait until user gets data */
		timeout = wait_for_completion_timeout(
				&hub_data->transfer_done, 3*HZ);
		if (timeout == 0) {
			pr_err("%s: wait timed out", __func__);
			hub_data->transfer_ready = 0;
		}

		/* remove first event only if transfer succeed */
		if (hub_data->transfer_try == 0) {
			/* remove first event */
			spin_lock_bh(&hub_data->sensorhub_lock);
			if (!list_empty(&hub_data->events_head.list))
				list_del(&hub_data->first_event->list);
			hub_data->transfer_ready = 0;

			/* how many events in the list? */
			events = 0;
			list_for_each_entry(event,
				&hub_data->events_head.list, list)
				events++;
			spin_unlock_bh(&hub_data->sensorhub_lock);

			pr_info("%s: %d events remain", __func__, events);
			continue;
		}

		/* throw away extra events */
		if (hub_data->transfer_ready > EVENT_WAIT_COUNT)
			hub_data->transfer_ready = 0;

		usleep_range(10000, 10000);
	}

	pr_info("%s: exit", __func__);
	return ret;
}

/* Queue incoming sensorhub data and kick the relay thread. */
int ssp_handle_sensorhub_data(struct ssp_data *ssp_data, char *dataframe,
				int start, int end)
{
	struct ssp_sensorhub_data *hub_data = ssp_data->hub_data;

	/* add new sensorhub event into queue */
	int ret = ssp_queue_sensorhub_events(hub_data, dataframe, start, end);
	wake_up(&hub_data->sensorhub_waitqueue);

	return ret;
}

/* Pull a large message from the MCU and announce its length to userspace. */
int ssp_handle_sensorhub_large_data(struct ssp_data *ssp_data, u8 sub_cmd)
{
	struct ssp_sensorhub_data *hub_data = ssp_data->hub_data;

	/* receive large size of library data */
	int ret = ssp_receive_large_msg(hub_data, sub_cmd);
	if (ret >= 0) {
		ssp_report_sensorhub_length(hub_data,
				hub_data->large_library_length);
		wake_lock_timeout(&hub_data->sensorhub_wake_lock, 3*HZ);
	} else {
		pr_err("%s: ssp_receive_large_msg err(%d)", __func__, ret);
	}

	return ret;
}

/*
 * Allocate and wire up the sensorhub: input device, misc device node,
 * and the relay kthread.
 *
 * FIX: the input device's name, drvdata and capabilities are now set
 * BEFORE input_register_device() (previously the device was registered
 * with a NULL name and no capabilities).
 */
int ssp_initialize_sensorhub(struct ssp_data *ssp_data)
{
	struct ssp_sensorhub_data *hub_data;
	int ret;

	hub_data = kzalloc(sizeof(*hub_data), GFP_KERNEL);
	if (!hub_data) {
		pr_err("%s: failed to allocate memory for sensorhub data",
			__func__);
		return -ENOMEM;
	}
	hub_data->ssp_data = ssp_data;
	ssp_data->hub_data = hub_data;

	/* allocate sensorhub input devices */
	hub_data->sensorhub_input_dev = input_allocate_device();
	if (!hub_data->sensorhub_input_dev) {
		pr_err("%s: allocate sensorhub input devices err", __func__);
		ret = -ENOMEM;
		goto err_input_allocate_device_sensorhub;
	}

	wake_lock_init(&hub_data->sensorhub_wake_lock, WAKE_LOCK_SUSPEND,
			"sensorhub_wake_lock");
	INIT_LIST_HEAD(&hub_data->events_head.list);
	init_waitqueue_head(&hub_data->sensorhub_waitqueue);
	init_completion(&hub_data->transfer_done);
	spin_lock_init(&hub_data->sensorhub_lock);

	/* configure before registration */
	hub_data->sensorhub_input_dev->name = "ssp_context";
	input_set_drvdata(hub_data->sensorhub_input_dev, hub_data);
	input_set_capability(hub_data->sensorhub_input_dev, EV_REL, REL_RX);
	input_set_capability(hub_data->sensorhub_input_dev, EV_REL, REL_RY);

	ret = input_register_device(hub_data->sensorhub_input_dev);
	if (ret < 0) {
		pr_err("%s: could not register sensorhub input device(%d)",
			__func__, ret);
		input_free_device(hub_data->sensorhub_input_dev);
		goto err_input_register_device_sensorhub;
	}

	/* create sensorhub device node */
	hub_data->sensorhub_device.minor = MISC_DYNAMIC_MINOR;
	hub_data->sensorhub_device.name = "ssp_sensorhub";
	hub_data->sensorhub_device.fops = &ssp_sensorhub_fops;

	ret = misc_register(&hub_data->sensorhub_device);
	if (ret < 0) {
		pr_err("%s: misc_register() failed", __func__);
		goto err_misc_register;
	}

	hub_data->sensorhub_task = kthread_run(ssp_sensorhub_thread_func,
				(void *)hub_data, "ssp_sensorhub_task");
	if (IS_ERR(hub_data->sensorhub_task)) {
		ret = PTR_ERR(hub_data->sensorhub_task);
		goto err_kthread_create;
	}

	return 0;

err_kthread_create:
	misc_deregister(&hub_data->sensorhub_device);
err_misc_register:
	input_unregister_device(hub_data->sensorhub_input_dev);
err_input_register_device_sensorhub:
	complete_all(&hub_data->transfer_done);
	wake_lock_destroy(&hub_data->sensorhub_wake_lock);
err_input_allocate_device_sensorhub:
	kfree(hub_data);
	return ret;
}

/*
 * Tear the sensorhub down in reverse order.
 *
 * FIX: the kthread is now stopped BEFORE the wake lock it uses is
 * destroyed (the old order destroyed the lock while the thread could
 * still take it).  NULLing the fops members is a best-effort guard
 * against late userspace calls and is inherently racy; kept as-is.
 */
void ssp_remove_sensorhub(struct ssp_data *ssp_data)
{
	struct ssp_sensorhub_data *hub_data = ssp_data->hub_data;

	ssp_sensorhub_fops.write = NULL;
	ssp_sensorhub_fops.unlocked_ioctl = NULL;
	misc_deregister(&hub_data->sensorhub_device);

	/* wake the thread out of any wait, then stop it */
	complete_all(&hub_data->transfer_done);
	if (hub_data->sensorhub_task)
		kthread_stop(hub_data->sensorhub_task);

	input_unregister_device(hub_data->sensorhub_input_dev);
	wake_lock_destroy(&hub_data->sensorhub_wake_lock);
	kfree(hub_data);
}

MODULE_DESCRIPTION("Samsung Sensor Platform(SSP) sensorhub driver");
MODULE_AUTHOR("Samsung Electronics");
MODULE_LICENSE("GPL");
gpl-2.0
shengzhou/linux
drivers/usb/serial/mos7840.c
371
72666
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Clean ups from Moschip version and a few ioctl implementations by: * Paul B Schroeder <pschroeder "at" uplogix "dot" com> * * Originally based on drivers/usb/serial/io_edgeport.c which is: * Copyright (C) 2000 Inside Out Networks, All rights reserved. * Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com> * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/serial.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/uaccess.h> #define DRIVER_DESC "Moschip 7840/7820 USB Serial Driver" /* * 16C50 UART register defines */ #define LCR_BITS_5 0x00 /* 5 bits/char */ #define LCR_BITS_6 0x01 /* 6 bits/char */ #define LCR_BITS_7 0x02 /* 7 bits/char */ #define LCR_BITS_8 0x03 /* 8 bits/char */ #define LCR_BITS_MASK 0x03 /* Mask for bits/char field */ #define LCR_STOP_1 0x00 /* 1 stop bit */ #define LCR_STOP_1_5 0x04 /* 1.5 stop bits (if 5 bits/char) */ #define LCR_STOP_2 0x04 /* 2 stop bits (if 6-8 bits/char) */ #define LCR_STOP_MASK 0x04 /* Mask for stop bits field */ #define LCR_PAR_NONE 0x00 /* No parity */ #define LCR_PAR_ODD 0x08 /* Odd parity */ #define LCR_PAR_EVEN 0x18 /* Even parity */ #define 
LCR_PAR_MARK 0x28 /* Force parity bit to 1 */ #define LCR_PAR_SPACE 0x38 /* Force parity bit to 0 */ #define LCR_PAR_MASK 0x38 /* Mask for parity field */ #define LCR_SET_BREAK 0x40 /* Set Break condition */ #define LCR_DL_ENABLE 0x80 /* Enable access to divisor latch */ #define MCR_DTR 0x01 /* Assert DTR */ #define MCR_RTS 0x02 /* Assert RTS */ #define MCR_OUT1 0x04 /* Loopback only: Sets state of RI */ #define MCR_MASTER_IE 0x08 /* Enable interrupt outputs */ #define MCR_LOOPBACK 0x10 /* Set internal (digital) loopback mode */ #define MCR_XON_ANY 0x20 /* Enable any char to exit XOFF mode */ #define MOS7840_MSR_CTS 0x10 /* Current state of CTS */ #define MOS7840_MSR_DSR 0x20 /* Current state of DSR */ #define MOS7840_MSR_RI 0x40 /* Current state of RI */ #define MOS7840_MSR_CD 0x80 /* Current state of CD */ /* * Defines used for sending commands to port */ #define MOS_WDR_TIMEOUT 5000 /* default urb timeout */ #define MOS_PORT1 0x0200 #define MOS_PORT2 0x0300 #define MOS_VENREG 0x0000 #define MOS_MAX_PORT 0x02 #define MOS_WRITE 0x0E #define MOS_READ 0x0D /* Requests */ #define MCS_RD_RTYPE 0xC0 #define MCS_WR_RTYPE 0x40 #define MCS_RDREQ 0x0D #define MCS_WRREQ 0x0E #define MCS_CTRL_TIMEOUT 500 #define VENDOR_READ_LENGTH (0x01) #define MAX_NAME_LEN 64 #define ZLP_REG1 0x3A /* Zero_Flag_Reg1 58 */ #define ZLP_REG5 0x3E /* Zero_Flag_Reg5 62 */ /* For higher baud Rates use TIOCEXBAUD */ #define TIOCEXBAUD 0x5462 /* vendor id and device id defines */ /* The native mos7840/7820 component */ #define USB_VENDOR_ID_MOSCHIP 0x9710 #define MOSCHIP_DEVICE_ID_7840 0x7840 #define MOSCHIP_DEVICE_ID_7820 0x7820 #define MOSCHIP_DEVICE_ID_7810 0x7810 /* The native component can have its vendor/device id's overridden * in vendor-specific implementations. Such devices can be handled * by making a change here, in id_table. 
*/ #define USB_VENDOR_ID_BANDB 0x0856 #define BANDB_DEVICE_ID_USO9ML2_2 0xAC22 #define BANDB_DEVICE_ID_USO9ML2_2P 0xBC00 #define BANDB_DEVICE_ID_USO9ML2_4 0xAC24 #define BANDB_DEVICE_ID_USO9ML2_4P 0xBC01 #define BANDB_DEVICE_ID_US9ML2_2 0xAC29 #define BANDB_DEVICE_ID_US9ML2_4 0xAC30 #define BANDB_DEVICE_ID_USPTL4_2 0xAC31 #define BANDB_DEVICE_ID_USPTL4_4 0xAC32 #define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 #define BANDB_DEVICE_ID_USOPTL4_2P 0xBC02 #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 #define BANDB_DEVICE_ID_USOPTL4_4P 0xBC03 #define BANDB_DEVICE_ID_USOPTL2_4 0xAC24 /* This driver also supports * ATEN UC2324 device using Moschip MCS7840 * ATEN UC2322 device using Moschip MCS7820 */ #define USB_VENDOR_ID_ATENINTL 0x0557 #define ATENINTL_DEVICE_ID_UC2324 0x2011 #define ATENINTL_DEVICE_ID_UC2322 0x7820 /* Interrupt Routine Defines */ #define SERIAL_IIR_RLS 0x06 #define SERIAL_IIR_MS 0x00 /* * Emulation of the bit mask on the LINE STATUS REGISTER. */ #define SERIAL_LSR_DR 0x0001 #define SERIAL_LSR_OE 0x0002 #define SERIAL_LSR_PE 0x0004 #define SERIAL_LSR_FE 0x0008 #define SERIAL_LSR_BI 0x0010 #define MOS_MSR_DELTA_CTS 0x10 #define MOS_MSR_DELTA_DSR 0x20 #define MOS_MSR_DELTA_RI 0x40 #define MOS_MSR_DELTA_CD 0x80 /* Serial Port register Address */ #define INTERRUPT_ENABLE_REGISTER ((__u16)(0x01)) #define FIFO_CONTROL_REGISTER ((__u16)(0x02)) #define LINE_CONTROL_REGISTER ((__u16)(0x03)) #define MODEM_CONTROL_REGISTER ((__u16)(0x04)) #define LINE_STATUS_REGISTER ((__u16)(0x05)) #define MODEM_STATUS_REGISTER ((__u16)(0x06)) #define SCRATCH_PAD_REGISTER ((__u16)(0x07)) #define DIVISOR_LATCH_LSB ((__u16)(0x00)) #define DIVISOR_LATCH_MSB ((__u16)(0x01)) #define CLK_MULTI_REGISTER ((__u16)(0x02)) #define CLK_START_VALUE_REGISTER ((__u16)(0x03)) #define GPIO_REGISTER ((__u16)(0x07)) #define SERIAL_LCR_DLAB ((__u16)(0x0080)) /* * URB POOL related defines */ #define NUM_URBS 16 /* URB Count */ #define URB_TRANSFER_BUFFER_SIZE 32 /* URB Size */ /* LED on/off milliseconds*/ #define 
LED_ON_MS 500 #define LED_OFF_MS 500 enum mos7840_flag { MOS7840_FLAG_CTRL_BUSY, MOS7840_FLAG_LED_BUSY, }; static const struct usb_device_id id_table[] = { {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7810)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)}, {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, {} /* terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table); /* This structure holds all of the local port information */ struct moschip_port { int port_num; /*Actual port number in the device(1,2,etc) */ struct urb *write_urb; /* write URB for this port */ struct urb *read_urb; /* read URB for this port */ __u8 shadowLCR; /* last LCR value received */ __u8 shadowMCR; /* last MCR value received */ char open; char open_ports; struct usb_serial_port *port; /* loop back to the owner of this object */ /* Offsets */ __u8 SpRegOffset; __u8 ControlRegOffset; __u8 DcrRegOffset; /* for processing control URBS in interrupt context */ struct urb *control_urb; struct usb_ctrlrequest *dr; char *ctrl_buf; int MsrLsr; 
spinlock_t pool_lock; struct urb *write_urb_pool[NUM_URBS]; char busy[NUM_URBS]; bool read_urb_busy; /* For device(s) with LED indicator */ bool has_led; struct timer_list led_timer1; /* Timer for LED on */ struct timer_list led_timer2; /* Timer for LED off */ struct urb *led_urb; struct usb_ctrlrequest *led_dr; unsigned long flags; }; /* * mos7840_set_reg_sync * To set the Control register by calling usb_fill_control_urb function * by passing usb_sndctrlpipe function as parameter. */ static int mos7840_set_reg_sync(struct usb_serial_port *port, __u16 reg, __u16 val) { struct usb_device *dev = port->serial->dev; val = val & 0x00ff; dev_dbg(&port->dev, "mos7840_set_reg_sync offset is %x, value %x\n", reg, val); return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), MCS_WRREQ, MCS_WR_RTYPE, val, reg, NULL, 0, MOS_WDR_TIMEOUT); } /* * mos7840_get_reg_sync * To set the Uart register by calling usb_fill_control_urb function by * passing usb_rcvctrlpipe function as parameter. */ static int mos7840_get_reg_sync(struct usb_serial_port *port, __u16 reg, __u16 *val) { struct usb_device *dev = port->serial->dev; int ret = 0; u8 *buf; buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL); if (!buf) return -ENOMEM; ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ, MCS_RD_RTYPE, 0, reg, buf, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); *val = buf[0]; dev_dbg(&port->dev, "%s offset is %x, return val %x\n", __func__, reg, *val); kfree(buf); return ret; } /* * mos7840_set_uart_reg * To set the Uart register by calling usb_fill_control_urb function by * passing usb_sndctrlpipe function as parameter. 
*/ static int mos7840_set_uart_reg(struct usb_serial_port *port, __u16 reg, __u16 val) { struct usb_device *dev = port->serial->dev; val = val & 0x00ff; /* For the UART control registers, the application number need to be Or'ed */ if (port->serial->num_ports == 4) { val |= ((__u16)port->port_number + 1) << 8; } else { if (port->port_number == 0) { val |= ((__u16)port->port_number + 1) << 8; } else { val |= ((__u16)port->port_number + 2) << 8; } } dev_dbg(&port->dev, "%s application number is %x\n", __func__, val); return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), MCS_WRREQ, MCS_WR_RTYPE, val, reg, NULL, 0, MOS_WDR_TIMEOUT); } /* * mos7840_get_uart_reg * To set the Control register by calling usb_fill_control_urb function * by passing usb_rcvctrlpipe function as parameter. */ static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg, __u16 *val) { struct usb_device *dev = port->serial->dev; int ret = 0; __u16 Wval; u8 *buf; buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL); if (!buf) return -ENOMEM; /* Wval is same as application number */ if (port->serial->num_ports == 4) { Wval = ((__u16)port->port_number + 1) << 8; } else { if (port->port_number == 0) { Wval = ((__u16)port->port_number + 1) << 8; } else { Wval = ((__u16)port->port_number + 2) << 8; } } dev_dbg(&port->dev, "%s application number is %x\n", __func__, Wval); ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ, MCS_RD_RTYPE, Wval, reg, buf, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); *val = buf[0]; kfree(buf); return ret; } static void mos7840_dump_serial_port(struct usb_serial_port *port, struct moschip_port *mos7840_port) { dev_dbg(&port->dev, "SpRegOffset is %2x\n", mos7840_port->SpRegOffset); dev_dbg(&port->dev, "ControlRegOffset is %2x\n", mos7840_port->ControlRegOffset); dev_dbg(&port->dev, "DCRRegOffset is %2x\n", mos7840_port->DcrRegOffset); } /************************************************************************/ 
/************************************************************************/
/************************************************************************/
/*             I N T E R F A C E   F U N C T I O N S			  */
/*             I N T E R F A C E   F U N C T I O N S			  */
/************************************************************************/
/************************************************************************/

/* Stash the driver's per-port state in the usb_serial_port. */
static inline void mos7840_set_port_private(struct usb_serial_port *port,
					    struct moschip_port *data)
{
	usb_set_serial_port_data(port, (void *)data);
}

/* Retrieve the driver's per-port state from the usb_serial_port. */
static inline struct moschip_port *mos7840_get_port_private(struct
							    usb_serial_port
							    *port)
{
	return (struct moschip_port *)usb_get_serial_port_data(port);
}

/*
 * Account for modem-status (MSR) delta bits: bump the matching icount
 * counters and wake any waiter blocked on a modem-status change (TIOCMIWAIT).
 */
static void mos7840_handle_new_msr(struct moschip_port *port, __u8 new_msr)
{
	struct moschip_port *mos7840_port;
	struct async_icount *icount;
	mos7840_port = port;
	if (new_msr & (MOS_MSR_DELTA_CTS | MOS_MSR_DELTA_DSR |
		       MOS_MSR_DELTA_RI | MOS_MSR_DELTA_CD)) {
		icount = &mos7840_port->port->icount;

		/* update input line counters */
		if (new_msr & MOS_MSR_DELTA_CTS)
			icount->cts++;
		if (new_msr & MOS_MSR_DELTA_DSR)
			icount->dsr++;
		if (new_msr & MOS_MSR_DELTA_CD)
			icount->dcd++;
		if (new_msr & MOS_MSR_DELTA_RI)
			icount->rng++;

		wake_up_interruptible(&port->port->port.delta_msr_wait);
	}
}

/*
 * Account for line-status (LSR) error bits in the port's icount counters.
 */
static void mos7840_handle_new_lsr(struct moschip_port *port, __u8 new_lsr)
{
	struct async_icount *icount;

	if (new_lsr & SERIAL_LSR_BI) {
		/*
		 * Parity and Framing errors only count if they
		 * occur exclusive of a break being
		 * received.
		 */
		new_lsr &= (__u8) (SERIAL_LSR_OE | SERIAL_LSR_BI);
	}

	/* update input line counters */
	icount = &port->port->icount;
	if (new_lsr & SERIAL_LSR_BI)
		icount->brk++;
	if (new_lsr & SERIAL_LSR_OE)
		icount->overrun++;
	if (new_lsr & SERIAL_LSR_PE)
		icount->parity++;
	if (new_lsr & SERIAL_LSR_FE)
		icount->frame++;
}

/************************************************************************/
/************************************************************************/
/*            U S B  C A L L B A C K   F U N C T I O N S                */
/*            U S B  C A L L B A C K   F U N C T I O N S                */
/************************************************************************/
/************************************************************************/

/*
 * Completion handler for the MSR/LSR read issued by mos7840_get_reg():
 * dispatches the returned register byte to the MSR or LSR handler
 * (selected by MsrLsr) and releases the CTRL_BUSY lock bit in all cases.
 */
static void mos7840_control_callback(struct urb *urb)
{
	unsigned char *data;
	struct moschip_port *mos7840_port;
	struct device *dev = &urb->dev->dev;
	__u8 regval = 0x0;
	int status = urb->status;

	mos7840_port = urb->context;

	switch (status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* this urb is terminated, clean up */
		dev_dbg(dev, "%s - urb shutting down with status: %d\n",
			__func__, status);
		goto out;
	default:
		dev_dbg(dev, "%s - nonzero urb status received: %d\n",
			__func__, status);
		goto out;
	}

	dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length);
	dev_dbg(dev, "%s mos7840_port->MsrLsr is %d port %d\n", __func__,
		mos7840_port->MsrLsr, mos7840_port->port_num);
	data = urb->transfer_buffer;
	regval = (__u8) data[0];
	dev_dbg(dev, "%s data is %x\n", __func__, regval);
	if (mos7840_port->MsrLsr == 0)
		mos7840_handle_new_msr(mos7840_port, regval);
	else if (mos7840_port->MsrLsr == 1)
		mos7840_handle_new_lsr(mos7840_port, regval);
out:
	/* Always release the control-channel lock taken in mos7840_get_reg(). */
	clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mos7840_port->flags);
}

/*
 * Kick off an asynchronous 2-byte register read; completion lands in
 * mos7840_control_callback().  Returns -EBUSY if a control read is already
 * in flight (CTRL_BUSY bit), otherwise the usb_submit_urb() result.
 */
static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
			   __u16 *val)
{
	struct usb_device *dev = mcs->port->serial->dev;
	struct usb_ctrlrequest *dr = mcs->dr;
	unsigned char *buffer = mcs->ctrl_buf;
	int ret;

	/* Only one control read at a time; the callback unlocks the bit. */
	if (test_and_set_bit_lock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags))
		return -EBUSY;

	dr->bRequestType = MCS_RD_RTYPE;
	dr->bRequest = MCS_RDREQ;
	dr->wValue = cpu_to_le16(Wval);	/* 0 */
	dr->wIndex = cpu_to_le16(reg);
	dr->wLength = cpu_to_le16(2);

	usb_fill_control_urb(mcs->control_urb, dev, usb_rcvctrlpipe(dev, 0),
			     (unsigned char *)dr, buffer, 2,
			     mos7840_control_callback, mcs);
	mcs->control_urb->transfer_buffer_length = 2;
	ret = usb_submit_urb(mcs->control_urb, GFP_ATOMIC);
	if (ret)
		/* Submission failed: the callback will not run, unlock here. */
		clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags);

	return ret;
}

/* Completion handler for the async LED control transfer; log-only. */
static void mos7840_set_led_callback(struct urb *urb)
{
	switch (urb->status) {
	case 0:
		/* Success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* This urb is terminated, clean up */
		dev_dbg(&urb->dev->dev, "%s - urb shutting down: %d\n",
			__func__, urb->status);
		break;
	default:
		dev_dbg(&urb->dev->dev, "%s - nonzero urb status: %d\n",
			__func__, urb->status);
	}
}

/*
 * Fire-and-forget LED register write (usable from atomic context).
 * NOTE(review): the usb_submit_urb() return value is ignored here —
 * presumably acceptable for a cosmetic LED update, but worth confirming.
 */
static void mos7840_set_led_async(struct moschip_port *mcs, __u16 wval,
				  __u16 reg)
{
	struct usb_device *dev = mcs->port->serial->dev;
	struct usb_ctrlrequest *dr = mcs->led_dr;

	dr->bRequestType = MCS_WR_RTYPE;
	dr->bRequest = MCS_WRREQ;
	dr->wValue = cpu_to_le16(wval);
	dr->wIndex = cpu_to_le16(reg);
	dr->wLength = cpu_to_le16(0);

	usb_fill_control_urb(mcs->led_urb, dev, usb_sndctrlpipe(dev, 0),
			     (unsigned char *)dr, NULL, 0,
			     mos7840_set_led_callback, NULL);

	usb_submit_urb(mcs->led_urb, GFP_ATOMIC);
}

/* Synchronous LED register write (process context only). */
static void mos7840_set_led_sync(struct usb_serial_port *port, __u16 reg,
				 __u16 val)
{
	struct usb_device *dev = port->serial->dev;

	usb_control_msg(dev, usb_sndctrlpipe(dev, 0), MCS_WRREQ, MCS_WR_RTYPE,
			val, reg, NULL, 0, MOS_WDR_TIMEOUT);
}

/* Timer callback: turn the LED off and arm the busy-flag release timer. */
static void mos7840_led_off(unsigned long arg)
{
	struct moschip_port *mcs = (struct moschip_port *) arg;

	/* Turn off LED */
	mos7840_set_led_async(mcs, 0x0300, MODEM_CONTROL_REGISTER);
	mod_timer(&mcs->led_timer2,
		  jiffies + msecs_to_jiffies(LED_OFF_MS));
}

/* Timer callback: allow the next LED blink by releasing LED_BUSY. */
static void mos7840_led_flag_off(unsigned long arg)
{
	struct moschip_port *mcs = (struct moschip_port *) arg;

	clear_bit_unlock(MOS7840_FLAG_LED_BUSY, &mcs->flags);
}

/*
 * Blink the activity LED: turn it on now and arm the off-timer.  The
 * LED_BUSY bit rate-limits blinking until mos7840_led_flag_off() runs.
 */
static void mos7840_led_activity(struct usb_serial_port *port)
{
	struct moschip_port *mos7840_port = usb_get_serial_port_data(port);

	if (test_and_set_bit_lock(MOS7840_FLAG_LED_BUSY, &mos7840_port->flags))
		return;

	mos7840_set_led_async(mos7840_port, 0x0301, MODEM_CONTROL_REGISTER);
	mod_timer(&mos7840_port->led_timer1,
		  jiffies + msecs_to_jiffies(LED_ON_MS));
}

/*****************************************************************************
 * mos7840_interrupt_callback
 *	this is the callback function for when we have received data on the
 *	interrupt endpoint.
 *****************************************************************************/
static void mos7840_interrupt_callback(struct urb *urb)
{
	int result;
	int length;
	struct moschip_port *mos7840_port;
	struct usb_serial *serial;
	__u16 Data;
	unsigned char *data;
	__u8 sp[5], st;	/* NOTE(review): 'st' (FIFO status byte) is read but never used */
	int i, rv = 0;
	__u16 wval, wreg = 0;
	int status = urb->status;

	switch (status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* this urb is terminated, clean up */
		dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n",
			__func__, status);
		return;
	default:
		dev_dbg(&urb->dev->dev, "%s - nonzero urb status received: %d\n",
			__func__, status);
		goto exit;
	}

	length = urb->actual_length;
	data = urb->transfer_buffer;
	serial = urb->context;

	/* Moschip get 5 bytes
	 * Byte 1 IIR Port 1 (port.number is 0)
	 * Byte 2 IIR Port 2 (port.number is 1)
	 * Byte 3 IIR Port 3 (port.number is 2)
	 * Byte 4 IIR Port 4 (port.number is 3)
	 * Byte 5 FIFO status for both
	 *
	 * NOTE(review): this only rejects transfers longer than 5 bytes;
	 * a short transfer (< 5) would read stale buffer data below —
	 * presumably the device always sends exactly 5 bytes; verify.
	 */
	if (length && length > 5) {
		dev_dbg(&urb->dev->dev, "%s", "Wrong data !!!\n");
		return;
	}

	sp[0] = (__u8) data[0];
	sp[1] = (__u8) data[1];
	sp[2] = (__u8) data[2];
	sp[3] = (__u8) data[3];
	st = (__u8) data[4];

	for (i = 0; i < serial->num_ports; i++) {
		mos7840_port = mos7840_get_port_private(serial->port[i]);
		wval = ((__u16)serial->port[i]->port_number + 1) << 8;
		if (mos7840_port->open) {
			if (sp[i] & 0x01) {
				dev_dbg(&urb->dev->dev, "SP%d No Interrupt !!!\n", i);
			} else {
				switch (sp[i] & 0x0f) {
				case SERIAL_IIR_RLS:
					dev_dbg(&urb->dev->dev, "Serial Port %d: Receiver status error or \n", i);
					dev_dbg(&urb->dev->dev, "address bit detected in 9-bit mode\n");
					mos7840_port->MsrLsr = 1;
					wreg = LINE_STATUS_REGISTER;
					break;
				case SERIAL_IIR_MS:
					dev_dbg(&urb->dev->dev, "Serial Port %d: Modem status change\n", i);
					mos7840_port->MsrLsr = 0;
					wreg = MODEM_STATUS_REGISTER;
					break;
				}
				rv = mos7840_get_reg(mos7840_port, wval, wreg, &Data);
			}
		}
	}
	if (!(rv < 0))
		/* the completion handler for the control urb will resubmit */
		return;
exit:
	result = usb_submit_urb(urb, GFP_ATOMIC);
	if (result) {
		dev_err(&urb->dev->dev,
			"%s - Error %d submitting interrupt urb\n",
			__func__, result);
	}
}

/* Sanity check a usb_serial_port pointer; returns 0 if usable, -1 if not. */
static int mos7840_port_paranoia_check(struct usb_serial_port *port,
				       const char *function)
{
	if (!port) {
		pr_debug("%s - port == NULL\n", function);
		return -1;
	}
	if (!port->serial) {
		pr_debug("%s - port->serial == NULL\n", function);
		return -1;
	}

	return 0;
}

/* Inline functions to check the sanity of a pointer that is passed to us */
static int mos7840_serial_paranoia_check(struct usb_serial *serial,
					 const char *function)
{
	if (!serial) {
		pr_debug("%s - serial == NULL\n", function);
		return -1;
	}
	if (!serial->type) {
		pr_debug("%s - serial->type == NULL!\n", function);
		return -1;
	}

	return 0;
}

/* Return the port's usb_serial after paranoia checks, or NULL. */
static struct usb_serial *mos7840_get_usb_serial(struct usb_serial_port *port,
						 const char *function)
{
	/* if no port was specified, or it fails a paranoia check */
	if (!port ||
	    mos7840_port_paranoia_check(port, function) ||
	    mos7840_serial_paranoia_check(port->serial, function)) {
		/* then say that we don't have a valid usb_serial thing,
		 * which will end up genrating -ENODEV return values */
		return NULL;
	}

	return port->serial;
}

/*****************************************************************************
 * mos7840_bulk_in_callback
 *	this is the callback function for when we have received data on the
 *	bulk
 * in endpoint.
 *****************************************************************************/
static void mos7840_bulk_in_callback(struct urb *urb)
{
	int retval;
	unsigned char *data;
	struct usb_serial *serial;
	struct usb_serial_port *port;
	struct moschip_port *mos7840_port;
	int status = urb->status;

	mos7840_port = urb->context;
	if (!mos7840_port)
		return;

	if (status) {
		dev_dbg(&urb->dev->dev, "nonzero read bulk status received: %d\n", status);
		/* Not resubmitting: mark the read urb as idle. */
		mos7840_port->read_urb_busy = false;
		return;
	}

	port = mos7840_port->port;
	if (mos7840_port_paranoia_check(port, __func__)) {
		mos7840_port->read_urb_busy = false;
		return;
	}

	serial = mos7840_get_usb_serial(port, __func__);
	if (!serial) {
		mos7840_port->read_urb_busy = false;
		return;
	}

	data = urb->transfer_buffer;
	usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);

	/* Push any received bytes up to the tty layer. */
	if (urb->actual_length) {
		struct tty_port *tport = &mos7840_port->port->port;
		tty_insert_flip_string(tport, data, urb->actual_length);
		tty_flip_buffer_push(tport);
		port->icount.rx += urb->actual_length;
		dev_dbg(&port->dev, "icount.rx is %d:\n", port->icount.rx);
	}

	/* read_urb is cleared elsewhere when the urb has been killed. */
	if (!mos7840_port->read_urb) {
		dev_dbg(&port->dev, "%s", "URB KILLED !!!\n");
		mos7840_port->read_urb_busy = false;
		return;
	}

	if (mos7840_port->has_led)
		mos7840_led_activity(port);

	/* Resubmit to keep the bulk-in pipeline running. */
	mos7840_port->read_urb_busy = true;
	retval = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);

	if (retval) {
		dev_dbg(&port->dev, "usb_submit_urb(read bulk) failed, retval = %d\n", retval);
		mos7840_port->read_urb_busy = false;
	}
}

/*****************************************************************************
 * mos7840_bulk_out_data_callback
 *	this is the callback function for when we have finished sending
 *	serial data on the bulk out endpoint.
*****************************************************************************/ static void mos7840_bulk_out_data_callback(struct urb *urb) { struct moschip_port *mos7840_port; struct usb_serial_port *port; int status = urb->status; int i; mos7840_port = urb->context; port = mos7840_port->port; spin_lock(&mos7840_port->pool_lock); for (i = 0; i < NUM_URBS; i++) { if (urb == mos7840_port->write_urb_pool[i]) { mos7840_port->busy[i] = 0; break; } } spin_unlock(&mos7840_port->pool_lock); if (status) { dev_dbg(&port->dev, "nonzero write bulk status received:%d\n", status); return; } if (mos7840_port_paranoia_check(port, __func__)) return; if (mos7840_port->open) tty_port_tty_wakeup(&port->port); } /************************************************************************/ /* D R I V E R T T Y I N T E R F A C E F U N C T I O N S */ /************************************************************************/ /***************************************************************************** * mos7840_open * this function is called by the tty driver when a port is opened * If successful, we return 0 * Otherwise we return a negative error number. 
 *****************************************************************************/
static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
{
	int response;
	int j;
	struct usb_serial *serial;
	struct urb *urb;
	__u16 Data;
	int status;
	struct moschip_port *mos7840_port;
	struct moschip_port *port0;

	if (mos7840_port_paranoia_check(port, __func__))
		return -ENODEV;

	serial = port->serial;
	if (mos7840_serial_paranoia_check(serial, __func__))
		return -ENODEV;

	mos7840_port = mos7840_get_port_private(port);
	port0 = mos7840_get_port_private(serial->port[0]);

	if (mos7840_port == NULL || port0 == NULL)
		return -ENODEV;

	usb_clear_halt(serial->dev, port->write_urb->pipe);
	usb_clear_halt(serial->dev, port->read_urb->pipe);
	/* open_ports is shared state on port 0; counts opens across ports. */
	port0->open_ports++;

	/* Initialising the write urb pool.
	 * NOTE(review): allocation failures are tolerated here — a NULL
	 * slot or missing transfer buffer is simply skipped later. */
	for (j = 0; j < NUM_URBS; ++j) {
		urb = usb_alloc_urb(0, GFP_KERNEL);
		mos7840_port->write_urb_pool[j] = urb;
		if (!urb)
			continue;

		urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
					       GFP_KERNEL);
		if (!urb->transfer_buffer) {
			usb_free_urb(urb);
			mos7840_port->write_urb_pool[j] = NULL;
			continue;
		}
	}

/*****************************************************************************
 * Initialize MCS7840 -- Write Init values to corresponding Registers
 *
 * Register Index
 * 1 : IER
 * 2 : FCR
 * 3 : LCR
 * 4 : MCR
 *
 * 0x08 : SP1/2 Control Reg
 *****************************************************************************/

	/* NEED to check the following Block */

	/* Pulse bit 7 of the SP register (set then clear). */
	Data = 0x0;
	status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data);
	if (status < 0) {
		dev_dbg(&port->dev, "Reading Spreg failed\n");
		goto err;
	}
	Data |= 0x80;
	status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
	if (status < 0) {
		dev_dbg(&port->dev, "writing Spreg failed\n");
		goto err;
	}

	Data &= ~0x80;
	status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
	if (status < 0) {
		dev_dbg(&port->dev, "writing Spreg failed\n");
		goto err;
	}
	/* End of block to be checked */

	Data = 0x0;
	status = mos7840_get_reg_sync(port, mos7840_port->ControlRegOffset,
				      &Data);
	if (status < 0) {
		dev_dbg(&port->dev, "Reading Controlreg failed\n");
		goto err;
	}
	Data |= 0x08;	/* Driver done bit */
	Data |= 0x20;	/* rx_disable */
	status = mos7840_set_reg_sync(port, mos7840_port->ControlRegOffset,
				      Data);
	if (status < 0) {
		dev_dbg(&port->dev, "writing Controlreg failed\n");
		goto err;
	}
	/* do register settings here */
	/* Set all regs to the device default values. */
	/***********************************
	 * First Disable all interrupts.
	 ***********************************/
	Data = 0x00;
	status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
	if (status < 0) {
		dev_dbg(&port->dev, "disabling interrupts failed\n");
		goto err;
	}
	/* Set FIFO_CONTROL_REGISTER to the default value */
	Data = 0x00;
	status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
	if (status < 0) {
		dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER  failed\n");
		goto err;
	}

	/* Enable and reset the FIFOs. */
	Data = 0xcf;
	status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
	if (status < 0) {
		dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER  failed\n");
		goto err;
	}

	/* 8N1 line settings; keep shadowLCR in sync with the hardware. */
	Data = 0x03;
	status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
	mos7840_port->shadowLCR = Data;

	Data = 0x0b;
	status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
	mos7840_port->shadowMCR = Data;

	Data = 0x00;
	status = mos7840_get_uart_reg(port, LINE_CONTROL_REGISTER, &Data);
	mos7840_port->shadowLCR = Data;

	Data |= SERIAL_LCR_DLAB;	/* data latch enable in LCR 0x80 */
	status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);

	/* Program the divisor latch (DLAB must be set above). */
	Data = 0x0c;
	status = mos7840_set_uart_reg(port, DIVISOR_LATCH_LSB, Data);

	Data = 0x0;
	status = mos7840_set_uart_reg(port, DIVISOR_LATCH_MSB, Data);

	Data = 0x00;
	status = mos7840_get_uart_reg(port, LINE_CONTROL_REGISTER, &Data);

	Data = Data & ~SERIAL_LCR_DLAB;
	status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
	mos7840_port->shadowLCR = Data;

	/* clearing Bulkin and Bulkout Fifo */
	Data = 0x0;
	status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data);

	Data = Data | 0x0c;
	status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);

	Data = Data & ~0x0c;
	status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
	/* Finally enable all interrupts */
	Data = 0x0c;
	status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);

	/* clearing rx_disable */
	Data = 0x0;
	status = mos7840_get_reg_sync(port, mos7840_port->ControlRegOffset,
				      &Data);
	Data = Data & ~0x20;
	status = mos7840_set_reg_sync(port, mos7840_port->ControlRegOffset,
				      Data);

	/* rx_negate */
	Data = 0x0;
	status = mos7840_get_reg_sync(port, mos7840_port->ControlRegOffset,
				      &Data);
	Data = Data | 0x10;
	status = mos7840_set_reg_sync(port, mos7840_port->ControlRegOffset,
				      Data);

	/* Check to see if we've set up our endpoint info yet    *
	 * (can't set it up in mos7840_startup as the structures *
	 * were not set up at that time.)                        */
	if (port0->open_ports == 1) {
		/* FIXME: Buffer never NULL, so URB is not submitted. */
		if (serial->port[0]->interrupt_in_buffer == NULL) {
			/* set up interrupt urb */
			usb_fill_int_urb(serial->port[0]->interrupt_in_urb,
				serial->dev,
				usb_rcvintpipe(serial->dev,
				serial->port[0]->interrupt_in_endpointAddress),
				serial->port[0]->interrupt_in_buffer,
				serial->port[0]->interrupt_in_urb->
				transfer_buffer_length,
				mos7840_interrupt_callback,
				serial,
				serial->port[0]->interrupt_in_urb->interval);

			/* start interrupt read for mos7840               *
			 * will continue as long as mos7840 is connected  */

			response =
			    usb_submit_urb(serial->port[0]->interrupt_in_urb,
					   GFP_KERNEL);
			if (response) {
				dev_err(&port->dev, "%s - Error %d submitting "
					"interrupt urb\n", __func__, response);
			}

		}

	}

	/* see if we've set up our endpoint info yet   *
	 * (can't set it up in mos7840_startup as the  *
	 * structures were not set up at that time.)   */

	dev_dbg(&port->dev, "port number is %d\n", port->port_number);
	dev_dbg(&port->dev, "minor number is %d\n", port->minor);
	dev_dbg(&port->dev, "Bulkin endpoint is %d\n", port->bulk_in_endpointAddress);
	dev_dbg(&port->dev, "BulkOut endpoint is %d\n", port->bulk_out_endpointAddress);
	dev_dbg(&port->dev, "Interrupt endpoint is %d\n", port->interrupt_in_endpointAddress);
	dev_dbg(&port->dev, "port's number in the device is %d\n", mos7840_port->port_num);
	mos7840_port->read_urb = port->read_urb;

	/* set up our bulk in urb.
	 * Two-port devices use the odd-numbered endpoints, hence the +2. */
	if ((serial->num_ports == 2) && (((__u16)port->port_number % 2) != 0)) {
		usb_fill_bulk_urb(mos7840_port->read_urb,
			serial->dev,
			usb_rcvbulkpipe(serial->dev,
				(port->bulk_in_endpointAddress) + 2),
			port->bulk_in_buffer,
			mos7840_port->read_urb->transfer_buffer_length,
			mos7840_bulk_in_callback, mos7840_port);
	} else {
		usb_fill_bulk_urb(mos7840_port->read_urb,
			serial->dev,
			usb_rcvbulkpipe(serial->dev,
				port->bulk_in_endpointAddress),
			port->bulk_in_buffer,
			mos7840_port->read_urb->transfer_buffer_length,
			mos7840_bulk_in_callback, mos7840_port);
	}

	dev_dbg(&port->dev, "%s: bulkin endpoint is %d\n", __func__, port->bulk_in_endpointAddress);
	mos7840_port->read_urb_busy = true;
	response = usb_submit_urb(mos7840_port->read_urb, GFP_KERNEL);
	if (response) {
		dev_err(&port->dev, "%s - Error %d submitting control urb\n",
			__func__, response);
		mos7840_port->read_urb_busy = false;
	}

	/* initialize our port settings */
	/* Must set to enable ints! */
	mos7840_port->shadowMCR = MCR_MASTER_IE;

	/* send a open port command */
	mos7840_port->open = 1;
	/* mos7840_change_port_settings(mos7840_port,old_termios); */

	return 0;
err:
	/* Tear down the write urb pool allocated above. */
	for (j = 0; j < NUM_URBS; ++j) {
		urb = mos7840_port->write_urb_pool[j];
		if (!urb)
			continue;
		kfree(urb->transfer_buffer);
		usb_free_urb(urb);
	}
	return status;
}

/*****************************************************************************
 * mos7840_chars_in_buffer
 *	this function is called by the tty driver when it wants to know how many
 *	bytes of data we currently have outstanding in the port (data that has
 *	been written, but hasn't made it out the port yet)
 *	If successful, we return the number of bytes left to be written in the
 *	system,
 *	Otherwise we return zero.
 *****************************************************************************/
static int mos7840_chars_in_buffer(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	int i;
	int chars = 0;
	unsigned long flags;
	struct moschip_port *mos7840_port;

	if (mos7840_port_paranoia_check(port, __func__))
		return 0;

	mos7840_port = mos7840_get_port_private(port);
	if (mos7840_port == NULL)
		return 0;

	/* Sum the sizes of all write urbs still in flight. */
	spin_lock_irqsave(&mos7840_port->pool_lock, flags);
	for (i = 0; i < NUM_URBS; ++i) {
		if (mos7840_port->busy[i]) {
			struct urb *urb = mos7840_port->write_urb_pool[i];
			chars += urb->transfer_buffer_length;
		}
	}
	spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
	dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars);
	return chars;
}

/*****************************************************************************
 * mos7840_close
 *	this function is called by the tty driver when a port is closed
 *****************************************************************************/
static void mos7840_close(struct usb_serial_port *port)
{
	struct usb_serial *serial;
	struct moschip_port *mos7840_port;
	struct moschip_port *port0;
	int j;
	__u16 Data;

	if (mos7840_port_paranoia_check(port, __func__))
		return;

	serial = mos7840_get_usb_serial(port, __func__);
	if (!serial)
		return;

	mos7840_port = mos7840_get_port_private(port);
	port0 = mos7840_get_port_private(serial->port[0]);

	if (mos7840_port == NULL || port0 == NULL)
		return;

	for (j = 0; j < NUM_URBS; ++j)
		usb_kill_urb(mos7840_port->write_urb_pool[j]);

	/* Freeing Write URBs */
	for (j = 0; j < NUM_URBS; ++j) {
		if (mos7840_port->write_urb_pool[j]) {
			kfree(mos7840_port->write_urb_pool[j]->transfer_buffer);
			usb_free_urb(mos7840_port->write_urb_pool[j]);
		}
	}

	usb_kill_urb(mos7840_port->write_urb);
	usb_kill_urb(mos7840_port->read_urb);
	mos7840_port->read_urb_busy = false;

	/* Last close stops the shared interrupt urb on port 0. */
	port0->open_ports--;
	dev_dbg(&port->dev, "%s in close%d\n", __func__, port0->open_ports);
	if (port0->open_ports == 0) {
		if (serial->port[0]->interrupt_in_urb) {
			dev_dbg(&port->dev, "Shutdown interrupt_in_urb\n");
			usb_kill_urb(serial->port[0]->interrupt_in_urb);
		}
	}

	if (mos7840_port->write_urb) {
		/* if this urb had a transfer buffer already (old tx) free it */
		kfree(mos7840_port->write_urb->transfer_buffer);
		usb_free_urb(mos7840_port->write_urb);
	}

	/* Drop modem-control lines and disable interrupts on the device. */
	Data = 0x0;
	mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);

	Data = 0x00;
	mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);

	mos7840_port->open = 0;
}

/*****************************************************************************
 * mos7840_break
 *	this function sends a break to the port
 *****************************************************************************/
static void mos7840_break(struct tty_struct *tty, int break_state)
{
	struct usb_serial_port *port = tty->driver_data;
	unsigned char data;
	struct usb_serial *serial;
	struct moschip_port *mos7840_port;

	if (mos7840_port_paranoia_check(port, __func__))
		return;

	serial = mos7840_get_usb_serial(port, __func__);
	if (!serial)
		return;

	mos7840_port = mos7840_get_port_private(port);

	if (mos7840_port == NULL)
		return;

	/* break_state == -1 means "start break" per the tty break contract. */
	if (break_state == -1)
		data = mos7840_port->shadowLCR | LCR_SET_BREAK;
	else
		data = mos7840_port->shadowLCR & ~LCR_SET_BREAK;

	/* FIXME: no locking on shadowLCR anywhere in driver */
	mos7840_port->shadowLCR = data;
	dev_dbg(&port->dev, "%s mos7840_port->shadowLCR is %x\n", __func__, mos7840_port->shadowLCR);
	mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, mos7840_port->shadowLCR);
}

/*****************************************************************************
 * mos7840_write_room
 *	this function is called by the tty driver when it wants to know how many
 *	bytes of data we can accept for a specific port.
 *	If successful, we return the amount of room that we have for this port
 *	Otherwise we return a negative error number.
 *****************************************************************************/
static int mos7840_write_room(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	int i;
	int room = 0;
	unsigned long flags;
	struct moschip_port *mos7840_port;

	if (mos7840_port_paranoia_check(port, __func__))
		return -1;

	mos7840_port = mos7840_get_port_private(port);
	if (mos7840_port == NULL)
		return -1;

	/* Each idle pool slot can take one full transfer buffer. */
	spin_lock_irqsave(&mos7840_port->pool_lock, flags);
	for (i = 0; i < NUM_URBS; ++i) {
		if (!mos7840_port->busy[i])
			room += URB_TRANSFER_BUFFER_SIZE;
	}
	spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);

	room = (room == 0) ? 0 : room - URB_TRANSFER_BUFFER_SIZE + 1;
	dev_dbg(&mos7840_port->port->dev, "%s - returns %d\n", __func__, room);
	return room;

}

/*****************************************************************************
 * mos7840_write
 *	this function is called by the tty driver when data should be written to
 *	the port.
 *	If successful, we return the number of bytes written, otherwise we
 *	return a negative error number.
*****************************************************************************/ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *data, int count) { int status; int i; int bytes_sent = 0; int transfer_size; unsigned long flags; struct moschip_port *mos7840_port; struct usb_serial *serial; struct urb *urb; /* __u16 Data; */ const unsigned char *current_position = data; unsigned char *data1; #ifdef NOTMOS7840 Data = 0x00; status = mos7840_get_uart_reg(port, LINE_CONTROL_REGISTER, &Data); mos7840_port->shadowLCR = Data; dev_dbg(&port->dev, "%s: LINE_CONTROL_REGISTER is %x\n", __func__, Data); dev_dbg(&port->dev, "%s: mos7840_port->shadowLCR is %x\n", __func__, mos7840_port->shadowLCR); /* Data = 0x03; */ /* status = mos7840_set_uart_reg(port,LINE_CONTROL_REGISTER,Data); */ /* mos7840_port->shadowLCR=Data;//Need to add later */ Data |= SERIAL_LCR_DLAB; /* data latch enable in LCR 0x80 */ status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data); /* Data = 0x0c; */ /* status = mos7840_set_uart_reg(port,DIVISOR_LATCH_LSB,Data); */ Data = 0x00; status = mos7840_get_uart_reg(port, DIVISOR_LATCH_LSB, &Data); dev_dbg(&port->dev, "%s: DLL value is %x\n", __func__, Data); Data = 0x0; status = mos7840_get_uart_reg(port, DIVISOR_LATCH_MSB, &Data); dev_dbg(&port->dev, "%s: DLM value is %x\n", __func__, Data); Data = Data & ~SERIAL_LCR_DLAB; dev_dbg(&port->dev, "%s: mos7840_port->shadowLCR is %x\n", __func__, mos7840_port->shadowLCR); status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data); #endif if (mos7840_port_paranoia_check(port, __func__)) return -1; serial = port->serial; if (mos7840_serial_paranoia_check(serial, __func__)) return -1; mos7840_port = mos7840_get_port_private(port); if (mos7840_port == NULL) return -1; /* try to find a free urb in the list */ urb = NULL; spin_lock_irqsave(&mos7840_port->pool_lock, flags); for (i = 0; i < NUM_URBS; ++i) { if (!mos7840_port->busy[i]) { mos7840_port->busy[i] = 1; 
urb = mos7840_port->write_urb_pool[i]; dev_dbg(&port->dev, "URB:%d\n", i); break; } } spin_unlock_irqrestore(&mos7840_port->pool_lock, flags); if (urb == NULL) { dev_dbg(&port->dev, "%s - no more free urbs\n", __func__); goto exit; } if (urb->transfer_buffer == NULL) { urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL); if (!urb->transfer_buffer) goto exit; } transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE); memcpy(urb->transfer_buffer, current_position, transfer_size); /* fill urb with data and submit */ if ((serial->num_ports == 2) && (((__u16)port->port_number % 2) != 0)) { usb_fill_bulk_urb(urb, serial->dev, usb_sndbulkpipe(serial->dev, (port->bulk_out_endpointAddress) + 2), urb->transfer_buffer, transfer_size, mos7840_bulk_out_data_callback, mos7840_port); } else { usb_fill_bulk_urb(urb, serial->dev, usb_sndbulkpipe(serial->dev, port->bulk_out_endpointAddress), urb->transfer_buffer, transfer_size, mos7840_bulk_out_data_callback, mos7840_port); } data1 = urb->transfer_buffer; dev_dbg(&port->dev, "bulkout endpoint is %d\n", port->bulk_out_endpointAddress); if (mos7840_port->has_led) mos7840_led_activity(port); /* send it down the pipe */ status = usb_submit_urb(urb, GFP_ATOMIC); if (status) { mos7840_port->busy[i] = 0; dev_err_console(port, "%s - usb_submit_urb(write bulk) failed " "with status = %d\n", __func__, status); bytes_sent = status; goto exit; } bytes_sent = transfer_size; port->icount.tx += transfer_size; dev_dbg(&port->dev, "icount.tx is %d:\n", port->icount.tx); exit: return bytes_sent; } /***************************************************************************** * mos7840_throttle * this function is called by the tty driver when it wants to stop the data * being read from the port. 
 *****************************************************************************/
static void mos7840_throttle(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct moschip_port *mos7840_port;
	int status;

	if (mos7840_port_paranoia_check(port, __func__))
		return;

	mos7840_port = mos7840_get_port_private(port);

	if (mos7840_port == NULL)
		return;

	if (!mos7840_port->open) {
		dev_dbg(&port->dev, "%s", "port not opened\n");
		return;
	}

	/* if we are implementing XON/XOFF, send the stop character */
	if (I_IXOFF(tty)) {
		unsigned char stop_char = STOP_CHAR(tty);
		status = mos7840_write(tty, port, &stop_char, 1);
		if (status <= 0)
			return;
	}
	/* if we are implementing RTS/CTS, toggle that line */
	if (tty->termios.c_cflag & CRTSCTS) {
		mos7840_port->shadowMCR &= ~MCR_RTS;
		status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER,
					      mos7840_port->shadowMCR);
		if (status < 0)
			return;
	}
}

/*****************************************************************************
 * mos7840_unthrottle
 *	this function is called by the tty driver when it wants to resume
 *	the data being read from the port (called after mos7840_throttle is
 *	called)
 *****************************************************************************/
static void mos7840_unthrottle(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	int status;
	struct moschip_port *mos7840_port = mos7840_get_port_private(port);

	if (mos7840_port_paranoia_check(port, __func__))
		return;

	if (mos7840_port == NULL)
		return;

	if (!mos7840_port->open) {
		dev_dbg(&port->dev, "%s - port not opened\n", __func__);
		return;
	}

	/* if we are implementing XON/XOFF, send the start character */
	if (I_IXOFF(tty)) {
		unsigned char start_char = START_CHAR(tty);
		status = mos7840_write(tty, port, &start_char, 1);
		if (status <= 0)
			return;
	}

	/* if we are implementing RTS/CTS, toggle that line */
	if (tty->termios.c_cflag & CRTSCTS) {
		mos7840_port->shadowMCR |= MCR_RTS;
		status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER,
					      mos7840_port->shadowMCR);
		if (status < 0)
			return;
	}
}

/*
 * Report the state of the modem-control and modem-status lines as a
 * TIOCM_* bitmask (tty TIOCMGET).
 */
static int mos7840_tiocmget(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct moschip_port *mos7840_port;
	unsigned int result;
	__u16 msr;
	__u16 mcr;
	int status;

	mos7840_port = mos7840_get_port_private(port);

	if (mos7840_port == NULL)
		return -ENODEV;

	/* NOTE(review): these checks compare against 1, i.e. the expected
	 * positive transfer length from mos7840_get_uart_reg(); negative
	 * errors also fail the test and yield -EIO. */
	status = mos7840_get_uart_reg(port, MODEM_STATUS_REGISTER, &msr);
	if (status != 1)
		return -EIO;
	status = mos7840_get_uart_reg(port, MODEM_CONTROL_REGISTER, &mcr);
	if (status != 1)
		return -EIO;
	result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0)
	    | ((mcr & MCR_RTS) ? TIOCM_RTS : 0)
	    | ((mcr & MCR_LOOPBACK) ? TIOCM_LOOP : 0)
	    | ((msr & MOS7840_MSR_CTS) ? TIOCM_CTS : 0)
	    | ((msr & MOS7840_MSR_CD) ? TIOCM_CAR : 0)
	    | ((msr & MOS7840_MSR_RI) ? TIOCM_RI : 0)
	    | ((msr & MOS7840_MSR_DSR) ? TIOCM_DSR : 0);

	dev_dbg(&port->dev, "%s - 0x%04X\n", __func__, result);

	return result;
}

/*
 * Set/clear modem-control lines from a TIOCM_* bitmask (tty TIOCMSET),
 * updating the shadow MCR and writing it to the device.
 */
static int mos7840_tiocmset(struct tty_struct *tty,
			    unsigned int set, unsigned int clear)
{
	struct usb_serial_port *port = tty->driver_data;
	struct moschip_port *mos7840_port;
	unsigned int mcr;
	int status;

	mos7840_port = mos7840_get_port_private(port);

	if (mos7840_port == NULL)
		return -ENODEV;

	/* FIXME: What locks the port registers ? */
	mcr = mos7840_port->shadowMCR;
	if (clear & TIOCM_RTS)
		mcr &= ~MCR_RTS;
	if (clear & TIOCM_DTR)
		mcr &= ~MCR_DTR;
	if (clear & TIOCM_LOOP)
		mcr &= ~MCR_LOOPBACK;

	if (set & TIOCM_RTS)
		mcr |= MCR_RTS;
	if (set & TIOCM_DTR)
		mcr |= MCR_DTR;
	if (set & TIOCM_LOOP)
		mcr |= MCR_LOOPBACK;

	mos7840_port->shadowMCR = mcr;

	status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, mcr);
	if (status < 0) {
		dev_dbg(&port->dev, "setting MODEM_CONTROL_REGISTER Failed\n");
		return status;
	}

	return 0;
}

/*****************************************************************************
 * mos7840_calc_baud_rate_divisor
 *	this function calculates the proper baud rate divisor for the specified
 *	baud rate.
*****************************************************************************/ static int mos7840_calc_baud_rate_divisor(struct usb_serial_port *port, int baudRate, int *divisor, __u16 *clk_sel_val) { dev_dbg(&port->dev, "%s - %d\n", __func__, baudRate); if (baudRate <= 115200) { *divisor = 115200 / baudRate; *clk_sel_val = 0x0; } if ((baudRate > 115200) && (baudRate <= 230400)) { *divisor = 230400 / baudRate; *clk_sel_val = 0x10; } else if ((baudRate > 230400) && (baudRate <= 403200)) { *divisor = 403200 / baudRate; *clk_sel_val = 0x20; } else if ((baudRate > 403200) && (baudRate <= 460800)) { *divisor = 460800 / baudRate; *clk_sel_val = 0x30; } else if ((baudRate > 460800) && (baudRate <= 806400)) { *divisor = 806400 / baudRate; *clk_sel_val = 0x40; } else if ((baudRate > 806400) && (baudRate <= 921600)) { *divisor = 921600 / baudRate; *clk_sel_val = 0x50; } else if ((baudRate > 921600) && (baudRate <= 1572864)) { *divisor = 1572864 / baudRate; *clk_sel_val = 0x60; } else if ((baudRate > 1572864) && (baudRate <= 3145728)) { *divisor = 3145728 / baudRate; *clk_sel_val = 0x70; } return 0; #ifdef NOTMCS7840 for (i = 0; i < ARRAY_SIZE(mos7840_divisor_table); i++) { if (mos7840_divisor_table[i].BaudRate == baudrate) { *divisor = mos7840_divisor_table[i].Divisor; return 0; } } /* After trying for all the standard baud rates * * Try calculating the divisor for this baud rate */ if (baudrate > 75 && baudrate < 230400) { /* get the divisor */ custom = (__u16) (230400L / baudrate); /* Check for round off */ round1 = (__u16) (2304000L / baudrate); round = (__u16) (round1 - (custom * 10)); if (round > 4) custom++; *divisor = custom; dev_dbg(&port->dev, " Baud %d = %d\n", baudrate, custom); return 0; } dev_dbg(&port->dev, "%s", " Baud calculation Failed...\n"); return -1; #endif } /***************************************************************************** * mos7840_send_cmd_write_baud_rate * this function sends the proper command to change the baud rate of the * specified 
port. *****************************************************************************/ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port, int baudRate) { int divisor = 0; int status; __u16 Data; unsigned char number; __u16 clk_sel_val; struct usb_serial_port *port; if (mos7840_port == NULL) return -1; port = mos7840_port->port; if (mos7840_port_paranoia_check(port, __func__)) return -1; if (mos7840_serial_paranoia_check(port->serial, __func__)) return -1; number = mos7840_port->port->port_number; dev_dbg(&port->dev, "%s - baud = %d\n", __func__, baudRate); /* reset clk_uart_sel in spregOffset */ if (baudRate > 115200) { #ifdef HW_flow_control /* NOTE: need to see the pther register to modify */ /* setting h/w flow control bit to 1 */ Data = 0x2b; mos7840_port->shadowMCR = Data; status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); if (status < 0) { dev_dbg(&port->dev, "Writing spreg failed in set_serial_baud\n"); return -1; } #endif } else { #ifdef HW_flow_control /* setting h/w flow control bit to 0 */ Data = 0xb; mos7840_port->shadowMCR = Data; status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); if (status < 0) { dev_dbg(&port->dev, "Writing spreg failed in set_serial_baud\n"); return -1; } #endif } if (1) { /* baudRate <= 115200) */ clk_sel_val = 0x0; Data = 0x0; status = mos7840_calc_baud_rate_divisor(port, baudRate, &divisor, &clk_sel_val); status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data); if (status < 0) { dev_dbg(&port->dev, "reading spreg failed in set_serial_baud\n"); return -1; } Data = (Data & 0x8f) | clk_sel_val; status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data); if (status < 0) { dev_dbg(&port->dev, "Writing spreg failed in set_serial_baud\n"); return -1; } /* Calculate the Divisor */ if (status) { dev_err(&port->dev, "%s - bad baud rate\n", __func__); return status; } /* Enable access to divisor latch */ Data = mos7840_port->shadowLCR | SERIAL_LCR_DLAB; 
mos7840_port->shadowLCR = Data; mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data); /* Write the divisor */ Data = (unsigned char)(divisor & 0xff); dev_dbg(&port->dev, "set_serial_baud Value to write DLL is %x\n", Data); mos7840_set_uart_reg(port, DIVISOR_LATCH_LSB, Data); Data = (unsigned char)((divisor & 0xff00) >> 8); dev_dbg(&port->dev, "set_serial_baud Value to write DLM is %x\n", Data); mos7840_set_uart_reg(port, DIVISOR_LATCH_MSB, Data); /* Disable access to divisor latch */ Data = mos7840_port->shadowLCR & ~SERIAL_LCR_DLAB; mos7840_port->shadowLCR = Data; mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data); } return status; } /***************************************************************************** * mos7840_change_port_settings * This routine is called to set the UART on the device to match * the specified new settings. *****************************************************************************/ static void mos7840_change_port_settings(struct tty_struct *tty, struct moschip_port *mos7840_port, struct ktermios *old_termios) { int baud; unsigned cflag; unsigned iflag; __u8 lData; __u8 lParity; __u8 lStop; int status; __u16 Data; struct usb_serial_port *port; struct usb_serial *serial; if (mos7840_port == NULL) return; port = mos7840_port->port; if (mos7840_port_paranoia_check(port, __func__)) return; if (mos7840_serial_paranoia_check(port->serial, __func__)) return; serial = port->serial; if (!mos7840_port->open) { dev_dbg(&port->dev, "%s - port not opened\n", __func__); return; } lData = LCR_BITS_8; lStop = LCR_STOP_1; lParity = LCR_PAR_NONE; cflag = tty->termios.c_cflag; iflag = tty->termios.c_iflag; /* Change the number of bits */ switch (cflag & CSIZE) { case CS5: lData = LCR_BITS_5; break; case CS6: lData = LCR_BITS_6; break; case CS7: lData = LCR_BITS_7; break; default: case CS8: lData = LCR_BITS_8; break; } /* Change the Parity bit */ if (cflag & PARENB) { if (cflag & PARODD) { lParity = LCR_PAR_ODD; dev_dbg(&port->dev, "%s - parity = 
odd\n", __func__); } else { lParity = LCR_PAR_EVEN; dev_dbg(&port->dev, "%s - parity = even\n", __func__); } } else { dev_dbg(&port->dev, "%s - parity = none\n", __func__); } if (cflag & CMSPAR) lParity = lParity | 0x20; /* Change the Stop bit */ if (cflag & CSTOPB) { lStop = LCR_STOP_2; dev_dbg(&port->dev, "%s - stop bits = 2\n", __func__); } else { lStop = LCR_STOP_1; dev_dbg(&port->dev, "%s - stop bits = 1\n", __func__); } /* Update the LCR with the correct value */ mos7840_port->shadowLCR &= ~(LCR_BITS_MASK | LCR_STOP_MASK | LCR_PAR_MASK); mos7840_port->shadowLCR |= (lData | lParity | lStop); dev_dbg(&port->dev, "%s - mos7840_port->shadowLCR is %x\n", __func__, mos7840_port->shadowLCR); /* Disable Interrupts */ Data = 0x00; mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); Data = 0x00; mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); Data = 0xcf; mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); /* Send the updated LCR value to the mos7840 */ Data = mos7840_port->shadowLCR; mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data); Data = 0x00b; mos7840_port->shadowMCR = Data; mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); Data = 0x00b; mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); /* set up the MCR register and send it to the mos7840 */ mos7840_port->shadowMCR = MCR_MASTER_IE; if (cflag & CBAUD) mos7840_port->shadowMCR |= (MCR_DTR | MCR_RTS); if (cflag & CRTSCTS) mos7840_port->shadowMCR |= (MCR_XON_ANY); else mos7840_port->shadowMCR &= ~(MCR_XON_ANY); Data = mos7840_port->shadowMCR; mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); /* Determine divisor based on baud rate */ baud = tty_get_baud_rate(tty); if (!baud) { /* pick a default, any default... 
*/ dev_dbg(&port->dev, "%s", "Picked default baud...\n"); baud = 9600; } dev_dbg(&port->dev, "%s - baud rate = %d\n", __func__, baud); status = mos7840_send_cmd_write_baud_rate(mos7840_port, baud); /* Enable Interrupts */ Data = 0x0c; mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); if (mos7840_port->read_urb_busy == false) { mos7840_port->read_urb_busy = true; status = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC); if (status) { dev_dbg(&port->dev, "usb_submit_urb(read bulk) failed, status = %d\n", status); mos7840_port->read_urb_busy = false; } } dev_dbg(&port->dev, "%s - mos7840_port->shadowLCR is End %x\n", __func__, mos7840_port->shadowLCR); } /***************************************************************************** * mos7840_set_termios * this function is called by the tty driver when it wants to change * the termios structure *****************************************************************************/ static void mos7840_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old_termios) { int status; unsigned int cflag; struct usb_serial *serial; struct moschip_port *mos7840_port; if (mos7840_port_paranoia_check(port, __func__)) return; serial = port->serial; if (mos7840_serial_paranoia_check(serial, __func__)) return; mos7840_port = mos7840_get_port_private(port); if (mos7840_port == NULL) return; if (!mos7840_port->open) { dev_dbg(&port->dev, "%s - port not opened\n", __func__); return; } dev_dbg(&port->dev, "%s", "setting termios - \n"); cflag = tty->termios.c_cflag; dev_dbg(&port->dev, "%s - clfag %08x iflag %08x\n", __func__, tty->termios.c_cflag, RELEVANT_IFLAG(tty->termios.c_iflag)); dev_dbg(&port->dev, "%s - old clfag %08x old iflag %08x\n", __func__, old_termios->c_cflag, RELEVANT_IFLAG(old_termios->c_iflag)); /* change the port settings to the new ones specified */ mos7840_change_port_settings(tty, mos7840_port, old_termios); if (!mos7840_port->read_urb) { dev_dbg(&port->dev, "%s", "URB KILLED 
!!!!!\n"); return; } if (mos7840_port->read_urb_busy == false) { mos7840_port->read_urb_busy = true; status = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC); if (status) { dev_dbg(&port->dev, "usb_submit_urb(read bulk) failed, status = %d\n", status); mos7840_port->read_urb_busy = false; } } } /***************************************************************************** * mos7840_get_lsr_info - get line status register info * * Purpose: Let user call ioctl() to get info when the UART physically * is emptied. On bus types like RS485, the transmitter must * release the bus after transmitting. This must be done when * the transmit shift register is empty, not be done when the * transmit holding register is empty. This functionality * allows an RS485 driver to be written in user space. *****************************************************************************/ static int mos7840_get_lsr_info(struct tty_struct *tty, unsigned int __user *value) { int count; unsigned int result = 0; count = mos7840_chars_in_buffer(tty); if (count == 0) result = TIOCSER_TEMT; if (copy_to_user(value, &result, sizeof(int))) return -EFAULT; return 0; } /***************************************************************************** * mos7840_get_serial_info * function to get information about serial port *****************************************************************************/ static int mos7840_get_serial_info(struct moschip_port *mos7840_port, struct serial_struct __user *retinfo) { struct serial_struct tmp; if (mos7840_port == NULL) return -1; if (!retinfo) return -EFAULT; memset(&tmp, 0, sizeof(tmp)); tmp.type = PORT_16550A; tmp.line = mos7840_port->port->minor; tmp.port = mos7840_port->port->port_number; tmp.irq = 0; tmp.flags = ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ; tmp.xmit_fifo_size = NUM_URBS * URB_TRANSFER_BUFFER_SIZE; tmp.baud_base = 9600; tmp.close_delay = 5 * HZ; tmp.closing_wait = 30 * HZ; if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) return -EFAULT; return 0; } 
/***************************************************************************** * SerialIoctl * this function handles any ioctl calls to the driver *****************************************************************************/ static int mos7840_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct usb_serial_port *port = tty->driver_data; void __user *argp = (void __user *)arg; struct moschip_port *mos7840_port; if (mos7840_port_paranoia_check(port, __func__)) return -1; mos7840_port = mos7840_get_port_private(port); if (mos7840_port == NULL) return -1; switch (cmd) { /* return number of bytes available */ case TIOCSERGETLSR: dev_dbg(&port->dev, "%s TIOCSERGETLSR\n", __func__); return mos7840_get_lsr_info(tty, argp); case TIOCGSERIAL: dev_dbg(&port->dev, "%s TIOCGSERIAL\n", __func__); return mos7840_get_serial_info(mos7840_port, argp); case TIOCSSERIAL: dev_dbg(&port->dev, "%s TIOCSSERIAL\n", __func__); break; default: break; } return -ENOIOCTLCMD; } static int mos7810_check(struct usb_serial *serial) { int i, pass_count = 0; u8 *buf; __u16 data = 0, mcr_data = 0; __u16 test_pattern = 0x55AA; int res; buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL); if (!buf) return 0; /* failed to identify 7810 */ /* Store MCR setting */ res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), MCS_RDREQ, MCS_RD_RTYPE, 0x0300, MODEM_CONTROL_REGISTER, buf, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); if (res == VENDOR_READ_LENGTH) mcr_data = *buf; for (i = 0; i < 16; i++) { /* Send the 1-bit test pattern out to MCS7810 test pin */ usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), MCS_WRREQ, MCS_WR_RTYPE, (0x0300 | (((test_pattern >> i) & 0x0001) << 1)), MODEM_CONTROL_REGISTER, NULL, 0, MOS_WDR_TIMEOUT); /* Read the test pattern back */ res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, buf, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); if (res == VENDOR_READ_LENGTH) data = *buf; /* If this is a 
MCS7810 device, both test patterns must match */ if (((test_pattern >> i) ^ (~data >> 1)) & 0x0001) break; pass_count++; } /* Restore MCR setting */ usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), MCS_WRREQ, MCS_WR_RTYPE, 0x0300 | mcr_data, MODEM_CONTROL_REGISTER, NULL, 0, MOS_WDR_TIMEOUT); kfree(buf); if (pass_count == 16) return 1; return 0; } static int mos7840_probe(struct usb_serial *serial, const struct usb_device_id *id) { u16 product = le16_to_cpu(serial->dev->descriptor.idProduct); u8 *buf; int device_type; if (product == MOSCHIP_DEVICE_ID_7810 || product == MOSCHIP_DEVICE_ID_7820) { device_type = product; goto out; } buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL); if (!buf) return -ENOMEM; usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, buf, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); /* For a MCS7840 device GPIO0 must be set to 1 */ if (buf[0] & 0x01) device_type = MOSCHIP_DEVICE_ID_7840; else if (mos7810_check(serial)) device_type = MOSCHIP_DEVICE_ID_7810; else device_type = MOSCHIP_DEVICE_ID_7820; kfree(buf); out: usb_set_serial_data(serial, (void *)(unsigned long)device_type); return 0; } static int mos7840_calc_num_ports(struct usb_serial *serial) { int device_type = (unsigned long)usb_get_serial_data(serial); int mos7840_num_ports; mos7840_num_ports = (device_type >> 4) & 0x000F; return mos7840_num_ports; } static int mos7840_port_probe(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; int device_type = (unsigned long)usb_get_serial_data(serial); struct moschip_port *mos7840_port; int status; int pnum; __u16 Data; /* we set up the pointers to the endpoints in the mos7840_open * * function, as the structures aren't created yet. 
*/ pnum = port->port_number; dev_dbg(&port->dev, "mos7840_startup: configuring port %d\n", pnum); mos7840_port = kzalloc(sizeof(struct moschip_port), GFP_KERNEL); if (!mos7840_port) return -ENOMEM; /* Initialize all port interrupt end point to port 0 int * endpoint. Our device has only one interrupt end point * common to all port */ mos7840_port->port = port; mos7840_set_port_private(port, mos7840_port); spin_lock_init(&mos7840_port->pool_lock); /* minor is not initialised until later by * usb-serial.c:get_free_serial() and cannot therefore be used * to index device instances */ mos7840_port->port_num = pnum + 1; dev_dbg(&port->dev, "port->minor = %d\n", port->minor); dev_dbg(&port->dev, "mos7840_port->port_num = %d\n", mos7840_port->port_num); if (mos7840_port->port_num == 1) { mos7840_port->SpRegOffset = 0x0; mos7840_port->ControlRegOffset = 0x1; mos7840_port->DcrRegOffset = 0x4; } else if ((mos7840_port->port_num == 2) && (serial->num_ports == 4)) { mos7840_port->SpRegOffset = 0x8; mos7840_port->ControlRegOffset = 0x9; mos7840_port->DcrRegOffset = 0x16; } else if ((mos7840_port->port_num == 2) && (serial->num_ports == 2)) { mos7840_port->SpRegOffset = 0xa; mos7840_port->ControlRegOffset = 0xb; mos7840_port->DcrRegOffset = 0x19; } else if ((mos7840_port->port_num == 3) && (serial->num_ports == 4)) { mos7840_port->SpRegOffset = 0xa; mos7840_port->ControlRegOffset = 0xb; mos7840_port->DcrRegOffset = 0x19; } else if ((mos7840_port->port_num == 4) && (serial->num_ports == 4)) { mos7840_port->SpRegOffset = 0xc; mos7840_port->ControlRegOffset = 0xd; mos7840_port->DcrRegOffset = 0x1c; } mos7840_dump_serial_port(port, mos7840_port); mos7840_set_port_private(port, mos7840_port); /* enable rx_disable bit in control register */ status = mos7840_get_reg_sync(port, mos7840_port->ControlRegOffset, &Data); if (status < 0) { dev_dbg(&port->dev, "Reading ControlReg failed status-0x%x\n", status); goto out; } else dev_dbg(&port->dev, "ControlReg Reading success val is %x, 
status%d\n", Data, status); Data |= 0x08; /* setting driver done bit */ Data |= 0x04; /* sp1_bit to have cts change reflect in modem status reg */ /* Data |= 0x20; //rx_disable bit */ status = mos7840_set_reg_sync(port, mos7840_port->ControlRegOffset, Data); if (status < 0) { dev_dbg(&port->dev, "Writing ControlReg failed(rx_disable) status-0x%x\n", status); goto out; } else dev_dbg(&port->dev, "ControlReg Writing success(rx_disable) status%d\n", status); /* Write default values in DCR (i.e 0x01 in DCR0, 0x05 in DCR2 and 0x24 in DCR3 */ Data = 0x01; status = mos7840_set_reg_sync(port, (__u16) (mos7840_port->DcrRegOffset + 0), Data); if (status < 0) { dev_dbg(&port->dev, "Writing DCR0 failed status-0x%x\n", status); goto out; } else dev_dbg(&port->dev, "DCR0 Writing success status%d\n", status); Data = 0x05; status = mos7840_set_reg_sync(port, (__u16) (mos7840_port->DcrRegOffset + 1), Data); if (status < 0) { dev_dbg(&port->dev, "Writing DCR1 failed status-0x%x\n", status); goto out; } else dev_dbg(&port->dev, "DCR1 Writing success status%d\n", status); Data = 0x24; status = mos7840_set_reg_sync(port, (__u16) (mos7840_port->DcrRegOffset + 2), Data); if (status < 0) { dev_dbg(&port->dev, "Writing DCR2 failed status-0x%x\n", status); goto out; } else dev_dbg(&port->dev, "DCR2 Writing success status%d\n", status); /* write values in clkstart0x0 and clkmulti 0x20 */ Data = 0x0; status = mos7840_set_reg_sync(port, CLK_START_VALUE_REGISTER, Data); if (status < 0) { dev_dbg(&port->dev, "Writing CLK_START_VALUE_REGISTER failed status-0x%x\n", status); goto out; } else dev_dbg(&port->dev, "CLK_START_VALUE_REGISTER Writing success status%d\n", status); Data = 0x20; status = mos7840_set_reg_sync(port, CLK_MULTI_REGISTER, Data); if (status < 0) { dev_dbg(&port->dev, "Writing CLK_MULTI_REGISTER failed status-0x%x\n", status); goto error; } else dev_dbg(&port->dev, "CLK_MULTI_REGISTER Writing success status%d\n", status); /* write value 0x0 to scratchpad register */ Data = 0x00; 
status = mos7840_set_uart_reg(port, SCRATCH_PAD_REGISTER, Data); if (status < 0) { dev_dbg(&port->dev, "Writing SCRATCH_PAD_REGISTER failed status-0x%x\n", status); goto out; } else dev_dbg(&port->dev, "SCRATCH_PAD_REGISTER Writing success status%d\n", status); /* Zero Length flag register */ if ((mos7840_port->port_num != 1) && (serial->num_ports == 2)) { Data = 0xff; status = mos7840_set_reg_sync(port, (__u16) (ZLP_REG1 + ((__u16)mos7840_port->port_num)), Data); dev_dbg(&port->dev, "ZLIP offset %x\n", (__u16)(ZLP_REG1 + ((__u16) mos7840_port->port_num))); if (status < 0) { dev_dbg(&port->dev, "Writing ZLP_REG%d failed status-0x%x\n", pnum + 2, status); goto out; } else dev_dbg(&port->dev, "ZLP_REG%d Writing success status%d\n", pnum + 2, status); } else { Data = 0xff; status = mos7840_set_reg_sync(port, (__u16) (ZLP_REG1 + ((__u16)mos7840_port->port_num) - 0x1), Data); dev_dbg(&port->dev, "ZLIP offset %x\n", (__u16)(ZLP_REG1 + ((__u16) mos7840_port->port_num) - 0x1)); if (status < 0) { dev_dbg(&port->dev, "Writing ZLP_REG%d failed status-0x%x\n", pnum + 1, status); goto out; } else dev_dbg(&port->dev, "ZLP_REG%d Writing success status%d\n", pnum + 1, status); } mos7840_port->control_urb = usb_alloc_urb(0, GFP_KERNEL); mos7840_port->ctrl_buf = kmalloc(16, GFP_KERNEL); mos7840_port->dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); if (!mos7840_port->control_urb || !mos7840_port->ctrl_buf || !mos7840_port->dr) { status = -ENOMEM; goto error; } mos7840_port->has_led = false; /* Initialize LED timers */ if (device_type == MOSCHIP_DEVICE_ID_7810) { mos7840_port->has_led = true; mos7840_port->led_urb = usb_alloc_urb(0, GFP_KERNEL); mos7840_port->led_dr = kmalloc(sizeof(*mos7840_port->led_dr), GFP_KERNEL); if (!mos7840_port->led_urb || !mos7840_port->led_dr) { status = -ENOMEM; goto error; } init_timer(&mos7840_port->led_timer1); mos7840_port->led_timer1.function = mos7840_led_off; mos7840_port->led_timer1.expires = jiffies + msecs_to_jiffies(LED_ON_MS); 
mos7840_port->led_timer1.data = (unsigned long)mos7840_port; init_timer(&mos7840_port->led_timer2); mos7840_port->led_timer2.function = mos7840_led_flag_off; mos7840_port->led_timer2.expires = jiffies + msecs_to_jiffies(LED_OFF_MS); mos7840_port->led_timer2.data = (unsigned long)mos7840_port; /* Turn off LED */ mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0300); } out: if (pnum == serial->num_ports - 1) { /* Zero Length flag enable */ Data = 0x0f; status = mos7840_set_reg_sync(serial->port[0], ZLP_REG5, Data); if (status < 0) { dev_dbg(&port->dev, "Writing ZLP_REG5 failed status-0x%x\n", status); goto error; } else dev_dbg(&port->dev, "ZLP_REG5 Writing success status%d\n", status); /* setting configuration feature to one */ usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 0x03, 0x00, 0x01, 0x00, NULL, 0x00, MOS_WDR_TIMEOUT); } return 0; error: kfree(mos7840_port->led_dr); usb_free_urb(mos7840_port->led_urb); kfree(mos7840_port->dr); kfree(mos7840_port->ctrl_buf); usb_free_urb(mos7840_port->control_urb); kfree(mos7840_port); return status; } static int mos7840_port_remove(struct usb_serial_port *port) { struct moschip_port *mos7840_port; mos7840_port = mos7840_get_port_private(port); if (mos7840_port->has_led) { /* Turn off LED */ mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0300); del_timer_sync(&mos7840_port->led_timer1); del_timer_sync(&mos7840_port->led_timer2); usb_kill_urb(mos7840_port->led_urb); usb_free_urb(mos7840_port->led_urb); kfree(mos7840_port->led_dr); } usb_kill_urb(mos7840_port->control_urb); usb_free_urb(mos7840_port->control_urb); kfree(mos7840_port->ctrl_buf); kfree(mos7840_port->dr); kfree(mos7840_port); return 0; } static struct usb_serial_driver moschip7840_4port_device = { .driver = { .owner = THIS_MODULE, .name = "mos7840", }, .description = DRIVER_DESC, .id_table = id_table, .num_ports = 4, .open = mos7840_open, .close = mos7840_close, .write = mos7840_write, .write_room = mos7840_write_room, .chars_in_buffer = 
mos7840_chars_in_buffer, .throttle = mos7840_throttle, .unthrottle = mos7840_unthrottle, .calc_num_ports = mos7840_calc_num_ports, .probe = mos7840_probe, .ioctl = mos7840_ioctl, .set_termios = mos7840_set_termios, .break_ctl = mos7840_break, .tiocmget = mos7840_tiocmget, .tiocmset = mos7840_tiocmset, .tiocmiwait = usb_serial_generic_tiocmiwait, .get_icount = usb_serial_generic_get_icount, .port_probe = mos7840_port_probe, .port_remove = mos7840_port_remove, .read_bulk_callback = mos7840_bulk_in_callback, .read_int_callback = mos7840_interrupt_callback, }; static struct usb_serial_driver * const serial_drivers[] = { &moschip7840_4port_device, NULL }; module_usb_serial_driver(serial_drivers, id_table); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
gpl-2.0
MattCrystal/Nine
security/keys/permission.c
371
1768
/* Key permission checking * * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/security.h> #include "internal.h" int key_task_permission(const key_ref_t key_ref, const struct cred *cred, key_perm_t perm) { struct key *key; key_perm_t kperm; int ret; key = key_ref_to_ptr(key_ref); if (key->user->user_ns != cred->user->user_ns) goto use_other_perms; if (key->uid == cred->fsuid) { kperm = key->perm >> 16; goto use_these_perms; } if (key->gid != -1 && key->perm & KEY_GRP_ALL) { if (key->gid == cred->fsgid) { kperm = key->perm >> 8; goto use_these_perms; } ret = groups_search(cred->group_info, key->gid); if (ret) { kperm = key->perm >> 8; goto use_these_perms; } } use_other_perms: kperm = key->perm; use_these_perms: if (is_key_possessed(key_ref)) kperm |= key->perm >> 24; kperm = kperm & perm & KEY_ALL; if (kperm != perm) return -EACCES; return security_key_permission(key_ref, cred, perm); } EXPORT_SYMBOL(key_task_permission); int key_validate(struct key *key) { struct timespec now; int ret = 0; if (key) { ret = -EKEYREVOKED; if (test_bit(KEY_FLAG_REVOKED, &key->flags) || test_bit(KEY_FLAG_DEAD, &key->flags)) goto error; ret = 0; if (key->expiry) { now = current_kernel_time(); if (now.tv_sec >= key->expiry) ret = -EKEYEXPIRED; } } error: return ret; } EXPORT_SYMBOL(key_validate);
gpl-2.0
longqiany/linux
drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
371
2300
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include "priv.h" #include "chan.h" #include "nvsw.h" #include <nvif/ioctl.h> /******************************************************************************* * software context ******************************************************************************/ static const struct nvkm_sw_chan_func nv10_sw_chan = { }; static int nv10_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifo, const struct nvkm_oclass *oclass, struct nvkm_object **pobject) { struct nvkm_sw_chan *chan; if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL))) return -ENOMEM; *pobject = &chan->object; return nvkm_sw_chan_ctor(&nv10_sw_chan, sw, fifo, oclass, chan); } /******************************************************************************* * software engine/subdev functions ******************************************************************************/ static const struct nvkm_sw_func nv10_sw = { .chan_new = nv10_sw_chan_new, .sclass = { { nvkm_nvsw_new, { -1, -1, NVIF_IOCTL_NEW_V0_SW_NV10 } }, {} } }; int nv10_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw) { return nvkm_sw_new_(&nv10_sw, device, index, psw); }
gpl-2.0
htc-msm8960/android_kernel_htc_msm8930
net/sched/ematch.c
371
10692
/* * net/sched/ematch.c Extended Match API * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Thomas Graf <tgraf@suug.ch> * * ========================================================================== * * An extended match (ematch) is a small classification tool not worth * writing a full classifier for. Ematches can be interconnected to form * a logic expression and get attached to classifiers to extend their * functionatlity. * * The userspace part transforms the logic expressions into an array * consisting of multiple sequences of interconnected ematches separated * by markers. Precedence is implemented by a special ematch kind * referencing a sequence beyond the marker of the current sequence * causing the current position in the sequence to be pushed onto a stack * to allow the current position to be overwritten by the position referenced * in the special ematch. Matching continues in the new sequence until a * marker is reached causing the position to be restored from the stack. * * Example: * A AND (B1 OR B2) AND C AND D * * ------->-PUSH------- * -->-- / -->-- \ -->-- * / \ / / \ \ / \ * +-------+-------+-------+-------+-------+--------+ * | A AND | B AND | C AND | D END | B1 OR | B2 END | * +-------+-------+-------+-------+-------+--------+ * \ / * --------<-POP--------- * * where B is a virtual ematch referencing to sequence starting with B1. * * ========================================================================== * * How to write an ematch in 60 seconds * ------------------------------------ * * 1) Provide a matcher function: * static int my_match(struct sk_buff *skb, struct tcf_ematch *m, * struct tcf_pkt_info *info) * { * struct mydata *d = (struct mydata *) m->data; * * if (...matching goes here...) 
* return 1; * else * return 0; * } * * 2) Fill out a struct tcf_ematch_ops: * static struct tcf_ematch_ops my_ops = { * .kind = unique id, * .datalen = sizeof(struct mydata), * .match = my_match, * .owner = THIS_MODULE, * }; * * 3) Register/Unregister your ematch: * static int __init init_my_ematch(void) * { * return tcf_em_register(&my_ops); * } * * static void __exit exit_my_ematch(void) * { * tcf_em_unregister(&my_ops); * } * * module_init(init_my_ematch); * module_exit(exit_my_ematch); * * 4) By now you should have two more seconds left, barely enough to * open up a beer to watch the compilation going. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/rtnetlink.h> #include <linux/skbuff.h> #include <net/pkt_cls.h> static LIST_HEAD(ematch_ops); static DEFINE_RWLOCK(ematch_mod_lock); static struct tcf_ematch_ops *tcf_em_lookup(u16 kind) { struct tcf_ematch_ops *e = NULL; read_lock(&ematch_mod_lock); list_for_each_entry(e, &ematch_ops, link) { if (kind == e->kind) { if (!try_module_get(e->owner)) e = NULL; read_unlock(&ematch_mod_lock); return e; } } read_unlock(&ematch_mod_lock); return NULL; } int tcf_em_register(struct tcf_ematch_ops *ops) { int err = -EEXIST; struct tcf_ematch_ops *e; if (ops->match == NULL) return -EINVAL; write_lock(&ematch_mod_lock); list_for_each_entry(e, &ematch_ops, link) if (ops->kind == e->kind) goto errout; list_add_tail(&ops->link, &ematch_ops); err = 0; errout: write_unlock(&ematch_mod_lock); return err; } EXPORT_SYMBOL(tcf_em_register); void tcf_em_unregister(struct tcf_ematch_ops *ops) { write_lock(&ematch_mod_lock); list_del(&ops->link); write_unlock(&ematch_mod_lock); } EXPORT_SYMBOL(tcf_em_unregister); static inline struct tcf_ematch *tcf_em_get_match(struct tcf_ematch_tree *tree, int index) { return &tree->matches[index]; } static int tcf_em_validate(struct tcf_proto *tp, struct tcf_ematch_tree_hdr *tree_hdr, struct tcf_ematch *em, 
struct nlattr *nla, int idx) { int err = -EINVAL; struct tcf_ematch_hdr *em_hdr = nla_data(nla); int data_len = nla_len(nla) - sizeof(*em_hdr); void *data = (void *) em_hdr + sizeof(*em_hdr); if (!TCF_EM_REL_VALID(em_hdr->flags)) goto errout; if (em_hdr->kind == TCF_EM_CONTAINER) { u32 ref; if (data_len < sizeof(ref)) goto errout; ref = *(u32 *) data; if (ref >= tree_hdr->nmatches) goto errout; if (ref <= idx) goto errout; em->data = ref; } else { em->ops = tcf_em_lookup(em_hdr->kind); if (em->ops == NULL) { err = -ENOENT; #ifdef CONFIG_MODULES __rtnl_unlock(); request_module("ematch-kind-%u", em_hdr->kind); rtnl_lock(); em->ops = tcf_em_lookup(em_hdr->kind); if (em->ops) { module_put(em->ops->owner); err = -EAGAIN; } #endif goto errout; } if (em->ops->datalen && data_len < em->ops->datalen) goto errout; if (em->ops->change) { err = em->ops->change(tp, data, data_len, em); if (err < 0) goto errout; } else if (data_len > 0) { if (em_hdr->flags & TCF_EM_SIMPLE) { if (data_len < sizeof(u32)) goto errout; em->data = *(u32 *) data; } else { void *v = kmemdup(data, data_len, GFP_KERNEL); if (v == NULL) { err = -ENOBUFS; goto errout; } em->data = (unsigned long) v; } } } em->matchid = em_hdr->matchid; em->flags = em_hdr->flags; em->datalen = data_len; err = 0; errout: return err; } static const struct nla_policy em_policy[TCA_EMATCH_TREE_MAX + 1] = { [TCA_EMATCH_TREE_HDR] = { .len = sizeof(struct tcf_ematch_tree_hdr) }, [TCA_EMATCH_TREE_LIST] = { .type = NLA_NESTED }, }; int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla, struct tcf_ematch_tree *tree) { int idx, list_len, matches_len, err; struct nlattr *tb[TCA_EMATCH_TREE_MAX + 1]; struct nlattr *rt_match, *rt_hdr, *rt_list; struct tcf_ematch_tree_hdr *tree_hdr; struct tcf_ematch *em; memset(tree, 0, sizeof(*tree)); if (!nla) return 0; err = nla_parse_nested(tb, TCA_EMATCH_TREE_MAX, nla, em_policy); if (err < 0) goto errout; err = -EINVAL; rt_hdr = tb[TCA_EMATCH_TREE_HDR]; rt_list = 
tb[TCA_EMATCH_TREE_LIST]; if (rt_hdr == NULL || rt_list == NULL) goto errout; tree_hdr = nla_data(rt_hdr); memcpy(&tree->hdr, tree_hdr, sizeof(*tree_hdr)); rt_match = nla_data(rt_list); list_len = nla_len(rt_list); matches_len = tree_hdr->nmatches * sizeof(*em); tree->matches = kzalloc(matches_len, GFP_KERNEL); if (tree->matches == NULL) goto errout; for (idx = 0; nla_ok(rt_match, list_len); idx++) { err = -EINVAL; if (rt_match->nla_type != (idx + 1)) goto errout_abort; if (idx >= tree_hdr->nmatches) goto errout_abort; if (nla_len(rt_match) < sizeof(struct tcf_ematch_hdr)) goto errout_abort; em = tcf_em_get_match(tree, idx); err = tcf_em_validate(tp, tree_hdr, em, rt_match, idx); if (err < 0) goto errout_abort; rt_match = nla_next(rt_match, &list_len); } if (idx != tree_hdr->nmatches) { err = -EINVAL; goto errout_abort; } err = 0; errout: return err; errout_abort: tcf_em_tree_destroy(tp, tree); return err; } EXPORT_SYMBOL(tcf_em_tree_validate); void tcf_em_tree_destroy(struct tcf_proto *tp, struct tcf_ematch_tree *tree) { int i; if (tree->matches == NULL) return; for (i = 0; i < tree->hdr.nmatches; i++) { struct tcf_ematch *em = tcf_em_get_match(tree, i); if (em->ops) { if (em->ops->destroy) em->ops->destroy(tp, em); else if (!tcf_em_is_simple(em)) kfree((void *) em->data); module_put(em->ops->owner); } } tree->hdr.nmatches = 0; kfree(tree->matches); tree->matches = NULL; } EXPORT_SYMBOL(tcf_em_tree_destroy); int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv) { int i; u8 *tail; struct nlattr *top_start; struct nlattr *list_start; top_start = nla_nest_start(skb, tlv); if (top_start == NULL) goto nla_put_failure; NLA_PUT(skb, TCA_EMATCH_TREE_HDR, sizeof(tree->hdr), &tree->hdr); list_start = nla_nest_start(skb, TCA_EMATCH_TREE_LIST); if (list_start == NULL) goto nla_put_failure; tail = skb_tail_pointer(skb); for (i = 0; i < tree->hdr.nmatches; i++) { struct nlattr *match_start = (struct nlattr *)tail; struct tcf_ematch *em = 
tcf_em_get_match(tree, i); struct tcf_ematch_hdr em_hdr = { .kind = em->ops ? em->ops->kind : TCF_EM_CONTAINER, .matchid = em->matchid, .flags = em->flags }; NLA_PUT(skb, i + 1, sizeof(em_hdr), &em_hdr); if (em->ops && em->ops->dump) { if (em->ops->dump(skb, em) < 0) goto nla_put_failure; } else if (tcf_em_is_container(em) || tcf_em_is_simple(em)) { u32 u = em->data; nla_put_nohdr(skb, sizeof(u), &u); } else if (em->datalen > 0) nla_put_nohdr(skb, em->datalen, (void *) em->data); tail = skb_tail_pointer(skb); match_start->nla_len = tail - (u8 *)match_start; } nla_nest_end(skb, list_start); nla_nest_end(skb, top_start); return 0; nla_put_failure: return -1; } EXPORT_SYMBOL(tcf_em_tree_dump); static inline int tcf_em_match(struct sk_buff *skb, struct tcf_ematch *em, struct tcf_pkt_info *info) { int r = em->ops->match(skb, em, info); return tcf_em_is_inverted(em) ? !r : r; } int __tcf_em_tree_match(struct sk_buff *skb, struct tcf_ematch_tree *tree, struct tcf_pkt_info *info) { int stackp = 0, match_idx = 0, res = 0; struct tcf_ematch *cur_match; int stack[CONFIG_NET_EMATCH_STACK]; proceed: while (match_idx < tree->hdr.nmatches) { cur_match = tcf_em_get_match(tree, match_idx); if (tcf_em_is_container(cur_match)) { if (unlikely(stackp >= CONFIG_NET_EMATCH_STACK)) goto stack_overflow; stack[stackp++] = match_idx; match_idx = cur_match->data; goto proceed; } res = tcf_em_match(skb, cur_match, info); if (tcf_em_early_end(cur_match, res)) break; match_idx++; } pop_stack: if (stackp > 0) { match_idx = stack[--stackp]; cur_match = tcf_em_get_match(tree, match_idx); if (tcf_em_early_end(cur_match, res)) goto pop_stack; else { match_idx++; goto proceed; } } return res; stack_overflow: if (net_ratelimit()) pr_warning("tc ematch: local stack overflow," " increase NET_EMATCH_STACK\n"); return -1; } EXPORT_SYMBOL(__tcf_em_tree_match);
gpl-2.0
ISTweak/android_kernel_sony_fuji_hayate
drivers/input/misc/isa1200-ff-memless.c
883
10734
/* * Copyright (C) 2009 Samsung Electronics * Kyungmin Park <kyungmin.park@samsung.com> * * Copyright (c) 2011, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/module.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/gpio.h> #include <linux/workqueue.h> #include <linux/delay.h> #include <linux/pwm.h> #include <linux/input.h> #include <linux/slab.h> #include <linux/pm.h> #include <linux/i2c/isa1200.h> #define ISA1200_HCTRL0 0x30 #define HCTRL0_MODE_CTRL_BIT (3) #define HCTRL0_OVERDRIVE_HIGH_BIT (5) #define HCTRL0_OVERDRIVE_EN_BIT (6) #define HCTRL0_HAP_EN (7) #define HCTRL0_RESET 0x01 #define HCTRL1_RESET 0x4B #define ISA1200_HCTRL1 0x31 #define HCTRL1_SMART_ENABLE_BIT (3) #define HCTRL1_ERM_BIT (5) #define HCTRL1_EXT_CLK_ENABLE_BIT (7) #define ISA1200_HCTRL5 0x35 #define HCTRL5_VIB_STRT 0xD5 #define HCTRL5_VIB_STOP 0x6B #define DIVIDER_128 (128) #define DIVIDER_1024 (1024) #define DIVIDE_SHIFTER_128 (7) #define FREQ_22400 (22400) #define FREQ_172600 (172600) #define POR_DELAY_USEC 250 struct isa1200_chip { const struct isa1200_platform_data *pdata; struct i2c_client *client; struct input_dev *input_device; struct pwm_device *pwm; unsigned int period_ns; unsigned int state; struct work_struct work; }; static void isa1200_vib_set(struct isa1200_chip *haptic, int enable) { int rc; if (enable) { if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE) { int period_us = haptic->period_ns / NSEC_PER_USEC; rc = pwm_config(haptic->pwm, (period_us * haptic->pdata->duty) / 
100, period_us); if (rc < 0) pr_err("pwm_config fail\n"); rc = pwm_enable(haptic->pwm); if (rc < 0) pr_err("pwm_enable fail\n"); } else if (haptic->pdata->mode_ctrl == PWM_GEN_MODE) { rc = i2c_smbus_write_byte_data(haptic->client, ISA1200_HCTRL5, HCTRL5_VIB_STRT); if (rc < 0) pr_err("start vibration fail\n"); } } else { if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE) pwm_disable(haptic->pwm); else if (haptic->pdata->mode_ctrl == PWM_GEN_MODE) { rc = i2c_smbus_write_byte_data(haptic->client, ISA1200_HCTRL5, HCTRL5_VIB_STOP); if (rc < 0) pr_err("stop vibration fail\n"); } } } static int isa1200_setup(struct i2c_client *client) { struct isa1200_chip *haptic = i2c_get_clientdata(client); int value, temp, rc; gpio_set_value_cansleep(haptic->pdata->hap_en_gpio, 0); udelay(POR_DELAY_USEC); gpio_set_value_cansleep(haptic->pdata->hap_en_gpio, 1); value = (haptic->pdata->smart_en << HCTRL1_SMART_ENABLE_BIT) | (haptic->pdata->is_erm << HCTRL1_ERM_BIT) | (haptic->pdata->ext_clk_en << HCTRL1_EXT_CLK_ENABLE_BIT); rc = i2c_smbus_write_byte_data(client, ISA1200_HCTRL1, value); if (rc < 0) { pr_err("i2c write failure\n"); return rc; } if (haptic->pdata->mode_ctrl == PWM_GEN_MODE) { temp = haptic->pdata->pwm_fd.pwm_div; if (temp < DIVIDER_128 || temp > DIVIDER_1024 || temp % DIVIDER_128) { pr_err("Invalid divider\n"); rc = -EINVAL; goto reset_hctrl1; } value = ((temp >> DIVIDE_SHIFTER_128) - 1); } else if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE) { temp = haptic->pdata->pwm_fd.pwm_freq; if (temp < FREQ_22400 || temp > FREQ_172600 || temp % FREQ_22400) { pr_err("Invalid frequency\n"); rc = -EINVAL; goto reset_hctrl1; } value = ((temp / FREQ_22400) - 1); haptic->period_ns = NSEC_PER_SEC / temp; } value |= (haptic->pdata->mode_ctrl << HCTRL0_MODE_CTRL_BIT) | (haptic->pdata->overdrive_high << HCTRL0_OVERDRIVE_HIGH_BIT) | (haptic->pdata->overdrive_en << HCTRL0_OVERDRIVE_EN_BIT) | (haptic->pdata->chip_en << HCTRL0_HAP_EN); rc = i2c_smbus_write_byte_data(client, ISA1200_HCTRL0, value); 
if (rc < 0) { pr_err("i2c write failure\n"); goto reset_hctrl1; } return 0; reset_hctrl1: i2c_smbus_write_byte_data(client, ISA1200_HCTRL1, HCTRL1_RESET); return rc; } static void isa1200_worker(struct work_struct *work) { struct isa1200_chip *haptic; haptic = container_of(work, struct isa1200_chip, work); isa1200_vib_set(haptic, !!haptic->state); } static int isa1200_play_effect(struct input_dev *dev, void *data, struct ff_effect *effect) { struct isa1200_chip *haptic = input_get_drvdata(dev); /* support basic vibration */ haptic->state = effect->u.rumble.strong_magnitude >> 8; if (!haptic->state) haptic->state = effect->u.rumble.weak_magnitude >> 9; schedule_work(&haptic->work); return 0; } #ifdef CONFIG_PM static int isa1200_suspend(struct device *dev) { struct isa1200_chip *haptic = dev_get_drvdata(dev); int rc; cancel_work_sync(&haptic->work); /* turn-off current vibration */ isa1200_vib_set(haptic, 0); if (haptic->pdata->power_on) { rc = haptic->pdata->power_on(0); if (rc) { pr_err("power-down failed\n"); return rc; } } return 0; } static int isa1200_resume(struct device *dev) { struct isa1200_chip *haptic = dev_get_drvdata(dev); int rc; if (haptic->pdata->power_on) { rc = haptic->pdata->power_on(1); if (rc) { pr_err("power-up failed\n"); return rc; } } isa1200_setup(haptic->client); return 0; } #else #define isa1200_suspend NULL #define isa1200_resume NULL #endif static int isa1200_open(struct input_dev *dev) { struct isa1200_chip *haptic = input_get_drvdata(dev); int rc; /* device setup */ if (haptic->pdata->dev_setup) { rc = haptic->pdata->dev_setup(true); if (rc < 0) { pr_err("setup failed!\n"); return rc; } } /* power on */ if (haptic->pdata->power_on) { rc = haptic->pdata->power_on(true); if (rc < 0) { pr_err("power failed\n"); goto err_setup; } } /* request gpio */ rc = gpio_is_valid(haptic->pdata->hap_en_gpio); if (rc) { rc = gpio_request(haptic->pdata->hap_en_gpio, "haptic_gpio"); if (rc) { pr_err("gpio %d request failed\n", 
haptic->pdata->hap_en_gpio); goto err_power_on; } } else { pr_err("Invalid gpio %d\n", haptic->pdata->hap_en_gpio); goto err_power_on; } rc = gpio_direction_output(haptic->pdata->hap_en_gpio, 0); if (rc) { pr_err("gpio %d set direction failed\n", haptic->pdata->hap_en_gpio); goto err_gpio_free; } /* setup registers */ rc = isa1200_setup(haptic->client); if (rc < 0) { pr_err("setup fail %d\n", rc); goto err_gpio_free; } if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE) { haptic->pwm = pwm_request(haptic->pdata->pwm_ch_id, haptic->client->driver->id_table->name); if (IS_ERR(haptic->pwm)) { pr_err("pwm request failed\n"); rc = PTR_ERR(haptic->pwm); goto err_reset_hctrl0; } } /* init workqeueue */ INIT_WORK(&haptic->work, isa1200_worker); return 0; err_reset_hctrl0: i2c_smbus_write_byte_data(haptic->client, ISA1200_HCTRL0, HCTRL0_RESET); err_gpio_free: gpio_free(haptic->pdata->hap_en_gpio); err_power_on: if (haptic->pdata->power_on) haptic->pdata->power_on(0); err_setup: if (haptic->pdata->dev_setup) haptic->pdata->dev_setup(false); return rc; } static void isa1200_close(struct input_dev *dev) { struct isa1200_chip *haptic = input_get_drvdata(dev); /* turn-off current vibration */ isa1200_vib_set(haptic, 0); if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE) pwm_free(haptic->pwm); gpio_free(haptic->pdata->hap_en_gpio); /* reset hardware registers */ i2c_smbus_write_byte_data(haptic->client, ISA1200_HCTRL0, HCTRL0_RESET); i2c_smbus_write_byte_data(haptic->client, ISA1200_HCTRL1, HCTRL1_RESET); if (haptic->pdata->dev_setup) haptic->pdata->dev_setup(false); /* power-off the chip */ if (haptic->pdata->power_on) haptic->pdata->power_on(0); } static int __devinit isa1200_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct isa1200_chip *haptic; int rc; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { pr_err("i2c is not supported\n"); return -EIO; } if (!client->dev.platform_data) { pr_err("pdata is not avaiable\n"); return -EINVAL; } 
haptic = kzalloc(sizeof(struct isa1200_chip), GFP_KERNEL); if (!haptic) { pr_err("no memory\n"); return -ENOMEM; } haptic->pdata = client->dev.platform_data; haptic->client = client; i2c_set_clientdata(client, haptic); haptic->input_device = input_allocate_device(); if (!haptic->input_device) { pr_err("input device alloc failed\n"); rc = -ENOMEM; goto err_mem_alloc; } input_set_drvdata(haptic->input_device, haptic); haptic->input_device->name = haptic->pdata->name ? : "isa1200-ff-memless"; haptic->input_device->dev.parent = &client->dev; input_set_capability(haptic->input_device, EV_FF, FF_RUMBLE); haptic->input_device->open = isa1200_open; haptic->input_device->close = isa1200_close; rc = input_ff_create_memless(haptic->input_device, NULL, isa1200_play_effect); if (rc < 0) { pr_err("unable to register with ff\n"); goto err_free_dev; } rc = input_register_device(haptic->input_device); if (rc < 0) { pr_err("unable to register input device\n"); goto err_ff_destroy; } return 0; err_ff_destroy: input_ff_destroy(haptic->input_device); err_free_dev: input_free_device(haptic->input_device); err_mem_alloc: kfree(haptic); return rc; } static int __devexit isa1200_remove(struct i2c_client *client) { struct isa1200_chip *haptic = i2c_get_clientdata(client); input_unregister_device(haptic->input_device); kfree(haptic); return 0; } static const struct i2c_device_id isa1200_id_table[] = { {"isa1200_1", 0}, { }, }; MODULE_DEVICE_TABLE(i2c, isa1200_id_table); static const struct dev_pm_ops isa1200_pm_ops = { .suspend = isa1200_suspend, .resume = isa1200_resume, }; static struct i2c_driver isa1200_driver = { .driver = { .name = "isa1200-ff-memless", .owner = THIS_MODULE, .pm = &isa1200_pm_ops, }, .probe = isa1200_probe, .remove = __devexit_p(isa1200_remove), .id_table = isa1200_id_table, }; static int __init isa1200_init(void) { return i2c_add_driver(&isa1200_driver); } module_init(isa1200_init); static void __exit isa1200_exit(void) { i2c_del_driver(&isa1200_driver); } 
module_exit(isa1200_exit); MODULE_DESCRIPTION("isa1200 based vibrator chip driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
gpl-2.0
XtheOne/enrc2b-3.1.10-42105bd
drivers/pci/pci-acpi.c
1139
10662
/* * File: pci-acpi.c * Purpose: Provide PCI support in ACPI * * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com> * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com> * Copyright (C) 2004 Intel Corp. */ #include <linux/delay.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/module.h> #include <linux/pci-aspm.h> #include <acpi/acpi.h> #include <acpi/acpi_bus.h> #include <linux/pci-acpi.h> #include <linux/pm_runtime.h> #include "pci.h" static DEFINE_MUTEX(pci_acpi_pm_notify_mtx); /** * pci_acpi_wake_bus - Wake-up notification handler for root buses. * @handle: ACPI handle of a device the notification is for. * @event: Type of the signaled event. * @context: PCI root bus to wake up devices on. */ static void pci_acpi_wake_bus(acpi_handle handle, u32 event, void *context) { struct pci_bus *pci_bus = context; if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_bus) pci_pme_wakeup_bus(pci_bus); } /** * pci_acpi_wake_dev - Wake-up notification handler for PCI devices. * @handle: ACPI handle of a device the notification is for. * @event: Type of the signaled event. * @context: PCI device object to wake up. */ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context) { struct pci_dev *pci_dev = context; if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) { pci_wakeup_event(pci_dev); pci_check_pme_status(pci_dev); pm_runtime_resume(&pci_dev->dev); if (pci_dev->subordinate) pci_pme_wakeup_bus(pci_dev->subordinate); } } /** * add_pm_notifier - Register PM notifier for given ACPI device. * @dev: ACPI device to add the notifier for. * @context: PCI device or bus to check for PME status if an event is signaled. * * NOTE: @dev need not be a run-wake or wake-up device to be a valid source of * PM wake-up events. For example, wake-up events may be generated for bridges * if one of the devices below the bridge is signaling PME, even if the bridge * itself doesn't have a wake-up GPE associated with it. 
*/ static acpi_status add_pm_notifier(struct acpi_device *dev, acpi_notify_handler handler, void *context) { acpi_status status = AE_ALREADY_EXISTS; mutex_lock(&pci_acpi_pm_notify_mtx); if (dev->wakeup.flags.notifier_present) goto out; status = acpi_install_notify_handler(dev->handle, ACPI_SYSTEM_NOTIFY, handler, context); if (ACPI_FAILURE(status)) goto out; dev->wakeup.flags.notifier_present = true; out: mutex_unlock(&pci_acpi_pm_notify_mtx); return status; } /** * remove_pm_notifier - Unregister PM notifier from given ACPI device. * @dev: ACPI device to remove the notifier from. */ static acpi_status remove_pm_notifier(struct acpi_device *dev, acpi_notify_handler handler) { acpi_status status = AE_BAD_PARAMETER; mutex_lock(&pci_acpi_pm_notify_mtx); if (!dev->wakeup.flags.notifier_present) goto out; status = acpi_remove_notify_handler(dev->handle, ACPI_SYSTEM_NOTIFY, handler); if (ACPI_FAILURE(status)) goto out; dev->wakeup.flags.notifier_present = false; out: mutex_unlock(&pci_acpi_pm_notify_mtx); return status; } /** * pci_acpi_add_bus_pm_notifier - Register PM notifier for given PCI bus. * @dev: ACPI device to add the notifier for. * @pci_bus: PCI bus to walk checking for PME status if an event is signaled. */ acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev, struct pci_bus *pci_bus) { return add_pm_notifier(dev, pci_acpi_wake_bus, pci_bus); } /** * pci_acpi_remove_bus_pm_notifier - Unregister PCI bus PM notifier. * @dev: ACPI device to remove the notifier from. */ acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev) { return remove_pm_notifier(dev, pci_acpi_wake_bus); } /** * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device. * @dev: ACPI device to add the notifier for. * @pci_dev: PCI device to check for the PME status if an event is signaled. 
*/ acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev, struct pci_dev *pci_dev) { return add_pm_notifier(dev, pci_acpi_wake_dev, pci_dev); } /** * pci_acpi_remove_pm_notifier - Unregister PCI device PM notifier. * @dev: ACPI device to remove the notifier from. */ acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev) { return remove_pm_notifier(dev, pci_acpi_wake_dev); } /* * _SxD returns the D-state with the highest power * (lowest D-state number) supported in the S-state "x". * * If the devices does not have a _PRW * (Power Resources for Wake) supporting system wakeup from "x" * then the OS is free to choose a lower power (higher number * D-state) than the return value from _SxD. * * But if _PRW is enabled at S-state "x", the OS * must not choose a power lower than _SxD -- * unless the device has an _SxW method specifying * the lowest power (highest D-state number) the device * may enter while still able to wake the system. * * ie. depending on global OS policy: * * if (_PRW at S-state x) * choose from highest power _SxD to lowest power _SxW * else // no _PRW at S-state x * choose highest power _SxD or any lower power */ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev) { int acpi_state; acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL); if (acpi_state < 0) return PCI_POWER_ERROR; switch (acpi_state) { case ACPI_STATE_D0: return PCI_D0; case ACPI_STATE_D1: return PCI_D1; case ACPI_STATE_D2: return PCI_D2; case ACPI_STATE_D3: return PCI_D3hot; case ACPI_STATE_D3_COLD: return PCI_D3cold; } return PCI_POWER_ERROR; } static bool acpi_pci_power_manageable(struct pci_dev *dev) { acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); return handle ? 
acpi_bus_power_manageable(handle) : false; } static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) { acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); acpi_handle tmp; static const u8 state_conv[] = { [PCI_D0] = ACPI_STATE_D0, [PCI_D1] = ACPI_STATE_D1, [PCI_D2] = ACPI_STATE_D2, [PCI_D3hot] = ACPI_STATE_D3, [PCI_D3cold] = ACPI_STATE_D3 }; int error = -EINVAL; /* If the ACPI device has _EJ0, ignore the device */ if (!handle || ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp))) return -ENODEV; switch (state) { case PCI_D0: case PCI_D1: case PCI_D2: case PCI_D3hot: case PCI_D3cold: error = acpi_bus_set_power(handle, state_conv[state]); } if (!error) dev_printk(KERN_INFO, &dev->dev, "power state changed by ACPI to D%d\n", state); return error; } static bool acpi_pci_can_wakeup(struct pci_dev *dev) { acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); return handle ? acpi_bus_can_wakeup(handle) : false; } static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable) { while (bus->parent) { if (!acpi_pm_device_sleep_wake(&bus->self->dev, enable)) return; bus = bus->parent; } /* We have reached the root bus. */ if (bus->bridge) acpi_pm_device_sleep_wake(bus->bridge, enable); } static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable) { if (acpi_pci_can_wakeup(dev)) return acpi_pm_device_sleep_wake(&dev->dev, enable); acpi_pci_propagate_wakeup_enable(dev->bus, enable); return 0; } /** * acpi_dev_run_wake - Enable/disable wake-up for given device. * @phys_dev: Device to enable/disable the platform to wake-up the system for. * @enable: Whether enable or disable the wake-up functionality. * * Find the ACPI device object corresponding to @pci_dev and try to * enable/disable the GPE associated with it. 
*/ static int acpi_dev_run_wake(struct device *phys_dev, bool enable) { struct acpi_device *dev; acpi_handle handle; int error = -ENODEV; if (!device_run_wake(phys_dev)) return -EINVAL; handle = DEVICE_ACPI_HANDLE(phys_dev); if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &dev))) { dev_dbg(phys_dev, "ACPI handle has no context in %s!\n", __func__); return -ENODEV; } if (enable) { acpi_enable_wakeup_device_power(dev, ACPI_STATE_S0); acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number); } else { acpi_disable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number); acpi_disable_wakeup_device_power(dev); } return error; } static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable) { while (bus->parent) { struct pci_dev *bridge = bus->self; if (bridge->pme_interrupt) return; if (!acpi_dev_run_wake(&bridge->dev, enable)) return; bus = bus->parent; } /* We have reached the root bus. */ if (bus->bridge) acpi_dev_run_wake(bus->bridge, enable); } static int acpi_pci_run_wake(struct pci_dev *dev, bool enable) { if (dev->pme_interrupt) return 0; if (!acpi_dev_run_wake(&dev->dev, enable)) return 0; acpi_pci_propagate_run_wake(dev->bus, enable); return 0; } static struct pci_platform_pm_ops acpi_pci_platform_pm = { .is_manageable = acpi_pci_power_manageable, .set_state = acpi_pci_set_power_state, .choose_state = acpi_pci_choose_state, .can_wakeup = acpi_pci_can_wakeup, .sleep_wake = acpi_pci_sleep_wake, .run_wake = acpi_pci_run_wake, }; /* ACPI bus type */ static int acpi_pci_find_device(struct device *dev, acpi_handle *handle) { struct pci_dev * pci_dev; u64 addr; pci_dev = to_pci_dev(dev); /* Please ref to ACPI spec for the syntax of _ADR */ addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn); *handle = acpi_get_child(DEVICE_ACPI_HANDLE(dev->parent), addr); if (!*handle) return -ENODEV; return 0; } static int acpi_pci_find_root_bridge(struct device *dev, acpi_handle *handle) { int num; unsigned int seg, bus; /* * The string should be 
the same as root bridge's name * Please look at 'pci_scan_bus_parented' */ num = sscanf(dev_name(dev), "pci%04x:%02x", &seg, &bus); if (num != 2) return -ENODEV; *handle = acpi_get_pci_rootbridge_handle(seg, bus); if (!*handle) return -ENODEV; return 0; } static struct acpi_bus_type acpi_pci_bus = { .bus = &pci_bus_type, .find_device = acpi_pci_find_device, .find_bridge = acpi_pci_find_root_bridge, }; static int __init acpi_pci_init(void) { int ret; if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) { printk(KERN_INFO"ACPI FADT declares the system doesn't support MSI, so disable it\n"); pci_no_msi(); } if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) { printk(KERN_INFO"ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n"); pcie_clear_aspm(); pcie_no_aspm(); } ret = register_acpi_bus_type(&acpi_pci_bus); if (ret) return 0; pci_set_platform_pm(&acpi_pci_platform_pm); return 0; } arch_initcall(acpi_pci_init);
gpl-2.0
Abhinav1997/kernel_cyanogen_msm8916
arch/s390/pci/pci_event.c
2163
2205
/* * Copyright IBM Corp. 2012 * * Author(s): * Jan Glauber <jang@linux.vnet.ibm.com> */ #define COMPONENT "zPCI" #define pr_fmt(fmt) COMPONENT ": " fmt #include <linux/kernel.h> #include <linux/pci.h> /* Content Code Description for PCI Function Error */ struct zpci_ccdf_err { u32 reserved1; u32 fh; /* function handle */ u32 fid; /* function id */ u32 ett : 4; /* expected table type */ u32 mvn : 12; /* MSI vector number */ u32 dmaas : 8; /* DMA address space */ u32 : 6; u32 q : 1; /* event qualifier */ u32 rw : 1; /* read/write */ u64 faddr; /* failing address */ u32 reserved3; u16 reserved4; u16 pec; /* PCI event code */ } __packed; /* Content Code Description for PCI Function Availability */ struct zpci_ccdf_avail { u32 reserved1; u32 fh; /* function handle */ u32 fid; /* function id */ u32 reserved2; u32 reserved3; u32 reserved4; u32 reserved5; u16 reserved6; u16 pec; /* PCI event code */ } __packed; static void zpci_event_log_err(struct zpci_ccdf_err *ccdf) { struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); zpci_err("SEI error CCD:\n"); zpci_err_hex(ccdf, sizeof(*ccdf)); dev_err(&zdev->pdev->dev, "event code: 0x%x\n", ccdf->pec); } static void zpci_event_log_avail(struct zpci_ccdf_avail *ccdf) { struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); pr_err("%s%s: availability event: fh: 0x%x fid: 0x%x event code: 0x%x reason:", (zdev) ? dev_driver_string(&zdev->pdev->dev) : "?", (zdev) ? 
dev_name(&zdev->pdev->dev) : "?", ccdf->fh, ccdf->fid, ccdf->pec); print_hex_dump(KERN_CONT, "ccdf", DUMP_PREFIX_OFFSET, 16, 1, ccdf, sizeof(*ccdf), false); switch (ccdf->pec) { case 0x0301: zpci_enable_device(zdev); break; case 0x0302: clp_add_pci_device(ccdf->fid, ccdf->fh, 0); break; case 0x0306: clp_find_pci_devices(); break; default: break; } } void zpci_event_error(void *data) { struct zpci_ccdf_err *ccdf = data; struct zpci_dev *zdev; zpci_event_log_err(ccdf); zdev = get_zdev_by_fid(ccdf->fid); if (!zdev) { pr_err("Error event for unknown fid: %x", ccdf->fid); return; } } void zpci_event_availability(void *data) { zpci_event_log_avail(data); }
gpl-2.0
faux123/Note_4_SM-N910T
arch/mips/kernel/smtc-proc.c
2419
1797
/* * /proc hooks for SMTC kernel * Copyright (C) 2005 Mips Technologies, Inc */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/cpumask.h> #include <linux/interrupt.h> #include <asm/cpu.h> #include <asm/processor.h> #include <linux/atomic.h> #include <asm/hardirq.h> #include <asm/mmu_context.h> #include <asm/mipsregs.h> #include <asm/cacheflush.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <asm/smtc_proc.h> /* * /proc diagnostic and statistics hooks */ /* * Statistics gathered */ unsigned long selfipis[NR_CPUS]; struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS]; atomic_t smtc_fpu_recoveries; static int smtc_proc_show(struct seq_file *m, void *v) { int i; extern unsigned long ebase; seq_printf(m, "SMTC Status Word: 0x%08x\n", smtc_status); seq_printf(m, "Config7: 0x%08x\n", read_c0_config7()); seq_printf(m, "EBASE: 0x%08lx\n", ebase); seq_printf(m, "Counter Interrupts taken per CPU (TC)\n"); for (i=0; i < NR_CPUS; i++) seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].timerints); seq_printf(m, "Self-IPIs by CPU:\n"); for(i = 0; i < NR_CPUS; i++) seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis); seq_printf(m, "%d Recoveries of \"stolen\" FPU\n", atomic_read(&smtc_fpu_recoveries)); return 0; } static int smtc_proc_open(struct inode *inode, struct file *file) { return single_open(file, smtc_proc_show, NULL); } static const struct file_operations smtc_proc_fops = { .open = smtc_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; void init_smtc_stats(void) { int i; for (i=0; i<NR_CPUS; i++) { smtc_cpu_stats[i].timerints = 0; smtc_cpu_stats[i].selfipis = 0; } atomic_set(&smtc_fpu_recoveries, 0); proc_create("smtc", 0444, NULL, &smtc_proc_fops); }
gpl-2.0
rdesfo/kernel
drivers/staging/dgrp/dgrp_mon_ops.c
2675
6442
/***************************************************************************** * * Copyright 1999 Digi International (www.digi.com) * James Puzzo <jamesp at digi dot com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR * PURPOSE. See the GNU General Public License for more details. * */ /* * * Filename: * * dgrp_mon_ops.c * * Description: * * Handle the file operations required for the "monitor" devices. * Includes those functions required to register the "mon" devices * in "/proc". * * Author: * * James A. Puzzo * */ #include <linux/module.h> #include <linux/tty.h> #include <linux/sched.h> #include <asm/unaligned.h> #include <linux/slab.h> #include <linux/proc_fs.h> #include <linux/uaccess.h> #include "dgrp_common.h" /* File operation declarations */ static int dgrp_mon_open(struct inode *, struct file *); static int dgrp_mon_release(struct inode *, struct file *); static ssize_t dgrp_mon_read(struct file *, char __user *, size_t, loff_t *); static long dgrp_mon_ioctl(struct file *file, unsigned int cmd, unsigned long arg); const struct file_operations dgrp_mon_ops = { .owner = THIS_MODULE, .read = dgrp_mon_read, .unlocked_ioctl = dgrp_mon_ioctl, .open = dgrp_mon_open, .release = dgrp_mon_release, }; /** * dgrp_mon_open() -- open /proc/dgrp/ports device for a PortServer * @inode: struct inode * * @file: struct file * * * Open function to open the /proc/dgrp/ports device for a PortServer. 
*/ static int dgrp_mon_open(struct inode *inode, struct file *file) { struct nd_struct *nd; struct timeval tv; uint32_t time; u8 *buf; int rtn; rtn = try_module_get(THIS_MODULE); if (!rtn) return -ENXIO; rtn = 0; if (!capable(CAP_SYS_ADMIN)) { rtn = -EPERM; goto done; } /* * Make sure that the "private_data" field hasn't already been used. */ if (file->private_data) { rtn = -EINVAL; goto done; } /* * Get the node pointer, and fail if it doesn't exist. */ nd = PDE_DATA(inode); if (!nd) { rtn = -ENXIO; goto done; } file->private_data = (void *) nd; /* * Allocate the monitor buffer. */ /* * Grab the MON lock. */ down(&nd->nd_mon_semaphore); if (nd->nd_mon_buf) { rtn = -EBUSY; goto done_up; } nd->nd_mon_buf = kmalloc(MON_MAX, GFP_KERNEL); if (!nd->nd_mon_buf) { rtn = -ENOMEM; goto done_up; } /* * Enter an RPDUMP file header into the buffer. */ buf = nd->nd_mon_buf; strcpy(buf, RPDUMP_MAGIC); buf += strlen(buf) + 1; do_gettimeofday(&tv); /* * tv.tv_sec might be a 64 bit quantity. Pare * it down to 32 bits before attempting to encode * it. */ time = (uint32_t) (tv.tv_sec & 0xffffffff); put_unaligned_be32(time, buf); put_unaligned_be16(0, buf + 4); buf += 6; if (nd->nd_tx_module) { buf[0] = RPDUMP_CLIENT; put_unaligned_be32(0, buf + 1); put_unaligned_be16(1, buf + 5); buf[7] = 0xf0 + nd->nd_tx_module; buf += 8; } if (nd->nd_rx_module) { buf[0] = RPDUMP_SERVER; put_unaligned_be32(0, buf + 1); put_unaligned_be16(1, buf + 5); buf[7] = 0xf0 + nd->nd_rx_module; buf += 8; } nd->nd_mon_out = 0; nd->nd_mon_in = buf - nd->nd_mon_buf; nd->nd_mon_lbolt = jiffies; done_up: up(&nd->nd_mon_semaphore); done: if (rtn) module_put(THIS_MODULE); return rtn; } /** * dgrp_mon_release() - Close the MON device for a particular PortServer * @inode: struct inode * * @file: struct file * */ static int dgrp_mon_release(struct inode *inode, struct file *file) { struct nd_struct *nd; /* * Get the node pointer, and quit if it doesn't exist. 
*/ nd = (struct nd_struct *)(file->private_data); if (!nd) goto done; /* * Free the monitor buffer. */ down(&nd->nd_mon_semaphore); kfree(nd->nd_mon_buf); nd->nd_mon_buf = NULL; nd->nd_mon_out = nd->nd_mon_in; /* * Wakeup any thread waiting for buffer space. */ if (nd->nd_mon_flag & MON_WAIT_SPACE) { nd->nd_mon_flag &= ~MON_WAIT_SPACE; wake_up_interruptible(&nd->nd_mon_wqueue); } up(&nd->nd_mon_semaphore); /* * Make sure there is no thread in the middle of writing a packet. */ down(&nd->nd_net_semaphore); up(&nd->nd_net_semaphore); done: module_put(THIS_MODULE); file->private_data = NULL; return 0; } /** * dgrp_mon_read() -- Copy data from the monitoring buffer to the user */ static ssize_t dgrp_mon_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct nd_struct *nd; int r; int offset = 0; int res = 0; ssize_t rtn; /* * Get the node pointer, and quit if it doesn't exist. */ nd = (struct nd_struct *)(file->private_data); if (!nd) return -ENXIO; /* * Wait for some data to appear in the buffer. */ down(&nd->nd_mon_semaphore); for (;;) { res = (nd->nd_mon_in - nd->nd_mon_out) & MON_MASK; if (res) break; nd->nd_mon_flag |= MON_WAIT_DATA; up(&nd->nd_mon_semaphore); /* * Go to sleep waiting until the condition becomes true. */ rtn = wait_event_interruptible(nd->nd_mon_wqueue, ((nd->nd_mon_flag & MON_WAIT_DATA) == 0)); if (rtn) return rtn; down(&nd->nd_mon_semaphore); } /* * Read whatever is there. */ if (res > count) res = count; r = MON_MAX - nd->nd_mon_out; if (r <= res) { rtn = copy_to_user((void __user *)buf, nd->nd_mon_buf + nd->nd_mon_out, r); if (rtn) { up(&nd->nd_mon_semaphore); return -EFAULT; } nd->nd_mon_out = 0; res -= r; offset = r; } rtn = copy_to_user((void __user *) buf + offset, nd->nd_mon_buf + nd->nd_mon_out, res); if (rtn) { up(&nd->nd_mon_semaphore); return -EFAULT; } nd->nd_mon_out += res; *ppos += res; up(&nd->nd_mon_semaphore); /* * Wakeup any thread waiting for buffer space. 
*/ if (nd->nd_mon_flag & MON_WAIT_SPACE) { nd->nd_mon_flag &= ~MON_WAIT_SPACE; wake_up_interruptible(&nd->nd_mon_wqueue); } return res; } /* ioctl is not valid on monitor device */ static long dgrp_mon_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return -EINVAL; }
gpl-2.0
compulab/trimslice-android-kernel
drivers/media/video/hexium_gemini.c
2931
15402
/*
    hexium_gemini.c - v4l2 driver for Hexium Gemini frame grabber cards

    Visit http://www.mihu.de/linux/saa7146/ and follow the link to "hexium"
    for further details about this card.

    Copyright (C) 2003 Michael Hunold <michael@mihu.de>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#define DEBUG_VARIABLE debug

#include <media/saa7146_vv.h>

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "debug verbosity");

/* global variables */
static int hexium_num;

#define HEXIUM_GEMINI           4
#define HEXIUM_GEMINI_DUAL      5

#define HEXIUM_INPUTS   9
/* Six composite (CVBS) inputs followed by three S-Video (Y/C) inputs. */
static struct v4l2_input hexium_inputs[HEXIUM_INPUTS] = {
        { 0, "CVBS 1",  V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
        { 1, "CVBS 2",  V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
        { 2, "CVBS 3",  V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
        { 3, "CVBS 4",  V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
        { 4, "CVBS 5",  V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
        { 5, "CVBS 6",  V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
        { 6, "Y/C 1",   V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
        { 7, "Y/C 2",   V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
        { 8, "Y/C 3",   V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
};

#define HEXIUM_AUDIOS   0

/* One (register address, value) pair for the KS0127B decoder. */
struct hexium_data
{
        s8 adr;
        u8 byte;
};

#define HEXIUM_CONTROLS 1
/* Single private control: black/white mode toggle. */
static struct v4l2_queryctrl hexium_controls[] = {
        { V4L2_CID_PRIVATE_BASE, V4L2_CTRL_TYPE_BOOLEAN, "B/W", 0, 1, 1, 0, 0 },
};

#define HEXIUM_GEMINI_V_1_0             1
#define HEXIUM_GEMINI_DUAL_V_1_0        2

/* Per-card driver state, hung off saa7146_dev->ext_priv. */
struct hexium
{
        int type;

        struct video_device     *video_dev;
        struct i2c_adapter      i2c_adapter;

        int             cur_input;      /* current input */
        v4l2_std_id     cur_std;        /* current standard */
        int             cur_bw;         /* current black/white status */
};

/* Samsung KS0127B decoder default registers */
static u8 hexium_ks0127b[0x100]={
/*00*/ 0x00,0x52,0x30,0x40,0x01,0x0C,0x2A,0x10,
/*08*/ 0x00,0x00,0x00,0x60,0x00,0x00,0x0F,0x06,
/*10*/ 0x00,0x00,0xE4,0xC0,0x00,0x00,0x00,0x00,
/*18*/ 0x14,0x9B,0xFE,0xFF,0xFC,0xFF,0x03,0x22,
/*20*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*28*/ 0x00,0x00,0x00,0x00,0x00,0x2C,0x9B,0x00,
/*30*/ 0x00,0x00,0x10,0x80,0x80,0x10,0x80,0x80,
/*38*/ 0x01,0x04,0x00,0x00,0x00,0x29,0xC0,0x00,
/*40*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*48*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*50*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*58*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*60*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*68*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*70*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*78*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*80*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*88*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*90*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*98*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*A0*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*A8*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*B0*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*B8*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*C0*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*C8*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*D0*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*D8*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*E0*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*E8*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*F0*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*F8*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 };

/*
 * Register sequences selecting a video standard on the decoder;
 * each list is terminated by adr == -1.
 * NOTE(review): the *_bw tables are byte-identical to their color
 * counterparts, and SECAM matches PAL — presumably intentional
 * placeholders; confirm against the KS0127B datasheet.
 */
static struct hexium_data hexium_pal[] = {
        { 0x01, 0x52 }, { 0x12, 0x64 }, { 0x2D, 0x2C }, { 0x2E, 0x9B }, { -1 , 0xFF }
};

static struct hexium_data hexium_pal_bw[] = {
        { 0x01, 0x52 }, { 0x12, 0x64 }, { 0x2D, 0x2C }, { 0x2E, 0x9B }, { -1 , 0xFF }
};

static struct hexium_data hexium_ntsc[] = {
        { 0x01, 0x53 }, { 0x12, 0x04 }, { 0x2D, 0x23 }, { 0x2E, 0x81 }, { -1 , 0xFF }
};

static struct hexium_data hexium_ntsc_bw[] = {
        { 0x01, 0x53 }, { 0x12, 0x04 }, { 0x2D, 0x23 }, { 0x2E, 0x81 }, { -1 , 0xFF }
};

static struct hexium_data hexium_secam[] = {
        { 0x01, 0x52 }, { 0x12, 0x64 }, { 0x2D, 0x2C }, { 0x2E, 0x9B }, { -1 , 0xFF }
};

/* Decoder register 0x02 value for each of the nine inputs. */
static struct hexium_data hexium_input_select[] = {
        { 0x02, 0x60 },
        { 0x02, 0x64 },
        { 0x02, 0x61 },
        { 0x02, 0x65 },
        { 0x02, 0x62 },
        { 0x02, 0x66 },
        { 0x02, 0x68 },
        { 0x02, 0x69 },
        { 0x02, 0x6A },
};

/* fixme: h_offset = 0 for Hexium Gemini *Dual*, which are currently *not* supported*/
static struct saa7146_standard hexium_standards[] = {
        {
                .name   = "PAL",        .id     = V4L2_STD_PAL,
                .v_offset       = 28,   .v_field        = 288,
                .h_offset       = 1,    .h_pixels       = 680,
                .v_max_out      = 576,  .h_max_out      = 768,
        }, {
                .name   = "NTSC",       .id     = V4L2_STD_NTSC,
                .v_offset       = 28,   .v_field        = 240,
                .h_offset       = 1,    .h_pixels       = 640,
                .v_max_out      = 480,  .h_max_out      = 640,
        }, {
                .name   = "SECAM",      .id     = V4L2_STD_SECAM,
                .v_offset       = 28,   .v_field        = 288,
                .h_offset       = 1,    .h_pixels       = 720,
                .v_max_out      = 576,  .h_max_out      = 768,
        }
};

/* bring hardware to a sane state. this has to be done, just in case someone
   wants to capture from this device before it has been properly initialized.
   the capture engine would badly fail, because no valid signal arrives on the
   saa7146, thus leading to timeouts and stuff. */
static int hexium_init_done(struct saa7146_dev *dev)
{
        struct hexium *hexium = (struct hexium *) dev->ext_priv;
        union i2c_smbus_data data;
        int i = 0;

        DEB_D(("hexium_init_done called.\n"));

        /* initialize the helper ics to useful values */
        /* Push the full 256-entry default table into the KS0127B at 0x6c. */
        for (i = 0; i < sizeof(hexium_ks0127b); i++) {
                data.byte = hexium_ks0127b[i];
                if (0 != i2c_smbus_xfer(&hexium->i2c_adapter, 0x6c, 0, I2C_SMBUS_WRITE, i, I2C_SMBUS_BYTE_DATA, &data)) {
                        printk("hexium_gemini: hexium_init_done() failed for address 0x%02x\n", i);
                }
        }

        return 0;
}

/* Select a video input by writing the matching register-0x02 value. */
static int hexium_set_input(struct hexium *hexium, int input)
{
        union i2c_smbus_data data;

        DEB_D((".\n"));

        data.byte = hexium_input_select[input].byte;
        if (0 != i2c_smbus_xfer(&hexium->i2c_adapter, 0x6c, 0, I2C_SMBUS_WRITE, hexium_input_select[input].adr, I2C_SMBUS_BYTE_DATA, &data)) {
                return -1;
        }

        return 0;
}

/* Write a -1-terminated (register, value) list to the decoder. */
static int hexium_set_standard(struct hexium *hexium, struct hexium_data *vdec)
{
        union i2c_smbus_data data;
        int i = 0;

        DEB_D((".\n"));

        while (vdec[i].adr != -1) {
                data.byte = vdec[i].byte;
                if (0 != i2c_smbus_xfer(&hexium->i2c_adapter, 0x6c, 0, I2C_SMBUS_WRITE, vdec[i].adr, I2C_SMBUS_BYTE_DATA, &data)) {
                        printk("hexium_init_done: hexium_set_standard() failed for address 0x%02x\n", i);
                        return -1;
                }
                i++;
        }
        return 0;
}

/* VIDIOC_ENUMINPUT: copy the static input descriptor for i->index. */
static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
{
        DEB_EE(("VIDIOC_ENUMINPUT %d.\n", i->index));

        if (i->index >= HEXIUM_INPUTS)
                return -EINVAL;

        memcpy(i, &hexium_inputs[i->index], sizeof(struct v4l2_input));

        DEB_D(("v4l2_ioctl: VIDIOC_ENUMINPUT %d.\n", i->index));
        return 0;
}

/* VIDIOC_G_INPUT: report the currently selected input. */
static int vidioc_g_input(struct file *file, void *fh, unsigned int *input)
{
        struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
        struct hexium *hexium = (struct hexium *) dev->ext_priv;

        *input = hexium->cur_input;

        DEB_D(("VIDIOC_G_INPUT: %d\n", *input));
        return 0;
}

/* VIDIOC_S_INPUT: validate, remember, and program the new input. */
static int vidioc_s_input(struct file *file, void *fh, unsigned int input)
{
        struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
        struct hexium *hexium = (struct hexium *) dev->ext_priv;

        DEB_EE(("VIDIOC_S_INPUT %d.\n", input));

        if (input >= HEXIUM_INPUTS)
                return -EINVAL;

        hexium->cur_input = input;
        hexium_set_input(hexium, input);

        return 0;
}

/* the saa7146 provides some controls (brightness, contrast, saturation)
   which gets registered *after* this function. because of this we have
   to return with a value != 0 even if the function succeeded.. */
static int vidioc_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *qc)
{
        struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
        int i;

        for (i = HEXIUM_CONTROLS - 1; i >= 0; i--) {
                if (hexium_controls[i].id == qc->id) {
                        *qc = hexium_controls[i];
                        DEB_D(("VIDIOC_QUERYCTRL %d.\n", qc->id));
                        return 0;
                }
        }
        /* Not one of ours — delegate to the saa7146 core. */
        return dev->ext_vv_data->core_ops->vidioc_queryctrl(file, fh, qc);
}

/* VIDIOC_G_CTRL: only the private B/W control is handled here. */
static int vidioc_g_ctrl(struct file *file, void *fh, struct v4l2_control *vc)
{
        struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
        struct hexium *hexium = (struct hexium *) dev->ext_priv;
        int i;

        for (i = HEXIUM_CONTROLS - 1; i >= 0; i--) {
                if (hexium_controls[i].id == vc->id)
                        break;
        }

        if (i < 0)
                return dev->ext_vv_data->core_ops->vidioc_g_ctrl(file, fh, vc);

        if (vc->id == V4L2_CID_PRIVATE_BASE) {
                vc->value = hexium->cur_bw;
                DEB_D(("VIDIOC_G_CTRL BW:%d.\n", vc->value));
                return 0;
        }
        return -EINVAL;
}

/*
 * VIDIOC_S_CTRL: setting the B/W control reprograms the decoder with
 * the standard table matching (cur_bw, cur_std).
 */
static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *vc)
{
        struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
        struct hexium *hexium = (struct hexium *) dev->ext_priv;
        int i = 0;

        for (i = HEXIUM_CONTROLS - 1; i >= 0; i--) {
                if (hexium_controls[i].id == vc->id)
                        break;
        }

        if (i < 0)
                return dev->ext_vv_data->core_ops->vidioc_s_ctrl(file, fh, vc);

        if (vc->id == V4L2_CID_PRIVATE_BASE)
                hexium->cur_bw = vc->value;

        DEB_D(("VIDIOC_S_CTRL BW:%d.\n", hexium->cur_bw));

        if (0 == hexium->cur_bw && V4L2_STD_PAL == hexium->cur_std) {
                hexium_set_standard(hexium, hexium_pal);
                return 0;
        }
        if (0 == hexium->cur_bw && V4L2_STD_NTSC == hexium->cur_std) {
                hexium_set_standard(hexium, hexium_ntsc);
                return 0;
        }
        if (0 == hexium->cur_bw && V4L2_STD_SECAM == hexium->cur_std) {
                hexium_set_standard(hexium, hexium_secam);
                return 0;
        }
        if (1 == hexium->cur_bw && V4L2_STD_PAL == hexium->cur_std) {
                hexium_set_standard(hexium, hexium_pal_bw);
                return 0;
        }
        if (1 == hexium->cur_bw && V4L2_STD_NTSC == hexium->cur_std) {
                hexium_set_standard(hexium, hexium_ntsc_bw);
                return 0;
        }
        if (1 == hexium->cur_bw && V4L2_STD_SECAM == hexium->cur_std)
                /* fixme: is there no bw secam mode? */
                return -EINVAL;

        return -EINVAL;
}

static struct saa7146_ext_vv vv_data;

/* this function only gets called when the probing was successful */
static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data *info)
{
        /*
         * NOTE(review): this initializer is dead — "hexium" is
         * immediately reassigned by the kzalloc() below.
         */
        struct hexium *hexium = (struct hexium *) dev->ext_priv;
        int ret;

        DEB_EE((".\n"));

        hexium = kzalloc(sizeof(struct hexium), GFP_KERNEL);
        if (NULL == hexium) {
                printk("hexium_gemini: not enough kernel memory in hexium_attach().\n");
                return -ENOMEM;
        }
        dev->ext_priv = hexium;

        /* enable i2c-port pins */
        saa7146_write(dev, MC1, (MASK_08 | MASK_24 | MASK_10 | MASK_26));

        hexium->i2c_adapter = (struct i2c_adapter) {
                .name = "hexium gemini",
        };
        saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
        if (i2c_add_adapter(&hexium->i2c_adapter) < 0) {
                DEB_S(("cannot register i2c-device. skipping.\n"));
                kfree(hexium);
                return -EFAULT;
        }

        /*  set HWControl GPIO number 2 */
        saa7146_setgpio(dev, 2, SAA7146_GPIO_OUTHI);

        saa7146_write(dev, DD1_INIT, 0x07000700);
        saa7146_write(dev, DD1_STREAM_B, 0x00000000);
        saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));

        /* the rest */
        hexium->cur_input = 0;
        hexium_init_done(dev);

        hexium_set_standard(hexium, hexium_pal);
        hexium->cur_std = V4L2_STD_PAL;

        hexium_set_input(hexium, 0);
        hexium->cur_input = 0;

        saa7146_vv_init(dev, &vv_data);
        vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
        vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
        vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
        vv_data.ops.vidioc_enum_input = vidioc_enum_input;
        vv_data.ops.vidioc_g_input = vidioc_g_input;
        vv_data.ops.vidioc_s_input = vidioc_s_input;
        ret = saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_GRABBER);
        if (ret < 0) {
                /*
                 * NOTE(review): on this failure path the i2c adapter is
                 * not deleted and "hexium" is not freed — looks like a
                 * resource leak; confirm against later upstream fixes.
                 */
                printk("hexium_gemini: cannot register capture v4l2 device. skipping.\n");
                return ret;
        }

        printk("hexium_gemini: found 'hexium gemini' frame grabber-%d.\n", hexium_num);
        hexium_num++;

        return 0;
}

/* Tear down in reverse order of attach; decrements the card count. */
static int hexium_detach(struct saa7146_dev *dev)
{
        struct hexium *hexium = (struct hexium *) dev->ext_priv;

        DEB_EE(("dev:%p\n", dev));

        saa7146_unregister_device(&hexium->video_dev, dev);
        saa7146_vv_release(dev);

        hexium_num--;

        i2c_del_adapter(&hexium->i2c_adapter);
        kfree(hexium);
        return 0;
}

/* Core callback for VIDIOC_S_STD: program the matching decoder table. */
static int std_callback(struct saa7146_dev *dev, struct saa7146_standard *std)
{
        struct hexium *hexium = (struct hexium *) dev->ext_priv;

        if (V4L2_STD_PAL == std->id) {
                hexium_set_standard(hexium, hexium_pal);
                hexium->cur_std = V4L2_STD_PAL;
                return 0;
        } else if (V4L2_STD_NTSC == std->id) {
                hexium_set_standard(hexium, hexium_ntsc);
                hexium->cur_std = V4L2_STD_NTSC;
                return 0;
        } else if (V4L2_STD_SECAM == std->id) {
                hexium_set_standard(hexium, hexium_secam);
                hexium->cur_std = V4L2_STD_SECAM;
                return 0;
        }

        return -1;
}

static struct saa7146_extension hexium_extension;

static struct saa7146_pci_extension_data hexium_gemini_4bnc = {
        .ext_priv = "Hexium Gemini (4 BNC)",
        .ext = &hexium_extension,
};

static struct saa7146_pci_extension_data hexium_gemini_dual_4bnc = {
        .ext_priv = "Hexium Gemini Dual (4 BNC)",
        .ext = &hexium_extension,
};

/* PCI ids: Philips SAA7146 with Hexium subsystem ids 0x2401/0x2402. */
static struct pci_device_id pci_tbl[] = {
        {
                .vendor = PCI_VENDOR_ID_PHILIPS,
                .device = PCI_DEVICE_ID_PHILIPS_SAA7146,
                .subvendor = 0x17c8,
                .subdevice = 0x2401,
                .driver_data = (unsigned long) &hexium_gemini_4bnc,
        },
        {
                .vendor = PCI_VENDOR_ID_PHILIPS,
                .device = PCI_DEVICE_ID_PHILIPS_SAA7146,
                .subvendor = 0x17c8,
                .subdevice = 0x2402,
                .driver_data = (unsigned long) &hexium_gemini_dual_4bnc,
        },
        {
                .vendor = 0,
        }
};

MODULE_DEVICE_TABLE(pci, pci_tbl);

static struct saa7146_ext_vv vv_data = {
        .inputs = HEXIUM_INPUTS,
        .capabilities = 0,
        .stds = &hexium_standards[0],
        .num_stds = sizeof(hexium_standards) / sizeof(struct saa7146_standard),
        .std_callback = &std_callback,
};

static struct saa7146_extension hexium_extension = {
        .name = "hexium gemini",
        .flags = SAA7146_USE_I2C_IRQ,

        .pci_tbl = &pci_tbl[0],
        .module = THIS_MODULE,

        .attach = hexium_attach,
        .detach = hexium_detach,

        .irq_mask = 0,
        .irq_func = NULL,
};

static int __init hexium_init_module(void)
{
        if (0 != saa7146_register_extension(&hexium_extension)) {
                DEB_S(("failed to register extension.\n"));
                return -ENODEV;
        }

        return 0;
}

static void __exit hexium_cleanup_module(void)
{
        saa7146_unregister_extension(&hexium_extension);
}

module_init(hexium_init_module);
module_exit(hexium_cleanup_module);

MODULE_DESCRIPTION("video4linux-2 driver for Hexium Gemini frame grabber cards");
MODULE_AUTHOR("Michael Hunold <michael@mihu.de>");
MODULE_LICENSE("GPL");
gpl-2.0
bigzz/sc7715-kernel
arch/arm/mach-s3c24xx/iotiming-s3c2410.c
4211
11959
/*
 * Copyright (c) 2006-2009 Simtec Electronics
 *      http://armlinux.simtec.co.uk/
 *      Ben Dooks <ben@simtec.co.uk>
 *
 * S3C24XX CPU Frequency scaling - IO timing for S3C2410/S3C2440/S3C2442
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/cpufreq.h>
#include <linux/seq_file.h>
#include <linux/io.h>
#include <linux/slab.h>

#include <mach/map.h>
#include <mach/regs-clock.h>

#include <plat/cpu-freq-core.h>

#include "regs-mem.h"

/* Timings are stored in 10ths of ns; split into integer.fraction for print. */
#define print_ns(x) ((x) / 10), ((x) % 10)

/**
 * s3c2410_print_timing - print bank timing data for debug purposes
 * @pfx: The prefix to put on the output
 * @timings: The timing information to print.
 */
static void s3c2410_print_timing(const char *pfx,
                                 struct s3c_iotimings *timings)
{
        struct s3c2410_iobank_timing *bt;
        int bank;

        for (bank = 0; bank < MAX_BANKS; bank++) {
                bt = timings->bank[bank].io_2410;
                if (!bt)
                        continue;

                printk(KERN_DEBUG "%s %d: Tacs=%d.%d, Tcos=%d.%d, Tacc=%d.%d, "
                       "Tcoh=%d.%d, Tcah=%d.%d\n", pfx, bank,
                       print_ns(bt->tacs),
                       print_ns(bt->tcos),
                       print_ns(bt->tacc),
                       print_ns(bt->tcoh),
                       print_ns(bt->tcah));
        }
}

/**
 * bank_reg - convert bank number to pointer to the control register.
 * @bank: The IO bank number.
 *
 * BANKCON registers are contiguous 32-bit words starting at BANKCON0.
 */
static inline void __iomem *bank_reg(unsigned int bank)
{
        return S3C2410_BANKCON0 + (bank << 2);
}

/**
 * bank_is_io - test whether bank is used for IO
 * @bankcon: The bank control register.
 *
 * This is a simplistic test to see if any BANKCON[x] is not an IO
 * bank. It currently does not take into account whether BWSCON has
 * an illegal width-setting in it, or if the pin connected to nCS[x]
 * is actually being handled as a chip-select.
 */
static inline int bank_is_io(unsigned long bankcon)
{
        return !(bankcon & S3C2410_BANKCON_SDRAM);
}

/**
 * to_div - convert cycle time to divisor
 * @cyc: The cycle time, in 10ths of nanoseconds.
 * @hclk_tns: The cycle time for HCLK, in 10ths of nanoseconds.
 *
 * Convert the given cycle time into the divisor to use to obtain it from
 * HCLK.  A zero cycle time maps to divisor 0 (no wait states).
 */
static inline unsigned int to_div(unsigned int cyc, unsigned int hclk_tns)
{
        if (cyc == 0)
                return 0;

        return DIV_ROUND_UP(cyc, hclk_tns);
}

/**
 * calc_0124 - calculate divisor control for divisors that do /0, /1. /2 and /4
 * @cyc: The cycle time, in 10ths of nanoseconds.
 * @hclk_tns: The cycle time for HCLK, in 10ths of nanoseconds.
 * @v: Pointer to register to alter.
 * @shift: The shift to get to the control bits.
 *
 * Calculate the divisor, and turn it into the correct control bits to
 * set in the result, @v.
 *
 * NOTE(review): declared unsigned but returns -1 (i.e. ~0u) when the
 * divisor is out of range; callers OR several of these results together
 * and test for non-zero, which relies on the success value being 0.
 */
static unsigned int calc_0124(unsigned int cyc, unsigned long hclk_tns,
                              unsigned long *v, int shift)
{
        unsigned int div = to_div(cyc, hclk_tns);
        unsigned long val;

        s3c_freq_iodbg("%s: cyc=%d, hclk=%lu, shift=%d => div %d\n",
                       __func__, cyc, hclk_tns, shift, div);

        switch (div) {
        case 0:
                val = 0;
                break;
        case 1:
                val = 1;
                break;
        case 2:
                val = 2;
                break;
        case 3:
        case 4:
                val = 3;
                break;
        default:
                return -1;
        }

        *v |= val << shift;
        return 0;
}

/* Currently no support for Tacp calculations — always succeeds. */
int calc_tacp(unsigned int cyc, unsigned long hclk, unsigned long *v)
{
        /* Currently no support for Tacp calculations. */
        return 0;
}

/**
 * calc_tacc - calculate divisor control for tacc.
 * @cyc: The cycle time, in 10ths of nanoseconds.
 * @nwait_en: IS nWAIT enabled for this bank.
 * @hclk_tns: The cycle time for HCLK, in 10ths of nanoseconds.
 * @v: Pointer to register to alter.
 *
 * Calculate the divisor control for tACC, taking into account whether
 * the bank has nWAIT enabled. The result is used to modify the value
 * pointed to by @v.  Returns -1 when the divisor exceeds 14 (the
 * largest encodable value); otherwise 0.
 */
static int calc_tacc(unsigned int cyc, int nwait_en,
                     unsigned long hclk_tns, unsigned long *v)
{
        unsigned int div = to_div(cyc, hclk_tns);
        unsigned long val;

        s3c_freq_iodbg("%s: cyc=%u, nwait=%d, hclk=%lu => div=%u\n",
                       __func__, cyc, nwait_en, hclk_tns, div);

        /* if nWait enabled on an bank, Tacc must be at-least 4 cycles. */
        if (nwait_en && div < 4)
                div = 4;

        switch (div) {
        case 0:
                val = 0;
                break;

        case 1:
        case 2:
        case 3:
        case 4:
                val = div - 1;
                break;

        case 5:
        case 6:
                val = 4;
                break;

        case 7:
        case 8:
                val = 5;
                break;

        case 9:
        case 10:
                val = 6;
                break;

        case 11:
        case 12:
        case 13:
        case 14:
                val = 7;
                break;

        default:
                return -1;
        }

        /* tACC field lives at bits [10:8] of BANKCON. */
        *v |= val << 8;
        return 0;
}

/**
 * s3c2410_calc_bank - calculate bank timing information
 * @cfg: The configuration we need to calculate for.
 * @bt: The bank timing information.
 *
 * Given the cycle timing for a bank @bt, calculate the new BANKCON
 * setting for the @cfg timing. This updates the timing information
 * ready for the cpu frequency change.
 */
static int s3c2410_calc_bank(struct s3c_cpufreq_config *cfg,
                             struct s3c2410_iobank_timing *bt)
{
        unsigned long hclk = cfg->freq.hclk_tns;
        unsigned long res;
        int ret;

        res = bt->bankcon;
        /* Keep only the mode/width bits; all timing fields are recomputed. */
        res &= (S3C2410_BANKCON_SDRAM | S3C2410_BANKCON_PMC16);

        /* tacp: 2,3,4,5 */
        /* tcah: 0,1,2,4 */
        /* tcoh: 0,1,2,4 */
        /* tacc: 1,2,3,4,6,7,10,14 (>4 for nwait) */
        /* tcos: 0,1,2,4 */
        /* tacs: 0,1,2,4 */

        /* OR-fold the error returns; any failure leaves ret non-zero. */
        ret = calc_0124(bt->tacs, hclk, &res, S3C2410_BANKCON_Tacs_SHIFT);
        ret |= calc_0124(bt->tcos, hclk, &res, S3C2410_BANKCON_Tcos_SHIFT);
        ret |= calc_0124(bt->tcah, hclk, &res, S3C2410_BANKCON_Tcah_SHIFT);
        ret |= calc_0124(bt->tcoh, hclk, &res, S3C2410_BANKCON_Tcoh_SHIFT);

        if (ret)
                return -EINVAL;

        ret |= calc_tacp(bt->tacp, hclk, &res);
        ret |= calc_tacc(bt->tacc, bt->nwait_en, hclk, &res);

        if (ret)
                return -EINVAL;

        bt->bankcon = res;
        return 0;
}

/* Reverse mapping: tACC register field value -> HCLK cycle multiplier. */
static unsigned int tacc_tab[] = {
        [0]     = 1,
        [1]     = 2,
        [2]     = 3,
        [3]     = 4,
        [4]     = 6,
        [5]     = 9,
        [6]     = 10,
        [7]     = 14,
};

/**
 * get_tacc - turn tACC value into cycle time
 * @hclk_tns: The cycle time for HCLK, in 10ths of nanoseconds.
 * @val: The bank timing register value, shifted down.
 */
static unsigned int get_tacc(unsigned long hclk_tns,
                             unsigned long val)
{
        val &= 7;
        return hclk_tns * tacc_tab[val];
}

/**
 * get_0124 - turn 0/1/2/4 divider into cycle time
 * @hclk_tns: The cycle time for HCLK, in 10ths of nanoseconds.
 * @val: The bank timing register value, shifted down.
 */
static unsigned int get_0124(unsigned long hclk_tns,
                             unsigned long val)
{
        val &= 3;
        return hclk_tns * ((val == 3) ? 4 : val);
}

/**
 * s3c2410_iotiming_getbank - turn BANKCON into cycle time information
 * @cfg: The frequency configuration
 * @bt: The bank timing to fill in (uses cached BANKCON)
 *
 * Given the BANKCON setting in @bt and the current frequency settings
 * in @cfg, update the cycle timing information.
 */
void s3c2410_iotiming_getbank(struct s3c_cpufreq_config *cfg,
                              struct s3c2410_iobank_timing *bt)
{
        unsigned long bankcon = bt->bankcon;
        unsigned long hclk = cfg->freq.hclk_tns;

        bt->tcah = get_0124(hclk, bankcon >> S3C2410_BANKCON_Tcah_SHIFT);
        bt->tcoh = get_0124(hclk, bankcon >> S3C2410_BANKCON_Tcoh_SHIFT);
        bt->tcos = get_0124(hclk, bankcon >> S3C2410_BANKCON_Tcos_SHIFT);
        bt->tacs = get_0124(hclk, bankcon >> S3C2410_BANKCON_Tacs_SHIFT);
        bt->tacc = get_tacc(hclk, bankcon >> S3C2410_BANKCON_Tacc_SHIFT);
}

/**
 * s3c2410_iotiming_debugfs - debugfs show io bank timing information
 * @seq: The seq_file to write output to using seq_printf().
 * @cfg: The current configuration.
 * @iob: The IO bank information to decode.
 */
void s3c2410_iotiming_debugfs(struct seq_file *seq,
                              struct s3c_cpufreq_config *cfg,
                              union s3c_iobank *iob)
{
        struct s3c2410_iobank_timing *bt = iob->io_2410;
        unsigned long bankcon = bt->bankcon;
        unsigned long hclk = cfg->freq.hclk_tns;
        unsigned int tacs;
        unsigned int tcos;
        unsigned int tacc;
        unsigned int tcoh;
        unsigned int tcah;

        seq_printf(seq, "BANKCON=0x%08lx\n", bankcon);

        /* Decode the register live so "Set" can be compared to "Read". */
        tcah = get_0124(hclk, bankcon >> S3C2410_BANKCON_Tcah_SHIFT);
        tcoh = get_0124(hclk, bankcon >> S3C2410_BANKCON_Tcoh_SHIFT);
        tcos = get_0124(hclk, bankcon >> S3C2410_BANKCON_Tcos_SHIFT);
        tacs = get_0124(hclk, bankcon >> S3C2410_BANKCON_Tacs_SHIFT);
        tacc = get_tacc(hclk, bankcon >> S3C2410_BANKCON_Tacc_SHIFT);

        seq_printf(seq,
                   "\tRead: Tacs=%d.%d, Tcos=%d.%d, Tacc=%d.%d, Tcoh=%d.%d, Tcah=%d.%d\n",
                   print_ns(bt->tacs),
                   print_ns(bt->tcos),
                   print_ns(bt->tacc),
                   print_ns(bt->tcoh),
                   print_ns(bt->tcah));

        seq_printf(seq,
                   "\t Set: Tacs=%d.%d, Tcos=%d.%d, Tacc=%d.%d, Tcoh=%d.%d, Tcah=%d.%d\n",
                   print_ns(tacs),
                   print_ns(tcos),
                   print_ns(tacc),
                   print_ns(tcoh),
                   print_ns(tcah));
}

/**
 * s3c2410_iotiming_calc - Calculate bank timing for frequency change.
 * @cfg: The frequency configuration
 * @iot: The IO timing information to fill out.
 *
 * Calculate the new values for the banks in @iot based on the new
 * frequency information in @cfg. This is then used by s3c2410_iotiming_set()
 * to update the timing when necessary.
 */
int s3c2410_iotiming_calc(struct s3c_cpufreq_config *cfg,
                          struct s3c_iotimings *iot)
{
        struct s3c2410_iobank_timing *bt;
        unsigned long bankcon;
        int bank;
        int ret;

        for (bank = 0; bank < MAX_BANKS; bank++) {
                bankcon = __raw_readl(bank_reg(bank));
                bt = iot->bank[bank].io_2410;

                if (!bt)
                        continue;

                bt->bankcon = bankcon;

                ret = s3c2410_calc_bank(cfg, bt);
                if (ret) {
                        printk(KERN_ERR "%s: cannot calculate bank %d io\n",
                               __func__, bank);
                        goto err;
                }

                s3c_freq_iodbg("%s: bank %d: con=%08lx\n",
                               __func__, bank, bt->bankcon);
        }

        return 0;
 err:
        return ret;
}

/**
 * s3c2410_iotiming_set - set the IO timings from the given setup.
 * @cfg: The frequency configuration
 * @iot: The IO timing information to use.
 *
 * Set all the currently used IO bank timing information generated
 * by s3c2410_iotiming_calc() once the core has validated that all
 * the new values are within permitted bounds.
 */
void s3c2410_iotiming_set(struct s3c_cpufreq_config *cfg,
                          struct s3c_iotimings *iot)
{
        struct s3c2410_iobank_timing *bt;
        int bank;

        /* set the io timings from the specifier */

        for (bank = 0; bank < MAX_BANKS; bank++) {
                bt = iot->bank[bank].io_2410;
                if (!bt)
                        continue;

                __raw_writel(bt->bankcon, bank_reg(bank));
        }
}

/**
 * s3c2410_iotiming_get - Get the timing information from current registers.
 * @cfg: The frequency configuration
 * @timings: The IO timing information to fill out.
 *
 * Calculate the @timings timing information from the current frequency
 * information in @cfg, iterating through all the IO banks, reading the
 * state and then updating @timings as necessary.
 *
 * This is used at the moment on initialisation to get the current
 * configuration so that boards do not have to carry their own setup
 * if the timings are correct on initialisation.
 */
int s3c2410_iotiming_get(struct s3c_cpufreq_config *cfg,
                         struct s3c_iotimings *timings)
{
        struct s3c2410_iobank_timing *bt;
        unsigned long bankcon;
        unsigned long bwscon;
        int bank;

        bwscon = __raw_readl(S3C2410_BWSCON);

        /* look through all banks to see what is currently set. */

        for (bank = 0; bank < MAX_BANKS; bank++) {
                bankcon = __raw_readl(bank_reg(bank));

                if (!bank_is_io(bankcon))
                        continue;

                s3c_freq_iodbg("%s: bank %d: con %08lx\n",
                               __func__, bank, bankcon);

                bt = kzalloc(sizeof(struct s3c2410_iobank_timing), GFP_KERNEL);
                if (!bt) {
                        printk(KERN_ERR "%s: no memory for bank\n", __func__);
                        return -ENOMEM;
                }

                /* find out if nWait is enabled for bank. */

                if (bank != 0) {
                        unsigned long tmp  = S3C2410_BWSCON_GET(bwscon, bank);
                        if (tmp & S3C2410_BWSCON_WS)
                                bt->nwait_en = 1;
                }

                timings->bank[bank].io_2410 = bt;
                bt->bankcon = bankcon;

                s3c2410_iotiming_getbank(cfg, bt);
        }

        s3c2410_print_timing("get", timings);
        return 0;
}
gpl-2.0
GoldRenard/android_kernel_lenovo_msm8926
fs/eventfd.c
4467
11232
/* * fs/eventfd.c * * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org> * */ #include <linux/file.h> #include <linux/poll.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/anon_inodes.h> #include <linux/syscalls.h> #include <linux/export.h> #include <linux/kref.h> #include <linux/eventfd.h> struct eventfd_ctx { struct kref kref; wait_queue_head_t wqh; /* * Every time that a write(2) is performed on an eventfd, the * value of the __u64 being written is added to "count" and a * wakeup is performed on "wqh". A read(2) will return the "count" * value to userspace, and will reset "count" to zero. The kernel * side eventfd_signal() also, adds to the "count" counter and * issue a wakeup. */ __u64 count; unsigned int flags; }; /** * eventfd_signal - Adds @n to the eventfd counter. * @ctx: [in] Pointer to the eventfd context. * @n: [in] Value of the counter to be added to the eventfd internal counter. * The value cannot be negative. * * This function is supposed to be called by the kernel in paths that do not * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX * value, and we signal this as overflow condition by returining a POLLERR * to poll(2). * * Returns @n in case of success, a non-negative number lower than @n in case * of overflow, or the following error codes: * * -EINVAL : The value of @n is negative. 
*/ int eventfd_signal(struct eventfd_ctx *ctx, int n) { unsigned long flags; if (n < 0) return -EINVAL; spin_lock_irqsave(&ctx->wqh.lock, flags); if (ULLONG_MAX - ctx->count < n) n = (int) (ULLONG_MAX - ctx->count); ctx->count += n; if (waitqueue_active(&ctx->wqh)) wake_up_locked_poll(&ctx->wqh, POLLIN); spin_unlock_irqrestore(&ctx->wqh.lock, flags); return n; } EXPORT_SYMBOL_GPL(eventfd_signal); static void eventfd_free_ctx(struct eventfd_ctx *ctx) { kfree(ctx); } static void eventfd_free(struct kref *kref) { struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref); eventfd_free_ctx(ctx); } /** * eventfd_ctx_get - Acquires a reference to the internal eventfd context. * @ctx: [in] Pointer to the eventfd context. * * Returns: In case of success, returns a pointer to the eventfd context. */ struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx) { kref_get(&ctx->kref); return ctx; } EXPORT_SYMBOL_GPL(eventfd_ctx_get); /** * eventfd_ctx_put - Releases a reference to the internal eventfd context. * @ctx: [in] Pointer to eventfd context. * * The eventfd context reference must have been previously acquired either * with eventfd_ctx_get() or eventfd_ctx_fdget(). 
*/ void eventfd_ctx_put(struct eventfd_ctx *ctx) { kref_put(&ctx->kref, eventfd_free); } EXPORT_SYMBOL_GPL(eventfd_ctx_put); static int eventfd_release(struct inode *inode, struct file *file) { struct eventfd_ctx *ctx = file->private_data; wake_up_poll(&ctx->wqh, POLLHUP); eventfd_ctx_put(ctx); return 0; } static unsigned int eventfd_poll(struct file *file, poll_table *wait) { struct eventfd_ctx *ctx = file->private_data; unsigned int events = 0; unsigned long flags; poll_wait(file, &ctx->wqh, wait); spin_lock_irqsave(&ctx->wqh.lock, flags); if (ctx->count > 0) events |= POLLIN; if (ctx->count == ULLONG_MAX) events |= POLLERR; if (ULLONG_MAX - 1 > ctx->count) events |= POLLOUT; spin_unlock_irqrestore(&ctx->wqh.lock, flags); return events; } static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt) { *cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count; ctx->count -= *cnt; } /** * eventfd_ctx_remove_wait_queue - Read the current counter and removes wait queue. * @ctx: [in] Pointer to eventfd context. * @wait: [in] Wait queue to be removed. * @cnt: [out] Pointer to the 64-bit counter value. * * Returns %0 if successful, or the following error codes: * * -EAGAIN : The operation would have blocked. * * This is used to atomically remove a wait queue entry from the eventfd wait * queue head, and read/reset the counter value. */ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait, __u64 *cnt) { unsigned long flags; spin_lock_irqsave(&ctx->wqh.lock, flags); eventfd_ctx_do_read(ctx, cnt); __remove_wait_queue(&ctx->wqh, wait); if (*cnt != 0 && waitqueue_active(&ctx->wqh)) wake_up_locked_poll(&ctx->wqh, POLLOUT); spin_unlock_irqrestore(&ctx->wqh.lock, flags); return *cnt != 0 ? 0 : -EAGAIN; } EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue); /** * eventfd_ctx_read - Reads the eventfd counter or wait if it is zero. * @ctx: [in] Pointer to eventfd context. * @no_wait: [in] Different from zero if the operation should not block. 
* @cnt: [out] Pointer to the 64-bit counter value. * * Returns %0 if successful, or the following error codes: * * -EAGAIN : The operation would have blocked but @no_wait was non-zero. * -ERESTARTSYS : A signal interrupted the wait operation. * * If @no_wait is zero, the function might sleep until the eventfd internal * counter becomes greater than zero. */ ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt) { ssize_t res; DECLARE_WAITQUEUE(wait, current); spin_lock_irq(&ctx->wqh.lock); *cnt = 0; res = -EAGAIN; if (ctx->count > 0) res = 0; else if (!no_wait) { __add_wait_queue(&ctx->wqh, &wait); for (;;) { set_current_state(TASK_INTERRUPTIBLE); if (ctx->count > 0) { res = 0; break; } if (signal_pending(current)) { res = -ERESTARTSYS; break; } spin_unlock_irq(&ctx->wqh.lock); schedule(); spin_lock_irq(&ctx->wqh.lock); } __remove_wait_queue(&ctx->wqh, &wait); __set_current_state(TASK_RUNNING); } if (likely(res == 0)) { eventfd_ctx_do_read(ctx, cnt); if (waitqueue_active(&ctx->wqh)) wake_up_locked_poll(&ctx->wqh, POLLOUT); } spin_unlock_irq(&ctx->wqh.lock); return res; } EXPORT_SYMBOL_GPL(eventfd_ctx_read); static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct eventfd_ctx *ctx = file->private_data; ssize_t res; __u64 cnt; if (count < sizeof(cnt)) return -EINVAL; res = eventfd_ctx_read(ctx, file->f_flags & O_NONBLOCK, &cnt); if (res < 0) return res; return put_user(cnt, (__u64 __user *) buf) ? 
-EFAULT : sizeof(cnt); } static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct eventfd_ctx *ctx = file->private_data; ssize_t res; __u64 ucnt; DECLARE_WAITQUEUE(wait, current); if (count < sizeof(ucnt)) return -EINVAL; if (copy_from_user(&ucnt, buf, sizeof(ucnt))) return -EFAULT; if (ucnt == ULLONG_MAX) return -EINVAL; spin_lock_irq(&ctx->wqh.lock); res = -EAGAIN; if (ULLONG_MAX - ctx->count > ucnt) res = sizeof(ucnt); else if (!(file->f_flags & O_NONBLOCK)) { __add_wait_queue(&ctx->wqh, &wait); for (res = 0;;) { set_current_state(TASK_INTERRUPTIBLE); if (ULLONG_MAX - ctx->count > ucnt) { res = sizeof(ucnt); break; } if (signal_pending(current)) { res = -ERESTARTSYS; break; } spin_unlock_irq(&ctx->wqh.lock); schedule(); spin_lock_irq(&ctx->wqh.lock); } __remove_wait_queue(&ctx->wqh, &wait); __set_current_state(TASK_RUNNING); } if (likely(res > 0)) { ctx->count += ucnt; if (waitqueue_active(&ctx->wqh)) wake_up_locked_poll(&ctx->wqh, POLLIN); } spin_unlock_irq(&ctx->wqh.lock); return res; } static const struct file_operations eventfd_fops = { .release = eventfd_release, .poll = eventfd_poll, .read = eventfd_read, .write = eventfd_write, .llseek = noop_llseek, }; /** * eventfd_fget - Acquire a reference of an eventfd file descriptor. * @fd: [in] Eventfd file descriptor. * * Returns a pointer to the eventfd file structure in case of success, or the * following error pointer: * * -EBADF : Invalid @fd file descriptor. * -EINVAL : The @fd file descriptor is not an eventfd file. */ struct file *eventfd_fget(int fd) { struct file *file; file = fget(fd); if (!file) return ERR_PTR(-EBADF); if (file->f_op != &eventfd_fops) { fput(file); return ERR_PTR(-EINVAL); } return file; } EXPORT_SYMBOL_GPL(eventfd_fget); /** * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context. * @fd: [in] Eventfd file descriptor. 
* * Returns a pointer to the internal eventfd context, otherwise the error * pointers returned by the following functions: * * eventfd_fget */ struct eventfd_ctx *eventfd_ctx_fdget(int fd) { struct file *file; struct eventfd_ctx *ctx; file = eventfd_fget(fd); if (IS_ERR(file)) return (struct eventfd_ctx *) file; ctx = eventfd_ctx_get(file->private_data); fput(file); return ctx; } EXPORT_SYMBOL_GPL(eventfd_ctx_fdget); /** * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context. * @file: [in] Eventfd file pointer. * * Returns a pointer to the internal eventfd context, otherwise the error * pointer: * * -EINVAL : The @fd file descriptor is not an eventfd file. */ struct eventfd_ctx *eventfd_ctx_fileget(struct file *file) { if (file->f_op != &eventfd_fops) return ERR_PTR(-EINVAL); return eventfd_ctx_get(file->private_data); } EXPORT_SYMBOL_GPL(eventfd_ctx_fileget); /** * eventfd_file_create - Creates an eventfd file pointer. * @count: Initial eventfd counter value. * @flags: Flags for the eventfd file. * * This function creates an eventfd file pointer, w/out installing it into * the fd table. This is useful when the eventfd file is used during the * initialization of data structures that require extra setup after the eventfd * creation. So the eventfd creation is split into the file pointer creation * phase, and the file descriptor installation phase. * In this way races with userspace closing the newly installed file descriptor * can be avoided. * Returns an eventfd file pointer, or a proper error pointer. */ struct file *eventfd_file_create(unsigned int count, int flags) { struct file *file; struct eventfd_ctx *ctx; /* Check the EFD_* constants for consistency. 
*/ BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC); BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK); if (flags & ~EFD_FLAGS_SET) return ERR_PTR(-EINVAL); ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return ERR_PTR(-ENOMEM); kref_init(&ctx->kref); init_waitqueue_head(&ctx->wqh); ctx->count = count; ctx->flags = flags; file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS)); if (IS_ERR(file)) eventfd_free_ctx(ctx); return file; } SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags) { int fd, error; struct file *file; error = get_unused_fd_flags(flags & EFD_SHARED_FCNTL_FLAGS); if (error < 0) return error; fd = error; file = eventfd_file_create(count, flags); if (IS_ERR(file)) { error = PTR_ERR(file); goto err_put_unused_fd; } fd_install(fd, file); return fd; err_put_unused_fd: put_unused_fd(fd); return error; } SYSCALL_DEFINE1(eventfd, unsigned int, count) { return sys_eventfd2(count, 0); }
gpl-2.0
loli10K/linux-sunxi
arch/m68k/platform/527x/config.c
4467
2630
/***************************************************************************/ /* * linux/arch/m68knommu/platform/527x/config.c * * Sub-architcture dependent initialization code for the Freescale * 5270/5271 CPUs. * * Copyright (C) 1999-2004, Greg Ungerer (gerg@snapgear.com) * Copyright (C) 2001-2004, SnapGear Inc. (www.snapgear.com) */ /***************************************************************************/ #include <linux/kernel.h> #include <linux/param.h> #include <linux/init.h> #include <linux/io.h> #include <asm/machdep.h> #include <asm/coldfire.h> #include <asm/mcfsim.h> #include <asm/mcfuart.h> /***************************************************************************/ #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) static void __init m527x_qspi_init(void) { #if defined(CONFIG_M5271) u16 par; /* setup QSPS pins for QSPI with gpio CS control */ writeb(0x1f, MCFGPIO_PAR_QSPI); /* and CS2 & CS3 as gpio */ par = readw(MCFGPIO_PAR_TIMER); par &= 0x3f3f; writew(par, MCFGPIO_PAR_TIMER); #elif defined(CONFIG_M5275) /* setup QSPS pins for QSPI with gpio CS control */ writew(0x003e, MCFGPIO_PAR_QSPI); #endif } #endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ /***************************************************************************/ static void __init m527x_uarts_init(void) { u16 sepmask; /* * External Pin Mask Setting & Enable External Pin for Interface */ sepmask = readw(MCF_IPSBAR + MCF_GPIO_PAR_UART); sepmask |= UART0_ENABLE_MASK | UART1_ENABLE_MASK | UART2_ENABLE_MASK; writew(sepmask, MCF_IPSBAR + MCF_GPIO_PAR_UART); } /***************************************************************************/ static void __init m527x_fec_init(void) { u16 par; u8 v; /* Set multi-function pins to ethernet mode for fec0 */ #if defined(CONFIG_M5271) v = readb(MCF_IPSBAR + 0x100047); writeb(v | 0xf0, MCF_IPSBAR + 0x100047); #else par = readw(MCF_IPSBAR + 0x100082); writew(par | 0xf00, MCF_IPSBAR + 0x100082); v = readb(MCF_IPSBAR + 0x100078); writeb(v | 0xc0, MCF_IPSBAR + 
0x100078); /* Set multi-function pins to ethernet mode for fec1 */ par = readw(MCF_IPSBAR + 0x100082); writew(par | 0xa0, MCF_IPSBAR + 0x100082); v = readb(MCF_IPSBAR + 0x100079); writeb(v | 0xc0, MCF_IPSBAR + 0x100079); #endif } /***************************************************************************/ void __init config_BSP(char *commandp, int size) { mach_sched_init = hw_timer_init; m527x_uarts_init(); m527x_fec_init(); #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) m527x_qspi_init(); #endif } /***************************************************************************/
gpl-2.0
vinay94185vinay/Flamingo-kernel
drivers/video/neofb.c
4979
56721
/* * linux/drivers/video/neofb.c -- NeoMagic Framebuffer Driver * * Copyright (c) 2001-2002 Denis Oliver Kropp <dok@directfb.org> * * * Card specific code is based on XFree86's neomagic driver. * Framebuffer framework code is based on code of cyber2000fb. * * This file is subject to the terms and conditions of the GNU General * Public License. See the file COPYING in the main directory of this * archive for more details. * * * 0.4.1 * - Cosmetic changes (dok) * * 0.4 * - Toshiba Libretto support, allow modes larger than LCD size if * LCD is disabled, keep BIOS settings if internal/external display * haven't been enabled explicitly * (Thomas J. Moore <dark@mama.indstate.edu>) * * 0.3.3 * - Porting over to new fbdev api. (jsimmons) * * 0.3.2 * - got rid of all floating point (dok) * * 0.3.1 * - added module license (dok) * * 0.3 * - hardware accelerated clear and move for 2200 and above (dok) * - maximum allowed dotclock is handled now (dok) * * 0.2.1 * - correct panning after X usage (dok) * - added module and kernel parameters (dok) * - no stretching if external display is enabled (dok) * * 0.2 * - initial version (dok) * * * TODO * - ioctl for internal/external switching * - blanking * - 32bit depth support, maybe impossible * - disable pan-on-sync, need specs * * BUGS * - white margin on bootup like with tdfxfb (colormap problem?) 
* */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/fb.h> #include <linux/pci.h> #include <linux/init.h> #ifdef CONFIG_TOSHIBA #include <linux/toshiba.h> #endif #include <asm/io.h> #include <asm/irq.h> #include <asm/pgtable.h> #ifdef CONFIG_MTRR #include <asm/mtrr.h> #endif #include <video/vga.h> #include <video/neomagic.h> #define NEOFB_VERSION "0.4.2" /* --------------------------------------------------------------------- */ static bool internal; static bool external; static bool libretto; static bool nostretch; static bool nopciburst; static char *mode_option __devinitdata = NULL; #ifdef MODULE MODULE_AUTHOR("(c) 2001-2002 Denis Oliver Kropp <dok@convergence.de>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("FBDev driver for NeoMagic PCI Chips"); module_param(internal, bool, 0); MODULE_PARM_DESC(internal, "Enable output on internal LCD Display."); module_param(external, bool, 0); MODULE_PARM_DESC(external, "Enable output on external CRT."); module_param(libretto, bool, 0); MODULE_PARM_DESC(libretto, "Force Libretto 100/110 800x480 LCD."); module_param(nostretch, bool, 0); MODULE_PARM_DESC(nostretch, "Disable stretching of modes smaller than LCD."); module_param(nopciburst, bool, 0); MODULE_PARM_DESC(nopciburst, "Disable PCI burst mode."); module_param(mode_option, charp, 0); MODULE_PARM_DESC(mode_option, "Preferred video mode ('640x480-8@60', etc)"); #endif /* --------------------------------------------------------------------- */ static biosMode bios8[] = { {320, 240, 0x40}, {300, 400, 0x42}, {640, 400, 0x20}, {640, 480, 0x21}, {800, 600, 0x23}, {1024, 768, 0x25}, }; static biosMode bios16[] = { {320, 200, 0x2e}, {320, 240, 0x41}, {300, 400, 0x43}, {640, 480, 0x31}, {800, 600, 0x34}, {1024, 768, 0x37}, }; static biosMode bios24[] = { {640, 480, 0x32}, {800, 600, 0x35}, {1024, 768, 0x38} }; #ifdef NO_32BIT_SUPPORT_YET /* 
FIXME: guessed values, wrong */ static biosMode bios32[] = { {640, 480, 0x33}, {800, 600, 0x36}, {1024, 768, 0x39} }; #endif static inline void write_le32(int regindex, u32 val, const struct neofb_par *par) { writel(val, par->neo2200 + par->cursorOff + regindex); } static int neoFindMode(int xres, int yres, int depth) { int xres_s; int i, size; biosMode *mode; switch (depth) { case 8: size = ARRAY_SIZE(bios8); mode = bios8; break; case 16: size = ARRAY_SIZE(bios16); mode = bios16; break; case 24: size = ARRAY_SIZE(bios24); mode = bios24; break; #ifdef NO_32BIT_SUPPORT_YET case 32: size = ARRAY_SIZE(bios32); mode = bios32; break; #endif default: return 0; } for (i = 0; i < size; i++) { if (xres <= mode[i].x_res) { xres_s = mode[i].x_res; for (; i < size; i++) { if (mode[i].x_res != xres_s) return mode[i - 1].mode; if (yres <= mode[i].y_res) return mode[i].mode; } } } return mode[size - 1].mode; } /* * neoCalcVCLK -- * * Determine the closest clock frequency to the one requested. */ #define MAX_N 127 #define MAX_D 31 #define MAX_F 1 static void neoCalcVCLK(const struct fb_info *info, struct neofb_par *par, long freq) { int n, d, f; int n_best = 0, d_best = 0, f_best = 0; long f_best_diff = 0x7ffff; for (f = 0; f <= MAX_F; f++) for (d = 0; d <= MAX_D; d++) for (n = 0; n <= MAX_N; n++) { long f_out; long f_diff; f_out = ((14318 * (n + 1)) / (d + 1)) >> f; f_diff = abs(f_out - freq); if (f_diff <= f_best_diff) { f_best_diff = f_diff; n_best = n; d_best = d; f_best = f; } if (f_out > freq) break; } if (info->fix.accel == FB_ACCEL_NEOMAGIC_NM2200 || info->fix.accel == FB_ACCEL_NEOMAGIC_NM2230 || info->fix.accel == FB_ACCEL_NEOMAGIC_NM2360 || info->fix.accel == FB_ACCEL_NEOMAGIC_NM2380) { /* NOT_DONE: We are trying the full range of the 2200 clock. 
We should be able to try n up to 2047 */ par->VCLK3NumeratorLow = n_best; par->VCLK3NumeratorHigh = (f_best << 7); } else par->VCLK3NumeratorLow = n_best | (f_best << 7); par->VCLK3Denominator = d_best; #ifdef NEOFB_DEBUG printk(KERN_DEBUG "neoVCLK: f:%ld NumLow=%d NumHi=%d Den=%d Df=%ld\n", freq, par->VCLK3NumeratorLow, par->VCLK3NumeratorHigh, par->VCLK3Denominator, f_best_diff); #endif } /* * vgaHWInit -- * Handle the initialization, etc. of a screen. * Return FALSE on failure. */ static int vgaHWInit(const struct fb_var_screeninfo *var, struct neofb_par *par) { int hsync_end = var->xres + var->right_margin + var->hsync_len; int htotal = (hsync_end + var->left_margin) >> 3; int vsync_start = var->yres + var->lower_margin; int vsync_end = vsync_start + var->vsync_len; int vtotal = vsync_end + var->upper_margin; par->MiscOutReg = 0x23; if (!(var->sync & FB_SYNC_HOR_HIGH_ACT)) par->MiscOutReg |= 0x40; if (!(var->sync & FB_SYNC_VERT_HIGH_ACT)) par->MiscOutReg |= 0x80; /* * Time Sequencer */ par->Sequencer[0] = 0x00; par->Sequencer[1] = 0x01; par->Sequencer[2] = 0x0F; par->Sequencer[3] = 0x00; /* Font select */ par->Sequencer[4] = 0x0E; /* Misc */ /* * CRTC Controller */ par->CRTC[0] = htotal - 5; par->CRTC[1] = (var->xres >> 3) - 1; par->CRTC[2] = (var->xres >> 3) - 1; par->CRTC[3] = ((htotal - 1) & 0x1F) | 0x80; par->CRTC[4] = ((var->xres + var->right_margin) >> 3); par->CRTC[5] = (((htotal - 1) & 0x20) << 2) | (((hsync_end >> 3)) & 0x1F); par->CRTC[6] = (vtotal - 2) & 0xFF; par->CRTC[7] = (((vtotal - 2) & 0x100) >> 8) | (((var->yres - 1) & 0x100) >> 7) | ((vsync_start & 0x100) >> 6) | (((var->yres - 1) & 0x100) >> 5) | 0x10 | (((vtotal - 2) & 0x200) >> 4) | (((var->yres - 1) & 0x200) >> 3) | ((vsync_start & 0x200) >> 2); par->CRTC[8] = 0x00; par->CRTC[9] = (((var->yres - 1) & 0x200) >> 4) | 0x40; if (var->vmode & FB_VMODE_DOUBLE) par->CRTC[9] |= 0x80; par->CRTC[10] = 0x00; par->CRTC[11] = 0x00; par->CRTC[12] = 0x00; par->CRTC[13] = 0x00; par->CRTC[14] = 0x00; 
par->CRTC[15] = 0x00; par->CRTC[16] = vsync_start & 0xFF; par->CRTC[17] = (vsync_end & 0x0F) | 0x20; par->CRTC[18] = (var->yres - 1) & 0xFF; par->CRTC[19] = var->xres_virtual >> 4; par->CRTC[20] = 0x00; par->CRTC[21] = (var->yres - 1) & 0xFF; par->CRTC[22] = (vtotal - 1) & 0xFF; par->CRTC[23] = 0xC3; par->CRTC[24] = 0xFF; /* * are these unnecessary? * vgaHWHBlankKGA(mode, regp, 0, KGA_FIX_OVERSCAN | KGA_ENABLE_ON_ZERO); * vgaHWVBlankKGA(mode, regp, 0, KGA_FIX_OVERSCAN | KGA_ENABLE_ON_ZERO); */ /* * Graphics Display Controller */ par->Graphics[0] = 0x00; par->Graphics[1] = 0x00; par->Graphics[2] = 0x00; par->Graphics[3] = 0x00; par->Graphics[4] = 0x00; par->Graphics[5] = 0x40; par->Graphics[6] = 0x05; /* only map 64k VGA memory !!!! */ par->Graphics[7] = 0x0F; par->Graphics[8] = 0xFF; par->Attribute[0] = 0x00; /* standard colormap translation */ par->Attribute[1] = 0x01; par->Attribute[2] = 0x02; par->Attribute[3] = 0x03; par->Attribute[4] = 0x04; par->Attribute[5] = 0x05; par->Attribute[6] = 0x06; par->Attribute[7] = 0x07; par->Attribute[8] = 0x08; par->Attribute[9] = 0x09; par->Attribute[10] = 0x0A; par->Attribute[11] = 0x0B; par->Attribute[12] = 0x0C; par->Attribute[13] = 0x0D; par->Attribute[14] = 0x0E; par->Attribute[15] = 0x0F; par->Attribute[16] = 0x41; par->Attribute[17] = 0xFF; par->Attribute[18] = 0x0F; par->Attribute[19] = 0x00; par->Attribute[20] = 0x00; return 0; } static void vgaHWLock(struct vgastate *state) { /* Protect CRTC[0-7] */ vga_wcrt(state->vgabase, 0x11, vga_rcrt(state->vgabase, 0x11) | 0x80); } static void vgaHWUnlock(void) { /* Unprotect CRTC[0-7] */ vga_wcrt(NULL, 0x11, vga_rcrt(NULL, 0x11) & ~0x80); } static void neoLock(struct vgastate *state) { vga_wgfx(state->vgabase, 0x09, 0x00); vgaHWLock(state); } static void neoUnlock(void) { vgaHWUnlock(); vga_wgfx(NULL, 0x09, 0x26); } /* * VGA Palette management */ static int paletteEnabled = 0; static inline void VGAenablePalette(void) { vga_r(NULL, VGA_IS1_RC); vga_w(NULL, VGA_ATT_W, 0x00); 
paletteEnabled = 1; } static inline void VGAdisablePalette(void) { vga_r(NULL, VGA_IS1_RC); vga_w(NULL, VGA_ATT_W, 0x20); paletteEnabled = 0; } static inline void VGAwATTR(u8 index, u8 value) { if (paletteEnabled) index &= ~0x20; else index |= 0x20; vga_r(NULL, VGA_IS1_RC); vga_wattr(NULL, index, value); } static void vgaHWProtect(int on) { unsigned char tmp; tmp = vga_rseq(NULL, 0x01); if (on) { /* * Turn off screen and disable sequencer. */ vga_wseq(NULL, 0x00, 0x01); /* Synchronous Reset */ vga_wseq(NULL, 0x01, tmp | 0x20); /* disable the display */ VGAenablePalette(); } else { /* * Reenable sequencer, then turn on screen. */ vga_wseq(NULL, 0x01, tmp & ~0x20); /* reenable display */ vga_wseq(NULL, 0x00, 0x03); /* clear synchronousreset */ VGAdisablePalette(); } } static void vgaHWRestore(const struct fb_info *info, const struct neofb_par *par) { int i; vga_w(NULL, VGA_MIS_W, par->MiscOutReg); for (i = 1; i < 5; i++) vga_wseq(NULL, i, par->Sequencer[i]); /* Ensure CRTC registers 0-7 are unlocked by clearing bit 7 or CRTC[17] */ vga_wcrt(NULL, 17, par->CRTC[17] & ~0x80); for (i = 0; i < 25; i++) vga_wcrt(NULL, i, par->CRTC[i]); for (i = 0; i < 9; i++) vga_wgfx(NULL, i, par->Graphics[i]); VGAenablePalette(); for (i = 0; i < 21; i++) VGAwATTR(i, par->Attribute[i]); VGAdisablePalette(); } /* -------------------- Hardware specific routines ------------------------- */ /* * Hardware Acceleration for Neo2200+ */ static inline int neo2200_sync(struct fb_info *info) { struct neofb_par *par = info->par; while (readl(&par->neo2200->bltStat) & 1) cpu_relax(); return 0; } static inline void neo2200_wait_fifo(struct fb_info *info, int requested_fifo_space) { // ndev->neo.waitfifo_calls++; // ndev->neo.waitfifo_sum += requested_fifo_space; /* FIXME: does not work if (neo_fifo_space < requested_fifo_space) { neo_fifo_waitcycles++; while (1) { neo_fifo_space = (neo2200->bltStat >> 8); if (neo_fifo_space >= requested_fifo_space) break; } } else { neo_fifo_cache_hits++; } 
neo_fifo_space -= requested_fifo_space; */ neo2200_sync(info); } static inline void neo2200_accel_init(struct fb_info *info, struct fb_var_screeninfo *var) { struct neofb_par *par = info->par; Neo2200 __iomem *neo2200 = par->neo2200; u32 bltMod, pitch; neo2200_sync(info); switch (var->bits_per_pixel) { case 8: bltMod = NEO_MODE1_DEPTH8; pitch = var->xres_virtual; break; case 15: case 16: bltMod = NEO_MODE1_DEPTH16; pitch = var->xres_virtual * 2; break; case 24: bltMod = NEO_MODE1_DEPTH24; pitch = var->xres_virtual * 3; break; default: printk(KERN_ERR "neofb: neo2200_accel_init: unexpected bits per pixel!\n"); return; } writel(bltMod << 16, &neo2200->bltStat); writel((pitch << 16) | pitch, &neo2200->pitch); } /* --------------------------------------------------------------------- */ static int neofb_open(struct fb_info *info, int user) { struct neofb_par *par = info->par; if (!par->ref_count) { memset(&par->state, 0, sizeof(struct vgastate)); par->state.flags = VGA_SAVE_MODE | VGA_SAVE_FONTS; save_vga(&par->state); } par->ref_count++; return 0; } static int neofb_release(struct fb_info *info, int user) { struct neofb_par *par = info->par; if (!par->ref_count) return -EINVAL; if (par->ref_count == 1) { restore_vga(&par->state); } par->ref_count--; return 0; } static int neofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct neofb_par *par = info->par; int memlen, vramlen; int mode_ok = 0; DBG("neofb_check_var"); if (PICOS2KHZ(var->pixclock) > par->maxClock) return -EINVAL; /* Is the mode larger than the LCD panel? */ if (par->internal_display && ((var->xres > par->NeoPanelWidth) || (var->yres > par->NeoPanelHeight))) { printk(KERN_INFO "Mode (%dx%d) larger than the LCD panel (%dx%d)\n", var->xres, var->yres, par->NeoPanelWidth, par->NeoPanelHeight); return -EINVAL; } /* Is the mode one of the acceptable sizes? 
*/ if (!par->internal_display) mode_ok = 1; else { switch (var->xres) { case 1280: if (var->yres == 1024) mode_ok = 1; break; case 1024: if (var->yres == 768) mode_ok = 1; break; case 800: if (var->yres == (par->libretto ? 480 : 600)) mode_ok = 1; break; case 640: if (var->yres == 480) mode_ok = 1; break; } } if (!mode_ok) { printk(KERN_INFO "Mode (%dx%d) won't display properly on LCD\n", var->xres, var->yres); return -EINVAL; } var->red.msb_right = 0; var->green.msb_right = 0; var->blue.msb_right = 0; var->transp.msb_right = 0; var->transp.offset = 0; var->transp.length = 0; switch (var->bits_per_pixel) { case 8: /* PSEUDOCOLOUR, 256 */ var->red.offset = 0; var->red.length = 8; var->green.offset = 0; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; break; case 16: /* DIRECTCOLOUR, 64k */ var->red.offset = 11; var->red.length = 5; var->green.offset = 5; var->green.length = 6; var->blue.offset = 0; var->blue.length = 5; break; case 24: /* TRUECOLOUR, 16m */ var->red.offset = 16; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; break; #ifdef NO_32BIT_SUPPORT_YET case 32: /* TRUECOLOUR, 16m */ var->transp.offset = 24; var->transp.length = 8; var->red.offset = 16; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; break; #endif default: printk(KERN_WARNING "neofb: no support for %dbpp\n", var->bits_per_pixel); return -EINVAL; } vramlen = info->fix.smem_len; if (vramlen > 4 * 1024 * 1024) vramlen = 4 * 1024 * 1024; if (var->xres_virtual < var->xres) var->xres_virtual = var->xres; memlen = var->xres_virtual * var->bits_per_pixel * var->yres_virtual >> 3; if (memlen > vramlen) { var->yres_virtual = vramlen * 8 / (var->xres_virtual * var->bits_per_pixel); memlen = var->xres_virtual * var->bits_per_pixel * var->yres_virtual / 8; } /* we must round yres/xres down, we already rounded y/xres_virtual up if it was possible. 
We should return -EINVAL, but I disagree */ if (var->yres_virtual < var->yres) var->yres = var->yres_virtual; if (var->xoffset + var->xres > var->xres_virtual) var->xoffset = var->xres_virtual - var->xres; if (var->yoffset + var->yres > var->yres_virtual) var->yoffset = var->yres_virtual - var->yres; var->nonstd = 0; var->height = -1; var->width = -1; if (var->bits_per_pixel >= 24 || !par->neo2200) var->accel_flags &= ~FB_ACCELF_TEXT; return 0; } static int neofb_set_par(struct fb_info *info) { struct neofb_par *par = info->par; unsigned char temp; int i, clock_hi = 0; int lcd_stretch; int hoffset, voffset; int vsync_start, vtotal; DBG("neofb_set_par"); neoUnlock(); vgaHWProtect(1); /* Blank the screen */ vsync_start = info->var.yres + info->var.lower_margin; vtotal = vsync_start + info->var.vsync_len + info->var.upper_margin; /* * This will allocate the datastructure and initialize all of the * generic VGA registers. */ if (vgaHWInit(&info->var, par)) return -EINVAL; /* * The default value assigned by vgaHW.c is 0x41, but this does * not work for NeoMagic. 
*/ par->Attribute[16] = 0x01; switch (info->var.bits_per_pixel) { case 8: par->CRTC[0x13] = info->var.xres_virtual >> 3; par->ExtCRTOffset = info->var.xres_virtual >> 11; par->ExtColorModeSelect = 0x11; break; case 16: par->CRTC[0x13] = info->var.xres_virtual >> 2; par->ExtCRTOffset = info->var.xres_virtual >> 10; par->ExtColorModeSelect = 0x13; break; case 24: par->CRTC[0x13] = (info->var.xres_virtual * 3) >> 3; par->ExtCRTOffset = (info->var.xres_virtual * 3) >> 11; par->ExtColorModeSelect = 0x14; break; #ifdef NO_32BIT_SUPPORT_YET case 32: /* FIXME: guessed values */ par->CRTC[0x13] = info->var.xres_virtual >> 1; par->ExtCRTOffset = info->var.xres_virtual >> 9; par->ExtColorModeSelect = 0x15; break; #endif default: break; } par->ExtCRTDispAddr = 0x10; /* Vertical Extension */ par->VerticalExt = (((vtotal - 2) & 0x400) >> 10) | (((info->var.yres - 1) & 0x400) >> 9) | (((vsync_start) & 0x400) >> 8) | (((vsync_start) & 0x400) >> 7); /* Fast write bursts on unless disabled. */ if (par->pci_burst) par->SysIfaceCntl1 = 0x30; else par->SysIfaceCntl1 = 0x00; par->SysIfaceCntl2 = 0xc0; /* VESA Bios sets this to 0x80! */ /* Initialize: by default, we want display config register to be read */ par->PanelDispCntlRegRead = 1; /* Enable any user specified display devices. */ par->PanelDispCntlReg1 = 0x00; if (par->internal_display) par->PanelDispCntlReg1 |= 0x02; if (par->external_display) par->PanelDispCntlReg1 |= 0x01; /* If the user did not specify any display devices, then... */ if (par->PanelDispCntlReg1 == 0x00) { /* Default to internal (i.e., LCD) only. */ par->PanelDispCntlReg1 = vga_rgfx(NULL, 0x20) & 0x03; } /* If we are using a fixed mode, then tell the chip we are. */ switch (info->var.xres) { case 1280: par->PanelDispCntlReg1 |= 0x60; break; case 1024: par->PanelDispCntlReg1 |= 0x40; break; case 800: par->PanelDispCntlReg1 |= 0x20; break; case 640: default: break; } /* Setup shadow register locking. 
*/ switch (par->PanelDispCntlReg1 & 0x03) { case 0x01: /* External CRT only mode: */ par->GeneralLockReg = 0x00; /* We need to program the VCLK for external display only mode. */ par->ProgramVCLK = 1; break; case 0x02: /* Internal LCD only mode: */ case 0x03: /* Simultaneous internal/external (LCD/CRT) mode: */ par->GeneralLockReg = 0x01; /* Don't program the VCLK when using the LCD. */ par->ProgramVCLK = 0; break; } /* * If the screen is to be stretched, turn on stretching for the * various modes. * * OPTION_LCD_STRETCH means stretching should be turned off! */ par->PanelDispCntlReg2 = 0x00; par->PanelDispCntlReg3 = 0x00; if (par->lcd_stretch && (par->PanelDispCntlReg1 == 0x02) && /* LCD only */ (info->var.xres != par->NeoPanelWidth)) { switch (info->var.xres) { case 320: /* Needs testing. KEM -- 24 May 98 */ case 400: /* Needs testing. KEM -- 24 May 98 */ case 640: case 800: case 1024: lcd_stretch = 1; par->PanelDispCntlReg2 |= 0xC6; break; default: lcd_stretch = 0; /* No stretching in these modes. */ } } else lcd_stretch = 0; /* * If the screen is to be centerd, turn on the centering for the * various modes. */ par->PanelVertCenterReg1 = 0x00; par->PanelVertCenterReg2 = 0x00; par->PanelVertCenterReg3 = 0x00; par->PanelVertCenterReg4 = 0x00; par->PanelVertCenterReg5 = 0x00; par->PanelHorizCenterReg1 = 0x00; par->PanelHorizCenterReg2 = 0x00; par->PanelHorizCenterReg3 = 0x00; par->PanelHorizCenterReg4 = 0x00; par->PanelHorizCenterReg5 = 0x00; if (par->PanelDispCntlReg1 & 0x02) { if (info->var.xres == par->NeoPanelWidth) { /* * No centering required when the requested display width * equals the panel width. */ } else { par->PanelDispCntlReg2 |= 0x01; par->PanelDispCntlReg3 |= 0x10; /* Calculate the horizontal and vertical offsets. */ if (!lcd_stretch) { hoffset = ((par->NeoPanelWidth - info->var.xres) >> 4) - 1; voffset = ((par->NeoPanelHeight - info->var.yres) >> 1) - 2; } else { /* Stretched modes cannot be centered. 
*/ hoffset = 0; voffset = 0; } switch (info->var.xres) { case 320: /* Needs testing. KEM -- 24 May 98 */ par->PanelHorizCenterReg3 = hoffset; par->PanelVertCenterReg2 = voffset; break; case 400: /* Needs testing. KEM -- 24 May 98 */ par->PanelHorizCenterReg4 = hoffset; par->PanelVertCenterReg1 = voffset; break; case 640: par->PanelHorizCenterReg1 = hoffset; par->PanelVertCenterReg3 = voffset; break; case 800: par->PanelHorizCenterReg2 = hoffset; par->PanelVertCenterReg4 = voffset; break; case 1024: par->PanelHorizCenterReg5 = hoffset; par->PanelVertCenterReg5 = voffset; break; case 1280: default: /* No centering in these modes. */ break; } } } par->biosMode = neoFindMode(info->var.xres, info->var.yres, info->var.bits_per_pixel); /* * Calculate the VCLK that most closely matches the requested dot * clock. */ neoCalcVCLK(info, par, PICOS2KHZ(info->var.pixclock)); /* Since we program the clocks ourselves, always use VCLK3. */ par->MiscOutReg |= 0x0C; /* alread unlocked above */ /* BOGUS vga_wgfx(NULL, 0x09, 0x26); */ /* don't know what this is, but it's 0 from bootup anyway */ vga_wgfx(NULL, 0x15, 0x00); /* was set to 0x01 by my bios in text and vesa modes */ vga_wgfx(NULL, 0x0A, par->GeneralLockReg); /* * The color mode needs to be set before calling vgaHWRestore * to ensure the DAC is initialized properly. * * NOTE: Make sure we don't change bits make sure we don't change * any reserved bits. 
*/ temp = vga_rgfx(NULL, 0x90); switch (info->fix.accel) { case FB_ACCEL_NEOMAGIC_NM2070: temp &= 0xF0; /* Save bits 7:4 */ temp |= (par->ExtColorModeSelect & ~0xF0); break; case FB_ACCEL_NEOMAGIC_NM2090: case FB_ACCEL_NEOMAGIC_NM2093: case FB_ACCEL_NEOMAGIC_NM2097: case FB_ACCEL_NEOMAGIC_NM2160: case FB_ACCEL_NEOMAGIC_NM2200: case FB_ACCEL_NEOMAGIC_NM2230: case FB_ACCEL_NEOMAGIC_NM2360: case FB_ACCEL_NEOMAGIC_NM2380: temp &= 0x70; /* Save bits 6:4 */ temp |= (par->ExtColorModeSelect & ~0x70); break; } vga_wgfx(NULL, 0x90, temp); /* * In some rare cases a lockup might occur if we don't delay * here. (Reported by Miles Lane) */ //mdelay(200); /* * Disable horizontal and vertical graphics and text expansions so * that vgaHWRestore works properly. */ temp = vga_rgfx(NULL, 0x25); temp &= 0x39; vga_wgfx(NULL, 0x25, temp); /* * Sleep for 200ms to make sure that the two operations above have * had time to take effect. */ mdelay(200); /* * This function handles restoring the generic VGA registers. */ vgaHWRestore(info, par); /* linear colormap for non palettized modes */ switch (info->var.bits_per_pixel) { case 8: /* PseudoColor, 256 */ info->fix.visual = FB_VISUAL_PSEUDOCOLOR; break; case 16: /* TrueColor, 64k */ info->fix.visual = FB_VISUAL_TRUECOLOR; for (i = 0; i < 64; i++) { outb(i, 0x3c8); outb(i << 1, 0x3c9); outb(i, 0x3c9); outb(i << 1, 0x3c9); } break; case 24: #ifdef NO_32BIT_SUPPORT_YET case 32: #endif /* TrueColor, 16m */ info->fix.visual = FB_VISUAL_TRUECOLOR; for (i = 0; i < 256; i++) { outb(i, 0x3c8); outb(i, 0x3c9); outb(i, 0x3c9); outb(i, 0x3c9); } break; } vga_wgfx(NULL, 0x0E, par->ExtCRTDispAddr); vga_wgfx(NULL, 0x0F, par->ExtCRTOffset); temp = vga_rgfx(NULL, 0x10); temp &= 0x0F; /* Save bits 3:0 */ temp |= (par->SysIfaceCntl1 & ~0x0F); /* VESA Bios sets bit 1! 
*/ vga_wgfx(NULL, 0x10, temp); vga_wgfx(NULL, 0x11, par->SysIfaceCntl2); vga_wgfx(NULL, 0x15, 0 /*par->SingleAddrPage */ ); vga_wgfx(NULL, 0x16, 0 /*par->DualAddrPage */ ); temp = vga_rgfx(NULL, 0x20); switch (info->fix.accel) { case FB_ACCEL_NEOMAGIC_NM2070: temp &= 0xFC; /* Save bits 7:2 */ temp |= (par->PanelDispCntlReg1 & ~0xFC); break; case FB_ACCEL_NEOMAGIC_NM2090: case FB_ACCEL_NEOMAGIC_NM2093: case FB_ACCEL_NEOMAGIC_NM2097: case FB_ACCEL_NEOMAGIC_NM2160: temp &= 0xDC; /* Save bits 7:6,4:2 */ temp |= (par->PanelDispCntlReg1 & ~0xDC); break; case FB_ACCEL_NEOMAGIC_NM2200: case FB_ACCEL_NEOMAGIC_NM2230: case FB_ACCEL_NEOMAGIC_NM2360: case FB_ACCEL_NEOMAGIC_NM2380: temp &= 0x98; /* Save bits 7,4:3 */ temp |= (par->PanelDispCntlReg1 & ~0x98); break; } vga_wgfx(NULL, 0x20, temp); temp = vga_rgfx(NULL, 0x25); temp &= 0x38; /* Save bits 5:3 */ temp |= (par->PanelDispCntlReg2 & ~0x38); vga_wgfx(NULL, 0x25, temp); if (info->fix.accel != FB_ACCEL_NEOMAGIC_NM2070) { temp = vga_rgfx(NULL, 0x30); temp &= 0xEF; /* Save bits 7:5 and bits 3:0 */ temp |= (par->PanelDispCntlReg3 & ~0xEF); vga_wgfx(NULL, 0x30, temp); } vga_wgfx(NULL, 0x28, par->PanelVertCenterReg1); vga_wgfx(NULL, 0x29, par->PanelVertCenterReg2); vga_wgfx(NULL, 0x2a, par->PanelVertCenterReg3); if (info->fix.accel != FB_ACCEL_NEOMAGIC_NM2070) { vga_wgfx(NULL, 0x32, par->PanelVertCenterReg4); vga_wgfx(NULL, 0x33, par->PanelHorizCenterReg1); vga_wgfx(NULL, 0x34, par->PanelHorizCenterReg2); vga_wgfx(NULL, 0x35, par->PanelHorizCenterReg3); } if (info->fix.accel == FB_ACCEL_NEOMAGIC_NM2160) vga_wgfx(NULL, 0x36, par->PanelHorizCenterReg4); if (info->fix.accel == FB_ACCEL_NEOMAGIC_NM2200 || info->fix.accel == FB_ACCEL_NEOMAGIC_NM2230 || info->fix.accel == FB_ACCEL_NEOMAGIC_NM2360 || info->fix.accel == FB_ACCEL_NEOMAGIC_NM2380) { vga_wgfx(NULL, 0x36, par->PanelHorizCenterReg4); vga_wgfx(NULL, 0x37, par->PanelVertCenterReg5); vga_wgfx(NULL, 0x38, par->PanelHorizCenterReg5); clock_hi = 1; } /* Program VCLK3 if needed. 
*/ if (par->ProgramVCLK && ((vga_rgfx(NULL, 0x9B) != par->VCLK3NumeratorLow) || (vga_rgfx(NULL, 0x9F) != par->VCLK3Denominator) || (clock_hi && ((vga_rgfx(NULL, 0x8F) & ~0x0f) != (par->VCLK3NumeratorHigh & ~0x0F))))) { vga_wgfx(NULL, 0x9B, par->VCLK3NumeratorLow); if (clock_hi) { temp = vga_rgfx(NULL, 0x8F); temp &= 0x0F; /* Save bits 3:0 */ temp |= (par->VCLK3NumeratorHigh & ~0x0F); vga_wgfx(NULL, 0x8F, temp); } vga_wgfx(NULL, 0x9F, par->VCLK3Denominator); } if (par->biosMode) vga_wcrt(NULL, 0x23, par->biosMode); vga_wgfx(NULL, 0x93, 0xc0); /* Gives 5x faster framebuffer writes !!! */ /* Program vertical extension register */ if (info->fix.accel == FB_ACCEL_NEOMAGIC_NM2200 || info->fix.accel == FB_ACCEL_NEOMAGIC_NM2230 || info->fix.accel == FB_ACCEL_NEOMAGIC_NM2360 || info->fix.accel == FB_ACCEL_NEOMAGIC_NM2380) { vga_wcrt(NULL, 0x70, par->VerticalExt); } vgaHWProtect(0); /* Turn on screen */ /* Calling this also locks offset registers required in update_start */ neoLock(&par->state); info->fix.line_length = info->var.xres_virtual * (info->var.bits_per_pixel >> 3); switch (info->fix.accel) { case FB_ACCEL_NEOMAGIC_NM2200: case FB_ACCEL_NEOMAGIC_NM2230: case FB_ACCEL_NEOMAGIC_NM2360: case FB_ACCEL_NEOMAGIC_NM2380: neo2200_accel_init(info, &info->var); break; default: break; } return 0; } /* * Pan or Wrap the Display */ static int neofb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { struct neofb_par *par = info->par; struct vgastate *state = &par->state; int oldExtCRTDispAddr; int Base; DBG("neofb_update_start"); Base = (var->yoffset * info->var.xres_virtual + var->xoffset) >> 2; Base *= (info->var.bits_per_pixel + 7) / 8; neoUnlock(); /* * These are the generic starting address registers. */ vga_wcrt(state->vgabase, 0x0C, (Base & 0x00FF00) >> 8); vga_wcrt(state->vgabase, 0x0D, (Base & 0x00FF)); /* * Make sure we don't clobber some other bits that might already * have been set. NOTE: NM2200 has a writable bit 3, but it shouldn't * be needed. 
*/ oldExtCRTDispAddr = vga_rgfx(NULL, 0x0E); vga_wgfx(state->vgabase, 0x0E, (((Base >> 16) & 0x0f) | (oldExtCRTDispAddr & 0xf0))); neoLock(state); return 0; } static int neofb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *fb) { if (regno >= fb->cmap.len || regno > 255) return -EINVAL; if (fb->var.bits_per_pixel <= 8) { outb(regno, 0x3c8); outb(red >> 10, 0x3c9); outb(green >> 10, 0x3c9); outb(blue >> 10, 0x3c9); } else if (regno < 16) { switch (fb->var.bits_per_pixel) { case 16: ((u32 *) fb->pseudo_palette)[regno] = ((red & 0xf800)) | ((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11); break; case 24: ((u32 *) fb->pseudo_palette)[regno] = ((red & 0xff00) << 8) | ((green & 0xff00)) | ((blue & 0xff00) >> 8); break; #ifdef NO_32BIT_SUPPORT_YET case 32: ((u32 *) fb->pseudo_palette)[regno] = ((transp & 0xff00) << 16) | ((red & 0xff00) << 8) | ((green & 0xff00)) | ((blue & 0xff00) >> 8); break; #endif default: return 1; } } return 0; } /* * (Un)Blank the display. */ static int neofb_blank(int blank_mode, struct fb_info *info) { /* * Blank the screen if blank_mode != 0, else unblank. * Return 0 if blanking succeeded, != 0 if un-/blanking failed due to * e.g. a video mode which doesn't support it. Implements VESA suspend * and powerdown modes for monitors, and backlight control on LCDs. * blank_mode == 0: unblanked (backlight on) * blank_mode == 1: blank (backlight on) * blank_mode == 2: suspend vsync (backlight off) * blank_mode == 3: suspend hsync (backlight off) * blank_mode == 4: powerdown (backlight off) * * wms...Enable VESA DPMS compatible powerdown mode * run "setterm -powersave powerdown" to take advantage */ struct neofb_par *par = info->par; int seqflags, lcdflags, dpmsflags, reg, tmpdisp; /* * Read back the register bits related to display configuration. They might * have been changed underneath the driver via Fn key stroke. 
*/ neoUnlock(); tmpdisp = vga_rgfx(NULL, 0x20) & 0x03; neoLock(&par->state); /* In case we blank the screen, we want to store the possibly new * configuration in the driver. During un-blank, we re-apply this setting, * since the LCD bit will be cleared in order to switch off the backlight. */ if (par->PanelDispCntlRegRead) { par->PanelDispCntlReg1 = tmpdisp; } par->PanelDispCntlRegRead = !blank_mode; switch (blank_mode) { case FB_BLANK_POWERDOWN: /* powerdown - both sync lines down */ seqflags = VGA_SR01_SCREEN_OFF; /* Disable sequencer */ lcdflags = 0; /* LCD off */ dpmsflags = NEO_GR01_SUPPRESS_HSYNC | NEO_GR01_SUPPRESS_VSYNC; #ifdef CONFIG_TOSHIBA /* Do we still need this ? */ /* attempt to turn off backlight on toshiba; also turns off external */ { SMMRegisters regs; regs.eax = 0xff00; /* HCI_SET */ regs.ebx = 0x0002; /* HCI_BACKLIGHT */ regs.ecx = 0x0000; /* HCI_DISABLE */ tosh_smm(&regs); } #endif break; case FB_BLANK_HSYNC_SUSPEND: /* hsync off */ seqflags = VGA_SR01_SCREEN_OFF; /* Disable sequencer */ lcdflags = 0; /* LCD off */ dpmsflags = NEO_GR01_SUPPRESS_HSYNC; break; case FB_BLANK_VSYNC_SUSPEND: /* vsync off */ seqflags = VGA_SR01_SCREEN_OFF; /* Disable sequencer */ lcdflags = 0; /* LCD off */ dpmsflags = NEO_GR01_SUPPRESS_VSYNC; break; case FB_BLANK_NORMAL: /* just blank screen (backlight stays on) */ seqflags = VGA_SR01_SCREEN_OFF; /* Disable sequencer */ /* * During a blank operation with the LID shut, we might store "LCD off" * by mistake. Due to timing issues, the BIOS may switch the lights * back on, and we turn it back off once we "unblank". * * So here is an attempt to implement ">=" - if we are in the process * of unblanking, and the LCD bit is unset in the driver but set in the * register, we must keep it. 
*/ lcdflags = ((par->PanelDispCntlReg1 | tmpdisp) & 0x02); /* LCD normal */ dpmsflags = 0x00; /* no hsync/vsync suppression */ break; case FB_BLANK_UNBLANK: /* unblank */ seqflags = 0; /* Enable sequencer */ lcdflags = ((par->PanelDispCntlReg1 | tmpdisp) & 0x02); /* LCD normal */ dpmsflags = 0x00; /* no hsync/vsync suppression */ #ifdef CONFIG_TOSHIBA /* Do we still need this ? */ /* attempt to re-enable backlight/external on toshiba */ { SMMRegisters regs; regs.eax = 0xff00; /* HCI_SET */ regs.ebx = 0x0002; /* HCI_BACKLIGHT */ regs.ecx = 0x0001; /* HCI_ENABLE */ tosh_smm(&regs); } #endif break; default: /* Anything else we don't understand; return 1 to tell * fb_blank we didn't aactually do anything */ return 1; } neoUnlock(); reg = (vga_rseq(NULL, 0x01) & ~0x20) | seqflags; vga_wseq(NULL, 0x01, reg); reg = (vga_rgfx(NULL, 0x20) & ~0x02) | lcdflags; vga_wgfx(NULL, 0x20, reg); reg = (vga_rgfx(NULL, 0x01) & ~0xF0) | 0x80 | dpmsflags; vga_wgfx(NULL, 0x01, reg); neoLock(&par->state); return 0; } static void neo2200_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct neofb_par *par = info->par; u_long dst, rop; dst = rect->dx + rect->dy * info->var.xres_virtual; rop = rect->rop ? 
0x060000 : 0x0c0000; neo2200_wait_fifo(info, 4); /* set blt control */ writel(NEO_BC3_FIFO_EN | NEO_BC0_SRC_IS_FG | NEO_BC3_SKIP_MAPPING | // NEO_BC3_DST_XY_ADDR | // NEO_BC3_SRC_XY_ADDR | rop, &par->neo2200->bltCntl); switch (info->var.bits_per_pixel) { case 8: writel(rect->color, &par->neo2200->fgColor); break; case 16: case 24: writel(((u32 *) (info->pseudo_palette))[rect->color], &par->neo2200->fgColor); break; } writel(dst * ((info->var.bits_per_pixel + 7) >> 3), &par->neo2200->dstStart); writel((rect->height << 16) | (rect->width & 0xffff), &par->neo2200->xyExt); } static void neo2200_copyarea(struct fb_info *info, const struct fb_copyarea *area) { u32 sx = area->sx, sy = area->sy, dx = area->dx, dy = area->dy; struct neofb_par *par = info->par; u_long src, dst, bltCntl; bltCntl = NEO_BC3_FIFO_EN | NEO_BC3_SKIP_MAPPING | 0x0C0000; if ((dy > sy) || ((dy == sy) && (dx > sx))) { /* Start with the lower right corner */ sy += (area->height - 1); dy += (area->height - 1); sx += (area->width - 1); dx += (area->width - 1); bltCntl |= NEO_BC0_X_DEC | NEO_BC0_DST_Y_DEC | NEO_BC0_SRC_Y_DEC; } src = sx * (info->var.bits_per_pixel >> 3) + sy*info->fix.line_length; dst = dx * (info->var.bits_per_pixel >> 3) + dy*info->fix.line_length; neo2200_wait_fifo(info, 4); /* set blt control */ writel(bltCntl, &par->neo2200->bltCntl); writel(src, &par->neo2200->srcStart); writel(dst, &par->neo2200->dstStart); writel((area->height << 16) | (area->width & 0xffff), &par->neo2200->xyExt); } static void neo2200_imageblit(struct fb_info *info, const struct fb_image *image) { struct neofb_par *par = info->par; int s_pitch = (image->width * image->depth + 7) >> 3; int scan_align = info->pixmap.scan_align - 1; int buf_align = info->pixmap.buf_align - 1; int bltCntl_flags, d_pitch, data_len; // The data is padded for the hardware d_pitch = (s_pitch + scan_align) & ~scan_align; data_len = ((d_pitch * image->height) + buf_align) & ~buf_align; neo2200_sync(info); if (image->depth == 1) { if 
(info->var.bits_per_pixel == 24 && image->width < 16) { /* FIXME. There is a bug with accelerated color-expanded * transfers in 24 bit mode if the image being transferred * is less than 16 bits wide. This is due to insufficient * padding when writing the image. We need to adjust * struct fb_pixmap. Not yet done. */ cfb_imageblit(info, image); return; } bltCntl_flags = NEO_BC0_SRC_MONO; } else if (image->depth == info->var.bits_per_pixel) { bltCntl_flags = 0; } else { /* We don't currently support hardware acceleration if image * depth is different from display */ cfb_imageblit(info, image); return; } switch (info->var.bits_per_pixel) { case 8: writel(image->fg_color, &par->neo2200->fgColor); writel(image->bg_color, &par->neo2200->bgColor); break; case 16: case 24: writel(((u32 *) (info->pseudo_palette))[image->fg_color], &par->neo2200->fgColor); writel(((u32 *) (info->pseudo_palette))[image->bg_color], &par->neo2200->bgColor); break; } writel(NEO_BC0_SYS_TO_VID | NEO_BC3_SKIP_MAPPING | bltCntl_flags | // NEO_BC3_DST_XY_ADDR | 0x0c0000, &par->neo2200->bltCntl); writel(0, &par->neo2200->srcStart); // par->neo2200->dstStart = (image->dy << 16) | (image->dx & 0xffff); writel(((image->dx & 0xffff) * (info->var.bits_per_pixel >> 3) + image->dy * info->fix.line_length), &par->neo2200->dstStart); writel((image->height << 16) | (image->width & 0xffff), &par->neo2200->xyExt); memcpy_toio(par->mmio_vbase + 0x100000, image->data, data_len); } static void neofb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { switch (info->fix.accel) { case FB_ACCEL_NEOMAGIC_NM2200: case FB_ACCEL_NEOMAGIC_NM2230: case FB_ACCEL_NEOMAGIC_NM2360: case FB_ACCEL_NEOMAGIC_NM2380: neo2200_fillrect(info, rect); break; default: cfb_fillrect(info, rect); break; } } static void neofb_copyarea(struct fb_info *info, const struct fb_copyarea *area) { switch (info->fix.accel) { case FB_ACCEL_NEOMAGIC_NM2200: case FB_ACCEL_NEOMAGIC_NM2230: case FB_ACCEL_NEOMAGIC_NM2360: case 
FB_ACCEL_NEOMAGIC_NM2380: neo2200_copyarea(info, area); break; default: cfb_copyarea(info, area); break; } } static void neofb_imageblit(struct fb_info *info, const struct fb_image *image) { switch (info->fix.accel) { case FB_ACCEL_NEOMAGIC_NM2200: case FB_ACCEL_NEOMAGIC_NM2230: case FB_ACCEL_NEOMAGIC_NM2360: case FB_ACCEL_NEOMAGIC_NM2380: neo2200_imageblit(info, image); break; default: cfb_imageblit(info, image); break; } } static int neofb_sync(struct fb_info *info) { switch (info->fix.accel) { case FB_ACCEL_NEOMAGIC_NM2200: case FB_ACCEL_NEOMAGIC_NM2230: case FB_ACCEL_NEOMAGIC_NM2360: case FB_ACCEL_NEOMAGIC_NM2380: neo2200_sync(info); break; default: break; } return 0; } /* static void neofb_draw_cursor(struct fb_info *info, u8 *dst, u8 *src, unsigned int width) { //memset_io(info->sprite.addr, 0xff, 1); } static int neofb_cursor(struct fb_info *info, struct fb_cursor *cursor) { struct neofb_par *par = (struct neofb_par *) info->par; * Disable cursor * write_le32(NEOREG_CURSCNTL, ~NEO_CURS_ENABLE, par); if (cursor->set & FB_CUR_SETPOS) { u32 x = cursor->image.dx; u32 y = cursor->image.dy; info->cursor.image.dx = x; info->cursor.image.dy = y; write_le32(NEOREG_CURSX, x, par); write_le32(NEOREG_CURSY, y, par); } if (cursor->set & FB_CUR_SETSIZE) { info->cursor.image.height = cursor->image.height; info->cursor.image.width = cursor->image.width; } if (cursor->set & FB_CUR_SETHOT) info->cursor.hot = cursor->hot; if (cursor->set & FB_CUR_SETCMAP) { if (cursor->image.depth == 1) { u32 fg = cursor->image.fg_color; u32 bg = cursor->image.bg_color; info->cursor.image.fg_color = fg; info->cursor.image.bg_color = bg; fg = ((fg & 0xff0000) >> 16) | ((fg & 0xff) << 16) | (fg & 0xff00); bg = ((bg & 0xff0000) >> 16) | ((bg & 0xff) << 16) | (bg & 0xff00); write_le32(NEOREG_CURSFGCOLOR, fg, par); write_le32(NEOREG_CURSBGCOLOR, bg, par); } } if (cursor->set & FB_CUR_SETSHAPE) fb_load_cursor_image(info); if (info->cursor.enable) write_le32(NEOREG_CURSCNTL, NEO_CURS_ENABLE, par); 
return 0; } */ static struct fb_ops neofb_ops = { .owner = THIS_MODULE, .fb_open = neofb_open, .fb_release = neofb_release, .fb_check_var = neofb_check_var, .fb_set_par = neofb_set_par, .fb_setcolreg = neofb_setcolreg, .fb_pan_display = neofb_pan_display, .fb_blank = neofb_blank, .fb_sync = neofb_sync, .fb_fillrect = neofb_fillrect, .fb_copyarea = neofb_copyarea, .fb_imageblit = neofb_imageblit, }; /* --------------------------------------------------------------------- */ static struct fb_videomode __devinitdata mode800x480 = { .xres = 800, .yres = 480, .pixclock = 25000, .left_margin = 88, .right_margin = 40, .upper_margin = 23, .lower_margin = 1, .hsync_len = 128, .vsync_len = 4, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, .vmode = FB_VMODE_NONINTERLACED }; static int __devinit neo_map_mmio(struct fb_info *info, struct pci_dev *dev) { struct neofb_par *par = info->par; DBG("neo_map_mmio"); switch (info->fix.accel) { case FB_ACCEL_NEOMAGIC_NM2070: info->fix.mmio_start = pci_resource_start(dev, 0)+ 0x100000; break; case FB_ACCEL_NEOMAGIC_NM2090: case FB_ACCEL_NEOMAGIC_NM2093: info->fix.mmio_start = pci_resource_start(dev, 0)+ 0x200000; break; case FB_ACCEL_NEOMAGIC_NM2160: case FB_ACCEL_NEOMAGIC_NM2097: case FB_ACCEL_NEOMAGIC_NM2200: case FB_ACCEL_NEOMAGIC_NM2230: case FB_ACCEL_NEOMAGIC_NM2360: case FB_ACCEL_NEOMAGIC_NM2380: info->fix.mmio_start = pci_resource_start(dev, 1); break; default: info->fix.mmio_start = pci_resource_start(dev, 0); } info->fix.mmio_len = MMIO_SIZE; if (!request_mem_region (info->fix.mmio_start, MMIO_SIZE, "memory mapped I/O")) { printk("neofb: memory mapped IO in use\n"); return -EBUSY; } par->mmio_vbase = ioremap(info->fix.mmio_start, MMIO_SIZE); if (!par->mmio_vbase) { printk("neofb: unable to map memory mapped IO\n"); release_mem_region(info->fix.mmio_start, info->fix.mmio_len); return -ENOMEM; } else printk(KERN_INFO "neofb: mapped io at %p\n", par->mmio_vbase); return 0; } static void neo_unmap_mmio(struct fb_info *info) { 
struct neofb_par *par = info->par; DBG("neo_unmap_mmio"); iounmap(par->mmio_vbase); par->mmio_vbase = NULL; release_mem_region(info->fix.mmio_start, info->fix.mmio_len); } static int __devinit neo_map_video(struct fb_info *info, struct pci_dev *dev, int video_len) { //unsigned long addr; DBG("neo_map_video"); info->fix.smem_start = pci_resource_start(dev, 0); info->fix.smem_len = video_len; if (!request_mem_region(info->fix.smem_start, info->fix.smem_len, "frame buffer")) { printk("neofb: frame buffer in use\n"); return -EBUSY; } info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len); if (!info->screen_base) { printk("neofb: unable to map screen memory\n"); release_mem_region(info->fix.smem_start, info->fix.smem_len); return -ENOMEM; } else printk(KERN_INFO "neofb: mapped framebuffer at %p\n", info->screen_base); #ifdef CONFIG_MTRR ((struct neofb_par *)(info->par))->mtrr = mtrr_add(info->fix.smem_start, pci_resource_len(dev, 0), MTRR_TYPE_WRCOMB, 1); #endif /* Clear framebuffer, it's all white in memory after boot */ memset_io(info->screen_base, 0, info->fix.smem_len); /* Allocate Cursor drawing pad. info->fix.smem_len -= PAGE_SIZE; addr = info->fix.smem_start + info->fix.smem_len; write_le32(NEOREG_CURSMEMPOS, ((0x000f & (addr >> 10)) << 8) | ((0x0ff0 & (addr >> 10)) >> 4), par); addr = (unsigned long) info->screen_base + info->fix.smem_len; info->sprite.addr = (u8 *) addr; */ return 0; } static void neo_unmap_video(struct fb_info *info) { DBG("neo_unmap_video"); #ifdef CONFIG_MTRR { struct neofb_par *par = info->par; mtrr_del(par->mtrr, info->fix.smem_start, info->fix.smem_len); } #endif iounmap(info->screen_base); info->screen_base = NULL; release_mem_region(info->fix.smem_start, info->fix.smem_len); } static int __devinit neo_scan_monitor(struct fb_info *info) { struct neofb_par *par = info->par; unsigned char type, display; int w; // Eventually we will have i2c support. 
info->monspecs.modedb = kmalloc(sizeof(struct fb_videomode), GFP_KERNEL); if (!info->monspecs.modedb) return -ENOMEM; info->monspecs.modedb_len = 1; /* Determine the panel type */ vga_wgfx(NULL, 0x09, 0x26); type = vga_rgfx(NULL, 0x21); display = vga_rgfx(NULL, 0x20); if (!par->internal_display && !par->external_display) { par->internal_display = display & 2 || !(display & 3) ? 1 : 0; par->external_display = display & 1; printk (KERN_INFO "Autodetected %s display\n", par->internal_display && par->external_display ? "simultaneous" : par->internal_display ? "internal" : "external"); } /* Determine panel width -- used in NeoValidMode. */ w = vga_rgfx(NULL, 0x20); vga_wgfx(NULL, 0x09, 0x00); switch ((w & 0x18) >> 3) { case 0x00: // 640x480@60 par->NeoPanelWidth = 640; par->NeoPanelHeight = 480; memcpy(info->monspecs.modedb, &vesa_modes[3], sizeof(struct fb_videomode)); break; case 0x01: par->NeoPanelWidth = 800; if (par->libretto) { par->NeoPanelHeight = 480; memcpy(info->monspecs.modedb, &mode800x480, sizeof(struct fb_videomode)); } else { // 800x600@60 par->NeoPanelHeight = 600; memcpy(info->monspecs.modedb, &vesa_modes[8], sizeof(struct fb_videomode)); } break; case 0x02: // 1024x768@60 par->NeoPanelWidth = 1024; par->NeoPanelHeight = 768; memcpy(info->monspecs.modedb, &vesa_modes[13], sizeof(struct fb_videomode)); break; case 0x03: /* 1280x1024@60 panel support needs to be added */ #ifdef NOT_DONE par->NeoPanelWidth = 1280; par->NeoPanelHeight = 1024; memcpy(info->monspecs.modedb, &vesa_modes[20], sizeof(struct fb_videomode)); break; #else printk(KERN_ERR "neofb: Only 640x480, 800x600/480 and 1024x768 panels are currently supported\n"); return -1; #endif default: // 640x480@60 par->NeoPanelWidth = 640; par->NeoPanelHeight = 480; memcpy(info->monspecs.modedb, &vesa_modes[3], sizeof(struct fb_videomode)); break; } printk(KERN_INFO "Panel is a %dx%d %s %s display\n", par->NeoPanelWidth, par->NeoPanelHeight, (type & 0x02) ? "color" : "monochrome", (type & 0x10) ? 
"TFT" : "dual scan"); return 0; } static int __devinit neo_init_hw(struct fb_info *info) { struct neofb_par *par = info->par; int videoRam = 896; int maxClock = 65000; int CursorMem = 1024; int CursorOff = 0x100; DBG("neo_init_hw"); neoUnlock(); #if 0 printk(KERN_DEBUG "--- Neo extended register dump ---\n"); for (int w = 0; w < 0x85; w++) printk(KERN_DEBUG "CR %p: %p\n", (void *) w, (void *) vga_rcrt(NULL, w)); for (int w = 0; w < 0xC7; w++) printk(KERN_DEBUG "GR %p: %p\n", (void *) w, (void *) vga_rgfx(NULL, w)); #endif switch (info->fix.accel) { case FB_ACCEL_NEOMAGIC_NM2070: videoRam = 896; maxClock = 65000; break; case FB_ACCEL_NEOMAGIC_NM2090: case FB_ACCEL_NEOMAGIC_NM2093: case FB_ACCEL_NEOMAGIC_NM2097: videoRam = 1152; maxClock = 80000; break; case FB_ACCEL_NEOMAGIC_NM2160: videoRam = 2048; maxClock = 90000; break; case FB_ACCEL_NEOMAGIC_NM2200: videoRam = 2560; maxClock = 110000; break; case FB_ACCEL_NEOMAGIC_NM2230: videoRam = 3008; maxClock = 110000; break; case FB_ACCEL_NEOMAGIC_NM2360: videoRam = 4096; maxClock = 110000; break; case FB_ACCEL_NEOMAGIC_NM2380: videoRam = 6144; maxClock = 110000; break; } switch (info->fix.accel) { case FB_ACCEL_NEOMAGIC_NM2070: case FB_ACCEL_NEOMAGIC_NM2090: case FB_ACCEL_NEOMAGIC_NM2093: CursorMem = 2048; CursorOff = 0x100; break; case FB_ACCEL_NEOMAGIC_NM2097: case FB_ACCEL_NEOMAGIC_NM2160: CursorMem = 1024; CursorOff = 0x100; break; case FB_ACCEL_NEOMAGIC_NM2200: case FB_ACCEL_NEOMAGIC_NM2230: case FB_ACCEL_NEOMAGIC_NM2360: case FB_ACCEL_NEOMAGIC_NM2380: CursorMem = 1024; CursorOff = 0x1000; par->neo2200 = (Neo2200 __iomem *) par->mmio_vbase; break; } /* info->sprite.size = CursorMem; info->sprite.scan_align = 1; info->sprite.buf_align = 1; info->sprite.flags = FB_PIXMAP_IO; info->sprite.outbuf = neofb_draw_cursor; */ par->maxClock = maxClock; par->cursorOff = CursorOff; return videoRam * 1024; } static struct fb_info *__devinit neo_alloc_fb_info(struct pci_dev *dev, const struct pci_device_id *id) { struct fb_info 
*info; struct neofb_par *par; info = framebuffer_alloc(sizeof(struct neofb_par), &dev->dev); if (!info) return NULL; par = info->par; info->fix.accel = id->driver_data; par->pci_burst = !nopciburst; par->lcd_stretch = !nostretch; par->libretto = libretto; par->internal_display = internal; par->external_display = external; info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN; switch (info->fix.accel) { case FB_ACCEL_NEOMAGIC_NM2070: snprintf(info->fix.id, sizeof(info->fix.id), "MagicGraph 128"); break; case FB_ACCEL_NEOMAGIC_NM2090: snprintf(info->fix.id, sizeof(info->fix.id), "MagicGraph 128V"); break; case FB_ACCEL_NEOMAGIC_NM2093: snprintf(info->fix.id, sizeof(info->fix.id), "MagicGraph 128ZV"); break; case FB_ACCEL_NEOMAGIC_NM2097: snprintf(info->fix.id, sizeof(info->fix.id), "MagicGraph 128ZV+"); break; case FB_ACCEL_NEOMAGIC_NM2160: snprintf(info->fix.id, sizeof(info->fix.id), "MagicGraph 128XD"); break; case FB_ACCEL_NEOMAGIC_NM2200: snprintf(info->fix.id, sizeof(info->fix.id), "MagicGraph 256AV"); info->flags |= FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT; break; case FB_ACCEL_NEOMAGIC_NM2230: snprintf(info->fix.id, sizeof(info->fix.id), "MagicGraph 256AV+"); info->flags |= FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT; break; case FB_ACCEL_NEOMAGIC_NM2360: snprintf(info->fix.id, sizeof(info->fix.id), "MagicGraph 256ZX"); info->flags |= FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT; break; case FB_ACCEL_NEOMAGIC_NM2380: snprintf(info->fix.id, sizeof(info->fix.id), "MagicGraph 256XL+"); info->flags |= FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT; break; } info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.type_aux = 0; info->fix.xpanstep = 0; info->fix.ypanstep = 4; info->fix.ywrapstep = 0; info->fix.accel = id->driver_data; info->fbops = &neofb_ops; info->pseudo_palette = par->palette; return info; } static void neo_free_fb_info(struct 
fb_info *info) { if (info) { /* * Free the colourmap */ fb_dealloc_cmap(&info->cmap); framebuffer_release(info); } } /* --------------------------------------------------------------------- */ static int __devinit neofb_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct fb_info *info; u_int h_sync, v_sync; int video_len, err; DBG("neofb_probe"); err = pci_enable_device(dev); if (err) return err; err = -ENOMEM; info = neo_alloc_fb_info(dev, id); if (!info) return err; err = neo_map_mmio(info, dev); if (err) goto err_map_mmio; err = neo_scan_monitor(info); if (err) goto err_scan_monitor; video_len = neo_init_hw(info); if (video_len < 0) { err = video_len; goto err_init_hw; } err = neo_map_video(info, dev, video_len); if (err) goto err_init_hw; if (!fb_find_mode(&info->var, info, mode_option, NULL, 0, info->monspecs.modedb, 16)) { printk(KERN_ERR "neofb: Unable to find usable video mode.\n"); goto err_map_video; } /* * Calculate the hsync and vsync frequencies. Note that * we split the 1e12 constant up so that we can preserve * the precision and fit the results into 32-bit registers. 
* (1953125000 * 512 = 1e12) */ h_sync = 1953125000 / info->var.pixclock; h_sync = h_sync * 512 / (info->var.xres + info->var.left_margin + info->var.right_margin + info->var.hsync_len); v_sync = h_sync / (info->var.yres + info->var.upper_margin + info->var.lower_margin + info->var.vsync_len); printk(KERN_INFO "neofb v" NEOFB_VERSION ": %dkB VRAM, using %dx%d, %d.%03dkHz, %dHz\n", info->fix.smem_len >> 10, info->var.xres, info->var.yres, h_sync / 1000, h_sync % 1000, v_sync); if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) goto err_map_video; err = register_framebuffer(info); if (err < 0) goto err_reg_fb; printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, info->fix.id); /* * Our driver data */ pci_set_drvdata(dev, info); return 0; err_reg_fb: fb_dealloc_cmap(&info->cmap); err_map_video: neo_unmap_video(info); err_init_hw: fb_destroy_modedb(info->monspecs.modedb); err_scan_monitor: neo_unmap_mmio(info); err_map_mmio: neo_free_fb_info(info); return err; } static void __devexit neofb_remove(struct pci_dev *dev) { struct fb_info *info = pci_get_drvdata(dev); DBG("neofb_remove"); if (info) { /* * If unregister_framebuffer fails, then * we will be leaving hooks that could cause * oopsen laying around. */ if (unregister_framebuffer(info)) printk(KERN_WARNING "neofb: danger danger! Oopsen imminent!\n"); neo_unmap_video(info); fb_destroy_modedb(info->monspecs.modedb); neo_unmap_mmio(info); neo_free_fb_info(info); /* * Ensure that the driver data is no longer * valid. 
*/ pci_set_drvdata(dev, NULL); } } static struct pci_device_id neofb_devices[] = { {PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2070, PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2070}, {PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2090, PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2090}, {PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2093, PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2093}, {PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2097, PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2097}, {PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2160, PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2160}, {PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2200}, {PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2230}, {PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2360}, {PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2380, PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2380}, {0, 0, 0, 0, 0, 0, 0} }; MODULE_DEVICE_TABLE(pci, neofb_devices); static struct pci_driver neofb_driver = { .name = "neofb", .id_table = neofb_devices, .probe = neofb_probe, .remove = __devexit_p(neofb_remove) }; /* ************************* init in-kernel code ************************** */ #ifndef MODULE static int __init neofb_setup(char *options) { char *this_opt; DBG("neofb_setup"); if (!options || !*options) return 0; while ((this_opt = strsep(&options, ",")) != NULL) { if (!*this_opt) continue; if (!strncmp(this_opt, "internal", 8)) internal = 1; else if (!strncmp(this_opt, "external", 8)) external = 1; else if (!strncmp(this_opt, "nostretch", 9)) nostretch = 1; else if (!strncmp(this_opt, "nopciburst", 10)) nopciburst = 1; else if (!strncmp(this_opt, "libretto", 8)) libretto = 1; else mode_option = this_opt; } return 0; } #endif /* MODULE */ static int __init neofb_init(void) { #ifndef MODULE char *option = NULL; if (fb_get_options("neofb", &option)) return -ENODEV; neofb_setup(option); #endif return 
pci_register_driver(&neofb_driver); } module_init(neofb_init); #ifdef MODULE static void __exit neofb_exit(void) { pci_unregister_driver(&neofb_driver); } module_exit(neofb_exit); #endif /* MODULE */
gpl-2.0
garwynn/android_kernel_samsung_jflte
sound/mips/au1x00.c
5235
19210
/* * BRIEF MODULE DESCRIPTION * Driver for AMD Au1000 MIPS Processor, AC'97 Sound Port * * Copyright 2004 Cooper Street Innovations Inc. * Author: Charles Eidsness <charles@cooper-street.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * * History: * * 2004-09-09 Charles Eidsness -- Original verion -- based on * sa11xx-uda1341.c ALSA driver and the * au1000.c OSS driver. 
* 2004-09-09 Matt Porter -- Added support for ALSA 1.0.6 * */ #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/ac97_codec.h> #include <asm/mach-au1x00/au1000.h> #include <asm/mach-au1x00/au1000_dma.h> MODULE_AUTHOR("Charles Eidsness <charles@cooper-street.com>"); MODULE_DESCRIPTION("Au1000 AC'97 ALSA Driver"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{AMD,Au1000 AC'97}}"); #define PLAYBACK 0 #define CAPTURE 1 #define AC97_SLOT_3 0x01 #define AC97_SLOT_4 0x02 #define AC97_SLOT_6 0x08 #define AC97_CMD_IRQ 31 #define READ 0 #define WRITE 1 #define READ_WAIT 2 #define RW_DONE 3 struct au1000_period { u32 start; u32 relative_end; /*realtive to start of buffer*/ struct au1000_period * next; }; /*Au1000 AC97 Port Control Reisters*/ struct au1000_ac97_reg { u32 volatile config; u32 volatile status; u32 volatile data; u32 volatile cmd; u32 volatile cntrl; }; struct audio_stream { struct snd_pcm_substream *substream; int dma; spinlock_t dma_lock; struct au1000_period * buffer; unsigned int period_size; unsigned int periods; }; struct snd_au1000 { struct snd_card *card; struct au1000_ac97_reg volatile *ac97_ioport; struct resource *ac97_res_port; spinlock_t ac97_lock; struct snd_ac97 *ac97; struct snd_pcm *pcm; struct audio_stream *stream[2]; /* playback & capture */ }; /*--------------------------- Local Functions --------------------------------*/ static void au1000_set_ac97_xmit_slots(struct snd_au1000 *au1000, long xmit_slots) { u32 volatile ac97_config; spin_lock(&au1000->ac97_lock); ac97_config = au1000->ac97_ioport->config; ac97_config = ac97_config & ~AC97C_XMIT_SLOTS_MASK; ac97_config |= (xmit_slots << AC97C_XMIT_SLOTS_BIT); au1000->ac97_ioport->config = ac97_config; spin_unlock(&au1000->ac97_lock); } static void au1000_set_ac97_recv_slots(struct snd_au1000 
*au1000, long recv_slots) { u32 volatile ac97_config; spin_lock(&au1000->ac97_lock); ac97_config = au1000->ac97_ioport->config; ac97_config = ac97_config & ~AC97C_RECV_SLOTS_MASK; ac97_config |= (recv_slots << AC97C_RECV_SLOTS_BIT); au1000->ac97_ioport->config = ac97_config; spin_unlock(&au1000->ac97_lock); } static void au1000_release_dma_link(struct audio_stream *stream) { struct au1000_period * pointer; struct au1000_period * pointer_next; stream->period_size = 0; stream->periods = 0; pointer = stream->buffer; if (! pointer) return; do { pointer_next = pointer->next; kfree(pointer); pointer = pointer_next; } while (pointer != stream->buffer); stream->buffer = NULL; } static int au1000_setup_dma_link(struct audio_stream *stream, unsigned int period_bytes, unsigned int periods) { struct snd_pcm_substream *substream = stream->substream; struct snd_pcm_runtime *runtime = substream->runtime; struct au1000_period *pointer; unsigned long dma_start; int i; dma_start = virt_to_phys(runtime->dma_area); if (stream->period_size == period_bytes && stream->periods == periods) return 0; /* not changed */ au1000_release_dma_link(stream); stream->period_size = period_bytes; stream->periods = periods; stream->buffer = kmalloc(sizeof(struct au1000_period), GFP_KERNEL); if (! stream->buffer) return -ENOMEM; pointer = stream->buffer; for (i = 0; i < periods; i++) { pointer->start = (u32)(dma_start + (i * period_bytes)); pointer->relative_end = (u32) (((i+1) * period_bytes) - 0x1); if (i < periods - 1) { pointer->next = kmalloc(sizeof(struct au1000_period), GFP_KERNEL); if (! 
pointer->next) { au1000_release_dma_link(stream); return -ENOMEM; } pointer = pointer->next; } } pointer->next = stream->buffer; return 0; } static void au1000_dma_stop(struct audio_stream *stream) { if (snd_BUG_ON(!stream->buffer)) return; disable_dma(stream->dma); } static void au1000_dma_start(struct audio_stream *stream) { if (snd_BUG_ON(!stream->buffer)) return; init_dma(stream->dma); if (get_dma_active_buffer(stream->dma) == 0) { clear_dma_done0(stream->dma); set_dma_addr0(stream->dma, stream->buffer->start); set_dma_count0(stream->dma, stream->period_size >> 1); set_dma_addr1(stream->dma, stream->buffer->next->start); set_dma_count1(stream->dma, stream->period_size >> 1); } else { clear_dma_done1(stream->dma); set_dma_addr1(stream->dma, stream->buffer->start); set_dma_count1(stream->dma, stream->period_size >> 1); set_dma_addr0(stream->dma, stream->buffer->next->start); set_dma_count0(stream->dma, stream->period_size >> 1); } enable_dma_buffers(stream->dma); start_dma(stream->dma); } static irqreturn_t au1000_dma_interrupt(int irq, void *dev_id) { struct audio_stream *stream = (struct audio_stream *) dev_id; struct snd_pcm_substream *substream = stream->substream; spin_lock(&stream->dma_lock); switch (get_dma_buffer_done(stream->dma)) { case DMA_D0: stream->buffer = stream->buffer->next; clear_dma_done0(stream->dma); set_dma_addr0(stream->dma, stream->buffer->next->start); set_dma_count0(stream->dma, stream->period_size >> 1); enable_dma_buffer0(stream->dma); break; case DMA_D1: stream->buffer = stream->buffer->next; clear_dma_done1(stream->dma); set_dma_addr1(stream->dma, stream->buffer->next->start); set_dma_count1(stream->dma, stream->period_size >> 1); enable_dma_buffer1(stream->dma); break; case (DMA_D0 | DMA_D1): printk(KERN_ERR "DMA %d missed interrupt.\n",stream->dma); au1000_dma_stop(stream); au1000_dma_start(stream); break; case (~DMA_D0 & ~DMA_D1): printk(KERN_ERR "DMA %d empty irq.\n",stream->dma); } spin_unlock(&stream->dma_lock); 
snd_pcm_period_elapsed(substream); return IRQ_HANDLED; } /*-------------------------- PCM Audio Streams -------------------------------*/ static unsigned int rates[] = {8000, 11025, 16000, 22050}; static struct snd_pcm_hw_constraint_list hw_constraints_rates = { .count = ARRAY_SIZE(rates), .list = rates, .mask = 0, }; static struct snd_pcm_hardware snd_au1000_hw = { .info = (SNDRV_PCM_INFO_INTERLEAVED | \ SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID), .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 | SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050), .rate_min = 8000, .rate_max = 22050, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = 128*1024, .period_bytes_min = 32, .period_bytes_max = 16*1024, .periods_min = 8, .periods_max = 255, .fifo_size = 16, }; static int snd_au1000_playback_open(struct snd_pcm_substream *substream) { struct snd_au1000 *au1000 = substream->pcm->private_data; au1000->stream[PLAYBACK]->substream = substream; au1000->stream[PLAYBACK]->buffer = NULL; substream->private_data = au1000->stream[PLAYBACK]; substream->runtime->hw = snd_au1000_hw; return (snd_pcm_hw_constraint_list(substream->runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_rates) < 0); } static int snd_au1000_capture_open(struct snd_pcm_substream *substream) { struct snd_au1000 *au1000 = substream->pcm->private_data; au1000->stream[CAPTURE]->substream = substream; au1000->stream[CAPTURE]->buffer = NULL; substream->private_data = au1000->stream[CAPTURE]; substream->runtime->hw = snd_au1000_hw; return (snd_pcm_hw_constraint_list(substream->runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_rates) < 0); } static int snd_au1000_playback_close(struct snd_pcm_substream *substream) { struct snd_au1000 *au1000 = substream->pcm->private_data; au1000->stream[PLAYBACK]->substream = NULL; return 0; } static int snd_au1000_capture_close(struct snd_pcm_substream *substream) { struct snd_au1000 *au1000 = substream->pcm->private_data; 
au1000->stream[CAPTURE]->substream = NULL; return 0; } static int snd_au1000_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct audio_stream *stream = substream->private_data; int err; err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); if (err < 0) return err; return au1000_setup_dma_link(stream, params_period_bytes(hw_params), params_periods(hw_params)); } static int snd_au1000_hw_free(struct snd_pcm_substream *substream) { struct audio_stream *stream = substream->private_data; au1000_release_dma_link(stream); return snd_pcm_lib_free_pages(substream); } static int snd_au1000_playback_prepare(struct snd_pcm_substream *substream) { struct snd_au1000 *au1000 = substream->pcm->private_data; struct snd_pcm_runtime *runtime = substream->runtime; if (runtime->channels == 1) au1000_set_ac97_xmit_slots(au1000, AC97_SLOT_4); else au1000_set_ac97_xmit_slots(au1000, AC97_SLOT_3 | AC97_SLOT_4); snd_ac97_set_rate(au1000->ac97, AC97_PCM_FRONT_DAC_RATE, runtime->rate); return 0; } static int snd_au1000_capture_prepare(struct snd_pcm_substream *substream) { struct snd_au1000 *au1000 = substream->pcm->private_data; struct snd_pcm_runtime *runtime = substream->runtime; if (runtime->channels == 1) au1000_set_ac97_recv_slots(au1000, AC97_SLOT_4); else au1000_set_ac97_recv_slots(au1000, AC97_SLOT_3 | AC97_SLOT_4); snd_ac97_set_rate(au1000->ac97, AC97_PCM_LR_ADC_RATE, runtime->rate); return 0; } static int snd_au1000_trigger(struct snd_pcm_substream *substream, int cmd) { struct audio_stream *stream = substream->private_data; int err = 0; spin_lock(&stream->dma_lock); switch (cmd) { case SNDRV_PCM_TRIGGER_START: au1000_dma_start(stream); break; case SNDRV_PCM_TRIGGER_STOP: au1000_dma_stop(stream); break; default: err = -EINVAL; break; } spin_unlock(&stream->dma_lock); return err; } static snd_pcm_uframes_t snd_au1000_pointer(struct snd_pcm_substream *substream) { struct audio_stream *stream = substream->private_data; 
struct snd_pcm_runtime *runtime = substream->runtime; long location; spin_lock(&stream->dma_lock); location = get_dma_residue(stream->dma); spin_unlock(&stream->dma_lock); location = stream->buffer->relative_end - location; if (location == -1) location = 0; return bytes_to_frames(runtime,location); } static struct snd_pcm_ops snd_card_au1000_playback_ops = { .open = snd_au1000_playback_open, .close = snd_au1000_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_au1000_hw_params, .hw_free = snd_au1000_hw_free, .prepare = snd_au1000_playback_prepare, .trigger = snd_au1000_trigger, .pointer = snd_au1000_pointer, }; static struct snd_pcm_ops snd_card_au1000_capture_ops = { .open = snd_au1000_capture_open, .close = snd_au1000_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_au1000_hw_params, .hw_free = snd_au1000_hw_free, .prepare = snd_au1000_capture_prepare, .trigger = snd_au1000_trigger, .pointer = snd_au1000_pointer, }; static int __devinit snd_au1000_pcm_new(struct snd_au1000 *au1000) { struct snd_pcm *pcm; int err; unsigned long flags; if ((err = snd_pcm_new(au1000->card, "AU1000 AC97 PCM", 0, 1, 1, &pcm)) < 0) return err; snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS, snd_dma_continuous_data(GFP_KERNEL), 128*1024, 128*1024); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_card_au1000_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_card_au1000_capture_ops); pcm->private_data = au1000; pcm->info_flags = 0; strcpy(pcm->name, "Au1000 AC97 PCM"); spin_lock_init(&au1000->stream[PLAYBACK]->dma_lock); spin_lock_init(&au1000->stream[CAPTURE]->dma_lock); flags = claim_dma_lock(); if ((au1000->stream[PLAYBACK]->dma = request_au1000_dma(DMA_ID_AC97C_TX, "AC97 TX", au1000_dma_interrupt, 0, au1000->stream[PLAYBACK])) < 0) { release_dma_lock(flags); return -EBUSY; } if ((au1000->stream[CAPTURE]->dma = request_au1000_dma(DMA_ID_AC97C_RX, "AC97 RX", au1000_dma_interrupt, 0, au1000->stream[CAPTURE])) < 0){ 
release_dma_lock(flags); return -EBUSY; } /* enable DMA coherency in read/write DMA channels */ set_dma_mode(au1000->stream[PLAYBACK]->dma, get_dma_mode(au1000->stream[PLAYBACK]->dma) & ~DMA_NC); set_dma_mode(au1000->stream[CAPTURE]->dma, get_dma_mode(au1000->stream[CAPTURE]->dma) & ~DMA_NC); release_dma_lock(flags); au1000->pcm = pcm; return 0; } /*-------------------------- AC97 CODEC Control ------------------------------*/ static unsigned short snd_au1000_ac97_read(struct snd_ac97 *ac97, unsigned short reg) { struct snd_au1000 *au1000 = ac97->private_data; u32 volatile cmd; u16 volatile data; int i; spin_lock(&au1000->ac97_lock); /* would rather use the interrupt than this polling but it works and I can't get the interrupt driven case to work efficiently */ for (i = 0; i < 0x5000; i++) if (!(au1000->ac97_ioport->status & AC97C_CP)) break; if (i == 0x5000) printk(KERN_ERR "au1000 AC97: AC97 command read timeout\n"); cmd = (u32) reg & AC97C_INDEX_MASK; cmd |= AC97C_READ; au1000->ac97_ioport->cmd = cmd; /* now wait for the data */ for (i = 0; i < 0x5000; i++) if (!(au1000->ac97_ioport->status & AC97C_CP)) break; if (i == 0x5000) { printk(KERN_ERR "au1000 AC97: AC97 command read timeout\n"); spin_unlock(&au1000->ac97_lock); return 0; } data = au1000->ac97_ioport->cmd & 0xffff; spin_unlock(&au1000->ac97_lock); return data; } static void snd_au1000_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val) { struct snd_au1000 *au1000 = ac97->private_data; u32 cmd; int i; spin_lock(&au1000->ac97_lock); /* would rather use the interrupt than this polling but it works and I can't get the interrupt driven case to work efficiently */ for (i = 0; i < 0x5000; i++) if (!(au1000->ac97_ioport->status & AC97C_CP)) break; if (i == 0x5000) printk(KERN_ERR "au1000 AC97: AC97 command write timeout\n"); cmd = (u32) reg & AC97C_INDEX_MASK; cmd &= ~AC97C_READ; cmd |= ((u32) val << AC97C_WD_BIT); au1000->ac97_ioport->cmd = cmd; spin_unlock(&au1000->ac97_lock); } static 
int __devinit snd_au1000_ac97_new(struct snd_au1000 *au1000) { int err; struct snd_ac97_bus *pbus; struct snd_ac97_template ac97; static struct snd_ac97_bus_ops ops = { .write = snd_au1000_ac97_write, .read = snd_au1000_ac97_read, }; if ((au1000->ac97_res_port = request_mem_region(CPHYSADDR(AC97C_CONFIG), 0x100000, "Au1x00 AC97")) == NULL) { snd_printk(KERN_ERR "ALSA AC97: can't grap AC97 port\n"); return -EBUSY; } au1000->ac97_ioport = (struct au1000_ac97_reg *) KSEG1ADDR(au1000->ac97_res_port->start); spin_lock_init(&au1000->ac97_lock); /* configure pins for AC'97 TODO: move to board_setup.c */ au_writel(au_readl(SYS_PINFUNC) & ~0x02, SYS_PINFUNC); /* Initialise Au1000's AC'97 Control Block */ au1000->ac97_ioport->cntrl = AC97C_RS | AC97C_CE; udelay(10); au1000->ac97_ioport->cntrl = AC97C_CE; udelay(10); /* Initialise External CODEC -- cold reset */ au1000->ac97_ioport->config = AC97C_RESET; udelay(10); au1000->ac97_ioport->config = 0x0; mdelay(5); /* Initialise AC97 middle-layer */ if ((err = snd_ac97_bus(au1000->card, 0, &ops, au1000, &pbus)) < 0) return err; memset(&ac97, 0, sizeof(ac97)); ac97.private_data = au1000; if ((err = snd_ac97_mixer(pbus, &ac97, &au1000->ac97)) < 0) return err; return 0; } /*------------------------------ Setup / Destroy ----------------------------*/ void snd_au1000_free(struct snd_card *card) { struct snd_au1000 *au1000 = card->private_data; if (au1000->ac97_res_port) { /* put internal AC97 block into reset */ au1000->ac97_ioport->cntrl = AC97C_RS; au1000->ac97_ioport = NULL; release_and_free_resource(au1000->ac97_res_port); } if (au1000->stream[PLAYBACK]) { if (au1000->stream[PLAYBACK]->dma >= 0) free_au1000_dma(au1000->stream[PLAYBACK]->dma); kfree(au1000->stream[PLAYBACK]); } if (au1000->stream[CAPTURE]) { if (au1000->stream[CAPTURE]->dma >= 0) free_au1000_dma(au1000->stream[CAPTURE]->dma); kfree(au1000->stream[CAPTURE]); } } static struct snd_card *au1000_card; static int __init au1000_init(void) { int err; struct snd_card 
*card; struct snd_au1000 *au1000; err = snd_card_create(-1, "AC97", THIS_MODULE, sizeof(struct snd_au1000), &card); if (err < 0) return err; card->private_free = snd_au1000_free; au1000 = card->private_data; au1000->card = card; au1000->stream[PLAYBACK] = kmalloc(sizeof(struct audio_stream), GFP_KERNEL); au1000->stream[CAPTURE ] = kmalloc(sizeof(struct audio_stream), GFP_KERNEL); /* so that snd_au1000_free will work as intended */ au1000->ac97_res_port = NULL; if (au1000->stream[PLAYBACK]) au1000->stream[PLAYBACK]->dma = -1; if (au1000->stream[CAPTURE ]) au1000->stream[CAPTURE ]->dma = -1; if (au1000->stream[PLAYBACK] == NULL || au1000->stream[CAPTURE ] == NULL) { snd_card_free(card); return -ENOMEM; } if ((err = snd_au1000_ac97_new(au1000)) < 0 ) { snd_card_free(card); return err; } if ((err = snd_au1000_pcm_new(au1000)) < 0) { snd_card_free(card); return err; } strcpy(card->driver, "Au1000-AC97"); strcpy(card->shortname, "AMD Au1000-AC97"); sprintf(card->longname, "AMD Au1000--AC97 ALSA Driver"); if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } printk(KERN_INFO "ALSA AC97: Driver Initialized\n"); au1000_card = card; return 0; } static void __exit au1000_exit(void) { snd_card_free(au1000_card); } module_init(au1000_init); module_exit(au1000_exit);
gpl-2.0
armpc/Armcore-a20-V12-kernel
fs/ncpfs/symlink.c
7795
4351
/*
 * linux/fs/ncpfs/symlink.c
 *
 * Code for allowing symbolic links on NCPFS (i.e. NetWare)
 * Symbolic links are not supported on native NetWare, so we use an
 * infrequently-used flag (Sh) and store a two-word magic header in
 * the file to make sure we don't accidentally use a non-link file
 * as a link.
 *
 * When using the NFS namespace, we set the mode to indicate a symlink and
 * don't bother with the magic numbers.
 *
 * from linux/fs/ext2/symlink.c
 *
 * Copyright (C) 1998-99, Frank A. Vorstenbosch
 *
 * ncpfs symlink handling code
 * NLS support (c) 1999 Petr Vandrovec
 * Modified 2000 Ben Harris, University of Cambridge for NFS NS meta-info
 *
 */

#include <asm/uaccess.h>

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include "ncp_fs.h"

/*
 * These magic numbers must appear at the start of a "kludge" symlink
 * file -- they make the Sh-flag heuristic resilient against the magic
 * attributes being set on random files.
 */
#define NCP_SYMLINK_MAGIC0	cpu_to_le32(0x6c6d7973)     /* "symlnk->" */
#define NCP_SYMLINK_MAGIC1	cpu_to_le32(0x3e2d6b6e)

/* ----- read a symbolic link ------------------------------------------ */

/*
 * readpage handler for the symlink address space: fetch the raw link
 * file from the server, strip and verify the magic header when the
 * volume uses the Sh-flag kludge, and convert the target path from the
 * server character set into the page.
 */
static int ncp_symlink_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int error, read_len, out_len;
	char *body, *raw;
	char *page_buf = kmap(page);

	error = -ENOMEM;
	raw = kmalloc(NCP_MAX_SYMLINK_SIZE, GFP_KERNEL);
	if (!raw)
		goto fail;

	if (ncp_make_open(inode, O_RDONLY))
		goto failEIO;

	error = ncp_read_kernel(NCP_SERVER(inode),
				NCP_FINFO(inode)->file_handle,
				0, NCP_MAX_SYMLINK_SIZE, raw, &read_len);

	ncp_inode_close(inode);
	/* Close file handle if no other users... */
	ncp_make_closed(inode);

	if (error)
		goto failEIO;

	if (NCP_FINFO(inode)->flags & NCPI_KLUDGE_SYMLINK) {
		/* Validate the two-word magic header, then skip past it. */
		if (read_len < NCP_MIN_SYMLINK_SIZE ||
		    ((__le32 *)raw)[0] != NCP_SYMLINK_MAGIC0 ||
		    ((__le32 *)raw)[1] != NCP_SYMLINK_MAGIC1)
			goto failEIO;
		body = raw + 8;
		read_len -= 8;
	} else {
		body = raw;
	}

	/* Convert from the server character set into the page buffer. */
	out_len = NCP_MAX_SYMLINK_SIZE;
	error = ncp_vol2io(NCP_SERVER(inode), page_buf, &out_len,
			   body, read_len, 0);
	kfree(raw);
	if (error)
		goto fail;
	SetPageUptodate(page);
	kunmap(page);
	unlock_page(page);
	return 0;

failEIO:
	error = -EIO;
	kfree(raw);
fail:
	SetPageError(page);
	kunmap(page);
	unlock_page(page);
	return error;
}

/*
 * symlinks can't do much...
 */
const struct address_space_operations ncp_symlink_aops = {
	.readpage	= ncp_symlink_readpage,
};

/* ----- create a new symbolic link -------------------------------------- */

/*
 * Create a symlink on the server.  Depending on the namespace this is
 * either a real NFS-namespace symlink (mode says S_IFLNK) or a plain
 * file marked Sh+Hidden with the magic header prepended (the "kludge").
 */
int ncp_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	char *raw;
	int len, err, written, outlen;
	int kludge;
	umode_t mode;
	__le32 attr;
	unsigned int hdr;

	DPRINTK("ncp_symlink(dir=%p,dentry=%p,symname=%s)\n",dir,dentry,symname);

	if (ncp_is_nfs_extras(NCP_SERVER(dir), NCP_FINFO(dir)->volNumber))
		kludge = 0;
	else
#ifdef CONFIG_NCPFS_EXTRAS
	if (NCP_SERVER(dir)->m.flags & NCP_MOUNT_SYMLINKS)
		kludge = 1;
	else
#endif
	/* EPERM is returned by VFS if symlink procedure does not exist */
		return -EPERM;

	raw = kmalloc(NCP_MAX_SYMLINK_SIZE, GFP_KERNEL);
	if (!raw)
		return -ENOMEM;

	if (kludge) {
		/* Plain file flagged Sh+Hidden, with magic header. */
		mode = 0;
		attr = aSHARED | aHIDDEN;
		((__le32 *)raw)[0] = NCP_SYMLINK_MAGIC0;
		((__le32 *)raw)[1] = NCP_SYMLINK_MAGIC1;
		hdr = 8;
	} else {
		/* NFS namespace: the mode itself marks the symlink. */
		mode = S_IFLNK | S_IRWXUGO;
		attr = 0;
		hdr = 0;
	}

	len = strlen(symname);
	/*
	 * Map to/from the server charset; do not touch upper/lower case
	 * as the symlink may point outside the ncp filesystem.
	 */
	outlen = NCP_MAX_SYMLINK_SIZE - hdr;
	err = ncp_io2vol(NCP_SERVER(dir), raw + hdr, &outlen,
			 symname, len, 0);
	if (err)
		goto failfree;
	outlen += hdr;

	err = -EIO;
	if (ncp_create_new(dir, dentry, mode, 0, attr))
		goto failfree;

	inode = dentry->d_inode;

	if (ncp_make_open(inode, O_WRONLY))
		goto failfree;

	if (ncp_write_kernel(NCP_SERVER(inode),
			     NCP_FINFO(inode)->file_handle,
			     0, outlen, raw, &written) || written != outlen)
		goto fail;

	ncp_inode_close(inode);
	ncp_make_closed(inode);
	kfree(raw);
	return 0;

fail:
	ncp_inode_close(inode);
	ncp_make_closed(inode);
failfree:
	kfree(raw);
	return err;
}

/* ----- EOF ----- */
gpl-2.0
spanish33/android_kernel_oneplus_msm8974
fs/ncpfs/symlink.c
7795
4351
/*
 * linux/fs/ncpfs/symlink.c
 *
 * Code for allowing symbolic links on NCPFS (i.e. NetWare)
 * Symbolic links are not supported on native NetWare, so we use an
 * infrequently-used flag (Sh) and store a two-word magic header in
 * the file to make sure we don't accidentally use a non-link file
 * as a link.
 *
 * When using the NFS namespace, we set the mode to indicate a symlink and
 * don't bother with the magic numbers.
 *
 * from linux/fs/ext2/symlink.c
 *
 * Copyright (C) 1998-99, Frank A. Vorstenbosch
 *
 * ncpfs symlink handling code
 * NLS support (c) 1999 Petr Vandrovec
 * Modified 2000 Ben Harris, University of Cambridge for NFS NS meta-info
 *
 */

#include <asm/uaccess.h>

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include "ncp_fs.h"

/*
 * These magic numbers must appear at the start of a "kludge" symlink
 * file -- they make the Sh-flag heuristic resilient against the magic
 * attributes being set on random files.
 */
#define NCP_SYMLINK_MAGIC0	cpu_to_le32(0x6c6d7973)     /* "symlnk->" */
#define NCP_SYMLINK_MAGIC1	cpu_to_le32(0x3e2d6b6e)

/* ----- read a symbolic link ------------------------------------------ */

/*
 * readpage handler for the symlink address space: fetch the raw link
 * file from the server, strip and verify the magic header when the
 * volume uses the Sh-flag kludge, and convert the target path from the
 * server character set into the page.
 */
static int ncp_symlink_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int error, read_len, out_len;
	char *body, *raw;
	char *page_buf = kmap(page);

	error = -ENOMEM;
	raw = kmalloc(NCP_MAX_SYMLINK_SIZE, GFP_KERNEL);
	if (!raw)
		goto fail;

	if (ncp_make_open(inode, O_RDONLY))
		goto failEIO;

	error = ncp_read_kernel(NCP_SERVER(inode),
				NCP_FINFO(inode)->file_handle,
				0, NCP_MAX_SYMLINK_SIZE, raw, &read_len);

	ncp_inode_close(inode);
	/* Close file handle if no other users... */
	ncp_make_closed(inode);

	if (error)
		goto failEIO;

	if (NCP_FINFO(inode)->flags & NCPI_KLUDGE_SYMLINK) {
		/* Validate the two-word magic header, then skip past it. */
		if (read_len < NCP_MIN_SYMLINK_SIZE ||
		    ((__le32 *)raw)[0] != NCP_SYMLINK_MAGIC0 ||
		    ((__le32 *)raw)[1] != NCP_SYMLINK_MAGIC1)
			goto failEIO;
		body = raw + 8;
		read_len -= 8;
	} else {
		body = raw;
	}

	/* Convert from the server character set into the page buffer. */
	out_len = NCP_MAX_SYMLINK_SIZE;
	error = ncp_vol2io(NCP_SERVER(inode), page_buf, &out_len,
			   body, read_len, 0);
	kfree(raw);
	if (error)
		goto fail;
	SetPageUptodate(page);
	kunmap(page);
	unlock_page(page);
	return 0;

failEIO:
	error = -EIO;
	kfree(raw);
fail:
	SetPageError(page);
	kunmap(page);
	unlock_page(page);
	return error;
}

/*
 * symlinks can't do much...
 */
const struct address_space_operations ncp_symlink_aops = {
	.readpage	= ncp_symlink_readpage,
};

/* ----- create a new symbolic link -------------------------------------- */

/*
 * Create a symlink on the server.  Depending on the namespace this is
 * either a real NFS-namespace symlink (mode says S_IFLNK) or a plain
 * file marked Sh+Hidden with the magic header prepended (the "kludge").
 */
int ncp_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	char *raw;
	int len, err, written, outlen;
	int kludge;
	umode_t mode;
	__le32 attr;
	unsigned int hdr;

	DPRINTK("ncp_symlink(dir=%p,dentry=%p,symname=%s)\n",dir,dentry,symname);

	if (ncp_is_nfs_extras(NCP_SERVER(dir), NCP_FINFO(dir)->volNumber))
		kludge = 0;
	else
#ifdef CONFIG_NCPFS_EXTRAS
	if (NCP_SERVER(dir)->m.flags & NCP_MOUNT_SYMLINKS)
		kludge = 1;
	else
#endif
	/* EPERM is returned by VFS if symlink procedure does not exist */
		return -EPERM;

	raw = kmalloc(NCP_MAX_SYMLINK_SIZE, GFP_KERNEL);
	if (!raw)
		return -ENOMEM;

	if (kludge) {
		/* Plain file flagged Sh+Hidden, with magic header. */
		mode = 0;
		attr = aSHARED | aHIDDEN;
		((__le32 *)raw)[0] = NCP_SYMLINK_MAGIC0;
		((__le32 *)raw)[1] = NCP_SYMLINK_MAGIC1;
		hdr = 8;
	} else {
		/* NFS namespace: the mode itself marks the symlink. */
		mode = S_IFLNK | S_IRWXUGO;
		attr = 0;
		hdr = 0;
	}

	len = strlen(symname);
	/*
	 * Map to/from the server charset; do not touch upper/lower case
	 * as the symlink may point outside the ncp filesystem.
	 */
	outlen = NCP_MAX_SYMLINK_SIZE - hdr;
	err = ncp_io2vol(NCP_SERVER(dir), raw + hdr, &outlen,
			 symname, len, 0);
	if (err)
		goto failfree;
	outlen += hdr;

	err = -EIO;
	if (ncp_create_new(dir, dentry, mode, 0, attr))
		goto failfree;

	inode = dentry->d_inode;

	if (ncp_make_open(inode, O_WRONLY))
		goto failfree;

	if (ncp_write_kernel(NCP_SERVER(inode),
			     NCP_FINFO(inode)->file_handle,
			     0, outlen, raw, &written) || written != outlen)
		goto fail;

	ncp_inode_close(inode);
	ncp_make_closed(inode);
	kfree(raw);
	return 0;

fail:
	ncp_inode_close(inode);
	ncp_make_closed(inode);
failfree:
	kfree(raw);
	return err;
}

/* ----- EOF ----- */
gpl-2.0
thicklizard/sense-4.2
arch/arm/mach-pxa/colibri-evalboard.c
7795
3365
/*
 * linux/arch/arm/mach-pxa/colibri-evalboard.c
 *
 * Support for Toradex Colibri Evaluation Carrier Board
 * Daniel Mack <daniel@caiaq.de>
 * Marek Vasut <marek.vasut@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <asm/mach-types.h>
#include <mach/hardware.h>
#include <asm/mach/arch.h>
#include <linux/i2c.h>
#include <linux/i2c/pxa-i2c.h>

#include <mach/pxa27x.h>
#include <mach/colibri.h>
#include <mach/mmc.h>
#include <mach/ohci.h>
#include <mach/pxa27x-udc.h>

#include "generic.h"
#include "devices.h"

/******************************************************************************
 * SD/MMC card controller
 ******************************************************************************/
#if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE)
static struct pxamci_platform_data colibri_mci_platform_data = {
	.ocr_mask		= MMC_VDD_32_33 | MMC_VDD_33_34,
	.gpio_power		= -1,
	.gpio_card_ro		= -1,
	.detect_delay_ms	= 200,
};

/*
 * Select the card-detect GPIO for the module variant this kernel is
 * booting on, then register the MMC controller.
 *
 * BUGFIX: the original code had no "else" between the PXA270 check and
 * the PXA300/PXA320 if/else chain, so on a PXA270 Colibri the trailing
 * "else" always overwrote gpio_card_detect with the PXA320 GPIO.  The
 * chain must be if / else if / else so exactly one assignment runs.
 */
static void __init colibri_mmc_init(void)
{
	if (machine_is_colibri())	/* PXA270 Colibri */
		colibri_mci_platform_data.gpio_card_detect =
			GPIO0_COLIBRI_PXA270_SD_DETECT;
	else if (machine_is_colibri300())	/* PXA300 Colibri */
		colibri_mci_platform_data.gpio_card_detect =
			GPIO13_COLIBRI_PXA300_SD_DETECT;
	else				/* PXA320 Colibri */
		colibri_mci_platform_data.gpio_card_detect =
			GPIO28_COLIBRI_PXA320_SD_DETECT;
	pxa_set_mci_info(&colibri_mci_platform_data);
}
#else
static inline void colibri_mmc_init(void) {}
#endif

/******************************************************************************
 * USB Host
 ******************************************************************************/
#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)

/* Configure USB port 2 output control register for host operation. */
static int colibri_ohci_init(struct device *dev)
{
	UP2OCR = UP2OCR_HXS | UP2OCR_HXOE | UP2OCR_DPPDE | UP2OCR_DMPDE;
	return 0;
}

static struct pxaohci_platform_data colibri_ohci_info = {
	.port_mode	= PMM_PERPORT_MODE,
	.flags		= ENABLE_PORT1 | POWER_CONTROL_LOW | POWER_SENSE_LOW,
	.init		= colibri_ohci_init,
};

/* Register the OHCI host controller. */
static void __init colibri_uhc_init(void)
{
	/* Colibri PXA270 has two usb ports, TBA for 320 */
	if (machine_is_colibri())
		colibri_ohci_info.flags	|= ENABLE_PORT2;

	pxa_set_ohci_info(&colibri_ohci_info);
}
#else
static inline void colibri_uhc_init(void) {}
#endif

/******************************************************************************
 * I2C RTC
 ******************************************************************************/
#if defined(CONFIG_RTC_DRV_DS1307) || defined(CONFIG_RTC_DRV_DS1307_MODULE)
static struct i2c_board_info __initdata colibri_i2c_devs[] = {
	{
		I2C_BOARD_INFO("m41t00", 0x68),
	},
};

/* Register the I2C bus and the on-board M41T00 RTC. */
static void __init colibri_rtc_init(void)
{
	pxa_set_i2c_info(NULL);
	i2c_register_board_info(0, ARRAY_AND_SIZE(colibri_i2c_devs));
}
#else
static inline void colibri_rtc_init(void) {}
#endif

/* Board init entry point: UARTs first, then the optional peripherals. */
void __init colibri_evalboard_init(void)
{
	pxa_set_ffuart_info(NULL);
	pxa_set_btuart_info(NULL);
	pxa_set_stuart_info(NULL);

	colibri_mmc_init();
	colibri_uhc_init();
	colibri_rtc_init();
}
gpl-2.0
davidmueller13/android_kernel_lge_msm8974-old
arch/m32r/platforms/opsput/io.c
13939
10799
/*
 * linux/arch/m32r/platforms/opsput/io.c
 *
 * Typical I/O routines for OPSPUT board.
 *
 * Copyright (c) 2001-2005  Hiroyuki Kondo, Hirokazu Takata,
 *                          Hitoshi Yamamoto, Takeo Takahashi
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */

#include <asm/m32r.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
#include <linux/types.h>

#define M32R_PCC_IOMAP_SIZE 0x1000

#define M32R_PCC_IOSTART0 0x1000
#define M32R_PCC_IOEND0   (M32R_PCC_IOSTART0 + M32R_PCC_IOMAP_SIZE - 1)

/*
 * Accessors provided by the M32R CFC/PCC driver.  The first argument is
 * the PCC socket number; this board only uses socket 0.
 */
extern void pcc_ioread_byte(int, unsigned long, void *, size_t, size_t, int);
extern void pcc_ioread_word(int, unsigned long, void *, size_t, size_t, int);
extern void pcc_iowrite_byte(int, unsigned long, void *, size_t, size_t, int);
extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int);
#endif /* CONFIG_PCMCIA && CONFIG_M32R_CFC */

#define PORT2ADDR(port)		_port2addr(port)
#define PORT2ADDR_USB(port)	_port2addr_usb(port)

/* Map a legacy I/O port number onto the non-cached bus address space. */
static inline void *_port2addr(unsigned long port)
{
	return (void *)(port | NONCACHE_OFFSET);
}

#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
/*
 * Map the legacy ATA task-file ports (0x1f0-0x1f7, plus the 0x3f6
 * device-control port) onto the board's IDE register addresses.
 * Unrecognized ports are redirected to a harmless dummy variable so a
 * stray access cannot touch real hardware.
 */
static inline void *__port2addr_ata(unsigned long port)
{
	static int dummy_reg;

	switch (port) {
	case 0x1f0:	return (void *)(0x0c002000 | NONCACHE_OFFSET);
	case 0x1f1:	return (void *)(0x0c012800 | NONCACHE_OFFSET);
	case 0x1f2:	return (void *)(0x0c012002 | NONCACHE_OFFSET);
	case 0x1f3:	return (void *)(0x0c012802 | NONCACHE_OFFSET);
	case 0x1f4:	return (void *)(0x0c012004 | NONCACHE_OFFSET);
	case 0x1f5:	return (void *)(0x0c012804 | NONCACHE_OFFSET);
	case 0x1f6:	return (void *)(0x0c012006 | NONCACHE_OFFSET);
	case 0x1f7:	return (void *)(0x0c012806 | NONCACHE_OFFSET);
	case 0x3f6:	return (void *)(0x0c01200e | NONCACHE_OFFSET);
	default:	return (void *)&dummy_reg;
	}
}
#endif

/*
 * OPSPUT-LAN is located in the extended bus space
 * from 0x10000000 to 0x13ffffff on physical address.
 * The base address of LAN controller(LAN91C111) is 0x300.
 */
#define LAN_IOSTART	(0x300 | NONCACHE_OFFSET)
#define LAN_IOEND	(0x320 | NONCACHE_OFFSET)

/* Map a LAN controller port onto the extended bus window. */
static inline void *_port2addr_ne(unsigned long port)
{
	return (void *)(port + 0x10000000);
}

/* Map a USB controller port (low nibble selects the register). */
static inline void *_port2addr_usb(unsigned long port)
{
	return (void *)((port & 0x0f) + NONCACHE_OFFSET + 0x10303000);
}

/* Short bus-settling delay used by the *_p accessors. */
static inline void delay(void)
{
	__asm__ __volatile__ ("push r0; \n\t pop r0;" : : :"memory");
}

/*
 * NIC I/O function
 */

#define PORT2ADDR_NE(port)	_port2addr_ne(port)

static inline unsigned char _ne_inb(void *portp)
{
	return *(volatile unsigned char *)portp;
}

static inline unsigned short _ne_inw(void *portp)
{
	/* LAN91C111 registers are little-endian on the bus. */
	return (unsigned short)le16_to_cpu(*(volatile unsigned short *)portp);
}

static inline void _ne_insb(void *portp, void *addr, unsigned long count)
{
	unsigned char *buf = (unsigned char *)addr;

	while (count--)
		*buf++ = _ne_inb(portp);
}

static inline void _ne_outb(unsigned char b, void *portp)
{
	*(volatile unsigned char *)portp = b;
}

static inline void _ne_outw(unsigned short w, void *portp)
{
	*(volatile unsigned short *)portp = cpu_to_le16(w);
}

/* Read one byte from an I/O port, dispatching on the port range. */
unsigned char _inb(unsigned long port)
{
	if (port >= LAN_IOSTART && port < LAN_IOEND)
		return _ne_inb(PORT2ADDR_NE(port));
#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
	else if ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6) {
		return *(volatile unsigned char *)__port2addr_ata(port);
	}
#endif
#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
	else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
		unsigned char b;
		pcc_ioread_byte(0, port, &b, sizeof(b), 1, 0);
		return b;
	} else
#endif
	return *(volatile unsigned char *)PORT2ADDR(port);
}

/* Read one 16-bit word from an I/O port, dispatching on the port range. */
unsigned short _inw(unsigned long port)
{
	if (port >= LAN_IOSTART && port < LAN_IOEND)
		return _ne_inw(PORT2ADDR_NE(port));
#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
	else if ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6) {
		return *(volatile unsigned short *)__port2addr_ata(port);
	}
#endif
#if defined(CONFIG_USB)
	else if (port >= 0x340 && port < 0x3a0)
		return *(volatile unsigned short *)PORT2ADDR_USB(port);
#endif
#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
	else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
		unsigned short w;
		pcc_ioread_word(0, port, &w, sizeof(w), 1, 0);
		return w;
	} else
#endif
	return *(volatile unsigned short *)PORT2ADDR(port);
}

/* Read one 32-bit long from an I/O port. */
unsigned long _inl(unsigned long port)
{
#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
	if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
		unsigned long l;
		pcc_ioread_word(0, port, &l, sizeof(l), 1, 0);
		return l;
	} else
#endif
	return *(volatile unsigned long *)PORT2ADDR(port);
}

/* "Pausing" variants: same as above plus a short settling delay. */
unsigned char _inb_p(unsigned long port)
{
	unsigned char v = _inb(port);
	delay();
	return (v);
}

unsigned short _inw_p(unsigned long port)
{
	unsigned short v = _inw(port);
	delay();
	return (v);
}

unsigned long _inl_p(unsigned long port)
{
	unsigned long v = _inl(port);
	delay();
	return (v);
}

/* Write one byte to an I/O port, dispatching on the port range. */
void _outb(unsigned char b, unsigned long port)
{
	if (port >= LAN_IOSTART && port < LAN_IOEND)
		_ne_outb(b, PORT2ADDR_NE(port));
	else
#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
	if ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6) {
		*(volatile unsigned char *)__port2addr_ata(port) = b;
	} else
#endif
#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
	if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
		pcc_iowrite_byte(0, port, &b, sizeof(b), 1, 0);
	} else
#endif
	*(volatile unsigned char *)PORT2ADDR(port) = b;
}

/* Write one 16-bit word to an I/O port, dispatching on the port range. */
void _outw(unsigned short w, unsigned long port)
{
	if (port >= LAN_IOSTART && port < LAN_IOEND)
		_ne_outw(w, PORT2ADDR_NE(port));
	else
#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
	if ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6) {
		*(volatile unsigned short *)__port2addr_ata(port) = w;
	} else
#endif
#if defined(CONFIG_USB)
	if (port >= 0x340 && port < 0x3a0)
		*(volatile unsigned short *)PORT2ADDR_USB(port) = w;
	else
#endif
#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
	if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
		pcc_iowrite_word(0, port, &w, sizeof(w), 1, 0);
	} else
#endif
	*(volatile unsigned short *)PORT2ADDR(port) = w;
}

/* Write one 32-bit long to an I/O port. */
void _outl(unsigned long l, unsigned long port)
{
#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
	if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
		pcc_iowrite_word(0, port, &l, sizeof(l), 1, 0);
	} else
#endif
	*(volatile unsigned long *)PORT2ADDR(port) = l;
}

void _outb_p(unsigned char b, unsigned long port)
{
	_outb(b, port);
	delay();
}

void _outw_p(unsigned short w, unsigned long port)
{
	_outw(w, port);
	delay();
}

void _outl_p(unsigned long l, unsigned long port)
{
	_outl(l, port);
	delay();
}

/* Read 'count' bytes from one port into the buffer at 'addr'. */
void _insb(unsigned int port, void *addr, unsigned long count)
{
	if (port >= LAN_IOSTART && port < LAN_IOEND)
		_ne_insb(PORT2ADDR_NE(port), addr, count);
#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
	else if ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6) {
		unsigned char *buf = addr;
		unsigned char *portp = __port2addr_ata(port);
		while (count--)
			*buf++ = *(volatile unsigned char *)portp;
	}
#endif
#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
	else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
		pcc_ioread_byte(0, port, (void *)addr,
				sizeof(unsigned char), count, 1);
	}
#endif
	else {
		unsigned char *buf = addr;
		unsigned char *portp = PORT2ADDR(port);
		while (count--)
			*buf++ = *(volatile unsigned char *)portp;
	}
}

/* Read 'count' 16-bit words from one port into the buffer at 'addr'. */
void _insw(unsigned int port, void *addr, unsigned long count)
{
	unsigned short *buf = addr;
	unsigned short *portp;

	if (port >= LAN_IOSTART && port < LAN_IOEND) {
		/*
		 * This portion is only used by smc91111.c to read data
		 * from the DATA_REG. Do not swap the data.
		 */
		portp = PORT2ADDR_NE(port);
		while (count--)
			*buf++ = *(volatile unsigned short *)portp;
#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
	} else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
		/*
		 * Fix: socket number was mistakenly 9; every other PCC
		 * access in this file uses socket 0.
		 */
		pcc_ioread_word(0, port, (void *)addr,
				sizeof(unsigned short), count, 1);
#endif
#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
	} else if ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6) {
		portp = __port2addr_ata(port);
		while (count--)
			*buf++ = *(volatile unsigned short *)portp;
#endif
	} else {
		portp = PORT2ADDR(port);
		while (count--)
			*buf++ = *(volatile unsigned short *)portp;
	}
}

/*
 * Read 'count' 32-bit longs from one port.
 * NOTE(review): unlike _insb/_insw, this has no LAN/IDE/PCC dispatch;
 * presumably no 32-bit string reads target those ranges — confirm.
 */
void _insl(unsigned int port, void *addr, unsigned long count)
{
	unsigned long *buf = addr;
	unsigned long *portp;

	portp = PORT2ADDR(port);
	while (count--)
		*buf++ = *(volatile unsigned long *)portp;
}

/* Write 'count' bytes from the buffer at 'addr' to one port. */
void _outsb(unsigned int port, const void *addr, unsigned long count)
{
	const unsigned char *buf = addr;
	unsigned char *portp;

	if (port >= LAN_IOSTART && port < LAN_IOEND) {
		portp = PORT2ADDR_NE(port);
		while (count--)
			_ne_outb(*buf++, portp);
#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
	} else if ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6) {
		portp = __port2addr_ata(port);
		while (count--)
			*(volatile unsigned char *)portp = *buf++;
#endif
#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
	} else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
		pcc_iowrite_byte(0, port, (void *)addr,
				 sizeof(unsigned char), count, 1);
#endif
	} else {
		portp = PORT2ADDR(port);
		while (count--)
			*(volatile unsigned char *)portp = *buf++;
	}
}

/* Write 'count' 16-bit words from the buffer at 'addr' to one port. */
void _outsw(unsigned int port, const void *addr, unsigned long count)
{
	const unsigned short *buf = addr;
	unsigned short *portp;

	if (port >= LAN_IOSTART && port < LAN_IOEND) {
		/*
		 * This portion is only used by smc91111.c to write data
		 * into the DATA_REG. Do not swap the data.
		 */
		portp = PORT2ADDR_NE(port);
		while (count--)
			*(volatile unsigned short *)portp = *buf++;
#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
	} else if ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6) {
		portp = __port2addr_ata(port);
		while (count--)
			*(volatile unsigned short *)portp = *buf++;
#endif
#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
	} else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
		/*
		 * Fix: socket number was mistakenly 9; every other PCC
		 * access in this file uses socket 0.
		 */
		pcc_iowrite_word(0, port, (void *)addr,
				 sizeof(unsigned short), count, 1);
#endif
	} else {
		portp = PORT2ADDR(port);
		while (count--)
			*(volatile unsigned short *)portp = *buf++;
	}
}

/*
 * Write 'count' 32-bit longs from the buffer at 'addr' to one port.
 * NOTE(review): like _insl, no LAN/IDE/PCC dispatch here — confirm
 * no 32-bit string writes target those ranges.
 */
void _outsl(unsigned int port, const void *addr, unsigned long count)
{
	const unsigned long *buf = addr;
	unsigned long *portp;	/* was unsigned char *; match the access width */

	portp = PORT2ADDR(port);
	while (count--)
		*(volatile unsigned long *)portp = *buf++;
}
gpl-2.0
willcharlton/linux
arch/cris/arch-v32/drivers/sync_serial.c
372
48865
/*
 * Simple synchronous serial port driver for ETRAX FS and ARTPEC-3.
 *
 * Copyright (c) 2005, 2008 Axis Communications AB
 * Author: Mikael Starvik
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/wait.h>

#include <asm/io.h>
#include <mach/dma.h>
#include <pinmux.h>
#include <hwregs/reg_rdwr.h>
#include <hwregs/sser_defs.h>
#include <hwregs/timer_defs.h>
#include <hwregs/dma_defs.h>
#include <hwregs/dma.h>
#include <hwregs/intr_vect_defs.h>
#include <hwregs/intr_vect.h>
#include <hwregs/reg_map.h>
#include <asm/sync_serial.h>

/* The receiver is a bit tricky because of the continuous stream of data. */
/*                                                                        */
/* Three DMA descriptors are linked together. Each DMA descriptor is      */
/* responsible for port->bufchunk of a common buffer.                     */
/*                                                                        */
/*  +---------------------------------------------+                      */
/*  |  +----------+  +----------+  +----------+   |                      */
/*  +->| Descr[0] |->| Descr[1] |->| Descr[2] |---+                      */
/*     +----------+  +----------+  +----------+                          */
/*          |             |             |                                 */
/*          v             v             v                                 */
/*     +-------------------------------------+                           */
/*     |              BUFFER                 |                            */
/*     +-------------------------------------+                           */
/*       |<-  data_avail  ->|                                             */
/*     readp              writep                                          */
/*                                                                        */
/* If the application keeps up the pace readp will be right after writep. */
/* If the application can't keep the pace we have to throw away data.     */
/* The idea is that readp should be ready with the data pointed out by    */
/* Descr[i] when the DMA has filled in Descr[i+1].                        */
/* Otherwise we will discard                                              */
/* the rest of the data pointed out by Descr1 and set readp to the start  */
/* of Descr2                                                              */

/* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */
/* words can be handled */

#define IN_DESCR_SIZE SSP_INPUT_CHUNK_SIZE
#define NBR_IN_DESCR (8*6)
#define IN_BUFFER_SIZE (IN_DESCR_SIZE * NBR_IN_DESCR)

#define NBR_OUT_DESCR 8
#define OUT_BUFFER_SIZE (1024 * NBR_OUT_DESCR)

#define DEFAULT_FRAME_RATE 0
#define DEFAULT_WORD_RATE 7

/* To be removed when we move to pure udev. */
#define SYNC_SERIAL_MAJOR 125

/* NOTE: Enabling some debug will likely cause overrun or underrun,
 * especially if manual mode is used.
 */
#define DEBUG(x)
#define DEBUGREAD(x)
#define DEBUGWRITE(x)
#define DEBUGPOLL(x)
#define DEBUGRXINT(x)
#define DEBUGTXINT(x)
#define DEBUGTRDMA(x)
#define DEBUGOUTBUF(x)

/* How the port's interrupts were set up at open time (see sync_serial_open). */
enum syncser_irq_setup {
	no_irq_setup = 0,
	dma_irq_setup = 1,
	manual_irq_setup = 2,
};

/* Per-port state: register bases, DMA bookkeeping, and ring buffers. */
struct sync_port {
	unsigned long regi_sser;
	unsigned long regi_dmain;
	unsigned long regi_dmaout;

	/* Interrupt vectors. */
	unsigned long dma_in_intr_vect;		/* Used for DMA in. */
	unsigned long dma_out_intr_vect;	/* Used for DMA out. */
	unsigned long syncser_intr_vect;	/* Used when no DMA. */

	/* DMA number for in and out. */
	unsigned int dma_in_nbr;
	unsigned int dma_out_nbr;

	/* DMA owner. */
	enum dma_owner req_dma;

	char started;		/* 1 if port has been started */
	char port_nbr;		/* Port 0 or 1 */
	char busy;		/* 1 if port is busy */
	char enabled;		/* 1 if port is enabled */
	char use_dma;		/* 1 if port uses dma */
	char tr_running;

	enum syncser_irq_setup init_irqs;
	int output;
	int input;

	/* Next byte to be read by application */
	unsigned char *readp;
	/* Next byte to be written by etrax */
	unsigned char *writep;

	unsigned int in_buffer_size;
	unsigned int in_buffer_len;
	unsigned int inbufchunk;

	/* Data buffers for in and output. */
	unsigned char out_buffer[OUT_BUFFER_SIZE] __aligned(32);
	unsigned char in_buffer[IN_BUFFER_SIZE] __aligned(32);
	unsigned char flip[IN_BUFFER_SIZE] __aligned(32);
	struct timespec timestamp[NBR_IN_DESCR];
	struct dma_descr_data *next_rx_desc;
	struct dma_descr_data *prev_rx_desc;

	struct timeval last_timestamp;
	int read_ts_idx;
	int write_ts_idx;

	/* Pointer to the first available descriptor in the ring,
	 * unless active_tr_descr == catch_tr_descr and a dma
	 * transfer is active */
	struct dma_descr_data *active_tr_descr;

	/* Pointer to the first allocated descriptor in the ring */
	struct dma_descr_data *catch_tr_descr;

	/* Pointer to the descriptor with the current end-of-list */
	struct dma_descr_data *prev_tr_descr;
	int full;

	/* Pointer to the first byte being read by DMA
	 * or current position in out_buffer if not using DMA. */
	unsigned char *out_rd_ptr;

	/* Number of bytes currently locked for being read by DMA */
	int out_buf_count;

	dma_descr_context in_context __aligned(32);
	dma_descr_context out_context __aligned(32);
	dma_descr_data in_descr[NBR_IN_DESCR] __aligned(16);
	dma_descr_data out_descr[NBR_OUT_DESCR] __aligned(16);

	wait_queue_head_t out_wait_q;
	wait_queue_head_t in_wait_q;

	spinlock_t lock;
};

static DEFINE_MUTEX(sync_serial_mutex);
static int etrax_sync_serial_init(void);
static void initialize_port(int portnbr);
static inline int sync_data_avail(struct sync_port *port);

static int sync_serial_open(struct inode *, struct file *);
static int sync_serial_release(struct inode *, struct file *);
static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);

static long sync_serial_ioctl(struct file *file,
			      unsigned int cmd, unsigned long arg);
static int sync_serial_ioctl_unlocked(struct file *file,
				      unsigned int cmd, unsigned long arg);
static ssize_t sync_serial_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos);
static ssize_t sync_serial_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos);

/* DMA mode is compiled in only when at least one port is configured for it. */
#if ((defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
      defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
     (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
      defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)))
#define SYNC_SER_DMA
#else
#define SYNC_SER_MANUAL
#endif

#ifdef SYNC_SER_DMA
static void start_dma_out(struct sync_port *port, const char *data, int count);
static void start_dma_in(struct sync_port *port);
static irqreturn_t tr_interrupt(int irq, void *dev_id);
static irqreturn_t rx_interrupt(int irq, void *dev_id);
#endif
#ifdef SYNC_SER_MANUAL
static void send_word(struct sync_port *port);
static irqreturn_t manual_interrupt(int irq, void *dev_id);
#endif

/* Common names for the crisv32 pinmux/DMA allocation helpers. */
#define artpec_pinmux_alloc_fixed crisv32_pinmux_alloc_fixed
#define artpec_request_dma crisv32_request_dma
#define artpec_free_dma crisv32_free_dma

#ifdef CONFIG_ETRAXFS
/* ETRAX FS */
#define DMA_OUT_NBR0		SYNC_SER0_TX_DMA_NBR
#define DMA_IN_NBR0		SYNC_SER0_RX_DMA_NBR
#define DMA_OUT_NBR1		SYNC_SER1_TX_DMA_NBR
#define DMA_IN_NBR1		SYNC_SER1_RX_DMA_NBR
#define PINMUX_SSER0		pinmux_sser0
#define PINMUX_SSER1		pinmux_sser1
#define SYNCSER_INST0		regi_sser0
#define SYNCSER_INST1		regi_sser1
#define SYNCSER_INTR_VECT0	SSER0_INTR_VECT
#define SYNCSER_INTR_VECT1	SSER1_INTR_VECT
#define OUT_DMA_INST0		regi_dma4
#define IN_DMA_INST0		regi_dma5
#define DMA_OUT_INTR_VECT0	DMA4_INTR_VECT
#define DMA_OUT_INTR_VECT1	DMA7_INTR_VECT
#define DMA_IN_INTR_VECT0	DMA5_INTR_VECT
#define DMA_IN_INTR_VECT1	DMA6_INTR_VECT
#define REQ_DMA_SYNCSER0	dma_sser0
#define REQ_DMA_SYNCSER1	dma_sser1
#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
#define PORT1_DMA 1
#else
#define PORT1_DMA 0
#endif
#elif defined(CONFIG_CRIS_MACH_ARTPEC3)
/* ARTPEC-3 */
#define DMA_OUT_NBR0		SYNC_SER_TX_DMA_NBR
#define DMA_IN_NBR0		SYNC_SER_RX_DMA_NBR
#define PINMUX_SSER0		pinmux_sser
#define SYNCSER_INST0		regi_sser
#define SYNCSER_INTR_VECT0	SSER_INTR_VECT
#define OUT_DMA_INST0		regi_dma6
#define IN_DMA_INST0		regi_dma7
#define DMA_OUT_INTR_VECT0	DMA6_INTR_VECT
#define DMA_IN_INTR_VECT0 DMA7_INTR_VECT #define REQ_DMA_SYNCSER0 dma_sser #define REQ_DMA_SYNCSER1 dma_sser #endif #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA) #define PORT0_DMA 1 #else #define PORT0_DMA 0 #endif /* The ports */ static struct sync_port ports[] = { { .regi_sser = SYNCSER_INST0, .regi_dmaout = OUT_DMA_INST0, .regi_dmain = IN_DMA_INST0, .use_dma = PORT0_DMA, .dma_in_intr_vect = DMA_IN_INTR_VECT0, .dma_out_intr_vect = DMA_OUT_INTR_VECT0, .dma_in_nbr = DMA_IN_NBR0, .dma_out_nbr = DMA_OUT_NBR0, .req_dma = REQ_DMA_SYNCSER0, .syncser_intr_vect = SYNCSER_INTR_VECT0, }, #ifdef CONFIG_ETRAXFS { .regi_sser = SYNCSER_INST1, .regi_dmaout = regi_dma6, .regi_dmain = regi_dma7, .use_dma = PORT1_DMA, .dma_in_intr_vect = DMA_IN_INTR_VECT1, .dma_out_intr_vect = DMA_OUT_INTR_VECT1, .dma_in_nbr = DMA_IN_NBR1, .dma_out_nbr = DMA_OUT_NBR1, .req_dma = REQ_DMA_SYNCSER1, .syncser_intr_vect = SYNCSER_INTR_VECT1, }, #endif }; #define NBR_PORTS ARRAY_SIZE(ports) static const struct file_operations syncser_fops = { .owner = THIS_MODULE, .write = sync_serial_write, .read = sync_serial_read, .poll = sync_serial_poll, .unlocked_ioctl = sync_serial_ioctl, .open = sync_serial_open, .release = sync_serial_release, .llseek = noop_llseek, }; static dev_t syncser_first; static int minor_count = NBR_PORTS; #define SYNCSER_NAME "syncser" static struct cdev *syncser_cdev; static struct class *syncser_class; static void sync_serial_start_port(struct sync_port *port) { reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg); reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg); reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg); cfg.en = regk_sser_yes; tr_cfg.tr_en = regk_sser_yes; rec_cfg.rec_en = regk_sser_yes; REG_WR(sser, port->regi_sser, rw_cfg, cfg); REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg); REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg); port->started = 1; } static void __init initialize_port(int portnbr) { struct sync_port 
*port = &ports[portnbr]; reg_sser_rw_cfg cfg = { 0 }; reg_sser_rw_frm_cfg frm_cfg = { 0 }; reg_sser_rw_tr_cfg tr_cfg = { 0 }; reg_sser_rw_rec_cfg rec_cfg = { 0 }; DEBUG(pr_info("Init sync serial port %d\n", portnbr)); port->port_nbr = portnbr; port->init_irqs = no_irq_setup; port->out_rd_ptr = port->out_buffer; port->out_buf_count = 0; port->output = 1; port->input = 0; port->readp = port->flip; port->writep = port->flip; port->in_buffer_size = IN_BUFFER_SIZE; port->in_buffer_len = 0; port->inbufchunk = IN_DESCR_SIZE; port->read_ts_idx = 0; port->write_ts_idx = 0; init_waitqueue_head(&port->out_wait_q); init_waitqueue_head(&port->in_wait_q); spin_lock_init(&port->lock); cfg.out_clk_src = regk_sser_intern_clk; cfg.out_clk_pol = regk_sser_pos; cfg.clk_od_mode = regk_sser_no; cfg.clk_dir = regk_sser_out; cfg.gate_clk = regk_sser_no; cfg.base_freq = regk_sser_f29_493; cfg.clk_div = 256; REG_WR(sser, port->regi_sser, rw_cfg, cfg); frm_cfg.wordrate = DEFAULT_WORD_RATE; frm_cfg.type = regk_sser_edge; frm_cfg.frame_pin_dir = regk_sser_out; frm_cfg.frame_pin_use = regk_sser_frm; frm_cfg.status_pin_dir = regk_sser_in; frm_cfg.status_pin_use = regk_sser_hold; frm_cfg.out_on = regk_sser_tr; frm_cfg.tr_delay = 1; REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg); tr_cfg.urun_stop = regk_sser_no; tr_cfg.sample_size = 7; tr_cfg.sh_dir = regk_sser_msbfirst; tr_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no; #if 0 tr_cfg.rate_ctrl = regk_sser_bulk; tr_cfg.data_pin_use = regk_sser_dout; #else tr_cfg.rate_ctrl = regk_sser_iso; tr_cfg.data_pin_use = regk_sser_dout; #endif tr_cfg.bulk_wspace = 1; REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg); rec_cfg.sample_size = 7; rec_cfg.sh_dir = regk_sser_msbfirst; rec_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no; rec_cfg.fifo_thr = regk_sser_inf; REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg); #ifdef SYNC_SER_DMA { int i; /* Setup the descriptor ring for dma out/transmit. 
*/ for (i = 0; i < NBR_OUT_DESCR; i++) { dma_descr_data *descr = &port->out_descr[i]; descr->wait = 0; descr->intr = 1; descr->eol = 0; descr->out_eop = 0; descr->next = (dma_descr_data *)virt_to_phys(&descr[i+1]); } } /* Create a ring from the list. */ port->out_descr[NBR_OUT_DESCR-1].next = (dma_descr_data *)virt_to_phys(&port->out_descr[0]); /* Setup context for traversing the ring. */ port->active_tr_descr = &port->out_descr[0]; port->prev_tr_descr = &port->out_descr[NBR_OUT_DESCR-1]; port->catch_tr_descr = &port->out_descr[0]; #endif } static inline int sync_data_avail(struct sync_port *port) { return port->in_buffer_len; } static int sync_serial_open(struct inode *inode, struct file *file) { int ret = 0; int dev = iminor(inode); struct sync_port *port; #ifdef SYNC_SER_DMA reg_dma_rw_cfg cfg = { .en = regk_dma_yes }; reg_dma_rw_intr_mask intr_mask = { .data = regk_dma_yes }; #endif DEBUG(pr_debug("Open sync serial port %d\n", dev)); if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) { DEBUG(pr_info("Invalid minor %d\n", dev)); return -ENODEV; } port = &ports[dev]; /* Allow open this device twice (assuming one reader and one writer) */ if (port->busy == 2) { DEBUG(pr_info("syncser%d is busy\n", dev)); return -EBUSY; } mutex_lock(&sync_serial_mutex); /* Clear any stale date left in the flip buffer */ port->readp = port->writep = port->flip; port->in_buffer_len = 0; port->read_ts_idx = 0; port->write_ts_idx = 0; if (port->init_irqs != no_irq_setup) { /* Init only on first call. */ port->busy++; mutex_unlock(&sync_serial_mutex); return 0; } if (port->use_dma) { #ifdef SYNC_SER_DMA const char *tmp; DEBUG(pr_info("Using DMA for syncser%d\n", dev)); tmp = dev == 0 ? 
"syncser0 tx" : "syncser1 tx"; if (request_irq(port->dma_out_intr_vect, tr_interrupt, 0, tmp, port)) { pr_err("Can't alloc syncser%d TX IRQ", dev); ret = -EBUSY; goto unlock_and_exit; } if (artpec_request_dma(port->dma_out_nbr, tmp, DMA_VERBOSE_ON_ERROR, 0, port->req_dma)) { free_irq(port->dma_out_intr_vect, port); pr_err("Can't alloc syncser%d TX DMA", dev); ret = -EBUSY; goto unlock_and_exit; } tmp = dev == 0 ? "syncser0 rx" : "syncser1 rx"; if (request_irq(port->dma_in_intr_vect, rx_interrupt, 0, tmp, port)) { artpec_free_dma(port->dma_out_nbr); free_irq(port->dma_out_intr_vect, port); pr_err("Can't alloc syncser%d RX IRQ", dev); ret = -EBUSY; goto unlock_and_exit; } if (artpec_request_dma(port->dma_in_nbr, tmp, DMA_VERBOSE_ON_ERROR, 0, port->req_dma)) { artpec_free_dma(port->dma_out_nbr); free_irq(port->dma_out_intr_vect, port); free_irq(port->dma_in_intr_vect, port); pr_err("Can't alloc syncser%d RX DMA", dev); ret = -EBUSY; goto unlock_and_exit; } /* Enable DMAs */ REG_WR(dma, port->regi_dmain, rw_cfg, cfg); REG_WR(dma, port->regi_dmaout, rw_cfg, cfg); /* Enable DMA IRQs */ REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask); REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask); /* Set up wordsize = 1 for DMAs. */ DMA_WR_CMD(port->regi_dmain, regk_dma_set_w_size1); DMA_WR_CMD(port->regi_dmaout, regk_dma_set_w_size1); start_dma_in(port); port->init_irqs = dma_irq_setup; #endif } else { /* !port->use_dma */ #ifdef SYNC_SER_MANUAL const char *tmp = dev == 0 ? 
"syncser0 manual irq" : "syncser1 manual irq"; if (request_irq(port->syncser_intr_vect, manual_interrupt, 0, tmp, port)) { pr_err("Can't alloc syncser%d manual irq", dev); ret = -EBUSY; goto unlock_and_exit; } port->init_irqs = manual_irq_setup; #else panic("sync_serial: Manual mode not supported\n"); #endif /* SYNC_SER_MANUAL */ } port->busy++; ret = 0; unlock_and_exit: mutex_unlock(&sync_serial_mutex); return ret; } static int sync_serial_release(struct inode *inode, struct file *file) { int dev = iminor(inode); struct sync_port *port; if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) { DEBUG(pr_info("Invalid minor %d\n", dev)); return -ENODEV; } port = &ports[dev]; if (port->busy) port->busy--; if (!port->busy) /* XXX */; return 0; } static unsigned int sync_serial_poll(struct file *file, poll_table *wait) { int dev = iminor(file_inode(file)); unsigned int mask = 0; struct sync_port *port; DEBUGPOLL( static unsigned int prev_mask; ); port = &ports[dev]; if (!port->started) sync_serial_start_port(port); poll_wait(file, &port->out_wait_q, wait); poll_wait(file, &port->in_wait_q, wait); /* No active transfer, descriptors are available */ if (port->output && !port->tr_running) mask |= POLLOUT | POLLWRNORM; /* Descriptor and buffer space available. */ if (port->output && port->active_tr_descr != port->catch_tr_descr && port->out_buf_count < OUT_BUFFER_SIZE) mask |= POLLOUT | POLLWRNORM; /* At least an inbufchunk of data */ if (port->input && sync_data_avail(port) >= port->inbufchunk) mask |= POLLIN | POLLRDNORM; DEBUGPOLL( if (mask != prev_mask) pr_info("sync_serial_poll: mask 0x%08X %s %s\n", mask, mask & POLLOUT ? "POLLOUT" : "", mask & POLLIN ? 
"POLLIN" : ""); prev_mask = mask; ); return mask; } static ssize_t __sync_serial_read(struct file *file, char __user *buf, size_t count, loff_t *ppos, struct timespec *ts) { unsigned long flags; int dev = MINOR(file_inode(file)->i_rdev); int avail; struct sync_port *port; unsigned char *start; unsigned char *end; if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) { DEBUG(pr_info("Invalid minor %d\n", dev)); return -ENODEV; } port = &ports[dev]; if (!port->started) sync_serial_start_port(port); /* Calculate number of available bytes */ /* Save pointers to avoid that they are modified by interrupt */ spin_lock_irqsave(&port->lock, flags); start = port->readp; end = port->writep; spin_unlock_irqrestore(&port->lock, flags); while ((start == end) && !port->in_buffer_len) { if (file->f_flags & O_NONBLOCK) return -EAGAIN; wait_event_interruptible(port->in_wait_q, !(start == end && !port->full)); if (signal_pending(current)) return -EINTR; spin_lock_irqsave(&port->lock, flags); start = port->readp; end = port->writep; spin_unlock_irqrestore(&port->lock, flags); } DEBUGREAD(pr_info("R%d c %d ri %u wi %u /%u\n", dev, count, start - port->flip, end - port->flip, port->in_buffer_size)); /* Lazy read, never return wrapped data. */ if (end > start) avail = end - start; else avail = port->flip + port->in_buffer_size - start; count = count > avail ? avail : count; if (copy_to_user(buf, start, count)) return -EFAULT; /* If timestamp requested, find timestamp of first returned byte * and copy it. * N.B: Applications that request timstamps MUST read data in * chunks that are multiples of IN_DESCR_SIZE. * Otherwise the timestamps will not be aligned to the data read. 
*/ if (ts != NULL) { int idx = port->read_ts_idx; memcpy(ts, &port->timestamp[idx], sizeof(struct timespec)); port->read_ts_idx += count / IN_DESCR_SIZE; if (port->read_ts_idx >= NBR_IN_DESCR) port->read_ts_idx = 0; } spin_lock_irqsave(&port->lock, flags); port->readp += count; /* Check for wrap */ if (port->readp >= port->flip + port->in_buffer_size) port->readp = port->flip; port->in_buffer_len -= count; port->full = 0; spin_unlock_irqrestore(&port->lock, flags); DEBUGREAD(pr_info("r %d\n", count)); return count; } static ssize_t sync_serial_input(struct file *file, unsigned long arg) { struct ssp_request req; int count; int ret; /* Copy the request structure from user-mode. */ ret = copy_from_user(&req, (struct ssp_request __user *)arg, sizeof(struct ssp_request)); if (ret) { DEBUG(pr_info("sync_serial_input copy from user failed\n")); return -EFAULT; } /* To get the timestamps aligned, make sure that 'len' * is a multiple of IN_DESCR_SIZE. */ if ((req.len % IN_DESCR_SIZE) != 0) { DEBUG(pr_info("sync_serial: req.len %x, IN_DESCR_SIZE %x\n", req.len, IN_DESCR_SIZE)); return -EFAULT; } /* Do the actual read. */ /* Note that req.buf is actually a pointer to user space. */ count = __sync_serial_read(file, req.buf, req.len, NULL, &req.ts); if (count < 0) { DEBUG(pr_info("sync_serial_input read failed\n")); return count; } /* Copy the request back to user-mode. */ ret = copy_to_user((struct ssp_request __user *)arg, &req, sizeof(struct ssp_request)); if (ret) { DEBUG(pr_info("syncser input copy2user failed\n")); return -EFAULT; } /* Return the number of bytes read. 
*/ return count; } static int sync_serial_ioctl_unlocked(struct file *file, unsigned int cmd, unsigned long arg) { int return_val = 0; int dma_w_size = regk_dma_set_w_size1; int dev = iminor(file_inode(file)); struct sync_port *port; reg_sser_rw_tr_cfg tr_cfg; reg_sser_rw_rec_cfg rec_cfg; reg_sser_rw_frm_cfg frm_cfg; reg_sser_rw_cfg gen_cfg; reg_sser_rw_intr_mask intr_mask; if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) { DEBUG(pr_info("Invalid minor %d\n", dev)); return -1; } if (cmd == SSP_INPUT) return sync_serial_input(file, arg); port = &ports[dev]; spin_lock_irq(&port->lock); tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg); rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg); frm_cfg = REG_RD(sser, port->regi_sser, rw_frm_cfg); gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg); intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask); switch (cmd) { case SSP_SPEED: if (GET_SPEED(arg) == CODEC) { unsigned int freq; gen_cfg.base_freq = regk_sser_f32; /* Clock divider will internally be * gen_cfg.clk_div + 1. 
*/ freq = GET_FREQ(arg); switch (freq) { case FREQ_32kHz: case FREQ_64kHz: case FREQ_128kHz: case FREQ_256kHz: gen_cfg.clk_div = 125 * (1 << (freq - FREQ_256kHz)) - 1; break; case FREQ_512kHz: gen_cfg.clk_div = 62; break; case FREQ_1MHz: case FREQ_2MHz: case FREQ_4MHz: gen_cfg.clk_div = 8 * (1 << freq) - 1; break; } } else if (GET_SPEED(arg) == CODEC_f32768) { gen_cfg.base_freq = regk_sser_f32_768; switch (GET_FREQ(arg)) { case FREQ_4096kHz: gen_cfg.clk_div = 7; break; default: spin_unlock_irq(&port->lock); return -EINVAL; } } else { gen_cfg.base_freq = regk_sser_f29_493; switch (GET_SPEED(arg)) { case SSP150: gen_cfg.clk_div = 29493000 / (150 * 8) - 1; break; case SSP300: gen_cfg.clk_div = 29493000 / (300 * 8) - 1; break; case SSP600: gen_cfg.clk_div = 29493000 / (600 * 8) - 1; break; case SSP1200: gen_cfg.clk_div = 29493000 / (1200 * 8) - 1; break; case SSP2400: gen_cfg.clk_div = 29493000 / (2400 * 8) - 1; break; case SSP4800: gen_cfg.clk_div = 29493000 / (4800 * 8) - 1; break; case SSP9600: gen_cfg.clk_div = 29493000 / (9600 * 8) - 1; break; case SSP19200: gen_cfg.clk_div = 29493000 / (19200 * 8) - 1; break; case SSP28800: gen_cfg.clk_div = 29493000 / (28800 * 8) - 1; break; case SSP57600: gen_cfg.clk_div = 29493000 / (57600 * 8) - 1; break; case SSP115200: gen_cfg.clk_div = 29493000 / (115200 * 8) - 1; break; case SSP230400: gen_cfg.clk_div = 29493000 / (230400 * 8) - 1; break; case SSP460800: gen_cfg.clk_div = 29493000 / (460800 * 8) - 1; break; case SSP921600: gen_cfg.clk_div = 29493000 / (921600 * 8) - 1; break; case SSP3125000: gen_cfg.base_freq = regk_sser_f100; gen_cfg.clk_div = 100000000 / (3125000 * 8) - 1; break; } } frm_cfg.wordrate = GET_WORD_RATE(arg); break; case SSP_MODE: switch (arg) { case MASTER_OUTPUT: port->output = 1; port->input = 0; frm_cfg.out_on = regk_sser_tr; frm_cfg.frame_pin_dir = regk_sser_out; gen_cfg.clk_dir = regk_sser_out; break; case SLAVE_OUTPUT: port->output = 1; port->input = 0; frm_cfg.frame_pin_dir = regk_sser_in; 
/*
 * Tail of the ETRAX sync-serial driver. The enclosing ioctl handler
 * (sync_serial_ioctl_unlocked) starts above this chunk; only its
 * mode/frame-sync/polarity/SPI configuration cases are visible here,
 * followed by the write path, manual-mode transmit, DMA helpers,
 * interrupt handlers and module init/exit.
 */
			gen_cfg.clk_dir = regk_sser_in;
			break;
		case MASTER_INPUT:
			port->output = 0;
			port->input = 1;
			frm_cfg.frame_pin_dir = regk_sser_out;
			frm_cfg.out_on = regk_sser_intern_tb;
			gen_cfg.clk_dir = regk_sser_out;
			break;
		case SLAVE_INPUT:
			port->output = 0;
			port->input = 1;
			frm_cfg.frame_pin_dir = regk_sser_in;
			gen_cfg.clk_dir = regk_sser_in;
			break;
		case MASTER_BIDIR:
			port->output = 1;
			port->input = 1;
			frm_cfg.frame_pin_dir = regk_sser_out;
			frm_cfg.out_on = regk_sser_intern_tb;
			gen_cfg.clk_dir = regk_sser_out;
			break;
		case SLAVE_BIDIR:
			port->output = 1;
			port->input = 1;
			frm_cfg.frame_pin_dir = regk_sser_in;
			gen_cfg.clk_dir = regk_sser_in;
			break;
		default:
			spin_unlock_irq(&port->lock);
			return -EINVAL;
		}
		/* Non-DMA input modes are serviced by the rdav interrupt. */
		if (!port->use_dma || arg == MASTER_OUTPUT ||
		    arg == SLAVE_OUTPUT)
			intr_mask.rdav = regk_sser_yes;
		break;
	case SSP_FRAME_SYNC:
		if (arg & NORMAL_SYNC) {
			frm_cfg.rec_delay = 1;
			frm_cfg.tr_delay = 1;
		} else if (arg & EARLY_SYNC)
			frm_cfg.rec_delay = frm_cfg.tr_delay = 0;
		else if (arg & LATE_SYNC) {
			frm_cfg.tr_delay = 2;
			frm_cfg.rec_delay = 2;
		} else if (arg & SECOND_WORD_SYNC) {
			frm_cfg.rec_delay = 7;
			frm_cfg.tr_delay = 1;
		}
		tr_cfg.bulk_wspace = frm_cfg.tr_delay;
		frm_cfg.early_wend = regk_sser_yes;
		if (arg & BIT_SYNC)
			frm_cfg.type = regk_sser_edge;
		else if (arg & WORD_SYNC)
			frm_cfg.type = regk_sser_level;
		else if (arg & EXTENDED_SYNC)
			frm_cfg.early_wend = regk_sser_no;
		if (arg & SYNC_ON)
			frm_cfg.frame_pin_use = regk_sser_frm;
		else if (arg & SYNC_OFF)
			frm_cfg.frame_pin_use = regk_sser_gio0;
		/* sample_size is bits-per-word minus one; 8-bit words also
		 * switch the DMA transfer width. */
		dma_w_size = regk_dma_set_w_size2;
		if (arg & WORD_SIZE_8) {
			rec_cfg.sample_size = tr_cfg.sample_size = 7;
			dma_w_size = regk_dma_set_w_size1;
		} else if (arg & WORD_SIZE_12)
			rec_cfg.sample_size = tr_cfg.sample_size = 11;
		else if (arg & WORD_SIZE_16)
			rec_cfg.sample_size = tr_cfg.sample_size = 15;
		else if (arg & WORD_SIZE_24)
			rec_cfg.sample_size = tr_cfg.sample_size = 23;
		else if (arg & WORD_SIZE_32)
			rec_cfg.sample_size = tr_cfg.sample_size = 31;
		if (arg & BIT_ORDER_MSB)
			rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
		else if (arg & BIT_ORDER_LSB)
			rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_lsbfirst;
		if (arg & FLOW_CONTROL_ENABLE) {
			frm_cfg.status_pin_use = regk_sser_frm;
			rec_cfg.fifo_thr = regk_sser_thr16;
		} else if (arg & FLOW_CONTROL_DISABLE) {
			frm_cfg.status_pin_use = regk_sser_gio0;
			rec_cfg.fifo_thr = regk_sser_inf;
		}
		if (arg & CLOCK_NOT_GATED)
			gen_cfg.gate_clk = regk_sser_no;
		else if (arg & CLOCK_GATED)
			gen_cfg.gate_clk = regk_sser_yes;
		break;
	case SSP_IPOLARITY:
		/* NOTE!! negedge is considered NORMAL */
		if (arg & CLOCK_NORMAL)
			rec_cfg.clk_pol = regk_sser_neg;
		else if (arg & CLOCK_INVERT)
			rec_cfg.clk_pol = regk_sser_pos;
		if (arg & FRAME_NORMAL)
			frm_cfg.level = regk_sser_pos_hi;
		else if (arg & FRAME_INVERT)
			frm_cfg.level = regk_sser_neg_lo;
		if (arg & STATUS_NORMAL)
			gen_cfg.hold_pol = regk_sser_pos;
		else if (arg & STATUS_INVERT)
			gen_cfg.hold_pol = regk_sser_neg;
		break;
	case SSP_OPOLARITY:
		if (arg & CLOCK_NORMAL)
			gen_cfg.out_clk_pol = regk_sser_pos;
		else if (arg & CLOCK_INVERT)
			gen_cfg.out_clk_pol = regk_sser_neg;
		if (arg & FRAME_NORMAL)
			frm_cfg.level = regk_sser_pos_hi;
		else if (arg & FRAME_INVERT)
			frm_cfg.level = regk_sser_neg_lo;
		if (arg & STATUS_NORMAL)
			gen_cfg.hold_pol = regk_sser_pos;
		else if (arg & STATUS_INVERT)
			gen_cfg.hold_pol = regk_sser_neg;
		break;
	case SSP_SPI:
		/* Canned SPI setup: MSB-first 8-bit words, level-type
		 * active-low frame. */
		rec_cfg.fifo_thr = regk_sser_inf;
		rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
		rec_cfg.sample_size = tr_cfg.sample_size = 7;
		frm_cfg.frame_pin_use = regk_sser_frm;
		frm_cfg.type = regk_sser_level;
		frm_cfg.tr_delay = 1;
		frm_cfg.level = regk_sser_neg_lo;
		if (arg & SPI_SLAVE) {
			rec_cfg.clk_pol = regk_sser_neg;
			gen_cfg.clk_dir = regk_sser_in;
			port->input = 1;
			port->output = 0;
		} else {
			gen_cfg.out_clk_pol = regk_sser_pos;
			port->input = 0;
			port->output = 1;
			gen_cfg.clk_dir = regk_sser_out;
		}
		break;
	case SSP_INBUFCHUNK:
		break;
	default:
		return_val = -1;
	}

	if (port->started) {
		rec_cfg.rec_en = port->input;
		gen_cfg.en = (port->output | port->input);
	}

	REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
	REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
	REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
	REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
	REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);

	/* Changing the word size also changes the DMA transfer width; the
	 * port is briefly disabled around the DMA reconfiguration. */
	if (cmd == SSP_FRAME_SYNC && (arg & (WORD_SIZE_8 | WORD_SIZE_12 |
			WORD_SIZE_16 | WORD_SIZE_24 | WORD_SIZE_32))) {
		int en = gen_cfg.en;
		gen_cfg.en = 0;
		REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
		/* ##### Should DMA be stopped before we change dma size? */
		DMA_WR_CMD(port->regi_dmain, dma_w_size);
		DMA_WR_CMD(port->regi_dmaout, dma_w_size);
		gen_cfg.en = en;
		REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
	}
	spin_unlock_irq(&port->lock);
	return return_val;
}

/* Serialized entry point: takes the driver-wide mutex around the real
 * ioctl implementation above. */
static long sync_serial_ioctl(struct file *file,
			      unsigned int cmd, unsigned long arg)
{
	long ret;

	mutex_lock(&sync_serial_mutex);
	ret = sync_serial_ioctl_unlocked(file, cmd, arg);
	mutex_unlock(&sync_serial_mutex);

	return ret;
}

/* NOTE: sync_serial_write does not support concurrency */
static ssize_t sync_serial_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	int dev = iminor(file_inode(file));
	DECLARE_WAITQUEUE(wait, current);
	struct sync_port *port;
	int trunc_count;
	unsigned long flags;
	int bytes_free;
	int out_buf_count;
	unsigned char *rd_ptr;		/* First allocated byte in the buffer */
	unsigned char *wr_ptr;		/* First free byte in the buffer */
	unsigned char *buf_stop_ptr;	/* Last byte + 1 */

	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
		DEBUG(pr_info("Invalid minor %d\n", dev));
		return -ENODEV;
	}
	port = &ports[dev];

	/* |<-         OUT_BUFFER_SIZE                        ->|
	 * |<- out_buf_count ->|
	 *                     |<- trunc_count ->| ...->|
	 *  ______________________________________________________
	 * | free     | data              | free                  |
	 * |__________|___________________|_______________________|
	 *            ^ rd_ptr            ^ wr_ptr
	 */
	DEBUGWRITE(pr_info("W d%d c %u a: %p c: %p\n",
			   port->port_nbr, count,
			   port->active_tr_descr, port->catch_tr_descr));

	/* Read variables that may be updated by interrupts */
	spin_lock_irqsave(&port->lock, flags);
	rd_ptr = port->out_rd_ptr;
	out_buf_count = port->out_buf_count;
	spin_unlock_irqrestore(&port->lock, flags);

	/* Check if resources are available */
	if (port->tr_running &&
	    ((port->use_dma && port->active_tr_descr == port->catch_tr_descr) ||
	     out_buf_count >= OUT_BUFFER_SIZE)) {
		DEBUGWRITE(pr_info("sser%d full\n", dev));
		return -EAGAIN;
	}

	buf_stop_ptr = port->out_buffer + OUT_BUFFER_SIZE;

	/* Determine pointer to the first free byte, before copying. */
	wr_ptr = rd_ptr + out_buf_count;
	if (wr_ptr >= buf_stop_ptr)
		wr_ptr -= OUT_BUFFER_SIZE;

	/* If we wrap the ring buffer, let the user space program handle it by
	 * truncating the data. This could be more elegant, small buffer
	 * fragments may occur. */
	bytes_free = OUT_BUFFER_SIZE - out_buf_count;
	if (wr_ptr + bytes_free > buf_stop_ptr)
		bytes_free = buf_stop_ptr - wr_ptr;
	trunc_count = (count < bytes_free) ? count : bytes_free;

	if (copy_from_user(wr_ptr, buf, trunc_count))
		return -EFAULT;

	DEBUGOUTBUF(pr_info("%-4d + %-4d = %-4d %p %p %p\n",
			    out_buf_count, trunc_count,
			    port->out_buf_count, port->out_buffer,
			    wr_ptr, buf_stop_ptr));

	/* Make sure transmitter/receiver is running */
	if (!port->started) {
		reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
		reg_sser_rw_rec_cfg rec_cfg =
			REG_RD(sser, port->regi_sser, rw_rec_cfg);
		cfg.en = regk_sser_yes;
		rec_cfg.rec_en = port->input;
		REG_WR(sser, port->regi_sser, rw_cfg, cfg);
		REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
		port->started = 1;
	}

	/* Setup wait if blocking */
	if (!(file->f_flags & O_NONBLOCK)) {
		add_wait_queue(&port->out_wait_q, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
	}

	spin_lock_irqsave(&port->lock, flags);
	port->out_buf_count += trunc_count;
	if (port->use_dma) {
#ifdef SYNC_SER_DMA
		start_dma_out(port, wr_ptr, trunc_count);
#endif
	} else if (!port->tr_running) {
#ifdef SYNC_SER_MANUAL
		reg_sser_rw_intr_mask intr_mask;
		intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
		/* Start sender by writing data */
		send_word(port);
		/* and enable transmitter ready IRQ */
		intr_mask.trdy = 1;
		REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
#endif
	}
	spin_unlock_irqrestore(&port->lock, flags);

	/* Exit if non blocking */
	if (file->f_flags & O_NONBLOCK) {
		DEBUGWRITE(pr_info("w d%d c %u %08x\n",
				   port->port_nbr, trunc_count,
				   REG_RD_INT(dma, port->regi_dmaout,
					      r_intr)));
		return trunc_count;
	}

	schedule();
	remove_wait_queue(&port->out_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	DEBUGWRITE(pr_info("w d%d c %u\n", port->port_nbr, trunc_count));
	return trunc_count;
}

/* Thin wrapper; the real work is in __sync_serial_read (defined
 * elsewhere in this file). */
static ssize_t sync_serial_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	return __sync_serial_read(file, buf, count, ppos, NULL);
}

#ifdef SYNC_SER_MANUAL
/* Feed one word from the out ring buffer to the transmit data register.
 * Word sizes above 16 bits are written in two register accesses.
 * Caller holds port->lock (or runs before IRQs are enabled). */
static void send_word(struct sync_port *port)
{
	reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
	reg_sser_rw_tr_data tr_data = {0};

	switch (tr_cfg.sample_size) {
	case 8:
		port->out_buf_count--;
		tr_data.data = *port->out_rd_ptr++;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;
		break;
	case 12:
	{
		int data = (*port->out_rd_ptr++) << 8;
		data |= *port->out_rd_ptr++;
		port->out_buf_count -= 2;
		tr_data.data = data;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;
		break;
	}
	case 16:
		port->out_buf_count -= 2;
		tr_data.data = *(unsigned short *)port->out_rd_ptr;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		port->out_rd_ptr += 2;
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;
		break;
	case 24:
		port->out_buf_count -= 3;
		tr_data.data = *(unsigned short *)port->out_rd_ptr;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		port->out_rd_ptr += 2;
		tr_data.data = *port->out_rd_ptr++;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;
		break;
	case 32:
		port->out_buf_count -= 4;
		tr_data.data = *(unsigned short *)port->out_rd_ptr;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		port->out_rd_ptr += 2;
		tr_data.data = *(unsigned short *)port->out_rd_ptr;
		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
		port->out_rd_ptr += 2;
		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
			port->out_rd_ptr = port->out_buffer;
		break;
	}
}
#endif

#ifdef SYNC_SER_DMA
/* Queue one transmit chunk on the out descriptor ring and (re)start the
 * output DMA channel. Caller holds port->lock. */
static void start_dma_out(struct sync_port *port, const char *data, int count)
{
	port->active_tr_descr->buf = (char *)virt_to_phys((char *)data);
	port->active_tr_descr->after = port->active_tr_descr->buf + count;
	port->active_tr_descr->intr = 1;

	/* Move the end-of-list marker to the new descriptor. */
	port->active_tr_descr->eol = 1;
	port->prev_tr_descr->eol = 0;

	DEBUGTRDMA(pr_info("Inserting eolr:%p eol@:%p\n",
		port->prev_tr_descr, port->active_tr_descr));
	port->prev_tr_descr = port->active_tr_descr;
	port->active_tr_descr = phys_to_virt((int)port->active_tr_descr->next);

	if (!port->tr_running) {
		reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser,
			rw_tr_cfg);

		port->out_context.next = NULL;
		port->out_context.saved_data =
			(dma_descr_data *)virt_to_phys(port->prev_tr_descr);
		port->out_context.saved_data_buf = port->prev_tr_descr->buf;

		DMA_START_CONTEXT(port->regi_dmaout,
			virt_to_phys((char *)&port->out_context));

		tr_cfg.tr_en = regk_sser_yes;
		REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
		DEBUGTRDMA(pr_info("dma s\n"););
	} else {
		DMA_CONTINUE_DATA(port->regi_dmaout);
		DEBUGTRDMA(pr_info("dma c\n"););
	}

	port->tr_running = 1;
}

/* Build the circular input descriptor ring over in_buffer and kick off
 * the receive DMA context. */
static void start_dma_in(struct sync_port *port)
{
	int i;
	char *buf;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	port->writep = port->flip;
	spin_unlock_irqrestore(&port->lock, flags);

	buf = (char *)virt_to_phys(port->in_buffer);
	for (i = 0; i < NBR_IN_DESCR; i++) {
		port->in_descr[i].buf = buf;
		port->in_descr[i].after = buf + port->inbufchunk;
		port->in_descr[i].intr = 1;
		port->in_descr[i].next =
			(dma_descr_data *)virt_to_phys(&port->in_descr[i+1]);
		port->in_descr[i].buf = buf;
		buf += port->inbufchunk;
	}
	/* Link the last descriptor to the first */
	port->in_descr[i-1].next =
		(dma_descr_data *)virt_to_phys(&port->in_descr[0]);
	port->in_descr[i-1].eol = regk_sser_yes;
	port->next_rx_desc = &port->in_descr[0];
	port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1];
	port->in_context.saved_data =
		(dma_descr_data *)virt_to_phys(&port->in_descr[0]);
	port->in_context.saved_data_buf = port->in_descr[0].buf;
	DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context));
}

/* Output-DMA descriptor interrupt: advances catch_tr_descr past completed
 * descriptors, or tears down the transfer when EOL is reached. */
static irqreturn_t tr_interrupt(int irq, void *dev_id)
{
	reg_dma_r_masked_intr masked;
	reg_dma_rw_ack_intr ack_intr = { .data = regk_dma_yes };
	reg_dma_rw_stat stat;
	int i;
	int found = 0;
	int stop_sser = 0;

	for (i = 0; i < NBR_PORTS; i++) {
		struct sync_port *port = &ports[i];
		if (!port->enabled || !port->use_dma)
			continue;

		/* IRQ active for the port? */
		masked = REG_RD(dma, port->regi_dmaout, r_masked_intr);
		if (!masked.data)
			continue;

		found = 1;

		/* Check if we should stop the DMA transfer */
		stat = REG_RD(dma, port->regi_dmaout, rw_stat);
		if (stat.list_state == regk_dma_data_at_eol)
			stop_sser = 1;

		/* Clear IRQ */
		REG_WR(dma, port->regi_dmaout, rw_ack_intr, ack_intr);

		if (!stop_sser) {
			/* The DMA has completed a descriptor, EOL was not
			 * encountered, so step relevant descriptor and
			 * datapointers forward. */
			int sent;
			sent = port->catch_tr_descr->after -
				port->catch_tr_descr->buf;
			DEBUGTXINT(pr_info("%-4d - %-4d = %-4d\t"
					   "in descr %p (ac: %p)\n",
					   port->out_buf_count, sent,
					   port->out_buf_count - sent,
					   port->catch_tr_descr,
					   port->active_tr_descr););
			port->out_buf_count -= sent;
			port->catch_tr_descr =
				phys_to_virt((int) port->catch_tr_descr->next);
			port->out_rd_ptr =
				phys_to_virt((int) port->catch_tr_descr->buf);
		} else {
			reg_sser_rw_tr_cfg tr_cfg;
			int j, sent;
			/* EOL handler.
			 * Note that if an EOL was encountered during the irq
			 * locked section of sync_ser_write the DMA will be
			 * restarted and the eol flag will be cleared.
			 * The remaining descriptors will be traversed by
			 * the descriptor interrupts as usual.
			 */
			j = 0;
			while (!port->catch_tr_descr->eol) {
				sent = port->catch_tr_descr->after -
					port->catch_tr_descr->buf;
				DEBUGOUTBUF(pr_info(
					"traversing descr %p -%d (%d)\n",
					port->catch_tr_descr,
					sent,
					port->out_buf_count));
				port->out_buf_count -= sent;
				port->catch_tr_descr = phys_to_virt(
					(int)port->catch_tr_descr->next);
				j++;
				if (j >= NBR_OUT_DESCR) {
					/* TODO: Reset and recover */
					panic("sync_serial: missing eol");
				}
			}
			sent = port->catch_tr_descr->after -
				port->catch_tr_descr->buf;
			DEBUGOUTBUF(pr_info("eol at descr %p -%d (%d)\n",
				port->catch_tr_descr,
				sent,
				port->out_buf_count));
			port->out_buf_count -= sent;

			/* Update read pointer to first free byte, we
			 * may already be writing data there. */
			port->out_rd_ptr =
				phys_to_virt((int) port->catch_tr_descr->after);
			if (port->out_rd_ptr > port->out_buffer +
					OUT_BUFFER_SIZE)
				port->out_rd_ptr = port->out_buffer;

			tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
			DEBUGTXINT(pr_info(
				"tr_int DMA stop %d, set catch @ %p\n",
				port->out_buf_count,
				port->active_tr_descr));
			if (port->out_buf_count != 0)
				pr_err("sync_ser: buf not empty after eol\n");
			port->catch_tr_descr = port->active_tr_descr;
			port->tr_running = 0;
			tr_cfg.tr_en = regk_sser_no;
			REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
		}
		/* wake up the waiting process */
		wake_up_interruptible(&port->out_wait_q);
	}
	return IRQ_RETVAL(found);
} /* tr_interrupt */

/* Copy one completed receive chunk into the flip buffer, timestamp it,
 * advance the descriptor ring and restart the input DMA. */
static inline void handle_rx_packet(struct sync_port *port)
{
	int idx;
	reg_dma_rw_ack_intr ack_intr = { .data = regk_dma_yes };
	unsigned long flags;

	DEBUGRXINT(pr_info("!"));
	spin_lock_irqsave(&port->lock, flags);

	/* If we overrun the user experience is crap regardless if we
	 * drop new or old data. Its much easier to get it right when
	 * dropping new data so lets do that.
	 */
	if ((port->writep + port->inbufchunk <=
	     port->flip + port->in_buffer_size) &&
	    (port->in_buffer_len + port->inbufchunk < IN_BUFFER_SIZE)) {
		memcpy(port->writep,
		       phys_to_virt((unsigned)port->next_rx_desc->buf),
		       port->inbufchunk);
		port->writep += port->inbufchunk;
		if (port->writep >= port->flip + port->in_buffer_size)
			port->writep = port->flip;

		/* Timestamp the new data chunk. */
		if (port->write_ts_idx == NBR_IN_DESCR)
			port->write_ts_idx = 0;
		idx = port->write_ts_idx++;
		do_posix_clock_monotonic_gettime(&port->timestamp[idx]);
		port->in_buffer_len += port->inbufchunk;
	}
	spin_unlock_irqrestore(&port->lock, flags);

	port->next_rx_desc->eol = 1;
	port->prev_rx_desc->eol = 0;
	/* Cache bug workaround */
	flush_dma_descr(port->prev_rx_desc, 0);
	port->prev_rx_desc = port->next_rx_desc;
	port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next);
	/* Cache bug workaround */
	flush_dma_descr(port->prev_rx_desc, 1);

	/* wake up the waiting process */
	wake_up_interruptible(&port->in_wait_q);
	DMA_CONTINUE(port->regi_dmain);
	REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr);

}

/* Input-DMA interrupt: drain all completed descriptors for each port. */
static irqreturn_t rx_interrupt(int irq, void *dev_id)
{
	reg_dma_r_masked_intr masked;

	int i;
	int found = 0;

	DEBUG(pr_info("rx_interrupt\n"));

	for (i = 0; i < NBR_PORTS; i++) {
		struct sync_port *port = &ports[i];

		if (!port->enabled || !port->use_dma)
			continue;

		masked = REG_RD(dma, port->regi_dmain, r_masked_intr);

		if (!masked.data)
			continue;

		/* Descriptor interrupt */
		found = 1;
		while (REG_RD(dma, port->regi_dmain, rw_data) !=
				virt_to_phys(port->next_rx_desc))
			handle_rx_packet(port);
	}
	return IRQ_RETVAL(found);
} /* rx_interrupt */
#endif /* SYNC_SER_DMA */

#ifdef SYNC_SER_MANUAL
/* Non-DMA (PIO) interrupt handler: moves received words into the flip
 * buffer and refills the transmit register from the out ring. */
static irqreturn_t manual_interrupt(int irq, void *dev_id)
{
	unsigned long flags;
	int i;
	int found = 0;
	reg_sser_r_masked_intr masked;

	for (i = 0; i < NBR_PORTS; i++) {
		struct sync_port *port = &ports[i];

		if (!port->enabled || port->use_dma)
			continue;

		masked = REG_RD(sser, port->regi_sser, r_masked_intr);
		/* Data received? */
		if (masked.rdav) {
			reg_sser_rw_rec_cfg rec_cfg =
				REG_RD(sser, port->regi_sser, rw_rec_cfg);
			reg_sser_r_rec_data data =
				REG_RD(sser, port->regi_sser, r_rec_data);
			found = 1;
			/* Read data */
			spin_lock_irqsave(&port->lock, flags);
			switch (rec_cfg.sample_size) {
			case 8:
				*port->writep++ = data.data & 0xff;
				break;
			case 12:
				*port->writep = (data.data & 0x0ff0) >> 4;
				*(port->writep + 1) = data.data & 0x0f;
				port->writep += 2;
				break;
			case 16:
				*(unsigned short *)port->writep = data.data;
				port->writep += 2;
				break;
			case 24:
				*(unsigned int *)port->writep = data.data;
				port->writep += 3;
				break;
			case 32:
				*(unsigned int *)port->writep = data.data;
				port->writep += 4;
				break;
			}

			/* Wrap? */
			if (port->writep >= port->flip + port->in_buffer_size)
				port->writep = port->flip;
			if (port->writep == port->readp) {
				/* Receive buf overrun, discard oldest data */
				port->readp++;
				/* Wrap? */
				if (port->readp >= port->flip +
						port->in_buffer_size)
					port->readp = port->flip;
			}
			spin_unlock_irqrestore(&port->lock, flags);
			if (sync_data_avail(port) >= port->inbufchunk)
				/* Wake up application */
				wake_up_interruptible(&port->in_wait_q);
		}

		/* Transmitter ready? */
		if (masked.trdy) {
			found = 1;
			/* More data to send */
			if (port->out_buf_count > 0)
				send_word(port);
			else {
				/* Transmission finished */
				reg_sser_rw_intr_mask intr_mask;
				intr_mask = REG_RD(sser, port->regi_sser,
						   rw_intr_mask);
				intr_mask.trdy = 0;
				REG_WR(sser, port->regi_sser,
				       rw_intr_mask, intr_mask);
				/* Wake up application */
				wake_up_interruptible(&port->out_wait_q);
			}
		}
	}
	return IRQ_RETVAL(found);
}
#endif

/* Module init: register the char device region, set up the cdev, create
 * the sysfs class and bring up the configured ports. */
static int __init etrax_sync_serial_init(void)
{
#if 1
	/* This code will be removed when we move to udev for all devices. */
	syncser_first = MKDEV(SYNC_SERIAL_MAJOR, 0);
	if (register_chrdev_region(syncser_first, minor_count, SYNCSER_NAME)) {
		pr_err("Failed to register major %d\n", SYNC_SERIAL_MAJOR);
		return -1;
	}
#else
	/* Allocate dynamic major number. */
	if (alloc_chrdev_region(&syncser_first, 0, minor_count,
				SYNCSER_NAME)) {
		pr_err("Failed to allocate character device region\n");
		return -1;
	}
#endif
	syncser_cdev = cdev_alloc();
	if (!syncser_cdev) {
		pr_err("Failed to allocate cdev for syncser\n");
		unregister_chrdev_region(syncser_first, minor_count);
		return -1;
	}
	cdev_init(syncser_cdev, &syncser_fops);

	/* Create a sysfs class for syncser */
	syncser_class = class_create(THIS_MODULE, "syncser_class");

	/* Initialize Ports */
#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
	if (artpec_pinmux_alloc_fixed(PINMUX_SSER0)) {
		pr_warn("Unable to alloc pins for synchronous serial port 0\n");
		unregister_chrdev_region(syncser_first, minor_count);
		return -EIO;
	}
	initialize_port(0);
	ports[0].enabled = 1;
	/* Register with sysfs so udev can pick it up. */
	device_create(syncser_class, NULL, syncser_first, NULL,
		      "%s%d", SYNCSER_NAME, 0);
#endif

#if defined(CONFIG_ETRAXFS) && defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
	if (artpec_pinmux_alloc_fixed(PINMUX_SSER1)) {
		pr_warn("Unable to alloc pins for synchronous serial port 1\n");
		unregister_chrdev_region(syncser_first, minor_count);
		class_destroy(syncser_class);
		return -EIO;
	}
	initialize_port(1);
	ports[1].enabled = 1;
	/* Register with sysfs so udev can pick it up. */
	device_create(syncser_class, NULL, syncser_first, NULL,
		      "%s%d", SYNCSER_NAME, 0);
#endif

	/* Add it to system */
	if (cdev_add(syncser_cdev, syncser_first, minor_count) < 0) {
		pr_err("Failed to add syncser as char device\n");
		device_destroy(syncser_class, syncser_first);
		class_destroy(syncser_class);
		cdev_del(syncser_cdev);
		unregister_chrdev_region(syncser_first, minor_count);
		return -1;
	}

	pr_info("ARTPEC synchronous serial port (%s: %d, %d)\n",
		SYNCSER_NAME, MAJOR(syncser_first), MINOR(syncser_first));

	return 0;
}

/* Module exit: undo everything etrax_sync_serial_init and the per-port
 * IRQ setup did. */
static void __exit etrax_sync_serial_exit(void)
{
	int i;
	device_destroy(syncser_class, syncser_first);
	class_destroy(syncser_class);
	if (syncser_cdev) {
		cdev_del(syncser_cdev);
		unregister_chrdev_region(syncser_first, minor_count);
	}
	for (i = 0; i < NBR_PORTS; i++) {
		struct sync_port *port = &ports[i];
		if (port->init_irqs == dma_irq_setup) {
			/* Free dma irqs and dma channels. */
#ifdef SYNC_SER_DMA
			artpec_free_dma(port->dma_in_nbr);
			artpec_free_dma(port->dma_out_nbr);
			free_irq(port->dma_out_intr_vect, port);
			free_irq(port->dma_in_intr_vect, port);
#endif
		} else if (port->init_irqs == manual_irq_setup) {
			/* Free manual irq. */
			free_irq(port->syncser_intr_vect, port);
		}
	}

	pr_info("ARTPEC synchronous serial port unregistered\n");
}

module_init(etrax_sync_serial_init);
module_exit(etrax_sync_serial_exit);

MODULE_LICENSE("GPL");
gpl-2.0
jcbless/linux-xlnx
drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
1140
3011
/******************************************************************************
 * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
 *
 * Based on the r8180 driver, which is:
 * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * wlanfae <wlanfae@realtek.com>
 *****************************************************************************/
#include "rtl_pci.h"
#include "rtl_core.h"

/*
 * Cache the PCIe Link Control register in the driver private data and apply
 * two vendor-specific config-space tweaks (set bit 4 at offset 0x98, write
 * 0x17 to offset 0x70f).  The exact semantics of these registers are not
 * documented here — NOTE(review): presumably ASPM-related; confirm against
 * the Realtek datasheet before changing.
 */
static void rtl8192_parse_pci_configuration(struct pci_dev *pdev,
					    struct net_device *dev)
{
	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
	u8 tmp;
	u16 LinkCtrlReg;

	/* Only the low byte of the Link Control register is kept. */
	pcie_capability_read_word(priv->pdev, PCI_EXP_LNKCTL, &LinkCtrlReg);
	priv->NdisAdapter.LinkCtrlReg = (u8)LinkCtrlReg;

	RT_TRACE(COMP_INIT, "Link Control Register =%x\n",
		 priv->NdisAdapter.LinkCtrlReg);

	pci_read_config_byte(pdev, 0x98, &tmp);
	tmp |= BIT4;
	pci_write_config_byte(pdev, 0x98, tmp);

	tmp = 0x17;
	pci_write_config_byte(pdev, 0x70f, tmp);
}

/*
 * Identify the NIC behind @pdev, check it matches the hardware the bound
 * ops structure was written for, and apply the PCI configuration tweaks.
 *
 * Returns true when the adapter is recognized and matches priv->ops,
 * false otherwise (unknown revision or ops/hardware mismatch).
 *
 * Fixes vs. original: user-visible log strings are no longer split across
 * string literals (kernel coding style: breaking printable strings defeats
 * grepping), and the misspelled local "VenderID" is now "VendorID".
 */
bool rtl8192_pci_findadapter(struct pci_dev *pdev, struct net_device *dev)
{
	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
	u16 VendorID;
	u16 DeviceID;
	u8 RevisionID;
	u16 IrqLine;

	VendorID = pdev->vendor;
	DeviceID = pdev->device;
	RevisionID = pdev->revision;
	pci_read_config_word(pdev, 0x3C, &IrqLine);

	priv->card_8192 = priv->ops->nic_type;

	if (DeviceID == 0x8172) {
		switch (RevisionID) {
		case HAL_HW_PCI_REVISION_ID_8192PCIE:
			printk(KERN_INFO
			       "Adapter(8192 PCI-E) is found - DeviceID=%x\n",
			       DeviceID);
			priv->card_8192 = NIC_8192E;
			break;
		case HAL_HW_PCI_REVISION_ID_8192SE:
			printk(KERN_INFO
			       "Adapter(8192SE) is found - DeviceID=%x\n",
			       DeviceID);
			priv->card_8192 = NIC_8192SE;
			break;
		default:
			printk(KERN_INFO "UNKNOWN nic type(%4x:%4x)\n",
			       pdev->vendor, pdev->device);
			priv->card_8192 = NIC_UNKNOWN;
			return false;
		}
	}

	if (priv->ops->nic_type != priv->card_8192) {
		printk(KERN_INFO
		       "Detect info(%x) and hardware info(%x) not match!\n",
		       priv->ops->nic_type, priv->card_8192);
		printk(KERN_INFO
		       "Please select proper driver before install!!!!\n");
		return false;
	}

	rtl8192_parse_pci_configuration(pdev, dev);

	return true;
}
gpl-2.0
sbrissen/android_kernel_samsung_smdk4210_new
arch/parisc/kernel/cache.c
2164
13735
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);


/* On some machines (e.g. ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software.  We put a spinlock around all TLB flushes  to
 * ensure this.
 */
DEFINE_SPINLOCK(pa_tlb_lock);

struct pdc_cache_info cache_info __read_mostly;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __read_mostly;
#endif

#ifdef CONFIG_SMP
/* Flush the data cache on every CPU. */
void
flush_data_cache(void)
{
	on_each_cpu(flush_data_cache_local, NULL, 1);
}
/* Flush the instruction cache on every CPU. */
void 
flush_instruction_cache(void)
{
	on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
	flush_instruction_cache_local(NULL);
	flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

/* Flush a page that was marked dirty while its mapping was torn down. */
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	struct page *page = pte_page(*ptep);

	if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
	    test_bit(PG_dcache_dirty, &page->flags)) {

		flush_kernel_dcache_page(page);
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page(page);
}

/* /proc/cpuinfo helper: pretty-print the PDC-reported cache/TLB geometry. */
void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n", 
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);
		
#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n" );
	} else {
		seq_printf(m, 
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb, 
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

/* Query PDC for cache/TLB configuration and derive the flush strides. */
void __init 
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_pad1);

	printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_pad1);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
			"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull 
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

/* Turn off space-register hashing for the CPU types that support it. */
void disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2: /* pcxl2 doesn't support space register hashing */
		return;

	default: /* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

/* Flush one user page through the flush-only alias mapping; the icache is
 * only flushed for executable mappings. */
static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page(page);

	if (!mapping)
		return;

	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	/* We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent */

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* The TLB is the engine of coherence on parisc: The
		 * CPU is entitled to speculate any page with a TLB
		 * mapping, so here we kill the mapping then flush the
		 * page along a special flush only alias mapping.
		 * This guarantees that the page is no-longer in the
		 * cache for any process and nor may it be
		 * speculatively read in (until the user or kernel
		 * specifically accesses it, of course) */

		flush_tlb_page(mpnt, addr);
		if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
			__flush_cache_page(mpnt, addr, page_to_phys(page));
			if (old_addr)
				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
			old_addr = addr;
		}
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

/* TLB-flush-protected wrapper around the assembly page-clear helper. */
void clear_user_page_asm(void *page, unsigned long vaddr)
{
	unsigned long flags;
	/* This function is implemented in assembly in pacache.S */
	extern void __clear_user_page_asm(void *page, unsigned long vaddr);

	purge_tlb_start(flags);
	__clear_user_page_asm(page, vaddr);
	purge_tlb_end(flags);
}

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;

/* Time a whole-cache flush against a ranged flush and pick the crossover
 * size above which flushing everything is cheaper. */
void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	/* Racy, but if we see an intermediate value, it's ok too... */
	parisc_cache_flush_threshold = size * alltime / rangetime;

	parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) &~ (L1_CACHE_BYTES - 1); 
	if (!parisc_cache_flush_threshold)
		parisc_cache_flush_threshold = FLUSH_THRESHOLD;

	if (parisc_cache_flush_threshold > cache_info.dc_size)
		parisc_cache_flush_threshold = cache_info.dc_size;

	printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
}

extern void purge_kernel_dcache_page(unsigned long);
extern void clear_user_page_asm(void *page, unsigned long vaddr);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	unsigned long flags;

	purge_kernel_dcache_page((unsigned long)page);
	purge_tlb_start(flags);
	pdtlb_kernel(page);
	purge_tlb_end(flags);
	clear_user_page_asm(page, vaddr);
}
EXPORT_SYMBOL(clear_user_page);

void flush_kernel_dcache_page_addr(void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb_kernel(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
	struct page *pg)
{
	/* no coherency needed (all in kmap/kunmap) */
	copy_user_page_asm(vto, vfrom);
	if (!parisc_requires_coherency())
		flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(copy_user_page);

#ifdef CONFIG_PA8X00

void kunmap_parisc(void *addr)
{
	if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(addr);
}
EXPORT_SYMBOL(kunmap_parisc);
#endif

/* Purge TLB entries for [start, end) in space @sid; falls back to a full
 * flush for large ranges. */
void __flush_tlb_range(unsigned long sid, unsigned long start,
		       unsigned long end)
{
	unsigned long npages;

	npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
		flush_tlb_all();
	else {
		unsigned long flags;

		mtsp(sid, 1);
		purge_tlb_start(flags);
		if (split_tlb) {
			while (npages--) {
				pdtlb(start);
				pitlb(start);
				start += PAGE_SIZE;
			}
		} else {
			while (npages--) {
				pdtlb(start);
				start += PAGE_SIZE;
			}
		}
		purge_tlb_end(flags);
	}
}

static void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

void flush_cache_mm(struct mm_struct *mm)
{
#ifdef CONFIG_SMP
	flush_cache_all();
#else
	flush_cache_all_local();
#endif
}

void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_dcache_range_asm(start,end);
	else
		flush_data_cache();
}

void
flush_user_icache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_icache_range_asm(start,end);
	else
		flush_instruction_cache();
}

/* Only a range in the current address space (sr3) can be flushed by
 * address; anything else needs a full flush. */
void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int sr3;

	BUG_ON(!vma->vm_mm->context);

	sr3 = mfsp(3);
	if (vma->vm_mm->context == sr3) {
		flush_user_dcache_range(start,end);
		flush_user_icache_range(start,end);
	} else {
		flush_cache_all();
	}
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	BUG_ON(!vma->vm_mm->context);

	flush_tlb_page(vma, vmaddr);
	__flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));

}
gpl-2.0
SchulerControl/linux
drivers/net/dsa/mv88e6131.c
2676
10904
/*
 * net/dsa/mv88e6131.c - Marvell 88e6095/6095f/6131 switch chip support
 * Copyright (c) 2008-2009 Marvell Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <net/dsa.h>
#include "mv88e6xxx.h"

/* Switch product IDs */
#define ID_6085		0x04a0
#define ID_6095		0x0950
#define ID_6131		0x1060

/* Read the product ID from port 0's switch-ID register and return a
 * human-readable chip name, or NULL if this is not a supported device. */
static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr)
{
	int ret;

	ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
	if (ret >= 0) {
		ret &= 0xfff0;	/* mask off the revision bits */
		if (ret == ID_6085)
			return "Marvell 88E6085";
		if (ret == ID_6095)
			return "Marvell 88E6095/88E6095F";
		if (ret == ID_6131)
			return "Marvell 88E6131";
	}

	return NULL;
}

/* Disable all ports, reset the switch and poll (up to 1s) for the reset
 * completion bits.  Returns 0 or -ETIMEDOUT. */
static int mv88e6131_switch_reset(struct dsa_switch *ds)
{
	int i;
	int ret;
	unsigned long timeout;

	/* Set all ports to the disabled state. */
	for (i = 0; i < 11; i++) {
		ret = REG_READ(REG_PORT(i), 0x04);
		REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
	}

	/* Wait for transmit queues to drain. */
	usleep_range(2000, 4000);

	/* Reset the switch. */
	REG_WRITE(REG_GLOBAL, 0x04, 0xc400);

	/* Wait up to one second for reset to complete. */
	timeout = jiffies + 1 * HZ;
	while (time_before(jiffies, timeout)) {
		ret = REG_READ(REG_GLOBAL, 0x00);
		if ((ret & 0xc800) == 0xc800)
			break;

		usleep_range(1000, 2000);
	}
	if (time_after(jiffies, timeout))
		return -ETIMEDOUT;

	return 0;
}

/* Program the chip-wide (global/global2) configuration registers. */
static int mv88e6131_setup_global(struct dsa_switch *ds)
{
	int ret;
	int i;

	/* Enable the PHY polling unit, don't discard packets with
	 * excessive collisions, use a weighted fair queueing scheme
	 * to arbitrate between packet queues, set the maximum frame
	 * size to 1632, and mask all interrupt sources.
	 */
	REG_WRITE(REG_GLOBAL, 0x04, 0x4400);

	/* Set the default address aging time to 5 minutes, and
	 * enable address learn messages to be sent to all message
	 * ports.
	 */
	REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);

	/* Configure the priority mapping registers. */
	ret = mv88e6xxx_config_prio(ds);
	if (ret < 0)
		return ret;

	/* Set the VLAN ethertype to 0x8100. */
	REG_WRITE(REG_GLOBAL, 0x19, 0x8100);

	/* Disable ARP mirroring, and configure the upstream port as
	 * the port to which ingress and egress monitor frames are to
	 * be sent.
	 */
	REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1100) | 0x00f0);

	/* Disable cascade port functionality unless this device
	 * is used in a cascade configuration, and set the switch's
	 * DSA device number.
	 */
	if (ds->dst->pd->nr_chips > 1)
		REG_WRITE(REG_GLOBAL, 0x1c, 0xf000 | (ds->index & 0x1f));
	else
		REG_WRITE(REG_GLOBAL, 0x1c, 0xe000 | (ds->index & 0x1f));

	/* Send all frames with destination addresses matching
	 * 01:80:c2:00:00:0x to the CPU port.
	 */
	REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);

	/* Ignore removed tag data on doubly tagged packets, disable
	 * flow control messages, force flow control priority to the
	 * highest, and send all special multicast frames to the CPU
	 * port at the highest priority.
	 */
	REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);

	/* Program the DSA routing table. */
	for (i = 0; i < 32; i++) {
		int nexthop;

		nexthop = 0x1f;	/* default: no route */
		if (i != ds->index && i < ds->dst->pd->nr_chips)
			nexthop = ds->pd->rtable[i] & 0x1f;

		REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
	}

	/* Clear all trunk masks. */
	for (i = 0; i < 8; i++)
		REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0x7ff);

	/* Clear all trunk mappings. */
	for (i = 0; i < 16; i++)
		REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));

	/* Force the priority of IGMP/MLD snoop frames and ARP frames
	 * to the highest setting.
	 */
	REG_WRITE(REG_GLOBAL2, 0x0f, 0x00ff);

	return 0;
}

/* Program the per-port configuration registers for port 'p'. */
static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
{
	struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
	int addr = REG_PORT(p);
	u16 val;

	/* MAC Forcing register: don't force link, speed, duplex
	 * or flow control state to any particular values on physical
	 * ports, but force the CPU port and all DSA ports to 1000 Mb/s
	 * (100 Mb/s on 6085) full duplex.
	 *
	 * NOTE(review): the unbraced if/else-if/else below relies on
	 * dangling-else binding; it parses as intended, but braces
	 * would be clearer.
	 */
	if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
		if (ps->id == ID_6085)
			REG_WRITE(addr, 0x01, 0x003d); /* 100 Mb/s */
		else
			REG_WRITE(addr, 0x01, 0x003e); /* 1000 Mb/s */
	else
		REG_WRITE(addr, 0x01, 0x0003);

	/* Port Control: disable Core Tag, disable Drop-on-Lock,
	 * transmit frames unmodified, disable Header mode,
	 * enable IGMP/MLD snoop, disable DoubleTag, disable VLAN
	 * tunneling, determine priority by looking at 802.1p and
	 * IP priority fields (IP prio has precedence), and set STP
	 * state to Forwarding.
	 *
	 * If this is the upstream port for this switch, enable
	 * forwarding of unknown unicasts, and enable DSA tagging
	 * mode.
	 *
	 * If this is the link to another switch, use DSA tagging
	 * mode, but do not enable forwarding of unknown unicasts.
	 */
	val = 0x0433;
	if (p == dsa_upstream_port(ds)) {
		val |= 0x0104;

		/* On 6085, unknown multicast forward is controlled
		 * here rather than in Port Control 2 register.
		 */
		if (ps->id == ID_6085)
			val |= 0x0008;
	}
	if (ds->dsa_port_mask & (1 << p))
		val |= 0x0100;
	REG_WRITE(addr, 0x04, val);

	/* Port Control 1: disable trunking.  Also, if this is the
	 * CPU port, enable learn messages to be sent to this port.
	 */
	REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000);

	/* Port based VLAN map: give each port its own address
	 * database, allow the CPU port to talk to each of the 'real'
	 * ports, and allow each of the 'real' ports to only talk to
	 * the upstream port.
	 */
	val = (p & 0xf) << 12;
	if (dsa_is_cpu_port(ds, p))
		val |= ds->phys_port_mask;
	else
		val |= 1 << dsa_upstream_port(ds);
	REG_WRITE(addr, 0x06, val);

	/* Default VLAN ID and priority: don't set a default VLAN
	 * ID, and set the default packet priority to zero.
	 */
	REG_WRITE(addr, 0x07, 0x0000);

	/* Port Control 2: don't force a good FCS, don't use
	 * VLAN-based, source address-based or destination
	 * address-based priority overrides, don't let the switch
	 * add or strip 802.1q tags, don't discard tagged or
	 * untagged frames on this port, do a destination address
	 * lookup on received packets as usual, don't send a copy
	 * of all transmitted/received frames on this port to the
	 * CPU, and configure the upstream port number.
	 *
	 * If this is the upstream port for this switch, enable
	 * forwarding of unknown multicast addresses.
	 */
	if (ps->id == ID_6085)
		/* on 6085, bits 3:0 are reserved, bit 6 control ARP
		 * mirroring, and multicast forward is handled in
		 * Port Control register.
		 */
		REG_WRITE(addr, 0x08, 0x0080);
	else {
		val = 0x0080 | dsa_upstream_port(ds);
		if (p == dsa_upstream_port(ds))
			val |= 0x0040;
		REG_WRITE(addr, 0x08, val);
	}

	/* Rate Control: disable ingress rate limiting. */
	REG_WRITE(addr, 0x09, 0x0000);

	/* Rate Control 2: disable egress rate limiting. */
	REG_WRITE(addr, 0x0a, 0x0000);

	/* Port Association Vector: when learning source addresses
	 * of packets, add the address to the address database using
	 * a port bitmap that has only the bit for this port set and
	 * the other bits clear.
	 */
	REG_WRITE(addr, 0x0b, 1 << p);

	/* Tag Remap: use an identity 802.1p prio -> switch prio
	 * mapping.
	 */
	REG_WRITE(addr, 0x18, 0x3210);

	/* Tag Remap 2: use an identity 802.1p prio -> switch prio
	 * mapping.
	 */
	REG_WRITE(addr, 0x19, 0x7654);

	return 0;
}

/* dsa_switch_driver.setup: reset the chip, then program the global and
 * all eleven per-port register sets. */
static int mv88e6131_setup(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
	int i;
	int ret;

	mutex_init(&ps->smi_mutex);
	mv88e6xxx_ppu_state_init(ds);
	mutex_init(&ps->stats_mutex);
	ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0;

	ret = mv88e6131_switch_reset(ds);
	if (ret < 0)
		return ret;

	/* @@@ initialise vtu and atu */

	ret = mv88e6131_setup_global(ds);
	if (ret < 0)
		return ret;

	for (i = 0; i < 11; i++) {
		ret = mv88e6131_setup_port(ds, i);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Ports map 1:1 to PHY addresses; -1 for out-of-range ports.
 * NOTE(review): callers below pass a -1 result straight to the
 * mv88e6xxx_phy_*_ppu helpers — presumably those reject it; verify. */
static int mv88e6131_port_to_phy_addr(int port)
{
	if (port >= 0 && port <= 11)
		return port;
	return -1;
}

static int
mv88e6131_phy_read(struct dsa_switch *ds, int port, int regnum)
{
	int addr = mv88e6131_port_to_phy_addr(port);
	return mv88e6xxx_phy_read_ppu(ds, addr, regnum);
}

static int
mv88e6131_phy_write(struct dsa_switch *ds,
			      int port, int regnum, u16 val)
{
	int addr = mv88e6131_port_to_phy_addr(port);
	return mv88e6xxx_phy_write_ppu(ds, addr, regnum, val);
}

/* Hardware statistics counters: { name, width in bytes, register }. */
static struct mv88e6xxx_hw_stat mv88e6131_hw_stats[] = {
	{ "in_good_octets", 8, 0x00, },
	{ "in_bad_octets", 4, 0x02, },
	{ "in_unicast", 4, 0x04, },
	{ "in_broadcasts", 4, 0x06, },
	{ "in_multicasts", 4, 0x07, },
	{ "in_pause", 4, 0x16, },
	{ "in_undersize", 4, 0x18, },
	{ "in_fragments", 4, 0x19, },
	{ "in_oversize", 4, 0x1a, },
	{ "in_jabber", 4, 0x1b, },
	{ "in_rx_error", 4, 0x1c, },
	{ "in_fcs_error", 4, 0x1d, },
	{ "out_octets", 8, 0x0e, },
	{ "out_unicast", 4, 0x10, },
	{ "out_broadcasts", 4, 0x13, },
	{ "out_multicasts", 4, 0x12, },
	{ "out_pause", 4, 0x15, },
	{ "excessive", 4, 0x11, },
	{ "collisions", 4, 0x1e, },
	{ "deferred", 4, 0x05, },
	{ "single", 4, 0x14, },
	{ "multiple", 4, 0x17, },
	{ "out_fcs_error", 4, 0x03, },
	{ "late", 4, 0x1f, },
	{ "hist_64bytes", 4, 0x08, },
	{ "hist_65_127bytes", 4, 0x09, },
	{ "hist_128_255bytes", 4, 0x0a, },
	{ "hist_256_511bytes", 4, 0x0b, },
	{ "hist_512_1023bytes", 4, 0x0c, },
	{ "hist_1024_max_bytes", 4, 0x0d, },
};

static void
mv88e6131_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
{
	mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6131_hw_stats),
			      mv88e6131_hw_stats, port, data);
}

static void
mv88e6131_get_ethtool_stats(struct dsa_switch *ds,
				  int port, uint64_t *data)
{
	mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6131_hw_stats),
				    mv88e6131_hw_stats, port, data);
}

static int mv88e6131_get_sset_count(struct dsa_switch *ds)
{
	return ARRAY_SIZE(mv88e6131_hw_stats);
}

struct dsa_switch_driver mv88e6131_switch_driver = {
	.tag_protocol		= cpu_to_be16(ETH_P_DSA),
	.priv_size		= sizeof(struct mv88e6xxx_priv_state),
	.probe			= mv88e6131_probe,
	.setup			= mv88e6131_setup,
	.set_addr		= mv88e6xxx_set_addr_direct,
	.phy_read		= mv88e6131_phy_read,
	.phy_write		= mv88e6131_phy_write,
	.poll_link		= mv88e6xxx_poll_link,
	.get_strings		= mv88e6131_get_strings,
	.get_ethtool_stats	= mv88e6131_get_ethtool_stats,
	.get_sset_count		= mv88e6131_get_sset_count,
};

MODULE_ALIAS("platform:mv88e6085");
MODULE_ALIAS("platform:mv88e6095");
MODULE_ALIAS("platform:mv88e6095f");
MODULE_ALIAS("platform:mv88e6131");
gpl-2.0
mickael-guene/kernel
drivers/staging/vt6656/michael.c
2676
3958
/*
 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 *
 * File: michael.c
 *
 * Purpose: Implementation of the Michael message integrity check (MIC)
 *          keyed hash used by TKIP.
 *
 * Author: Kyle Hsu
 *
 * Date: Sep 4, 2002
 *
 * Functions:
 *      s_vClear - Reset the state to the empty message.
 *      s_vSetKey - Set the key.
 *      MIC_vInit - Set the key.
 *      s_vAppendByte - Append the byte to our word-sized buffer.
 *      MIC_vAppend - call s_vAppendByte.
 *      MIC_vGetMIC - Append the minimum padding and call s_vAppendByte.
 *
 * Revision History:
 *
 */

#include "tmacro.h"
#include "michael.h"

static void s_vClear(void);		/* Clear the internal message,
					 * resets the object to the
					 * state just after construction. */
static void s_vSetKey(u32 dwK0, u32 dwK1);
static void s_vAppendByte(u8 b);	/* Add a single byte to the internal
					 * message */

/* Michael keeps its running state in file-scope statics, so only one
 * MIC computation can be in flight at a time (not reentrant). */
static u32 L, R;		/* Current state */
static u32 K0, K1;		/* Key */
static u32 M;			/* Message accumulator (single word) */
static unsigned int nBytesInM;	/* # bytes in M */

static void s_vClear(void)
{
	/* Reset the state to the empty message. */
	L = K0;
	R = K1;
	nBytesInM = 0;
	M = 0;
}

static void s_vSetKey(u32 dwK0, u32 dwK1)
{
	/* Set the key */
	K0 = dwK0;
	K1 = dwK1;
	/* and reset the message */
	s_vClear();
}

static void s_vAppendByte(u8 b)
{
	/* Append the byte to our word-sized buffer.  The cast to u32 is
	 * required: without it, b is promoted to (signed) int, and for
	 * nBytesInM == 3 a byte value >= 0x80 would be shifted into the
	 * sign bit — undefined behavior in C. */
	M |= (u32)b << (8 * nBytesInM);
	nBytesInM++;
	/* Process the word if it is full. */
	if (nBytesInM >= 4) {
		/* One round of the Michael block function. */
		L ^= M;
		R ^= ROL32(L, 17);
		L += R;
		R ^= ((L & 0xff00ff00) >> 8) | ((L & 0x00ff00ff) << 8);
		L += R;
		R ^= ROL32(L, 3);
		L += R;
		R ^= ROR32(L, 2);
		L += R;
		/* Clear the buffer */
		M = 0;
		nBytesInM = 0;
	}
}

/* Set the 64-bit MIC key (two 32-bit words) and reset the state. */
void MIC_vInit(u32 dwK0, u32 dwK1)
{
	/* Set the key */
	s_vSetKey(dwK0, dwK1);
}

void MIC_vUnInit(void)
{
	/* Wipe the key material */
	K0 = 0;
	K1 = 0;

	/* And the other fields as well. */
	/* Note that this sets (L,R) to (K0,K1) which is just fine. */
	s_vClear();
}

/* Feed nBytes of message data into the running MIC computation. */
void MIC_vAppend(u8 *src, unsigned int nBytes)
{
	/* This is simple */
	while (nBytes > 0) {
		s_vAppendByte(*src++);
		nBytes--;
	}
}

/* Finalize: append the 0x5a terminator plus zero padding to a word
 * boundary, return the 64-bit MIC in (*pdwL, *pdwR), and reset. */
void MIC_vGetMIC(u32 *pdwL, u32 *pdwR)
{
	/* Append the minimum padding */
	s_vAppendByte(0x5a);
	s_vAppendByte(0);
	s_vAppendByte(0);
	s_vAppendByte(0);
	s_vAppendByte(0);
	/* and then zeroes until the length is a multiple of 4 */
	while (nBytesInM != 0)
		s_vAppendByte(0);
	/* The s_vAppendByte function has already computed the result. */
	*pdwL = L;
	*pdwR = R;
	/* Reset to the empty message. */
	s_vClear();
}
gpl-2.0
droidroidz/d2usc-tw-jb
fs/sysv/inode.c
2932
9858
/*
 *  linux/fs/sysv/inode.c
 *
 *  minix/inode.c
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  xenix/inode.c
 *  Copyright (C) 1992  Doug Evans
 *
 *  coh/inode.c
 *  Copyright (C) 1993  Pascal Haible, Bruno Haible
 *
 *  sysv/inode.c
 *  Copyright (C) 1993  Paul B. Monday
 *
 *  sysv/inode.c
 *  Copyright (C) 1993  Bruno Haible
 *  Copyright (C) 1997, 1998  Krzysztof G. Baranowski
 *
 *  This file contains code for allocating/freeing inodes and for read/writing
 *  the superblock.
 */

#include <linux/highuid.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/vfs.h>
#include <linux/writeback.h>
#include <linux/namei.h>
#include <asm/byteorder.h>
#include "sysv.h"

/* Write back the superblock, stamping it with the current time unless the
 * filesystem was marked clean (SYSV4 encodes "clean" in s_sb_state). */
static int sysv_sync_fs(struct super_block *sb, int wait)
{
	struct sysv_sb_info *sbi = SYSV_SB(sb);
	unsigned long time = get_seconds(), old_time;

	lock_super(sb);

	/*
	 * If we are going to write out the super block,
	 * then attach current time stamp.
	 * But if the filesystem was marked clean, keep it clean.
	 */
	sb->s_dirt = 0;
	old_time = fs32_to_cpu(sbi, *sbi->s_sb_time);
	if (sbi->s_type == FSTYPE_SYSV4) {
		if (*sbi->s_sb_state == cpu_to_fs32(sbi, 0x7c269d38 - old_time))
			*sbi->s_sb_state = cpu_to_fs32(sbi, 0x7c269d38 - time);
		*sbi->s_sb_time = cpu_to_fs32(sbi, time);
		mark_buffer_dirty(sbi->s_bh2);
	}

	unlock_super(sb);

	return 0;
}

static void sysv_write_super(struct super_block *sb)
{
	if (!(sb->s_flags & MS_RDONLY))
		sysv_sync_fs(sb, 1);
	else
		sb->s_dirt = 0;
}

static int sysv_remount(struct super_block *sb, int *flags, char *data)
{
	struct sysv_sb_info *sbi = SYSV_SB(sb);

	lock_super(sb);
	if (sbi->s_forced_ro)
		*flags |= MS_RDONLY;
	if (*flags & MS_RDONLY)
		sysv_write_super(sb);
	unlock_super(sb);
	return 0;
}

static void sysv_put_super(struct super_block *sb)
{
	struct sysv_sb_info *sbi = SYSV_SB(sb);

	if (sb->s_dirt)
		sysv_write_super(sb);

	if (!(sb->s_flags & MS_RDONLY)) {
		/* XXX ext2 also updates the state here */
		mark_buffer_dirty(sbi->s_bh1);
		if (sbi->s_bh1 != sbi->s_bh2)
			mark_buffer_dirty(sbi->s_bh2);
	}

	brelse(sbi->s_bh1);
	if (sbi->s_bh1 != sbi->s_bh2)
		brelse(sbi->s_bh2);

	kfree(sbi);
}

static int sysv_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct sysv_sb_info *sbi = SYSV_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);

	buf->f_type = sb->s_magic;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = sbi->s_ndatazones;
	buf->f_bavail = buf->f_bfree = sysv_count_free_blocks(sb);
	buf->f_files = sbi->s_ninodes;
	buf->f_ffree = sysv_count_free_inodes(sb);
	buf->f_namelen = SYSV_NAMELEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);
	return 0;
}

/*
 * NXI <-> N0XI for PDP, XIN <-> XIN0 for le32, NIX <-> 0NIX for be32
 *
 * On-disk block numbers are 3 bytes; these expand/contract them to and
 * from the 4-byte in-core representation for the filesystem's byte sex.
 */
static inline void read3byte(struct sysv_sb_info *sbi,
	unsigned char * from, unsigned char * to)
{
	if (sbi->s_bytesex == BYTESEX_PDP) {
		to[0] = from[0];
		to[1] = 0;
		to[2] = from[1];
		to[3] = from[2];
	} else if (sbi->s_bytesex == BYTESEX_LE) {
		to[0] = from[0];
		to[1] = from[1];
		to[2] = from[2];
		to[3] = 0;
	} else {
		to[0] = 0;
		to[1] = from[0];
		to[2] = from[1];
		to[3] = from[2];
	}
}

static inline void write3byte(struct sysv_sb_info *sbi,
	unsigned char * from, unsigned char * to)
{
	if (sbi->s_bytesex == BYTESEX_PDP) {
		to[0] = from[0];
		to[1] = from[2];
		to[2] = from[3];
	} else if (sbi->s_bytesex == BYTESEX_LE) {
		to[0] = from[0];
		to[1] = from[1];
		to[2] = from[2];
	} else {
		to[0] = from[1];
		to[1] = from[2];
		to[2] = from[3];
	}
}

static const struct inode_operations sysv_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
	.getattr	= sysv_getattr,
};

/* Wire up the inode/file/address-space operations appropriate for the
 * inode's mode (regular file, directory, symlink, or special). */
void sysv_set_inode(struct inode *inode, dev_t rdev)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &sysv_file_inode_operations;
		inode->i_fop = &sysv_file_operations;
		inode->i_mapping->a_ops = &sysv_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &sysv_dir_inode_operations;
		inode->i_fop = &sysv_dir_operations;
		inode->i_mapping->a_ops = &sysv_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (inode->i_blocks) {
			inode->i_op = &sysv_symlink_inode_operations;
			inode->i_mapping->a_ops = &sysv_aops;
		} else {
			/* fast symlink: target lives in the inode itself */
			inode->i_op = &sysv_fast_symlink_inode_operations;
			nd_terminate_link(SYSV_I(inode)->i_data, inode->i_size,
				sizeof(SYSV_I(inode)->i_data) - 1);
		}
	} else
		init_special_inode(inode, inode->i_mode, rdev);
}

/* Read inode 'ino' from disk and return a fully set-up in-core inode,
 * or an ERR_PTR on failure. */
struct inode *sysv_iget(struct super_block *sb, unsigned int ino)
{
	struct sysv_sb_info * sbi = SYSV_SB(sb);
	struct buffer_head * bh;
	struct sysv_inode * raw_inode;
	struct sysv_inode_info * si;
	struct inode *inode;
	unsigned int block;

	if (!ino || ino > sbi->s_ninodes) {
		printk("Bad inode number on dev %s: %d is out of range\n",
		       sb->s_id, ino);
		return ERR_PTR(-EIO);
	}

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	raw_inode = sysv_raw_inode(sb, ino, &bh);
	if (!raw_inode) {
		printk("Major problem: unable to read inode from dev %s\n",
		       inode->i_sb->s_id);
		goto bad_inode;
	}
	/* SystemV FS: kludge permissions if ino==SYSV_ROOT_INO ?? */
	inode->i_mode = fs16_to_cpu(sbi, raw_inode->i_mode);
	inode->i_uid = (uid_t)fs16_to_cpu(sbi, raw_inode->i_uid);
	inode->i_gid = (gid_t)fs16_to_cpu(sbi, raw_inode->i_gid);
	inode->i_nlink = fs16_to_cpu(sbi, raw_inode->i_nlink);
	inode->i_size = fs32_to_cpu(sbi, raw_inode->i_size);
	inode->i_atime.tv_sec = fs32_to_cpu(sbi, raw_inode->i_atime);
	inode->i_mtime.tv_sec = fs32_to_cpu(sbi, raw_inode->i_mtime);
	inode->i_ctime.tv_sec = fs32_to_cpu(sbi, raw_inode->i_ctime);
	inode->i_ctime.tv_nsec = 0;
	inode->i_atime.tv_nsec = 0;
	inode->i_mtime.tv_nsec = 0;
	inode->i_blocks = 0;

	si = SYSV_I(inode);
	for (block = 0; block < 10+1+1+1; block++)
		read3byte(sbi, &raw_inode->i_data[3*block],
				(u8 *)&si->i_data[block]);
	brelse(bh);
	si->i_dir_start_lookup = 0;
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		sysv_set_inode(inode,
			       old_decode_dev(fs32_to_cpu(sbi, si->i_data[0])));
	else
		sysv_set_inode(inode, 0);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(-EIO);
}

/* Write the in-core inode back to its on-disk slot; if 'wait', sync the
 * buffer and report any I/O error.  Returns 0 or a negative errno. */
static int __sysv_write_inode(struct inode *inode, int wait)
{
	struct super_block * sb = inode->i_sb;
	struct sysv_sb_info * sbi = SYSV_SB(sb);
	struct buffer_head * bh;
	struct sysv_inode * raw_inode;
	struct sysv_inode_info * si;
	unsigned int ino, block;
	int err = 0;

	ino = inode->i_ino;
	if (!ino || ino > sbi->s_ninodes) {
		printk("Bad inode number on dev %s: %d is out of range\n",
		       inode->i_sb->s_id, ino);
		return -EIO;
	}
	raw_inode = sysv_raw_inode(sb, ino, &bh);
	if (!raw_inode) {
		printk("unable to read i-node block\n");
		return -EIO;
	}

	raw_inode->i_mode = cpu_to_fs16(sbi, inode->i_mode);
	raw_inode->i_uid = cpu_to_fs16(sbi, fs_high2lowuid(inode->i_uid));
	raw_inode->i_gid = cpu_to_fs16(sbi, fs_high2lowgid(inode->i_gid));
	raw_inode->i_nlink = cpu_to_fs16(sbi, inode->i_nlink);
	raw_inode->i_size = cpu_to_fs32(sbi, inode->i_size);
	raw_inode->i_atime = cpu_to_fs32(sbi, inode->i_atime.tv_sec);
	raw_inode->i_mtime = cpu_to_fs32(sbi, inode->i_mtime.tv_sec);
	raw_inode->i_ctime = cpu_to_fs32(sbi, inode->i_ctime.tv_sec);

	si = SYSV_I(inode);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		si->i_data[0] = cpu_to_fs32(sbi, old_encode_dev(inode->i_rdev));
	for (block = 0; block < 10+1+1+1; block++)
		write3byte(sbi, (u8 *)&si->i_data[block],
			&raw_inode->i_data[3*block]);
	mark_buffer_dirty(bh);
	if (wait) {
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			printk("IO error syncing sysv inode [%s:%08x]\n",
			       sb->s_id, ino);
			err = -EIO;
		}
	}
	brelse(bh);
	/* BUGFIX: propagate the sync error instead of always returning 0,
	 * which silently discarded -EIO from a failed synchronous write. */
	return err;
}

int sysv_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return __sysv_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

int sysv_sync_inode(struct inode *inode)
{
	return __sysv_write_inode(inode, 1);
}

static void sysv_evict_inode(struct inode *inode)
{
	truncate_inode_pages(&inode->i_data, 0);
	if (!inode->i_nlink) {
		inode->i_size = 0;
		sysv_truncate(inode);
	}
	invalidate_inode_buffers(inode);
	end_writeback(inode);
	if (!inode->i_nlink)
		sysv_free_inode(inode);
}

static struct kmem_cache *sysv_inode_cachep;

static struct inode *sysv_alloc_inode(struct super_block *sb)
{
	struct sysv_inode_info *si;

	si = kmem_cache_alloc(sysv_inode_cachep, GFP_KERNEL);
	if (!si)
		return NULL;
	return &si->vfs_inode;
}

static void sysv_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(sysv_inode_cachep, SYSV_I(inode));
}

static void sysv_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, sysv_i_callback);
}

static void init_once(void *p)
{
	struct sysv_inode_info *si = (struct sysv_inode_info *)p;

	inode_init_once(&si->vfs_inode);
}

const struct super_operations sysv_sops = {
	.alloc_inode	= sysv_alloc_inode,
	.destroy_inode	= sysv_destroy_inode,
	.write_inode	= sysv_write_inode,
	.evict_inode	= sysv_evict_inode,
	.put_super	= sysv_put_super,
	.write_super	= sysv_write_super,
	.sync_fs	= sysv_sync_fs,
	.remount_fs	= sysv_remount,
	.statfs		= sysv_statfs,
};

int __init sysv_init_icache(void)
{
	sysv_inode_cachep = kmem_cache_create("sysv_inode_cache",
			sizeof(struct sysv_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
			init_once);
	if (!sysv_inode_cachep)
		return -ENOMEM;
	return 0;
}

void sysv_destroy_icache(void)
{
	kmem_cache_destroy(sysv_inode_cachep);
}
gpl-2.0
gromaudio/linux-imx6
drivers/usb/atm/speedtch.c
3188
29192
/******************************************************************************
 *  speedtch.c  -  Alcatel SpeedTouch USB xDSL modem driver
 *
 *  Copyright (C) 2001, Alcatel
 *  Copyright (C) 2003, Duncan Sands
 *  Copyright (C) 2004, David Woodhouse
 *
 *  Based on "modem_run.c", copyright (C) 2001, Benoit Papillault
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the Free
 *  Software Foundation; either version 2 of the License, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 *
 *  You should have received a copy of the GNU General Public License along with
 *  this program; if not, write to the Free Software Foundation, Inc., 59
 *  Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <asm/page.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/usb/ch9.h>
#include <linux/workqueue.h>

#include "usbatm.h"

#define DRIVER_AUTHOR	"Johan Verrept, Duncan Sands <duncan.sands@free.fr>"
#define DRIVER_VERSION	"1.10"
#define DRIVER_DESC	"Alcatel SpeedTouch USB driver version " DRIVER_VERSION

static const char speedtch_driver_name[] = "speedtch";

#define CTRL_TIMEOUT 2000	/* milliseconds */
#define DATA_TIMEOUT 2000	/* milliseconds */

/* Offsets/sizes of fields in the interrupt/status data blocks. */
#define OFFSET_7	0	/* size 1 */
#define OFFSET_b	1	/* size 8 */
#define OFFSET_d	9	/* size 4 */
#define OFFSET_e	13	/* size 1 */
#define OFFSET_f	14	/* size 1 */

#define SIZE_7		1
#define SIZE_b		8
#define SIZE_d		4
#define SIZE_e		1
#define SIZE_f		1

#define MIN_POLL_DELAY		5000	/* milliseconds */
#define MAX_POLL_DELAY		60000	/* milliseconds */

#define RESUBMIT_DELAY		1000	/* milliseconds */

#define DEFAULT_BULK_ALTSETTING	1
#define DEFAULT_ISOC_ALTSETTING	3
#define DEFAULT_DL_512_FIRST	0
#define DEFAULT_ENABLE_ISOC	0
#define DEFAULT_SW_BUFFERING	0

static unsigned int altsetting = 0;	/* zero means: use the default */
/* NOTE(review): declared int but registered below with module_param(...,
 * bool, ...) — tolerated by this kernel version's bool param handling. */
static int dl_512_first = DEFAULT_DL_512_FIRST;
static int enable_isoc = DEFAULT_ENABLE_ISOC;
static int sw_buffering = DEFAULT_SW_BUFFERING;

#define DEFAULT_B_MAX_DSL	8128
#define DEFAULT_MODEM_MODE	11
#define MODEM_OPTION_LENGTH	16
static const unsigned char DEFAULT_MODEM_OPTION[MODEM_OPTION_LENGTH] = {
	0x10, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};

static unsigned int BMaxDSL = DEFAULT_B_MAX_DSL;
static unsigned char ModemMode = DEFAULT_MODEM_MODE;
static unsigned char ModemOption[MODEM_OPTION_LENGTH];
static unsigned int num_ModemOption;

module_param(altsetting, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(altsetting,
		 "Alternative setting for data interface (bulk_default: "
		 __MODULE_STRING(DEFAULT_BULK_ALTSETTING) "; isoc_default: "
		 __MODULE_STRING(DEFAULT_ISOC_ALTSETTING) ")");

module_param(dl_512_first, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dl_512_first, "Read 512 bytes before sending firmware (default: "
		 __MODULE_STRING(DEFAULT_DL_512_FIRST) ")");

module_param(enable_isoc, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_isoc,
		 "Use isochronous transfers if available (default: "
		 __MODULE_STRING(DEFAULT_ENABLE_ISOC) ")");

module_param(sw_buffering, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(sw_buffering,
		 "Enable software buffering (default: "
		 __MODULE_STRING(DEFAULT_SW_BUFFERING) ")");

module_param(BMaxDSL, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(BMaxDSL,
		"default: " __MODULE_STRING(DEFAULT_B_MAX_DSL));

module_param(ModemMode, byte, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ModemMode,
		"default: " __MODULE_STRING(DEFAULT_MODEM_MODE));

module_param_array(ModemOption, byte, &num_ModemOption, S_IRUGO);
MODULE_PARM_DESC(ModemOption, "default: 0x10,0x00,0x00,0x00,0x20");

#define INTERFACE_DATA		1
#define ENDPOINT_INT		0x81
#define ENDPOINT_BULK_DATA	0x07
#define ENDPOINT_ISOC_DATA	0x07
#define ENDPOINT_FIRMWARE	0x05

struct speedtch_params {
	unsigned int altsetting;
	unsigned int BMaxDSL;
	unsigned char ModemMode;
	unsigned char ModemOption[MODEM_OPTION_LENGTH];
};

struct speedtch_instance_data {
	struct usbatm_data *usbatm;

	struct speedtch_params params;	/* set in probe, constant afterwards */

	struct timer_list status_check_timer;
	struct work_struct status_check_work;

	unsigned char last_status;

	int poll_delay;			/* milliseconds */

	struct timer_list resubmit_timer;
	struct urb *int_urb;
	unsigned char int_data[16];

	unsigned char scratch_buffer[16];
};

/***************
**  firmware  **
***************/

/* Enable or disable the modem's software buffering via a vendor control
 * request; failure is logged but not fatal. */
static void speedtch_set_swbuff(struct speedtch_instance_data *instance, int state)
{
	struct usbatm_data *usbatm = instance->usbatm;
	struct usb_device *usb_dev = usbatm->usb_dev;
	int ret;

	ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
			      0x32, 0x40, state ? 0x01 : 0x00, 0x00, NULL, 0, CTRL_TIMEOUT);
	if (ret < 0)
		usb_warn(usbatm,
			 "%sabling SW buffering: usb_control_msg returned %d\n",
			 state ? "En" : "Dis", ret);
	else
		dbg("speedtch_set_swbuff: %sbled SW buffering", state ? "En" : "Dis");
}

/* Replay the vendor-specific control-request sequence captured from the
 * Windows driver ("URB 147..150" plus the newer URBext requests). */
static void speedtch_test_sequence(struct speedtch_instance_data *instance)
{
	struct usbatm_data *usbatm = instance->usbatm;
	struct usb_device *usb_dev = usbatm->usb_dev;
	unsigned char *buf = instance->scratch_buffer;
	int ret;

	/* URB 147 */
	buf[0] = 0x1c;
	buf[1] = 0x50;
	ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
			      0x01, 0x40, 0x0b, 0x00, buf, 2, CTRL_TIMEOUT);
	if (ret < 0)
		usb_warn(usbatm, "%s failed on URB147: %d\n", __func__, ret);

	/* URB 148 */
	buf[0] = 0x32;
	buf[1] = 0x00;
	ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
			      0x01, 0x40, 0x02, 0x00, buf, 2, CTRL_TIMEOUT);
	if (ret < 0)
		usb_warn(usbatm, "%s failed on URB148: %d\n", __func__, ret);

	/* URB 149 */
	buf[0] = 0x01;
	buf[1] = 0x00;
	buf[2] = 0x01;
	ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
			      0x01, 0x40, 0x03, 0x00, buf, 3, CTRL_TIMEOUT);
	if (ret < 0)
		usb_warn(usbatm, "%s failed on URB149: %d\n", __func__, ret);

	/* URB 150 */
	buf[0] = 0x01;
	buf[1] = 0x00;
	buf[2] = 0x01;
	ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
			      0x01, 0x40, 0x04, 0x00, buf, 3, CTRL_TIMEOUT);
	if (ret < 0)
		usb_warn(usbatm, "%s failed on URB150: %d\n", __func__, ret);

	/* Extra initialisation in recent drivers - gives higher speeds */

	/* URBext1 */
	buf[0] = instance->params.ModemMode;
	ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
			      0x01, 0x40, 0x11, 0x00, buf, 1, CTRL_TIMEOUT);
	if (ret < 0)
		usb_warn(usbatm, "%s failed on URBext1: %d\n", __func__, ret);

	/* URBext2 */
	/* This seems to be the one which actually triggers the higher sync
	   rate -- it does require the new firmware too, although it works OK
	   with older firmware */
	ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
			      0x01, 0x40, 0x14, 0x00,
			      instance->params.ModemOption,
			      MODEM_OPTION_LENGTH, CTRL_TIMEOUT);
	if (ret < 0)
		usb_warn(usbatm, "%s failed on URBext2: %d\n", __func__, ret);

	/* URBext3 */
	buf[0] = instance->params.BMaxDSL & 0xff;
	buf[1] = instance->params.BMaxDSL >> 8;
	ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
			      0x01, 0x40, 0x12, 0x00, buf, 2, CTRL_TIMEOUT);
	if (ret < 0)
		usb_warn(usbatm, "%s failed on URBext3: %d\n", __func__, ret);
}

/* Push the two firmware stages to the modem over the firmware bulk
 * endpoint, then switch to the data altsetting and run the magic
 * initialisation sequence.  Returns 0 or a negative errno. */
static int speedtch_upload_firmware(struct speedtch_instance_data *instance,
				    const struct firmware *fw1,
				    const struct firmware *fw2)
{
	unsigned char *buffer;
	struct usbatm_data *usbatm = instance->usbatm;
	struct usb_device *usb_dev = usbatm->usb_dev;
	int actual_length;
	int ret = 0;
	int offset;

	usb_dbg(usbatm, "%s entered\n", __func__);

	if (!(buffer = (unsigned char *)__get_free_page(GFP_KERNEL))) {
		ret = -ENOMEM;
		usb_dbg(usbatm, "%s: no memory for buffer!\n", __func__);
		goto out;
	}

	if (!usb_ifnum_to_if(usb_dev, 2)) {
		ret = -ENODEV;
		usb_dbg(usbatm, "%s: interface not found!\n", __func__);
		goto out_free;
	}

	/* URB 7 */
	if (dl_512_first) {	/* some modems need a read before writing the firmware */
		ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, ENDPOINT_FIRMWARE),
				   buffer, 0x200, &actual_length, 2000);

		if (ret < 0 && ret != -ETIMEDOUT)
			usb_warn(usbatm, "%s: read BLOCK0 from modem failed (%d)!\n", __func__, ret);
		else
			usb_dbg(usbatm, "%s: BLOCK0 downloaded (%d bytes)\n", __func__, ret);
	}

	/* URB 8 : both leds are static green */
	for (offset = 0; offset < fw1->size; offset += PAGE_SIZE) {
		int thislen = min_t(int, PAGE_SIZE, fw1->size - offset);
		memcpy(buffer, fw1->data + offset, thislen);

		ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, ENDPOINT_FIRMWARE),
				   buffer, thislen, &actual_length, DATA_TIMEOUT);

		if (ret < 0) {
			usb_err(usbatm, "%s: write BLOCK1 to modem failed (%d)!\n", __func__, ret);
			goto out_free;
		}
		usb_dbg(usbatm, "%s: BLOCK1 uploaded (%zu bytes)\n", __func__, fw1->size);
	}

	/* USB led blinking green, ADSL led off */

	/* URB 11 */
	ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, ENDPOINT_FIRMWARE),
			   buffer, 0x200, &actual_length, DATA_TIMEOUT);

	if (ret < 0) {
		usb_err(usbatm, "%s: read BLOCK2 from modem failed (%d)!\n", __func__, ret);
		goto out_free;
	}
	usb_dbg(usbatm, "%s: BLOCK2 downloaded (%d bytes)\n", __func__, actual_length);

	/* URBs 12 to 139 - USB led blinking green, ADSL led off */
	for (offset = 0; offset < fw2->size; offset += PAGE_SIZE) {
		int thislen = min_t(int, PAGE_SIZE, fw2->size - offset);
		memcpy(buffer, fw2->data + offset, thislen);

		ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, ENDPOINT_FIRMWARE),
				   buffer, thislen, &actual_length, DATA_TIMEOUT);

		if (ret < 0) {
			usb_err(usbatm, "%s: write BLOCK3 to modem failed (%d)!\n", __func__, ret);
			goto out_free;
		}
	}
	usb_dbg(usbatm, "%s: BLOCK3 uploaded (%zu bytes)\n", __func__, fw2->size);

	/* USB led static green, ADSL led static red */

	/* URB 142 */
	ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, ENDPOINT_FIRMWARE),
			   buffer, 0x200, &actual_length, DATA_TIMEOUT);

	if (ret < 0) {
		usb_err(usbatm, "%s: read BLOCK4 from modem failed (%d)!\n", __func__, ret);
		goto out_free;
	}

	/* success */
	usb_dbg(usbatm, "%s: BLOCK4 downloaded (%d bytes)\n", __func__, actual_length);

	/* Delay to allow firmware to start up. We can do this here
	   because we're in our own kernel thread anyway. */
	msleep_interruptible(1000);

	if ((ret = usb_set_interface(usb_dev, INTERFACE_DATA, instance->params.altsetting)) < 0) {
		usb_err(usbatm, "%s: setting interface to %d failed (%d)!\n", __func__, instance->params.altsetting, ret);
		goto out_free;
	}

	/* Enable software buffering, if requested */
	if (sw_buffering)
		speedtch_set_swbuff(instance, 1);

	/* Magic spell; don't ask us what this does */
	speedtch_test_sequence(instance);

	ret = 0;

out_free:
	free_page((unsigned long)buffer);
out:
	return ret;
}

/* Locate a stage-'phase' firmware image, trying progressively less
 * revision-specific file names.  Returns 0 with *fw_p set, or -ENOENT. */
static int speedtch_find_firmware(struct usbatm_data *usbatm,
				  struct usb_interface *intf,
				  int phase, const struct firmware **fw_p)
{
	struct device *dev = &intf->dev;
	const u16 bcdDevice = le16_to_cpu(interface_to_usbdev(intf)->descriptor.bcdDevice);
	const u8 major_revision = bcdDevice >> 8;
	const u8 minor_revision = bcdDevice & 0xff;
	char buf[24];

	sprintf(buf, "speedtch-%d.bin.%x.%02x", phase, major_revision, minor_revision);
	usb_dbg(usbatm, "%s: looking for %s\n", __func__, buf);

	if (request_firmware(fw_p, buf, dev)) {
		sprintf(buf, "speedtch-%d.bin.%x", phase, major_revision);
		usb_dbg(usbatm, "%s: looking for %s\n", __func__, buf);

		if (request_firmware(fw_p, buf, dev)) {
			sprintf(buf, "speedtch-%d.bin", phase);
			usb_dbg(usbatm, "%s: looking for %s\n", __func__, buf);

			if (request_firmware(fw_p, buf, dev)) {
				usb_err(usbatm, "%s: no stage %d firmware found!\n", __func__, phase);
				return -ENOENT;
			}
		}
	}

	usb_info(usbatm, "found stage %d firmware %s\n", phase, buf);

	return 0;
}

static int speedtch_heavy_init(struct usbatm_data *usbatm, struct usb_interface *intf)
{
	const struct firmware *fw1, *fw2;
	struct speedtch_instance_data *instance = usbatm->driver_data;
	int ret;

	if ((ret = speedtch_find_firmware(usbatm, intf, 1, &fw1)) < 0)
		return ret;
	if ((ret = speedtch_find_firmware(usbatm, intf, 2, &fw2)) < 0) {
		release_firmware(fw1);
		return ret;
	}

	if ((ret = speedtch_upload_firmware(instance, fw1, fw2)) < 0)
		usb_err(usbatm, "%s: firmware upload failed (%d)!\n", __func__, ret);
release_firmware(fw2); release_firmware(fw1); return ret; } /********** ** ATM ** **********/ static int speedtch_read_status(struct speedtch_instance_data *instance) { struct usbatm_data *usbatm = instance->usbatm; struct usb_device *usb_dev = usbatm->usb_dev; unsigned char *buf = instance->scratch_buffer; int ret; memset(buf, 0, 16); ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), 0x12, 0xc0, 0x07, 0x00, buf + OFFSET_7, SIZE_7, CTRL_TIMEOUT); if (ret < 0) { atm_dbg(usbatm, "%s: MSG 7 failed\n", __func__); return ret; } ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), 0x12, 0xc0, 0x0b, 0x00, buf + OFFSET_b, SIZE_b, CTRL_TIMEOUT); if (ret < 0) { atm_dbg(usbatm, "%s: MSG B failed\n", __func__); return ret; } ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), 0x12, 0xc0, 0x0d, 0x00, buf + OFFSET_d, SIZE_d, CTRL_TIMEOUT); if (ret < 0) { atm_dbg(usbatm, "%s: MSG D failed\n", __func__); return ret; } ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), 0x01, 0xc0, 0x0e, 0x00, buf + OFFSET_e, SIZE_e, CTRL_TIMEOUT); if (ret < 0) { atm_dbg(usbatm, "%s: MSG E failed\n", __func__); return ret; } ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), 0x01, 0xc0, 0x0f, 0x00, buf + OFFSET_f, SIZE_f, CTRL_TIMEOUT); if (ret < 0) { atm_dbg(usbatm, "%s: MSG F failed\n", __func__); return ret; } return 0; } static int speedtch_start_synchro(struct speedtch_instance_data *instance) { struct usbatm_data *usbatm = instance->usbatm; struct usb_device *usb_dev = usbatm->usb_dev; unsigned char *buf = instance->scratch_buffer; int ret; atm_dbg(usbatm, "%s entered\n", __func__); memset(buf, 0, 2); ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), 0x12, 0xc0, 0x04, 0x00, buf, 2, CTRL_TIMEOUT); if (ret < 0) atm_warn(usbatm, "failed to start ADSL synchronisation: %d\n", ret); else atm_dbg(usbatm, "%s: modem prodded. 
%d bytes returned: %02x %02x\n", __func__, ret, buf[0], buf[1]); return ret; } static void speedtch_check_status(struct work_struct *work) { struct speedtch_instance_data *instance = container_of(work, struct speedtch_instance_data, status_check_work); struct usbatm_data *usbatm = instance->usbatm; struct atm_dev *atm_dev = usbatm->atm_dev; unsigned char *buf = instance->scratch_buffer; int down_speed, up_speed, ret; unsigned char status; #ifdef VERBOSE_DEBUG atm_dbg(usbatm, "%s entered\n", __func__); #endif ret = speedtch_read_status(instance); if (ret < 0) { atm_warn(usbatm, "error %d fetching device status\n", ret); instance->poll_delay = min(2 * instance->poll_delay, MAX_POLL_DELAY); return; } instance->poll_delay = max(instance->poll_delay / 2, MIN_POLL_DELAY); status = buf[OFFSET_7]; if ((status != instance->last_status) || !status) { atm_dbg(usbatm, "%s: line state 0x%02x\n", __func__, status); switch (status) { case 0: atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST); if (instance->last_status) atm_info(usbatm, "ADSL line is down\n"); /* It may never resync again unless we ask it to... 
*/ ret = speedtch_start_synchro(instance); break; case 0x08: atm_dev_signal_change(atm_dev, ATM_PHY_SIG_UNKNOWN); atm_info(usbatm, "ADSL line is blocked?\n"); break; case 0x10: atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST); atm_info(usbatm, "ADSL line is synchronising\n"); break; case 0x20: down_speed = buf[OFFSET_b] | (buf[OFFSET_b + 1] << 8) | (buf[OFFSET_b + 2] << 16) | (buf[OFFSET_b + 3] << 24); up_speed = buf[OFFSET_b + 4] | (buf[OFFSET_b + 5] << 8) | (buf[OFFSET_b + 6] << 16) | (buf[OFFSET_b + 7] << 24); if (!(down_speed & 0x0000ffff) && !(up_speed & 0x0000ffff)) { down_speed >>= 16; up_speed >>= 16; } atm_dev->link_rate = down_speed * 1000 / 424; atm_dev_signal_change(atm_dev, ATM_PHY_SIG_FOUND); atm_info(usbatm, "ADSL line is up (%d kb/s down | %d kb/s up)\n", down_speed, up_speed); break; default: atm_dev_signal_change(atm_dev, ATM_PHY_SIG_UNKNOWN); atm_info(usbatm, "unknown line state %02x\n", status); break; } instance->last_status = status; } } static void speedtch_status_poll(unsigned long data) { struct speedtch_instance_data *instance = (void *)data; schedule_work(&instance->status_check_work); /* The following check is racy, but the race is harmless */ if (instance->poll_delay < MAX_POLL_DELAY) mod_timer(&instance->status_check_timer, jiffies + msecs_to_jiffies(instance->poll_delay)); else atm_warn(instance->usbatm, "Too many failures - disabling line status polling\n"); } static void speedtch_resubmit_int(unsigned long data) { struct speedtch_instance_data *instance = (void *)data; struct urb *int_urb = instance->int_urb; int ret; atm_dbg(instance->usbatm, "%s entered\n", __func__); if (int_urb) { ret = usb_submit_urb(int_urb, GFP_ATOMIC); if (!ret) schedule_work(&instance->status_check_work); else { atm_dbg(instance->usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret); mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY)); } } } static void speedtch_handle_int(struct urb *int_urb) { struct 
speedtch_instance_data *instance = int_urb->context; struct usbatm_data *usbatm = instance->usbatm; unsigned int count = int_urb->actual_length; int status = int_urb->status; int ret; /* The magic interrupt for "up state" */ static const unsigned char up_int[6] = { 0xa1, 0x00, 0x01, 0x00, 0x00, 0x00 }; /* The magic interrupt for "down state" */ static const unsigned char down_int[6] = { 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00 }; atm_dbg(usbatm, "%s entered\n", __func__); if (status < 0) { atm_dbg(usbatm, "%s: nonzero urb status %d!\n", __func__, status); goto fail; } if ((count == 6) && !memcmp(up_int, instance->int_data, 6)) { del_timer(&instance->status_check_timer); atm_info(usbatm, "DSL line goes up\n"); } else if ((count == 6) && !memcmp(down_int, instance->int_data, 6)) { atm_info(usbatm, "DSL line goes down\n"); } else { int i; atm_dbg(usbatm, "%s: unknown interrupt packet of length %d:", __func__, count); for (i = 0; i < count; i++) printk(" %02x", instance->int_data[i]); printk("\n"); goto fail; } if ((int_urb = instance->int_urb)) { ret = usb_submit_urb(int_urb, GFP_ATOMIC); schedule_work(&instance->status_check_work); if (ret < 0) { atm_dbg(usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret); goto fail; } } return; fail: if ((int_urb = instance->int_urb)) mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY)); } static int speedtch_atm_start(struct usbatm_data *usbatm, struct atm_dev *atm_dev) { struct usb_device *usb_dev = usbatm->usb_dev; struct speedtch_instance_data *instance = usbatm->driver_data; int i, ret; unsigned char mac_str[13]; atm_dbg(usbatm, "%s entered\n", __func__); /* Set MAC address, it is stored in the serial number */ memset(atm_dev->esi, 0, sizeof(atm_dev->esi)); if (usb_string(usb_dev, usb_dev->descriptor.iSerialNumber, mac_str, sizeof(mac_str)) == 12) { for (i = 0; i < 6; i++) atm_dev->esi[i] = (hex_to_bin(mac_str[i * 2]) << 4) + hex_to_bin(mac_str[i * 2 + 1]); } /* Start modem synchronisation 
*/ ret = speedtch_start_synchro(instance); /* Set up interrupt endpoint */ if (instance->int_urb) { ret = usb_submit_urb(instance->int_urb, GFP_KERNEL); if (ret < 0) { /* Doesn't matter; we'll poll anyway */ atm_dbg(usbatm, "%s: submission of interrupt URB failed (%d)!\n", __func__, ret); usb_free_urb(instance->int_urb); instance->int_urb = NULL; } } /* Start status polling */ mod_timer(&instance->status_check_timer, jiffies + msecs_to_jiffies(1000)); return 0; } static void speedtch_atm_stop(struct usbatm_data *usbatm, struct atm_dev *atm_dev) { struct speedtch_instance_data *instance = usbatm->driver_data; struct urb *int_urb = instance->int_urb; atm_dbg(usbatm, "%s entered\n", __func__); del_timer_sync(&instance->status_check_timer); /* * Since resubmit_timer and int_urb can schedule themselves and * each other, shutting them down correctly takes some care */ instance->int_urb = NULL; /* signal shutdown */ mb(); usb_kill_urb(int_urb); del_timer_sync(&instance->resubmit_timer); /* * At this point, speedtch_handle_int and speedtch_resubmit_int * can run or be running, but instance->int_urb == NULL means that * they will not reschedule */ usb_kill_urb(int_urb); del_timer_sync(&instance->resubmit_timer); usb_free_urb(int_urb); flush_work_sync(&instance->status_check_work); } static int speedtch_pre_reset(struct usb_interface *intf) { return 0; } static int speedtch_post_reset(struct usb_interface *intf) { return 0; } /********** ** USB ** **********/ static struct usb_device_id speedtch_usb_ids[] = { {USB_DEVICE(0x06b9, 0x4061)}, {} }; MODULE_DEVICE_TABLE(usb, speedtch_usb_ids); static int speedtch_usb_probe(struct usb_interface *, const struct usb_device_id *); static struct usb_driver speedtch_usb_driver = { .name = speedtch_driver_name, .probe = speedtch_usb_probe, .disconnect = usbatm_usb_disconnect, .pre_reset = speedtch_pre_reset, .post_reset = speedtch_post_reset, .id_table = speedtch_usb_ids }; static void speedtch_release_interfaces(struct usb_device 
*usb_dev, int num_interfaces) { struct usb_interface *cur_intf; int i; for (i = 0; i < num_interfaces; i++) if ((cur_intf = usb_ifnum_to_if(usb_dev, i))) { usb_set_intfdata(cur_intf, NULL); usb_driver_release_interface(&speedtch_usb_driver, cur_intf); } } static int speedtch_bind(struct usbatm_data *usbatm, struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *usb_dev = interface_to_usbdev(intf); struct usb_interface *cur_intf, *data_intf; struct speedtch_instance_data *instance; int ifnum = intf->altsetting->desc.bInterfaceNumber; int num_interfaces = usb_dev->actconfig->desc.bNumInterfaces; int i, ret; int use_isoc; usb_dbg(usbatm, "%s entered\n", __func__); /* sanity checks */ if (usb_dev->descriptor.bDeviceClass != USB_CLASS_VENDOR_SPEC) { usb_err(usbatm, "%s: wrong device class %d\n", __func__, usb_dev->descriptor.bDeviceClass); return -ENODEV; } if (!(data_intf = usb_ifnum_to_if(usb_dev, INTERFACE_DATA))) { usb_err(usbatm, "%s: data interface not found!\n", __func__); return -ENODEV; } /* claim all interfaces */ for (i = 0; i < num_interfaces; i++) { cur_intf = usb_ifnum_to_if(usb_dev, i); if ((i != ifnum) && cur_intf) { ret = usb_driver_claim_interface(&speedtch_usb_driver, cur_intf, usbatm); if (ret < 0) { usb_err(usbatm, "%s: failed to claim interface %2d (%d)!\n", __func__, i, ret); speedtch_release_interfaces(usb_dev, i); return ret; } } } instance = kzalloc(sizeof(*instance), GFP_KERNEL); if (!instance) { usb_err(usbatm, "%s: no memory for instance data!\n", __func__); ret = -ENOMEM; goto fail_release; } instance->usbatm = usbatm; /* module parameters may change at any moment, so take a snapshot */ instance->params.altsetting = altsetting; instance->params.BMaxDSL = BMaxDSL; instance->params.ModemMode = ModemMode; memcpy(instance->params.ModemOption, DEFAULT_MODEM_OPTION, MODEM_OPTION_LENGTH); memcpy(instance->params.ModemOption, ModemOption, num_ModemOption); use_isoc = enable_isoc; if (instance->params.altsetting) if ((ret = 
usb_set_interface(usb_dev, INTERFACE_DATA, instance->params.altsetting)) < 0) { usb_err(usbatm, "%s: setting interface to %2d failed (%d)!\n", __func__, instance->params.altsetting, ret); instance->params.altsetting = 0; /* fall back to default */ } if (!instance->params.altsetting && use_isoc) if ((ret = usb_set_interface(usb_dev, INTERFACE_DATA, DEFAULT_ISOC_ALTSETTING)) < 0) { usb_dbg(usbatm, "%s: setting interface to %2d failed (%d)!\n", __func__, DEFAULT_ISOC_ALTSETTING, ret); use_isoc = 0; /* fall back to bulk */ } if (use_isoc) { const struct usb_host_interface *desc = data_intf->cur_altsetting; const __u8 target_address = USB_DIR_IN | usbatm->driver->isoc_in; use_isoc = 0; /* fall back to bulk if endpoint not found */ for (i = 0; i < desc->desc.bNumEndpoints; i++) { const struct usb_endpoint_descriptor *endpoint_desc = &desc->endpoint[i].desc; if ((endpoint_desc->bEndpointAddress == target_address)) { use_isoc = usb_endpoint_xfer_isoc(endpoint_desc); break; } } if (!use_isoc) usb_info(usbatm, "isochronous transfer not supported - using bulk\n"); } if (!use_isoc && !instance->params.altsetting) if ((ret = usb_set_interface(usb_dev, INTERFACE_DATA, DEFAULT_BULK_ALTSETTING)) < 0) { usb_err(usbatm, "%s: setting interface to %2d failed (%d)!\n", __func__, DEFAULT_BULK_ALTSETTING, ret); goto fail_free; } if (!instance->params.altsetting) instance->params.altsetting = use_isoc ? DEFAULT_ISOC_ALTSETTING : DEFAULT_BULK_ALTSETTING; usbatm->flags |= (use_isoc ? 
UDSL_USE_ISOC : 0); INIT_WORK(&instance->status_check_work, speedtch_check_status); init_timer(&instance->status_check_timer); instance->status_check_timer.function = speedtch_status_poll; instance->status_check_timer.data = (unsigned long)instance; instance->last_status = 0xff; instance->poll_delay = MIN_POLL_DELAY; init_timer(&instance->resubmit_timer); instance->resubmit_timer.function = speedtch_resubmit_int; instance->resubmit_timer.data = (unsigned long)instance; instance->int_urb = usb_alloc_urb(0, GFP_KERNEL); if (instance->int_urb) usb_fill_int_urb(instance->int_urb, usb_dev, usb_rcvintpipe(usb_dev, ENDPOINT_INT), instance->int_data, sizeof(instance->int_data), speedtch_handle_int, instance, 50); else usb_dbg(usbatm, "%s: no memory for interrupt urb!\n", __func__); /* check whether the modem already seems to be alive */ ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), 0x12, 0xc0, 0x07, 0x00, instance->scratch_buffer + OFFSET_7, SIZE_7, 500); usbatm->flags |= (ret == SIZE_7 ? UDSL_SKIP_HEAVY_INIT : 0); usb_dbg(usbatm, "%s: firmware %s loaded\n", __func__, usbatm->flags & UDSL_SKIP_HEAVY_INIT ? 
"already" : "not"); if (!(usbatm->flags & UDSL_SKIP_HEAVY_INIT)) if ((ret = usb_reset_device(usb_dev)) < 0) { usb_err(usbatm, "%s: device reset failed (%d)!\n", __func__, ret); goto fail_free; } usbatm->driver_data = instance; return 0; fail_free: usb_free_urb(instance->int_urb); kfree(instance); fail_release: speedtch_release_interfaces(usb_dev, num_interfaces); return ret; } static void speedtch_unbind(struct usbatm_data *usbatm, struct usb_interface *intf) { struct usb_device *usb_dev = interface_to_usbdev(intf); struct speedtch_instance_data *instance = usbatm->driver_data; usb_dbg(usbatm, "%s entered\n", __func__); speedtch_release_interfaces(usb_dev, usb_dev->actconfig->desc.bNumInterfaces); usb_free_urb(instance->int_urb); kfree(instance); } /*********** ** init ** ***********/ static struct usbatm_driver speedtch_usbatm_driver = { .driver_name = speedtch_driver_name, .bind = speedtch_bind, .heavy_init = speedtch_heavy_init, .unbind = speedtch_unbind, .atm_start = speedtch_atm_start, .atm_stop = speedtch_atm_stop, .bulk_in = ENDPOINT_BULK_DATA, .bulk_out = ENDPOINT_BULK_DATA, .isoc_in = ENDPOINT_ISOC_DATA }; static int speedtch_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { return usbatm_usb_probe(intf, id, &speedtch_usbatm_driver); } static int __init speedtch_usb_init(void) { dbg("%s: driver version %s", __func__, DRIVER_VERSION); return usb_register(&speedtch_usb_driver); } static void __exit speedtch_usb_cleanup(void) { dbg("%s", __func__); usb_deregister(&speedtch_usb_driver); } module_init(speedtch_usb_init); module_exit(speedtch_usb_cleanup); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); MODULE_VERSION(DRIVER_VERSION);
gpl-2.0
mereck/os-fork
drivers/hwmon/m_adcproc.c
3444
11564
/* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/msm_adc.h> #define KELVINMIL_DEGMIL 273160 static const struct adc_map_pt adcmap_batttherm[] = { {2020, -30}, {1923, -20}, {1796, -10}, {1640, 0}, {1459, 10}, {1260, 20}, {1159, 25}, {1059, 30}, {871, 40}, {706, 50}, {567, 60}, {453, 70}, {364, 80} }; static const struct adc_map_pt adcmap_msmtherm[] = { {2150, -30}, {2107, -20}, {2037, -10}, {1929, 0}, {1776, 10}, {1579, 20}, {1467, 25}, {1349, 30}, {1108, 40}, {878, 50}, {677, 60}, {513, 70}, {385, 80}, {287, 90}, {215, 100}, {186, 110}, {107, 120} }; static const struct adc_map_pt adcmap_ntcg104ef104fb[] = { {696483, -40960}, {649148, -39936}, {605368, -38912}, {564809, -37888}, {527215, -36864}, {492322, -35840}, {460007, -34816}, {429982, -33792}, {402099, -32768}, {376192, -31744}, {352075, -30720}, {329714, -29696}, {308876, -28672}, {289480, -27648}, {271417, -26624}, {254574, -25600}, {238903, -24576}, {224276, -23552}, {210631, -22528}, {197896, -21504}, {186007, -20480}, {174899, -19456}, {164521, -18432}, {154818, -17408}, {145744, -16384}, {137265, -15360}, {129307, -14336}, {121866, -13312}, {114896, -12288}, {108365, -11264}, {102252, -10240}, {96499, -9216}, {91111, -8192}, {86055, -7168}, {81308, -6144}, {76857, -5120}, {72660, -4096}, {68722, -3072}, {65020, -2048}, {61538, -1024}, {58261, 0}, {55177, 1024}, {52274, 2048}, {49538, 3072}, {46962, 4096}, {44531, 5120}, {42243, 6144}, {40083, 7168}, {38045, 8192}, {36122, 9216}, {34308, 10240}, 
{32592, 11264}, {30972, 12288}, {29442, 13312}, {27995, 14336}, {26624, 15360}, {25333, 16384}, {24109, 17408}, {22951, 18432}, {21854, 19456}, {20807, 20480}, {19831, 21504}, {18899, 22528}, {18016, 23552}, {17178, 24576}, {16384, 25600}, {15631, 26624}, {14916, 27648}, {14237, 28672}, {13593, 29696}, {12976, 30720}, {12400, 31744}, {11848, 32768}, {11324, 33792}, {10825, 34816}, {10354, 35840}, {9900, 36864}, {9471, 37888}, {9062, 38912}, {8674, 39936}, {8306, 40960}, {7951, 41984}, {7616, 43008}, {7296, 44032}, {6991, 45056}, {6701, 46080}, {6424, 47104}, {6160, 48128}, {5908, 49152}, {5667, 50176}, {5439, 51200}, {5219, 52224}, {5010, 53248}, {4810, 54272}, {4619, 55296}, {4440, 56320}, {4263, 57344}, {4097, 58368}, {3938, 59392}, {3785, 60416}, {3637, 61440}, {3501, 62464}, {3368, 63488}, {3240, 64512}, {3118, 65536}, {2998, 66560}, {2889, 67584}, {2782, 68608}, {2680, 69632}, {2581, 70656}, {2490, 71680}, {2397, 72704}, {2310, 73728}, {2227, 74752}, {2147, 75776}, {2064, 76800}, {1998, 77824}, {1927, 78848}, {1860, 79872}, {1795, 80896}, {1736, 81920}, {1673, 82944}, {1615, 83968}, {1560, 84992}, {1507, 86016}, {1456, 87040}, {1407, 88064}, {1360, 89088}, {1314, 90112}, {1271, 91136}, {1228, 92160}, {1189, 93184}, {1150, 94208}, {1112, 95232}, {1076, 96256}, {1042, 97280}, {1008, 98304}, {976, 99328}, {945, 100352}, {915, 101376}, {886, 102400}, {859, 103424}, {832, 104448}, {807, 105472}, {782, 106496}, {756, 107520}, {735, 108544}, {712, 109568}, {691, 110592}, {670, 111616}, {650, 112640}, {631, 113664}, {612, 114688}, {594, 115712}, {577, 116736}, {560, 117760}, {544, 118784}, {528, 119808}, {513, 120832}, {498, 121856}, {483, 122880}, {470, 123904}, {457, 124928}, {444, 125952}, {431, 126976}, {419, 128000} }; static int32_t adc_map_linear(const struct adc_map_pt *pts, uint32_t tablesize, int32_t input, int64_t *output) { bool descending = 1; uint32_t i = 0; if ((pts == NULL) || (output == NULL)) return -EINVAL; /* Check if table is descending or 
ascending */ if (tablesize > 1) { if (pts[0].x < pts[1].x) descending = 0; } while (i < tablesize) { if ((descending == 1) && (pts[i].x < input)) { /* table entry is less than measured value and table is descending, stop */ break; } else if ((descending == 0) && (pts[i].x > input)) { /* table entry is greater than measured value and table is ascending, stop */ break; } else i++; } if (i == 0) *output = pts[0].y; else if (i == tablesize) *output = pts[tablesize-1].y; else { /* result is between search_index and search_index-1 */ /* interpolate linearly */ *output = (((int32_t) ((pts[i].y - pts[i-1].y)* (input - pts[i-1].x))/ (pts[i].x - pts[i-1].x))+ pts[i-1].y); } return 0; } int32_t scale_default(int32_t adc_code, const struct adc_properties *adc_properties, const struct chan_properties *chan_properties, struct adc_chan_result *adc_chan_result) { bool negative_rawfromoffset = 0; int32_t rawfromoffset = adc_code - chan_properties->adc_graph->offset; if (!chan_properties->gain_numerator || !chan_properties->gain_denominator) return -EINVAL; adc_chan_result->adc_code = adc_code; if (rawfromoffset < 0) { if (adc_properties->bipolar) { rawfromoffset = (rawfromoffset ^ -1) + 1; negative_rawfromoffset = 1; } else rawfromoffset = 0; } if (rawfromoffset >= 1 << adc_properties->bitresolution) rawfromoffset = (1 << adc_properties->bitresolution) - 1; adc_chan_result->measurement = (int64_t)rawfromoffset* chan_properties->adc_graph->dx* chan_properties->gain_denominator; /* do_div only perform positive integer division! */ do_div(adc_chan_result->measurement, chan_properties->adc_graph->dy* chan_properties->gain_numerator); if (negative_rawfromoffset) adc_chan_result->measurement = (adc_chan_result->measurement ^ -1) + 1; /* Note: adc_chan_result->measurement is in the unit of * adc_properties.adc_reference. 
For generic channel processing, * channel measurement is a scale/ratio relative to the adc * reference input */ adc_chan_result->physical = (int32_t) adc_chan_result->measurement; return 0; } int32_t scale_batt_therm(int32_t adc_code, const struct adc_properties *adc_properties, const struct chan_properties *chan_properties, struct adc_chan_result *adc_chan_result) { scale_default(adc_code, adc_properties, chan_properties, adc_chan_result); /* convert mV ---> degC using the table */ return adc_map_linear( adcmap_batttherm, sizeof(adcmap_batttherm)/sizeof(adcmap_batttherm[0]), adc_chan_result->physical, &adc_chan_result->physical); } int32_t scale_msm_therm(int32_t adc_code, const struct adc_properties *adc_properties, const struct chan_properties *chan_properties, struct adc_chan_result *adc_chan_result) { scale_default(adc_code, adc_properties, chan_properties, adc_chan_result); /* convert mV ---> degC using the table */ return adc_map_linear( adcmap_msmtherm, sizeof(adcmap_msmtherm)/sizeof(adcmap_msmtherm[0]), adc_chan_result->physical, &adc_chan_result->physical); } int32_t scale_pmic_therm(int32_t adc_code, const struct adc_properties *adc_properties, const struct chan_properties *chan_properties, struct adc_chan_result *adc_chan_result) { /* 2mV/K */ int32_t rawfromoffset = adc_code - chan_properties->adc_graph->offset; if (!chan_properties->gain_numerator || !chan_properties->gain_denominator) return -EINVAL; adc_chan_result->adc_code = adc_code; if (rawfromoffset > 0) { if (rawfromoffset >= 1 << adc_properties->bitresolution) rawfromoffset = (1 << adc_properties->bitresolution) - 1; adc_chan_result->measurement = (int64_t)rawfromoffset* chan_properties->adc_graph->dx* chan_properties->gain_denominator*1000; do_div(adc_chan_result->measurement, chan_properties->adc_graph->dy* chan_properties->gain_numerator*2); } else { adc_chan_result->measurement = 0; } /* Note: adc_chan_result->measurement is in the unit of adc_properties.adc_reference */ 
adc_chan_result->physical = (int32_t)adc_chan_result->measurement; /* Change to .001 deg C */ adc_chan_result->physical -= KELVINMIL_DEGMIL; adc_chan_result->measurement <<= 1; return 0; } /* Scales the ADC code to 0.001 degrees C using the map * table for the XO thermistor. */ int32_t tdkntcgtherm(int32_t adc_code, const struct adc_properties *adc_properties, const struct chan_properties *chan_properties, struct adc_chan_result *adc_chan_result) { int32_t offset = chan_properties->adc_graph->offset, dy = chan_properties->adc_graph->dy, dx = chan_properties->adc_graph->dx, fullscale_calibrated_adc_code; uint32_t rt_r25; uint32_t num1, num2, denom; adc_chan_result->adc_code = adc_code; fullscale_calibrated_adc_code = dy + offset; /* The above is a short cut in math that would reduce a lot of computation whereas the below expression (adc_properties->adc_reference*dy+dx*offset+(dx>>1))/dx is a more generic formula when the 2 reference voltages are different than 0 and full scale voltage. */ if ((dy == 0) || (dx == 0) || (offset >= fullscale_calibrated_adc_code)) { return -EINVAL; } else { if (adc_code >= fullscale_calibrated_adc_code) { rt_r25 = (uint32_t)-1; } else if (adc_code <= offset) { rt_r25 = 0; } else { /* The formula used is (adc_code of current reading - offset)/ * (the calibrated fullscale adc code - adc_code of current reading). * For this channel, at this time, chan_properties->gain_numerator = * chan_properties->gain_denominator = 1, so no need to incorporate * into the formula even though we could and multiply/divide by 1 * which yields the same result but expensive on computation. 
*/ num1 = (adc_code - offset) << 14; num2 = (fullscale_calibrated_adc_code - adc_code) >> 1; denom = fullscale_calibrated_adc_code - adc_code; if ((int)denom <= 0) rt_r25 = 0x7FFFFFFF; else rt_r25 = (num1 + num2) / denom; } if (rt_r25 > 0x7FFFFFFF) rt_r25 = 0x7FFFFFFF; adc_map_linear(adcmap_ntcg104ef104fb, sizeof(adcmap_ntcg104ef104fb)/sizeof(adcmap_ntcg104ef104fb[0]), (int32_t)rt_r25, &adc_chan_result->physical); } return 0; } int32_t scale_xtern_chgr_cur(int32_t adc_code, const struct adc_properties *adc_properties, const struct chan_properties *chan_properties, struct adc_chan_result *adc_chan_result) { int32_t rawfromoffset = adc_code - chan_properties->adc_graph->offset; if (!chan_properties->gain_numerator || !chan_properties->gain_denominator) return -EINVAL; adc_chan_result->adc_code = adc_code; if (rawfromoffset > 0) { if (rawfromoffset >= 1 << adc_properties->bitresolution) rawfromoffset = (1 << adc_properties->bitresolution) - 1; adc_chan_result->measurement = ((int64_t)rawfromoffset * 5)* chan_properties->adc_graph->dx* chan_properties->gain_denominator; do_div(adc_chan_result->measurement, chan_properties->adc_graph->dy* chan_properties->gain_numerator); } else { adc_chan_result->measurement = 0; } adc_chan_result->physical = (int32_t) adc_chan_result->measurement; return 0; }
gpl-2.0
wulsic/android_kernel_samsung_kylevess
drivers/staging/speakup/speakup_soft.c
3956
9772
/* speakup_soft.c - speakup driver to register and make available * a user space device for software synthesizers. written by: Kirk * Reiser <kirk@braille.uwo.ca> * * Copyright (C) 2003 Kirk Reiser. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * this code is specificly written as a driver for the speakup screenreview * package and is not a general device driver. 
*/ #include <linux/unistd.h> #include <linux/miscdevice.h> /* for misc_register, and SYNTH_MINOR */ #include <linux/poll.h> /* for poll_wait() */ #include <linux/sched.h> /* schedule(), signal_pending(), TASK_INTERRUPTIBLE */ #include "spk_priv.h" #include "speakup.h" #define DRV_VERSION "2.6" #define SOFTSYNTH_MINOR 26 /* might as well give it one more than /dev/synth */ #define PROCSPEECH 0x0d #define CLEAR_SYNTH 0x18 static int softsynth_probe(struct spk_synth *synth); static void softsynth_release(void); static int softsynth_is_alive(struct spk_synth *synth); static unsigned char get_index(void); static struct miscdevice synth_device; static int initialized; static int misc_registered; static struct var_t vars[] = { { CAPS_START, .u.s = {"\x01+3p" } }, { CAPS_STOP, .u.s = {"\x01-3p" } }, { RATE, .u.n = {"\x01%ds", 5, 0, 9, 0, 0, NULL } }, { PITCH, .u.n = {"\x01%dp", 5, 0, 9, 0, 0, NULL } }, { VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } }, { TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } }, { PUNCT, .u.n = {"\x01%db", 0, 0, 2, 0, 0, NULL } }, { VOICE, .u.n = {"\x01%do", 0, 0, 7, 0, 0, NULL } }, { FREQUENCY, .u.n = {"\x01%df", 5, 0, 9, 0, 0, NULL } }, { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; /* * These attributes will appear in /sys/accessibility/speakup/soft. 
*/ static struct kobj_attribute caps_start_attribute = __ATTR(caps_start, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute caps_stop_attribute = __ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute freq_attribute = __ATTR(freq, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute pitch_attribute = __ATTR(pitch, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute punct_attribute = __ATTR(punct, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute rate_attribute = __ATTR(rate, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute tone_attribute = __ATTR(tone, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute voice_attribute = __ATTR(voice, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute vol_attribute = __ATTR(vol, USER_RW, spk_var_show, spk_var_store); /* * We should uncomment the following definition, when we agree on a * method of passing a language designation to the software synthesizer. * static struct kobj_attribute lang_attribute = * __ATTR(lang, USER_RW, spk_var_show, spk_var_store); */ static struct kobj_attribute delay_time_attribute = __ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute direct_attribute = __ATTR(direct, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute full_time_attribute = __ATTR(full_time, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute jiffy_delta_attribute = __ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute trigger_time_attribute = __ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store); /* * Create a group of attributes so that we can create and destroy them all * at once. 
*/ static struct attribute *synth_attrs[] = { &caps_start_attribute.attr, &caps_stop_attribute.attr, &freq_attribute.attr, /* &lang_attribute.attr, */ &pitch_attribute.attr, &punct_attribute.attr, &rate_attribute.attr, &tone_attribute.attr, &voice_attribute.attr, &vol_attribute.attr, &delay_time_attribute.attr, &direct_attribute.attr, &full_time_attribute.attr, &jiffy_delta_attribute.attr, &trigger_time_attribute.attr, NULL, /* need to NULL terminate the list of attributes */ }; static struct spk_synth synth_soft = { .name = "soft", .version = DRV_VERSION, .long_name = "software synth", .init = "\01@\x01\x31y\n", .procspeech = PROCSPEECH, .delay = 0, .trigger = 0, .jiffies = 0, .full = 0, .startup = SYNTH_START, .checkval = SYNTH_CHECK, .vars = vars, .probe = softsynth_probe, .release = softsynth_release, .synth_immediate = NULL, .catch_up = NULL, .flush = NULL, .is_alive = softsynth_is_alive, .synth_adjust = NULL, .read_buff_add = NULL, .get_index = get_index, .indexing = { .command = "\x01%di", .lowindex = 1, .highindex = 5, .currindex = 1, }, .attributes = { .attrs = synth_attrs, .name = "soft", }, }; static char *get_initstring(void) { static char buf[40]; char *cp; struct var_t *var; memset(buf, 0, sizeof(buf)); cp = buf; var = synth_soft.vars; while (var->var_id != MAXVARS) { if (var->var_id != CAPS_START && var->var_id != CAPS_STOP && var->var_id != DIRECT) cp = cp + sprintf(cp, var->u.n.synth_fmt, var->u.n.value); var++; } cp = cp + sprintf(cp, "\n"); return buf; } static int softsynth_open(struct inode *inode, struct file *fp) { unsigned long flags; /*if ((fp->f_flags & O_ACCMODE) != O_RDONLY) */ /* return -EPERM; */ spk_lock(flags); if (synth_soft.alive) { spk_unlock(flags); return -EBUSY; } synth_soft.alive = 1; spk_unlock(flags); return 0; } static int softsynth_close(struct inode *inode, struct file *fp) { unsigned long flags; spk_lock(flags); synth_soft.alive = 0; initialized = 0; spk_unlock(flags); /* Make sure we let applications go before leaving 
*/ speakup_start_ttys(); return 0; } static ssize_t softsynth_read(struct file *fp, char *buf, size_t count, loff_t *pos) { int chars_sent = 0; char *cp; char *init; char ch; int empty; unsigned long flags; DEFINE_WAIT(wait); spk_lock(flags); while (1) { prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE); if (!synth_buffer_empty() || speakup_info.flushing) break; spk_unlock(flags); if (fp->f_flags & O_NONBLOCK) { finish_wait(&speakup_event, &wait); return -EAGAIN; } if (signal_pending(current)) { finish_wait(&speakup_event, &wait); return -ERESTARTSYS; } schedule(); spk_lock(flags); } finish_wait(&speakup_event, &wait); cp = buf; init = get_initstring(); while (chars_sent < count) { if (speakup_info.flushing) { speakup_info.flushing = 0; ch = '\x18'; } else if (synth_buffer_empty()) { break; } else if (!initialized) { if (*init) { ch = *init; init++; } else { initialized = 1; } } else { ch = synth_buffer_getc(); } spk_unlock(flags); if (copy_to_user(cp, &ch, 1)) return -EFAULT; spk_lock(flags); chars_sent++; cp++; } *pos += chars_sent; empty = synth_buffer_empty(); spk_unlock(flags); if (empty) { speakup_start_ttys(); *pos = 0; } return chars_sent; } static int last_index; static ssize_t softsynth_write(struct file *fp, const char *buf, size_t count, loff_t *pos) { unsigned long supplied_index = 0; int converted; converted = kstrtoul_from_user(buf, count, 0, &supplied_index); if (converted < 0) return converted; last_index = supplied_index; return count; } static unsigned int softsynth_poll(struct file *fp, struct poll_table_struct *wait) { unsigned long flags; int ret = 0; poll_wait(fp, &speakup_event, wait); spk_lock(flags); if (!synth_buffer_empty() || speakup_info.flushing) ret = POLLIN | POLLRDNORM; spk_unlock(flags); return ret; } static unsigned char get_index(void) { int rv; rv = last_index; last_index = 0; return rv; } static const struct file_operations softsynth_fops = { .owner = THIS_MODULE, .poll = softsynth_poll, .read = softsynth_read, .write 
= softsynth_write, .open = softsynth_open, .release = softsynth_close, }; static int softsynth_probe(struct spk_synth *synth) { if (misc_registered != 0) return 0; memset(&synth_device, 0, sizeof(synth_device)); synth_device.minor = SOFTSYNTH_MINOR; synth_device.name = "softsynth"; synth_device.fops = &softsynth_fops; if (misc_register(&synth_device)) { pr_warn("Couldn't initialize miscdevice /dev/softsynth.\n"); return -ENODEV; } misc_registered = 1; pr_info("initialized device: /dev/softsynth, node (MAJOR 10, MINOR 26)\n"); return 0; } static void softsynth_release(void) { misc_deregister(&synth_device); misc_registered = 0; pr_info("unregistered /dev/softsynth\n"); } static int softsynth_is_alive(struct spk_synth *synth) { if (synth_soft.alive) return 1; return 0; } module_param_named(start, synth_soft.startup, short, S_IRUGO); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); static int __init soft_init(void) { return synth_add(&synth_soft); } static void __exit soft_exit(void) { synth_remove(&synth_soft); } module_init(soft_init); module_exit(soft_exit); MODULE_AUTHOR("Kirk Reiser <kirk@braille.uwo.ca>"); MODULE_DESCRIPTION("Speakup userspace software synthesizer support"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION);
gpl-2.0
tripleoxygen/kernel_zeebo
drivers/video/omap/lcd_htcherald.c
4212
3186
/* * File: drivers/video/omap/lcd-htcherald.c * * LCD panel support for the HTC Herald * * Copyright (C) 2009 Cory Maccarrone <darkstar6262@gmail.com> * Copyright (C) 2009 Wing Linux * * Based on the lcd_htcwizard.c file from the linwizard project: * Copyright (C) linwizard.sourceforge.net * Author: Angelo Arrifano <miknix@gmail.com> * Based on lcd_h4 by Imre Deak <imre.deak@nokia.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ #include <linux/module.h> #include <linux/platform_device.h> #include "omapfb.h" static int htcherald_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev) { return 0; } static void htcherald_panel_cleanup(struct lcd_panel *panel) { } static int htcherald_panel_enable(struct lcd_panel *panel) { return 0; } static void htcherald_panel_disable(struct lcd_panel *panel) { } static unsigned long htcherald_panel_get_caps(struct lcd_panel *panel) { return 0; } /* Found on WIZ200 (miknix) and some HERA110 models (darkstar62) */ struct lcd_panel htcherald_panel_1 = { .name = "lcd_herald", .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_HSYNC | OMAP_LCDC_INV_VSYNC | OMAP_LCDC_INV_PIX_CLOCK, .bpp = 16, .data_lines = 16, .x_res = 240, .y_res = 320, .pixel_clock = 6093, .pcd = 0, /* 15 */ .hsw = 10, .hfp = 10, .hbp = 20, .vsw = 3, .vfp = 2, .vbp = 2, .init = htcherald_panel_init, .cleanup = htcherald_panel_cleanup, .enable = htcherald_panel_enable, .disable = htcherald_panel_disable, .get_caps = htcherald_panel_get_caps, }; static int htcherald_panel_probe(struct platform_device *pdev) { omapfb_register_panel(&htcherald_panel_1); return 0; } static int htcherald_panel_remove(struct platform_device *pdev) { return 0; } static int htcherald_panel_suspend(struct platform_device *pdev, pm_message_t mesg) { return 0; } static int htcherald_panel_resume(struct platform_device *pdev) { return 0; } struct platform_driver htcherald_panel_driver = { .probe = htcherald_panel_probe, .remove = htcherald_panel_remove, .suspend = htcherald_panel_suspend, .resume = htcherald_panel_resume, .driver = { .name = "lcd_htcherald", .owner = THIS_MODULE, }, }; static int __init htcherald_panel_drv_init(void) { return platform_driver_register(&htcherald_panel_driver); } static void __exit htcherald_panel_drv_cleanup(void) { platform_driver_unregister(&htcherald_panel_driver); } module_init(htcherald_panel_drv_init); module_exit(htcherald_panel_drv_cleanup);
gpl-2.0
Stuxnet-Kernel/kernel_g3
arch/s390/kernel/vdso.c
4468
8391
/*
 * vdso setup for s390
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>

/* 31-bit vDSO image boundaries (provided by the vdso32 linker script). */
#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif

/* 64-bit vDSO image boundaries (provided by the vdso64 linker script). */
#ifdef CONFIG_64BIT
extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;
#endif /* CONFIG_64BIT */

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;

/*
 * Parse the "vdso=" kernel command-line option: "on", "off", or a
 * numeric value (any non-zero enables).  Returns non-zero on success,
 * as __setup() handlers do.
 */
static int __init vdso_setup(char *s)
{
	unsigned long val;
	int rc;

	rc = 0;
	if (strncmp(s, "on", 3) == 0)
		vdso_enabled = 1;
	else if (strncmp(s, "off", 4) == 0)
		vdso_enabled = 0;
	else {
		rc = strict_strtoul(s, 0, &val);
		vdso_enabled = rc ? 0 : !!val;
	}
	return !rc;
}
__setup("vdso=", vdso_setup);

/*
 * The vdso data page
 */
static union {
	struct vdso_data data;
	u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

/*
 * Setup vdso data page.
 */
static void vdso_init_data(struct vdso_data *vd)
{
	/* ECTG is only usable outside home-space mode and when facility
	 * bit 31 is present; presumably the vdso fast path checks this
	 * flag — TODO confirm against the vdso assembly. */
	vd->ectg_available = user_mode != HOME_SPACE_MODE && test_facility(31);
}

#ifdef CONFIG_64BIT
/*
 * Allocate/free per cpu vdso data.
 */
#define SEGMENT_ORDER	2

/*
 * Build the per-cpu vdso mapping for one lowcore: a private segment
 * table -> page table -> data page, plus the PSAL/ASTE pair used for
 * access-register mode addressing.  Returns 0 or -ENOMEM.
 */
int vdso_alloc_per_cpu(struct _lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	u32 *psal, *aste;
	int i;

	lowcore->vdso_per_cpu_data = __LC_PASTE;

	if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
		return 0;

	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
	page_table = get_zeroed_page(GFP_KERNEL | GFP_DMA);
	page_frame = get_zeroed_page(GFP_KERNEL);
	/* free_page()/free_pages() tolerate 0, so a partial failure is
	 * safe to unwind in one place. */
	if (!segment_table || !page_table || !page_frame)
		goto out;

	clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
		    PAGE_SIZE << SEGMENT_ORDER);
	clear_table((unsigned long *) page_table, _PAGE_TYPE_EMPTY,
		    256*sizeof(unsigned long));

	/* Link segment table -> page table -> read-only data frame. */
	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
	*(unsigned long *) page_table = _PAGE_RO + page_frame;

	/* PSAL and ASTE live in the otherwise unused upper half of the
	 * page-table page. */
	psal = (u32 *) (page_table + 256*sizeof(unsigned long));
	aste = psal + 32;

	for (i = 4; i < 32; i += 4)
		psal[i] = 0x80000000;

	lowcore->paste[4] = (u32)(addr_t) psal;
	psal[0] = 0x20000000;
	psal[2] = (u32)(addr_t) aste;
	*(unsigned long *) (aste + 2) = segment_table +
		_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
	aste[4] = (u32)(addr_t) psal;
	lowcore->vdso_per_cpu_data = page_frame;

	return 0;

out:
	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
	return -ENOMEM;
}

/*
 * Tear down the per-cpu vdso mapping built by vdso_alloc_per_cpu();
 * walks back from the PSAL stored in lowcore->paste[4].
 */
void vdso_free_per_cpu(struct _lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	u32 *psal, *aste;

	if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
		return;

	psal = (u32 *)(addr_t) lowcore->paste[4];
	aste = (u32 *)(addr_t) psal[2];
	segment_table = *(unsigned long *)(aste + 2) & PAGE_MASK;
	page_table = *(unsigned long *) segment_table;
	page_frame = *(unsigned long *) page_table;

	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
}

/* Point control register 5 at the lowcore PASTE array. */
static void vdso_init_cr5(void)
{
	unsigned long cr5;

	if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
		return;
	cr5 = offsetof(struct _lowcore, paste);
	__ctl_load(cr5, 5, 5);
}
#endif /* CONFIG_64BIT */

/*
 * This is called from binfmt_elf, we create the special vma for the
 * vDSO and insert it into the mm struct tree
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct page **vdso_pagelist;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

	if (!vdso_enabled)
		return 0;
	/*
	 * Only map the vdso for dynamically linked elf binaries.
	 */
	if (!uses_interp)
		return 0;

#ifdef CONFIG_64BIT
	vdso_pagelist = vdso64_pagelist;
	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
	/* Compat (31-bit) tasks get the 32-bit image instead. */
	if (is_compat_task()) {
		vdso_pagelist = vdso32_pagelist;
		vdso_pages = vdso32_pages;
	}
#endif
#else
	vdso_pagelist = vdso32_pagelist;
	vdso_pages = vdso32_pages;
#endif

	/*
	 * vDSO has a problem and was disabled, just don't "enable" it for
	 * the process
	 */
	if (vdso_pages == 0)
		return 0;

	current->mm->context.vdso_base = 0;

	/*
	 * pick a base address for the vDSO in process space. We try to put
	 * it at vdso_base which is the "natural" base for it, but we might
	 * fail and end up putting it elsewhere.
	 */
	down_write(&mm->mmap_sem);
	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto out_up;
	}

	/*
	 * Put vDSO base into mm struct. We need to do this before calling
	 * install_special_mapping or the perf counter mmap tracking code
	 * will fail to recognise it as a vDSO (since arch_vma_name fails).
	 */
	current->mm->context.vdso_base = vdso_base;

	/*
	 * our vma flags don't have VM_WRITE so by default, the process
	 * isn't allowed to write those pages.
	 * gdb can break that with ptrace interface, and thus trigger COW
	 * on those pages but it's then your responsibility to never do that
	 * on the "data" page of the vDSO or you'll stop getting kernel
	 * updates and your nice userland gettimeofday will be totally dead.
	 * It's fine to use that for setting breakpoints in the vDSO code
	 * pages though.
	 */
	rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				     VM_READ|VM_EXEC|
				     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				     vdso_pagelist);
	if (rc)
		current->mm->context.vdso_base = 0;
out_up:
	up_write(&mm->mmap_sem);
	return rc;
}

/* Name the vdso vma "[vdso]" in /proc/<pid>/maps and perf. */
const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
		return "[vdso]";
	return NULL;
}

/*
 * Build the page lists for the vdso image(s).  Each list ends with the
 * shared vdso data page followed by a NULL terminator, which is why the
 * page counts below include one extra page.
 */
static int __init vdso_init(void)
{
	int i;

	if (!vdso_enabled)
		return 0;
	vdso_init_data(vdso_data);
#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
	/* Calculate the size of the 32 bit vDSO */
	vdso32_pages = ((&vdso32_end - &vdso32_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso32_pagelist == NULL);
	for (i = 0; i < vdso32_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso32_pagelist[i] = pg;
	}
	vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
	vdso32_pagelist[vdso32_pages] = NULL;
#endif

#ifdef CONFIG_64BIT
	/* Calculate the size of the 64 bit vDSO */
	vdso64_pages = ((&vdso64_end - &vdso64_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso64_pagelist == NULL);
	for (i = 0; i < vdso64_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso64_pagelist[i] = pg;
	}
	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
	vdso64_pagelist[vdso64_pages] = NULL;
	if (vdso_alloc_per_cpu(&S390_lowcore))
		BUG();
	vdso_init_cr5();
#endif /* CONFIG_64BIT */

	get_page(virt_to_page(vdso_data));

	smp_wmb();

	return 0;
}
early_initcall(vdso_init);

/* s390 has no gate area; the stubs below say so. */
int in_gate_area_no_mm(unsigned long addr)
{
	return 0;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}
gpl-2.0
spegelius/android_kernel_samsung_jf
arch/mips/bcm63xx/setup.c
4468
3283
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/pm.h>
#include <asm/bootinfo.h>
#include <asm/time.h>
#include <asm/reboot.h>
#include <asm/cacheflush.h>
#include <bcm63xx_board.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>

/* Final halt state: announce it and spin forever. */
void bcm63xx_machine_halt(void)
{
	printk(KERN_INFO "System halted\n");
	while (1)
		;
}

/*
 * BCM6348 rev A1 cannot use the watchdog/PLL soft-reset path, so
 * soft-reset every peripheral block by hand and jump back to the
 * power-on vector with caches flushed and the TLB unwired.
 */
static void bcm6348_a1_reboot(void)
{
	u32 reg;

	/* soft reset all blocks */
	printk(KERN_INFO "soft-resetting all blocks ...\n");
	reg = bcm_perf_readl(PERF_SOFTRESET_REG);
	reg &= ~SOFTRESET_6348_ALL;
	bcm_perf_writel(reg, PERF_SOFTRESET_REG);
	mdelay(10);

	reg = bcm_perf_readl(PERF_SOFTRESET_REG);
	reg |= SOFTRESET_6348_ALL;
	bcm_perf_writel(reg, PERF_SOFTRESET_REG);
	mdelay(10);

	/* Jump to the power on address. */
	printk(KERN_INFO "jumping to reset vector.\n");
	/* set high vectors (base at 0xbfc00000 */
	set_c0_status(ST0_BEV | ST0_ERL);
	/* run uncached in kseg0 */
	change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);
	__flush_cache_all();
	/* remove all wired TLB entries */
	write_c0_wired(0);
	__asm__ __volatile__(
		"jr\t%0"
		:
		: "r" (0xbfc00000));
	while (1)
		;
}

/*
 * Reboot the board: mask and clear all external interrupts, then
 * trigger a watchdog soft-reset via the system PLL control register
 * (or the manual block-reset path on BCM6348 A1).
 */
void bcm63xx_machine_reboot(void)
{
	u32 reg, perf_regs[2] = { 0, 0 };
	unsigned int i;

	/* mask and clear all external irq */
	switch (bcm63xx_get_cpu_id()) {
	case BCM6338_CPU_ID:
		perf_regs[0] = PERF_EXTIRQ_CFG_REG_6338;
		break;
	case BCM6348_CPU_ID:
		perf_regs[0] = PERF_EXTIRQ_CFG_REG_6348;
		break;
	case BCM6358_CPU_ID:
		perf_regs[0] = PERF_EXTIRQ_CFG_REG_6358;
		break;
	}

	for (i = 0; i < 2; i++) {
		/*
		 * Only populated slots are valid; a zero entry would
		 * read-modify-write PERF register offset 0, which is not
		 * an EXTIRQ config register at all.
		 */
		if (!perf_regs[i])
			break;
		reg = bcm_perf_readl(perf_regs[i]);
		if (BCMCPU_IS_6348()) {
			reg &= ~EXTIRQ_CFG_MASK_ALL_6348;
			reg |= EXTIRQ_CFG_CLEAR_ALL_6348;
		} else {
			reg &= ~EXTIRQ_CFG_MASK_ALL;
			reg |= EXTIRQ_CFG_CLEAR_ALL;
		}
		bcm_perf_writel(reg, perf_regs[i]);
	}

	if (BCMCPU_IS_6348() && (bcm63xx_get_cpu_rev() == 0xa1))
		bcm6348_a1_reboot();

	printk(KERN_INFO "triggering watchdog soft-reset...\n");
	reg = bcm_perf_readl(PERF_SYS_PLL_CTL_REG);
	reg |= SYS_PLL_SOFT_RESET;
	bcm_perf_writel(reg, PERF_SYS_PLL_CTL_REG);
	while (1)
		;
}

/* _machine_restart hook; the restart command string is ignored. */
static void __bcm63xx_machine_reboot(char *p)
{
	bcm63xx_machine_reboot();
}

/*
 * return system type in /proc/cpuinfo
 */
const char *get_system_type(void)
{
	static char buf[128];

	snprintf(buf, sizeof(buf), "bcm63xx/%s (0x%04x/0x%04X)",
		 board_get_name(),
		 bcm63xx_get_cpu_id(), bcm63xx_get_cpu_rev());
	return buf;
}

/* The CP0 counter ticks at half the CPU clock. */
void __init plat_time_init(void)
{
	mips_hpt_frequency = bcm63xx_get_cpu_freq() / 2;
}

/* Register memory, reboot/halt hooks and I/O ranges, then let the
 * board code do its own setup. */
void __init plat_mem_setup(void)
{
	add_memory_region(0, bcm63xx_get_memory_size(), BOOT_MEM_RAM);

	_machine_halt = bcm63xx_machine_halt;
	_machine_restart = __bcm63xx_machine_reboot;
	pm_power_off = bcm63xx_machine_halt;

	set_io_port_base(0);
	ioport_resource.start = 0;
	ioport_resource.end = ~0;

	board_setup();
}

int __init bcm63xx_register_devices(void)
{
	return board_register_devices();
}

device_initcall(bcm63xx_register_devices);
gpl-2.0
CyanideL/android_kernel_lge_hammerhead
drivers/media/video/videobuf2-dma-sg.c
4724
6899
/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

/*
 * Per-buffer state for this allocator.
 *  - vaddr: lazily created kernel mapping (vm_map_ram), NULL until
 *    vb2_dma_sg_vaddr() is first called
 *  - pages: array of sg_desc.num_pages page pointers backing the buffer
 *  - write: for USERPTR buffers, whether pages must be marked dirty
 *  - offset: byte offset of the data within the first page (non-zero only
 *    for non-page-aligned USERPTR buffers)
 *  - refcount/handler: shared mmap refcounting via vb2_common_vm_ops
 */
struct vb2_dma_sg_buf {
	void				*vaddr;
	struct page			**pages;
	int				write;
	int				offset;
	struct vb2_dma_sg_desc		sg_desc;
	atomic_t			refcount;
	struct vb2_vmarea_handler	handler;
};

static void vb2_dma_sg_put(void *buf_priv);

/*
 * MMAP-type allocation: one zeroed page per sg entry, each entry exactly
 * PAGE_SIZE.  Returns the buffer handle or NULL; on any failure all
 * partially acquired resources are unwound via the fail_* labels.
 */
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	int i;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->write = 0;
	buf->offset = 0;
	buf->sg_desc.size = size;
	buf->sg_desc.num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	buf->sg_desc.sglist = vzalloc(buf->sg_desc.num_pages *
				      sizeof(*buf->sg_desc.sglist));
	if (!buf->sg_desc.sglist)
		goto fail_sglist_alloc;
	sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);

	buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	for (i = 0; i < buf->sg_desc.num_pages; ++i) {
		buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO |
					   __GFP_NOWARN);
		if (NULL == buf->pages[i])
			goto fail_pages_alloc;
		sg_set_page(&buf->sg_desc.sglist[i],
			    buf->pages[i], PAGE_SIZE, 0);
	}

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	printk(KERN_DEBUG "%s: Allocated buffer of %d pages\n",
		__func__, buf->sg_desc.num_pages);
	return buf;

fail_pages_alloc:
	/* i is the index that failed; free pages [0, i). */
	while (--i >= 0)
		__free_page(buf->pages[i]);
	kfree(buf->pages);

fail_pages_array_alloc:
	vfree(buf->sg_desc.sglist);

fail_sglist_alloc:
	kfree(buf);
	return NULL;
}

/* Drop one reference; free the whole buffer when it hits zero. */
static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->sg_desc.num_pages;

	if (atomic_dec_and_test(&buf->refcount)) {
		printk(KERN_DEBUG "%s: Freeing buffer of %d pages\n", __func__,
			buf->sg_desc.num_pages);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
		vfree(buf->sg_desc.sglist);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
		kfree(buf);
	}
}

/*
 * USERPTR-type buffer: pin the user pages covering [vaddr, vaddr+size)
 * with get_user_pages() and build an sg list over them.  The first and
 * last entries are trimmed to the actual byte range, since vaddr need
 * not be page-aligned.
 */
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size, int write)
{
	struct vb2_dma_sg_buf *buf;
	unsigned long first, last;
	int num_pages_from_user, i;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->write = write;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->sg_desc.size = size;

	first = (vaddr           & PAGE_MASK) >> PAGE_SHIFT;
	last  = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	buf->sg_desc.num_pages = last - first + 1;

	buf->sg_desc.sglist = vzalloc(
		buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
	if (!buf->sg_desc.sglist)
		goto userptr_fail_sglist_alloc;

	sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);

	buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto userptr_fail_pages_array_alloc;

	num_pages_from_user = get_user_pages(current, current->mm,
					     vaddr & PAGE_MASK,
					     buf->sg_desc.num_pages,
					     write,
					     1, /* force */
					     buf->pages,
					     NULL);

	if (num_pages_from_user != buf->sg_desc.num_pages)
		goto userptr_fail_get_user_pages;

	/* First entry: from the in-page offset to the end of that page. */
	sg_set_page(&buf->sg_desc.sglist[0], buf->pages[0],
		    PAGE_SIZE - buf->offset, buf->offset);
	size -= PAGE_SIZE - buf->offset;
	for (i = 1; i < buf->sg_desc.num_pages; ++i) {
		/* min_t trims the final (possibly partial) page. */
		sg_set_page(&buf->sg_desc.sglist[i], buf->pages[i],
			    min_t(size_t, PAGE_SIZE, size), 0);
		size -= min_t(size_t, PAGE_SIZE, size);
	}
	return buf;

userptr_fail_get_user_pages:
	printk(KERN_DEBUG "get_user_pages requested/got: %d/%d]\n",
	       num_pages_from_user, buf->sg_desc.num_pages);
	/* Release only the pages that were actually pinned. */
	while (--num_pages_from_user >= 0)
		put_page(buf->pages[num_pages_from_user]);
	kfree(buf->pages);

userptr_fail_pages_array_alloc:
	vfree(buf->sg_desc.sglist);

userptr_fail_sglist_alloc:
	kfree(buf);
	return NULL;
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->sg_desc.num_pages;

	printk(KERN_DEBUG "%s: Releasing userspace buffer of %d pages\n",
	       __func__, buf->sg_desc.num_pages);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
	while (--i >= 0) {
		/* If the device wrote into the buffer, tell the MM the
		 * pages are dirty before unpinning them. */
		if (buf->write)
			set_page_dirty_lock(buf->pages[i]);
		put_page(buf->pages[i]);
	}
	vfree(buf->sg_desc.sglist);
	kfree(buf->pages);
	kfree(buf);
}

/*
 * Return a kernel virtual address for the buffer, creating the
 * vm_map_ram mapping on first use.
 */
static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	BUG_ON(!buf);

	if (!buf->vaddr)
		buf->vaddr = vm_map_ram(buf->pages,
					buf->sg_desc.num_pages,
					-1,
					PAGE_KERNEL);

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr + buf->offset;
}

/* Current reference count (mmap mappings hold references too). */
static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

/*
 * Map the buffer's pages into a userspace vma page by page with
 * vm_insert_page(); the vma is expected to span the whole buffer.
 */
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int i = 0;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	do {
		int ret;

		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
		if (ret) {
			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
			return ret;
		}

		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);


	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

/* Hand the sg descriptor to drivers that want to DMA directly. */
static void *vb2_dma_sg_cookie(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return &buf->sg_desc;
}

const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
	.vaddr		= vb2_dma_sg_vaddr,
	.mmap		= vb2_dma_sg_mmap,
	.num_users	= vb2_dma_sg_num_users,
	.cookie		= vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");
gpl-2.0
elliott-wen/S7270-Kernel
drivers/net/wireless/ath/ath6kl/sdio.c
4724
37491
/* * Copyright (c) 2004-2011 Atheros Communications Inc. * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/module.h> #include <linux/mmc/card.h> #include <linux/mmc/mmc.h> #include <linux/mmc/host.h> #include <linux/mmc/sdio_func.h> #include <linux/mmc/sdio_ids.h> #include <linux/mmc/sdio.h> #include <linux/mmc/sd.h> #include "hif.h" #include "hif-ops.h" #include "target.h" #include "debug.h" #include "cfg80211.h" struct ath6kl_sdio { struct sdio_func *func; /* protects access to bus_req_freeq */ spinlock_t lock; /* free list */ struct list_head bus_req_freeq; /* available bus requests */ struct bus_request bus_req[BUS_REQUEST_MAX_NUM]; struct ath6kl *ar; u8 *dma_buffer; /* protects access to dma_buffer */ struct mutex dma_buffer_mutex; /* scatter request list head */ struct list_head scat_req; atomic_t irq_handling; wait_queue_head_t irq_wq; /* protects access to scat_req */ spinlock_t scat_lock; bool scatter_enabled; bool is_disabled; const struct sdio_device_id *id; struct work_struct wr_async_work; struct list_head wr_asyncq; /* protects access to wr_asyncq */ spinlock_t wr_async_lock; }; #define CMD53_ARG_READ 0 #define CMD53_ARG_WRITE 1 #define CMD53_ARG_BLOCK_BASIS 1 #define CMD53_ARG_FIXED_ADDRESS 0 #define CMD53_ARG_INCR_ADDRESS 1 
/* Fetch the HIF-private state hanging off the core ath6kl structure. */
static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar)
{
	return ar->hif_priv;
}

/*
 * Macro to check if DMA buffer is WORD-aligned and DMA-able.
 * Most host controllers assume the buffer is DMA'able and will
 * bug-check otherwise (i.e. buffers on the stack). virt_addr_valid
 * check fails on stack memory.
 */
static inline bool buf_needs_bounce(u8 *buf)
{
	return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf);
}

/* Record the fixed mailbox window addresses/sizes used by the HTC layer. */
static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar)
{
	struct ath6kl_mbox_info *mbox_info = &ar->mbox_info;

	/* EP1 has an extended range */
	mbox_info->htc_addr = HIF_MBOX_BASE_ADDR;
	mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR;
	mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH;

	mbox_info->block_size = HIF_MBOX_BLOCK_SIZE;
	mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR;
	mbox_info->gmbox_sz = HIF_GMBOX_WIDTH;
}

/* Pack a CMD53 (IO_RW_EXTENDED) argument word from its bit fields. */
static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func,
					     u8 mode, u8 opcode, u32 addr,
					     u16 blksz)
{
	*arg = (((rw & 1) << 31) |
		((func & 0x7) << 28) |
		((mode & 1) << 27) |
		((opcode & 1) << 26) |
		((addr & 0x1FFFF) << 9) |
		(blksz & 0x1FF));
}

/* Pack a CMD52 (IO_RW_DIRECT) argument word; always targets function 0. */
static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
					     unsigned int address,
					     unsigned char val)
{
	const u8 func = 0;

	*arg = ((write & 1) << 31) |
	       ((func & 0x7) << 28) |
	       ((raw & 1) << 27) |
	       (1 << 26) |
	       ((address & 0x1FFFF) << 9) |
	       (1 << 8) |
	       (val & 0xFF);
}

/* Issue a raw CMD52 write of one byte to an SDIO function-0 register. */
static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
					   unsigned int address,
					   unsigned char byte)
{
	struct mmc_command io_cmd;

	memset(&io_cmd, 0, sizeof(io_cmd));
	ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
	io_cmd.opcode = SD_IO_RW_DIRECT;
	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &io_cmd, 0);
}

/*
 * Synchronous SDIO transfer. Picks the byte-basis (fixed address) or
 * incrementing-address MMC helper based on the HIF request flags.
 */
static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
			  u8 *buf, u32 len)
{
	int ret = 0;

	sdio_claim_host(func);

	if (request & HIF_WRITE) {
		/* FIXME: looks like ugly workaround for something */
		if (addr >= HIF_MBOX_BASE_ADDR &&
		    addr <= HIF_MBOX_END_ADDR)
			addr += (HIF_MBOX_WIDTH - len);

		/* FIXME: this also looks like ugly workaround */
		if (addr == HIF_MBOX0_EXT_BASE_ADDR)
			addr += HIF_MBOX0_EXT_WIDTH - len;

		if (request & HIF_FIXED_ADDRESS)
			ret = sdio_writesb(func, addr, buf, len);
		else
			ret = sdio_memcpy_toio(func, addr, buf, len);
	} else {
		if (request & HIF_FIXED_ADDRESS)
			ret = sdio_readsb(func, buf, addr, len);
		else
			ret = sdio_memcpy_fromio(func, buf, addr, len);
	}

	sdio_release_host(func);

	ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n",
		   request & HIF_WRITE ? "wr" : "rd", addr,
		   request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
	ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len);

	return ret;
}

/* Take a bus request from the free pool; NULL if the pool is empty. */
static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
{
	struct bus_request *bus_req;

	spin_lock_bh(&ar_sdio->lock);

	if (list_empty(&ar_sdio->bus_req_freeq)) {
		spin_unlock_bh(&ar_sdio->lock);
		return NULL;
	}

	bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
				   struct bus_request, list);
	list_del(&bus_req->list);

	spin_unlock_bh(&ar_sdio->lock);
	ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
		   __func__, bus_req);

	return bus_req;
}

/* Return a bus request to the free pool. */
static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
				     struct bus_request *bus_req)
{
	ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
		   __func__, bus_req);

	spin_lock_bh(&ar_sdio->lock);
	list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
	spin_unlock_bh(&ar_sdio->lock);
}

/* Translate a HIF scatter request into an mmc_data scatter/gather list. */
static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
					struct mmc_data *data)
{
	struct scatterlist *sg;
	int i;

	data->blksz = HIF_MBOX_BLOCK_SIZE;
	data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE;

	ath6kl_dbg(ATH6KL_DBG_SCATTER,
		   "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d) , (tot:%d,sg:%d)\n",
		   (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr,
		   data->blksz, data->blocks, scat_req->len,
		   scat_req->scat_entries);

	data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE :
						    MMC_DATA_READ;

	/* fill SG entries */
	sg = scat_req->sgentries;
	sg_init_table(sg, scat_req->scat_entries);

	/* assemble SG list */
	for (i = 0; i < scat_req->scat_entries; i++, sg++) {
		ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n",
			   i, scat_req->scat_list[i].buf,
			   scat_req->scat_list[i].len);

		sg_set_buf(sg, scat_req->scat_list[i].buf,
			   scat_req->scat_list[i].len);
	}

	/* set scatter-gather table for request */
	data->sg = scat_req->sgentries;
	data->sg_len = scat_req->scat_entries;
}

/*
 * Execute one scatter request, either via the bounce buffer (virtual
 * scatter) or as a real CMD53 scatter/gather MMC request.
 * [continues past this span]
 */
static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
			       struct bus_request *req)
{
	struct mmc_request mmc_req;
	struct mmc_command cmd;
	struct mmc_data data;
	struct hif_scatter_req *scat_req;
	u8 opcode, rw;
	int status, len;

	scat_req = req->scat_req;

	if (scat_req->virt_scat) {
		/* virtual scatter: data was coalesced into one linear buffer */
		len = scat_req->len;
		if (scat_req->req & HIF_BLOCK_BASIS)
			len = round_down(len, HIF_MBOX_BLOCK_SIZE);

		status = ath6kl_sdio_io(ar_sdio->func, scat_req->req,
					scat_req->addr, scat_req->virt_dma_buf,
					len);
		goto scat_complete;
	}

	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));

	ath6kl_sdio_setup_scat_data(scat_req, &data);

	opcode = (scat_req->req & HIF_FIXED_ADDRESS) ?
		  CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS;

	rw = (scat_req->req & HIF_WRITE) ?
	     CMD53_ARG_WRITE : CMD53_ARG_READ;

	/* Fixup the address so that the last byte will fall on MBOX EOM */
	if (scat_req->req & HIF_WRITE) {
		if (scat_req->addr == HIF_MBOX_BASE_ADDR)
			scat_req->addr += HIF_MBOX_WIDTH - scat_req->len;
		else
			/* Uses extended address range */
			scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len;
	}

	/* set command argument */
	ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num,
				  CMD53_ARG_BLOCK_BASIS, opcode,
				  scat_req->addr, data.blocks);

	cmd.opcode = SD_IO_RW_EXTENDED;
	cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;

	mmc_req.cmd = &cmd;
	mmc_req.data = &data;

	sdio_claim_host(ar_sdio->func);

	mmc_set_data_timeout(&data, ar_sdio->func->card);

	/* synchronous call to process request */
	mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);

	sdio_release_host(ar_sdio->func);

	status = cmd.error ? cmd.error : data.error;

scat_complete:
	scat_req->status = status;

	if (scat_req->status)
		ath6kl_err("Scatter write request failed:%d\n",
			   scat_req->status);

	if (scat_req->req & HIF_ASYNCHRONOUS)
		scat_req->complete(ar_sdio->ar->htc_target, scat_req);

	return status;
}

/*
 * Pre-allocate n_scat_req scatter requests (each able to hold
 * n_scat_entry entries) and park them in the HIF scatter pool.
 * With virt_scat, a linear bounce buffer replaces the sg list.
 */
static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
					   int n_scat_entry, int n_scat_req,
					   bool virt_scat)
{
	struct hif_scatter_req *s_req;
	struct bus_request *bus_req;
	int i, scat_req_sz, scat_list_sz, sg_sz, buf_sz;
	u8 *virt_buf;

	/* hif_scatter_req ends in a one-element scat_list array */
	scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item);
	scat_req_sz = sizeof(*s_req) + scat_list_sz;

	if (!virt_scat)
		sg_sz = sizeof(struct scatterlist) * n_scat_entry;
	else
		buf_sz = 2 * L1_CACHE_BYTES +
			 ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;

	for (i = 0; i < n_scat_req; i++) {
		/* allocate the scatter request */
		s_req = kzalloc(scat_req_sz, GFP_KERNEL);
		if (!s_req)
			return -ENOMEM;

		if (virt_scat) {
			virt_buf = kzalloc(buf_sz, GFP_KERNEL);
			if (!virt_buf) {
				kfree(s_req);
				return -ENOMEM;
			}

			/* hand out a cache-line aligned view of the buffer */
			s_req->virt_dma_buf =
				(u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf);
		} else {
			/* allocate sglist */
			s_req->sgentries = kzalloc(sg_sz, GFP_KERNEL);

			if (!s_req->sgentries) {
				kfree(s_req);
				return -ENOMEM;
			}
		}

		/* allocate a bus request for this scatter request */
		bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
		if (!bus_req) {
			kfree(s_req->sgentries);
			kfree(s_req->virt_dma_buf);
			kfree(s_req);
			return -ENOMEM;
		}

		/* assign the scatter request to this bus request */
		bus_req->scat_req = s_req;
		s_req->busrequest = bus_req;

		s_req->virt_scat = virt_scat;

		/* add it to the scatter pool */
		hif_scatter_req_add(ar_sdio->ar, s_req);
	}

	return 0;
}

/*
 * Synchronous HIF read/write. Bounces through dma_buffer when the
 * caller's buffer is misaligned or not DMA-safe (see buf_needs_bounce).
 */
static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
				       u32 len, u32 request)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	u8 *tbuf = NULL;
	int ret;
	bool bounced = false;

	if (request & HIF_BLOCK_BASIS)
		len = round_down(len, HIF_MBOX_BLOCK_SIZE);

	if (buf_needs_bounce(buf)) {
		if (!ar_sdio->dma_buffer)
			return -ENOMEM;
		/* NOTE(review): no check that len fits HIF_DMA_BUFFER_SIZE --
		 * callers presumably never exceed it; confirm upstream. */
		mutex_lock(&ar_sdio->dma_buffer_mutex);
		tbuf = ar_sdio->dma_buffer;

		if (request & HIF_WRITE)
			memcpy(tbuf, buf, len);

		bounced = true;
	} else
		tbuf = buf;

	ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
	if ((request & HIF_READ) && bounced)
		memcpy(buf, tbuf, len);

	if (bounced)
		mutex_unlock(&ar_sdio->dma_buffer_mutex);

	return ret;
}

/* Complete one queued async request: scatter path or plain sync I/O. */
static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
				      struct bus_request *req)
{
	if (req->scat_req)
		ath6kl_sdio_scat_rw(ar_sdio, req);
	else {
		void *context;
		int status;

		status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address,
						     req->buffer, req->length,
						     req->request);
		context = req->packet;
		ath6kl_sdio_free_bus_req(ar_sdio, req);
		ath6kl_hif_rw_comp_handler(context, status);
	}
}

/*
 * Workqueue handler draining wr_asyncq. The lock is dropped around each
 * request so completion callbacks run without wr_async_lock held.
 */
static void ath6kl_sdio_write_async_work(struct work_struct *work)
{
	struct ath6kl_sdio *ar_sdio;
	struct bus_request *req, *tmp_req;

	ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);

	spin_lock_bh(&ar_sdio->wr_async_lock);
	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		list_del(&req->list);
		spin_unlock_bh(&ar_sdio->wr_async_lock);
		__ath6kl_sdio_write_async(ar_sdio, req);
		spin_lock_bh(&ar_sdio->wr_async_lock);
	}
	spin_unlock_bh(&ar_sdio->wr_async_lock);
}

/* SDIO irq callback; runs with the host claimed by the mmc core. */
static void ath6kl_sdio_irq_handler(struct sdio_func *func)
{
	int status;
	struct ath6kl_sdio *ar_sdio;

	ath6kl_dbg(ATH6KL_DBG_SDIO, "irq\n");

	ar_sdio = sdio_get_drvdata(func);
	atomic_set(&ar_sdio->irq_handling, 1);
	/*
	 * Release the host during interrupts so we can pick it back up when
	 * we process commands.
	 */
	sdio_release_host(ar_sdio->func);

	status = ath6kl_hif_intr_bh_handler(ar_sdio->ar);
	sdio_claim_host(ar_sdio->func);

	/* signal ath6kl_sdio_irq_disable() that the handler is done */
	atomic_set(&ar_sdio->irq_handling, 0);
	wake_up(&ar_sdio->irq_wq);

	WARN_ON(status && status != -ECANCELED);
}

/* Enable the SDIO function and wait for the firmware side to settle. */
static int ath6kl_sdio_power_on(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret = 0;

	if (!ar_sdio->is_disabled)
		return 0;

	ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power on\n");

	sdio_claim_host(func);

	ret = sdio_enable_func(func);
	if (ret) {
		ath6kl_err("Unable to enable sdio func: %d)\n", ret);
		sdio_release_host(func);
		return ret;
	}

	sdio_release_host(func);

	/*
	 * Wait for hardware to initialise. It should take a lot less than
	 * 10 ms but let's be conservative here.
	 */
	msleep(10);

	ar_sdio->is_disabled = false;

	return ret;
}

/* Disable the SDIO function and mark the device powered down. */
static int ath6kl_sdio_power_off(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	int ret;

	if (ar_sdio->is_disabled)
		return 0;

	ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power off\n");

	/* Disable the card */
	sdio_claim_host(ar_sdio->func);
	ret = sdio_disable_func(ar_sdio->func);
	sdio_release_host(ar_sdio->func);

	if (ret)
		return ret;

	ar_sdio->is_disabled = true;

	return ret;
}

/*
 * Queue an asynchronous write; the transfer itself is performed by
 * ath6kl_sdio_write_async_work() on the ath6kl workqueue.
 */
static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
				   u32 length, u32 request,
				   struct htc_packet *packet)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct bus_request *bus_req;

	bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);

	if (!bus_req)
		return -ENOMEM;

	bus_req->address = address;
	bus_req->buffer = buffer;
	bus_req->length = length;
	bus_req->request = request;
	bus_req->packet = packet;

	spin_lock_bh(&ar_sdio->wr_async_lock);
	list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
	spin_unlock_bh(&ar_sdio->wr_async_lock);
	queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);

	return 0;
}

/* Claim the SDIO irq and hook up ath6kl_sdio_irq_handler(). */
static void ath6kl_sdio_irq_enable(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	int ret;

	sdio_claim_host(ar_sdio->func);

	/* Register the isr */
	ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler);
	if (ret)
		ath6kl_err("Failed to claim sdio irq: %d\n", ret);

	sdio_release_host(ar_sdio->func);
}

/* True once the irq handler has finished (wait condition for disable). */
static bool ath6kl_sdio_is_on_irq(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);

	return !atomic_read(&ar_sdio->irq_handling);
}

/*
 * Release the SDIO irq, first waiting (interruptibly) for any
 * in-flight irq handler to complete so we don't deadlock on the host.
 */
static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	int ret;

	sdio_claim_host(ar_sdio->func);

	if (atomic_read(&ar_sdio->irq_handling)) {
		/* the handler releases/reclaims the host, so drop it here */
		sdio_release_host(ar_sdio->func);

		ret = wait_event_interruptible(ar_sdio->irq_wq,
					       ath6kl_sdio_is_on_irq(ar));
		if (ret)
			return;

		sdio_claim_host(ar_sdio->func);
	}

	ret = sdio_release_irq(ar_sdio->func);
	if (ret)
		ath6kl_err("Failed to release sdio irq: %d\n", ret);

	sdio_release_host(ar_sdio->func);
}

/* Pop a free scatter request from the pool; NULL when exhausted. */
static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct hif_scatter_req *node = NULL;

	spin_lock_bh(&ar_sdio->scat_lock);

	if (!list_empty(&ar_sdio->scat_req)) {
		node = list_first_entry(&ar_sdio->scat_req,
					struct hif_scatter_req, list);
		list_del(&node->list);

		node->scat_q_depth = get_queue_depth(&ar_sdio->scat_req);
	}

	spin_unlock_bh(&ar_sdio->scat_lock);

	return node;
}

/* Return a scatter request to the pool. */
static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
					struct hif_scatter_req *s_req)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);

	spin_lock_bh(&ar_sdio->scat_lock);

	list_add_tail(&s_req->list, &ar_sdio->scat_req);

	spin_unlock_bh(&ar_sdio->scat_lock);
}

/* scatter gather read write request */
static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
					struct hif_scatter_req *scat_req)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	u32 request = scat_req->req;
	int status = 0;

	if (!scat_req->len)
		return -EINVAL;

	ath6kl_dbg(ATH6KL_DBG_SCATTER,
		   "hif-scatter: total len: %d scatter entries: %d\n",
		   scat_req->len, scat_req->scat_entries);

	if (request & HIF_SYNCHRONOUS)
		status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
	else {
		/* defer to the async work queue */
		spin_lock_bh(&ar_sdio->wr_async_lock);
		list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
		spin_unlock_bh(&ar_sdio->wr_async_lock);
		queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
	}

	return status;
}

/* clean up scatter support */
static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct hif_scatter_req *s_req, *tmp_req;

	/* empty the free list */
	spin_lock_bh(&ar_sdio->scat_lock);
	list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
		list_del(&s_req->list);
		spin_unlock_bh(&ar_sdio->scat_lock);

		/*
		 * FIXME: should we also call completion handler with
		 * ath6kl_hif_rw_comp_handler() with status -ECANCELED so
		 * that the packet is properly freed?
		 */
		if (s_req->busrequest)
			ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
		kfree(s_req->virt_dma_buf);
		kfree(s_req->sgentries);
		kfree(s_req);

		spin_lock_bh(&ar_sdio->scat_lock);
	}
	spin_unlock_bh(&ar_sdio->scat_lock);
}

/* setup of HIF scatter resources */
static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct htc_target *target = ar->htc_target;
	int ret;
	bool virt_scat = false;

	if (ar_sdio->scatter_enabled)
		return 0;

	ar_sdio->scatter_enabled = true;

	/* check if host supports scatter and it meets our requirements */
	if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
		ath6kl_err("host only supports scatter of :%d entries, need: %d\n",
			   ar_sdio->func->card->host->max_segs,
			   MAX_SCATTER_ENTRIES_PER_REQ);
		virt_scat = true;
	}

	if (!virt_scat) {
		ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
				MAX_SCATTER_ENTRIES_PER_REQ,
				MAX_SCATTER_REQUESTS, virt_scat);

		if (!ret) {
			ath6kl_dbg(ATH6KL_DBG_BOOT,
				   "hif-scatter enabled requests %d entries %d\n",
				   MAX_SCATTER_REQUESTS,
				   MAX_SCATTER_ENTRIES_PER_REQ);

			target->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ;
			target->max_xfer_szper_scatreq =
						MAX_SCATTER_REQ_TRANSFER_SIZE;
		} else {
			/* fall back to virtual (bounce-buffer) scatter */
			ath6kl_sdio_cleanup_scatter(ar);
			ath6kl_warn("hif scatter resource setup failed, trying virtual scatter method\n");
		}
	}

	if (virt_scat || ret) {
		ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
				ATH6KL_SCATTER_ENTRIES_PER_REQ,
				ATH6KL_SCATTER_REQS, virt_scat);

		if (ret) {
			ath6kl_err("failed to alloc virtual scatter resources !\n");
			ath6kl_sdio_cleanup_scatter(ar);
			return ret;
		}

		ath6kl_dbg(ATH6KL_DBG_BOOT,
			   "virtual scatter enabled requests %d entries %d\n",
			   ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);

		target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
		target->max_xfer_szper_scatreq =
					ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
	}

	return 0;
}

/* One-time SDIO function setup: irq mode, enable timeout, block size. */
static int ath6kl_sdio_config(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct sdio_func *func
			       = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
	    MANUFACTURER_ID_AR6003_BASE) {
		/* enable 4-bit ASYNC interrupt on AR6003 or later */
		ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
						CCCR_SDIO_IRQ_MODE_REG,
						SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
		if (ret) {
			ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
				   ret);
			goto out;
		}

		ath6kl_dbg(ATH6KL_DBG_BOOT, "4-bit async irq mode enabled\n");
	}

	/* give us some time to enable, in ms */
	func->enable_timeout = 100;

	ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
	if (ret) {
		ath6kl_err("Set sdio block size %d failed: %d)\n",
			   HIF_MBOX_BLOCK_SIZE, ret);
		goto out;
	}

out:
	sdio_release_host(func);

	return ret;
}

/*
 * Request keep-power + SDIO-irq wakeup from the MMC host; fails with
 * -EINVAL when the host cannot provide both capabilities.
 */
static int ath6kl_set_sdio_pm_caps(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	mmc_pm_flag_t flags;
	int ret;

	flags = sdio_get_host_pm_caps(func);

	ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags);

	if (!(flags & MMC_PM_WAKE_SDIO_IRQ) ||
	    !(flags & MMC_PM_KEEP_POWER))
		return -EINVAL;

	ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
	if (ret) {
		ath6kl_err("set sdio keep pwr flag failed: %d\n", ret);
		return ret;
	}

	/* sdio irq wakes up host */
	ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
	if (ret)
		ath6kl_err("set sdio wake irq flag failed: %d\n", ret);

	return ret;
}

/*
 * Suspend entry point. Tries, in order: sched-scan keepalive, WoW,
 * deep sleep; falls back to cutting power when none of them can be
 * arranged with the host controller.
 */
static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	mmc_pm_flag_t flags;
	bool try_deepsleep = false;
	int ret;

	if (ar->state == ATH6KL_STATE_SCHED_SCAN) {
		ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sched scan is in progress\n");

		ret = ath6kl_set_sdio_pm_caps(ar);
		if (ret)
			goto cut_pwr;

		ret = ath6kl_cfg80211_suspend(ar,
					      ATH6KL_CFG_SUSPEND_SCHED_SCAN,
					      NULL);
		if (ret)
			goto cut_pwr;

		return 0;
	}

	if (ar->suspend_mode == WLAN_POWER_STATE_WOW ||
	    (!ar->suspend_mode && wow)) {

		ret = ath6kl_set_sdio_pm_caps(ar);
		if (ret)
			goto cut_pwr;

		ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow);
		if (ret && ret != -ENOTCONN)
			ath6kl_err("wow suspend failed: %d\n", ret);

		/* on WoW failure, pick the configured fallback */
		if (ret &&
		    (!ar->wow_suspend_mode ||
		     ar->wow_suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP))
			try_deepsleep = true;
		else if (ret &&
			 ar->wow_suspend_mode == WLAN_POWER_STATE_CUT_PWR)
			goto cut_pwr;
		if (!ret)
			return 0;
	}

	if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP ||
	    !ar->suspend_mode || try_deepsleep) {

		flags = sdio_get_host_pm_caps(func);
		if (!(flags & MMC_PM_KEEP_POWER))
			goto cut_pwr;

		ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
		if (ret)
			goto cut_pwr;

		/*
		 * Workaround to support Deep Sleep with MSM, set the host pm
		 * flag as MMC_PM_WAKE_SDIO_IRQ to allow SDCC driver to disable
		 * the sdc2_clock and internally allows MSM to enter
		 * TCXO shutdown properly.
		 */
		if ((flags & MMC_PM_WAKE_SDIO_IRQ)) {
			ret = sdio_set_host_pm_flags(func,
						MMC_PM_WAKE_SDIO_IRQ);
			if (ret)
				goto cut_pwr;
		}

		ret = ath6kl_cfg80211_suspend(ar,
					      ATH6KL_CFG_SUSPEND_DEEPSLEEP,
					      NULL);
		if (ret)
			goto cut_pwr;

		return 0;
	}

cut_pwr:
	return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER, NULL);
}

/* Resume entry point; re-runs SDIO config only after a power cut. */
static int ath6kl_sdio_resume(struct ath6kl *ar)
{
	switch (ar->state) {
	case ATH6KL_STATE_OFF:
	case ATH6KL_STATE_CUTPOWER:
		ath6kl_dbg(ATH6KL_DBG_SUSPEND,
			   "sdio resume configuring sdio\n");

		/* need to set sdio settings after power is cut from sdio */
		ath6kl_sdio_config(ar);
		break;

	case ATH6KL_STATE_ON:
		break;

	case ATH6KL_STATE_DEEPSLEEP:
		break;

	case ATH6KL_STATE_WOW:
		break;

	case ATH6KL_STATE_SCHED_SCAN:
		break;

	case ATH6KL_STATE_SUSPENDING:
		break;

	case ATH6KL_STATE_RESUMING:
		break;
	}

	ath6kl_cfg80211_resume(ar);

	return 0;
}

/* set the window address register (using 4-byte register access ). */
static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
{
	int status;
	u8 addr_val[4];
	s32 i;

	/*
	 * Write bytes 1,2,3 of the register to set the upper address bytes,
	 * the LSB is written last to initiate the access cycle
	 */
	for (i = 1; i <= 3; i++) {
		/*
		 * Fill the buffer with the address byte value we want to
		 * hit 4 times.
		 */
		memset(addr_val, ((u8 *)&addr)[i], 4);

		/*
		 * Hit each byte of the register address with a 4-byte
		 * write operation to the same address, this is a harmless
		 * operation.
		 */
		status = ath6kl_sdio_read_write_sync(ar, reg_addr + i, addr_val,
						     4, HIF_WR_SYNC_BYTE_FIX);
		if (status)
			break;
	}

	if (status) {
		ath6kl_err("%s: failed to write initial bytes of 0x%x "
			   "to window reg: 0x%X\n", __func__,
			   addr, reg_addr);
		return status;
	}

	/*
	 * Write the address register again, this time write the whole
	 * 4-byte value. The effect here is that the LSB write causes the
	 * cycle to start, the extra 3 byte write to bytes 1,2,3 has no
	 * effect since we are writing the same values again
	 */
	status = ath6kl_sdio_read_write_sync(ar, reg_addr, (u8 *)(&addr),
				     4, HIF_WR_SYNC_BYTE_INC);

	if (status) {
		ath6kl_err("%s: failed to write 0x%x to window reg: 0x%X\n",
			   __func__, addr, reg_addr);
		return status;
	}

	return 0;
}

/* Diagnostic 32-bit read via the target's read address window. */
static int ath6kl_sdio_diag_read32(struct ath6kl *ar, u32 address, u32 *data)
{
	int status;

	/* set window register to start read cycle */
	status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS,
					address);
	if (status)
		return status;

	/* read the data */
	status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
				(u8 *)data, sizeof(u32), HIF_RD_SYNC_BYTE_INC);
	if (status) {
		ath6kl_err("%s: failed to read from window data addr\n",
			   __func__);
		return status;
	}

	return status;
}

/* Diagnostic 32-bit write: stage the data, then latch the address. */
static int ath6kl_sdio_diag_write32(struct ath6kl *ar, u32 address,
				    __le32 data)
{
	int status;
	u32 val = (__force u32) data;

	/* set write data */
	status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
				(u8 *) &val, sizeof(u32), HIF_WR_SYNC_BYTE_INC);
	if (status) {
		ath6kl_err("%s: failed to write 0x%x to window data addr\n",
			   __func__, data);
		return status;
	}

	/* set window register, which starts the write cycle */
	return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS,
				      address);
}

/*
 * Poll the BMI command-credit counter until the target grants a credit
 * (or BMI_COMMUNICATION_TIMEOUT ms elapse).
 */
static int ath6kl_sdio_bmi_credits(struct ath6kl *ar)
{
	u32 addr;
	unsigned long timeout;
	int ret;

	ar->bmi.cmd_credits = 0;

	/* Read the counter register to get the command credits */
	addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;

	timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
	while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {

		/*
		 * Hit the credit counter with a 4-byte access, the first byte
		 * read will hit the counter and cause a decrement, while the
		 * remaining 3 bytes has no effect. The rationale behind this
		 * is to make all HIF accesses 4-byte aligned.
		 */
		ret = ath6kl_sdio_read_write_sync(ar, addr,
					 (u8 *)&ar->bmi.cmd_credits, 4,
					 HIF_RD_SYNC_BYTE_INC);
		if (ret) {
			ath6kl_err("Unable to decrement the command credit "
						"count register: %d\n", ret);
			return ret;
		}

		/* The counter is only 8 bits.
		 * Ignore anything in the upper 3 bytes
		 */
		ar->bmi.cmd_credits &= 0xFF;
	}

	if (!ar->bmi.cmd_credits) {
		ath6kl_err("bmi communication timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/* Poll until the target signals that BMI response data is in the FIFO. */
static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar)
{
	unsigned long timeout;
	u32 rx_word = 0;
	int ret = 0;

	timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
	while ((time_before(jiffies, timeout)) && !rx_word) {
		ret = ath6kl_sdio_read_write_sync(ar,
					RX_LOOKAHEAD_VALID_ADDRESS,
					(u8 *)&rx_word, sizeof(rx_word),
					HIF_RD_SYNC_BYTE_INC);
		if (ret) {
			ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
			return ret;
		}

		 /* all we really want is one bit */
		rx_word &= (1 << ENDPOINT1);
	}

	if (!rx_word) {
		ath6kl_err("bmi_recv_buf FIFO empty\n");
		return -EINVAL;
	}

	return ret;
}

/* Send a BMI command buffer after waiting for a command credit. */
static int ath6kl_sdio_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
{
	int ret;
	u32 addr;

	ret = ath6kl_sdio_bmi_credits(ar);
	if (ret)
		return ret;

	addr = ar->mbox_info.htc_addr;

	ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
					  HIF_WR_SYNC_BYTE_INC);
	if (ret)
		ath6kl_err("unable to send the bmi data to the device\n");

	return ret;
}

/* Receive a BMI response buffer from the target mailbox. */
static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
{
	int ret;
	u32 addr;

	/*
	 * During normal bootup, small reads may be required.
	 * Rather than issue an HIF Read and then wait as the Target
	 * adds successive bytes to the FIFO, we wait here until
	 * we know that response data is available.
	 *
	 * This allows us to cleanly timeout on an unexpected
	 * Target failure rather than risk problems at the HIF level.
	 * In particular, this avoids SDIO timeouts and possibly garbage
	 * data on some host controllers. And on an interconnect
	 * such as Compact Flash (as well as some SDIO masters) which
	 * does not provide any indication on data timeout, it avoids
	 * a potential hang or garbage response.
	 *
	 * Synchronization is more difficult for reads larger than the
	 * size of the MBOX FIFO (128B), because the Target is unable
	 * to push the 129th byte of data until AFTER the Host posts an
	 * HIF Read and removes some FIFO data. So for large reads the
	 * Host proceeds to post an HIF Read BEFORE all the data is
	 * actually available to read. Fortunately, large BMI reads do
	 * not occur in practice -- they're supported for debug/development.
	 *
	 * So Host/Target BMI synchronization is divided into these cases:
	 *  CASE 1: length < 4
	 *	Should not happen
	 *
	 *  CASE 2: 4 <= length <= 128
	 *	Wait for first 4 bytes to be in FIFO
	 *	If CONSERVATIVE_BMI_READ is enabled, also wait for
	 *	a BMI command credit, which indicates that the ENTIRE
	 *	response is available in the FIFO
	 *
	 *  CASE 3: length > 128
	 *	Wait for the first 4 bytes to be in FIFO
	 *
	 * For most uses, a small timeout should be sufficient and we will
	 * usually see a response quickly; but there may be some unusual
	 * (debug) cases of BMI_EXECUTE where we want an larger timeout.
	 * For now, we use an unbounded busy loop while waiting for
	 * BMI_EXECUTE.
	 *
	 * If BMI_EXECUTE ever needs to support longer-latency execution,
	 * especially in production, this code needs to be enhanced to sleep
	 * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
	 * a function of Host processor speed.
	 */
	if (len >= 4) { /* NB: Currently, always true */
		ret = ath6kl_bmi_get_rx_lkahd(ar);
		if (ret)
			return ret;
	}

	addr = ar->mbox_info.htc_addr;
	ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
					  HIF_RD_SYNC_BYTE_INC);
	if (ret) {
		ath6kl_err("Unable to read the bmi data from the device: %d\n",
			   ret);
		return ret;
	}

	return 0;
}

/* Cancel all pending async requests with -ECANCELED during teardown. */
static void ath6kl_sdio_stop(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct bus_request *req, *tmp_req;
	void *context;

	/* FIXME: make sure that wq is not queued again */

	cancel_work_sync(&ar_sdio->wr_async_work);

	spin_lock_bh(&ar_sdio->wr_async_lock);

	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		list_del(&req->list);

		if (req->scat_req) {
			/* this is a scatter gather request */
			req->scat_req->status = -ECANCELED;
			req->scat_req->complete(ar_sdio->ar->htc_target,
						req->scat_req);
		} else {
			context = req->packet;
			ath6kl_sdio_free_bus_req(ar_sdio, req);
			ath6kl_hif_rw_comp_handler(context, -ECANCELED);
		}
	}

	spin_unlock_bh(&ar_sdio->wr_async_lock);

	/* NOTE(review): asserts all 4 scatter requests are back in the
	 * pool -- presumably matches MAX_SCATTER_REQUESTS; confirm. */
	WARN_ON(get_queue_depth(&ar_sdio->scat_req) != 4);
}

/* HIF operations vtable handed to the ath6kl core. */
static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
	.read_write_sync = ath6kl_sdio_read_write_sync,
	.write_async = ath6kl_sdio_write_async,
	.irq_enable = ath6kl_sdio_irq_enable,
	.irq_disable = ath6kl_sdio_irq_disable,
	.scatter_req_get = ath6kl_sdio_scatter_req_get,
	.scatter_req_add = ath6kl_sdio_scatter_req_add,
	.enable_scatter = ath6kl_sdio_enable_scatter,
	.scat_req_rw = ath6kl_sdio_async_rw_scatter,
	.cleanup_scatter = ath6kl_sdio_cleanup_scatter,
	.suspend = ath6kl_sdio_suspend,
	.resume = ath6kl_sdio_resume,
	.diag_read32 = ath6kl_sdio_diag_read32,
	.diag_write32 = ath6kl_sdio_diag_write32,
	.bmi_read = ath6kl_sdio_bmi_read,
	.bmi_write = ath6kl_sdio_bmi_write,
	.power_on = ath6kl_sdio_power_on,
	.power_off = ath6kl_sdio_power_off,
	.stop = ath6kl_sdio_stop,
};

#ifdef CONFIG_PM_SLEEP

/*
 * Empty handlers so that mmc subsystem doesn't remove us entirely during
 * suspend.
 * We instead follow cfg80211 suspend/resume handlers.
 */
static int ath6kl_sdio_pm_suspend(struct device *device)
{
	ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm suspend\n");

	return 0;
}

static int ath6kl_sdio_pm_resume(struct device *device)
{
	ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm resume\n");

	return 0;
}

static SIMPLE_DEV_PM_OPS(ath6kl_sdio_pm_ops, ath6kl_sdio_pm_suspend,
			 ath6kl_sdio_pm_resume);

#define ATH6KL_SDIO_PM_OPS (&ath6kl_sdio_pm_ops)

#else

#define ATH6KL_SDIO_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/*
 * Device probe: allocate HIF state and the DMA bounce buffer, seed the
 * bus-request pool, create the ath6kl core and start it.
 */
static int ath6kl_sdio_probe(struct sdio_func *func,
			     const struct sdio_device_id *id)
{
	int ret;
	struct ath6kl_sdio *ar_sdio;
	struct ath6kl *ar;
	int count;

	ath6kl_dbg(ATH6KL_DBG_BOOT,
		   "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
		   func->num, func->vendor, func->device,
		   func->max_blksize, func->cur_blksize);

	ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL);
	if (!ar_sdio)
		return -ENOMEM;

	ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL);
	if (!ar_sdio->dma_buffer) {
		ret = -ENOMEM;
		goto err_hif;
	}

	ar_sdio->func = func;
	sdio_set_drvdata(func, ar_sdio);

	ar_sdio->id = id;
	ar_sdio->is_disabled = true;

	spin_lock_init(&ar_sdio->lock);
	spin_lock_init(&ar_sdio->scat_lock);
	spin_lock_init(&ar_sdio->wr_async_lock);
	mutex_init(&ar_sdio->dma_buffer_mutex);

	INIT_LIST_HEAD(&ar_sdio->scat_req);
	INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
	INIT_LIST_HEAD(&ar_sdio->wr_asyncq);

	INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work);

	init_waitqueue_head(&ar_sdio->irq_wq);

	/* place every statically-allocated bus request on the free list */
	for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
		ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);

	ar = ath6kl_core_create(&ar_sdio->func->dev);
	if (!ar) {
		ath6kl_err("Failed to alloc ath6kl core\n");
		ret = -ENOMEM;
		goto err_dma;
	}

	ar_sdio->ar = ar;
	ar->hif_type = ATH6KL_HIF_TYPE_SDIO;
	ar->hif_priv = ar_sdio;
	ar->hif_ops = &ath6kl_sdio_ops;
	ar->bmi.max_data_size = 256;

	ath6kl_sdio_set_mbox_info(ar);

	ret = ath6kl_sdio_config(ar);
	if (ret) {
		ath6kl_err("Failed to config sdio: %d\n", ret);
		goto err_core_alloc;
	}

	ret = ath6kl_core_init(ar);
	if (ret) {
		ath6kl_err("Failed to init ath6kl core\n");
		goto err_core_alloc;
	}

	return ret;

err_core_alloc:
	ath6kl_core_destroy(ar_sdio->ar);
err_dma:
	kfree(ar_sdio->dma_buffer);
err_hif:
	kfree(ar_sdio);

	return ret;
}

/* Device removal: stop tx/rx, flush async work, tear the core down. */
static void ath6kl_sdio_remove(struct sdio_func *func)
{
	struct ath6kl_sdio *ar_sdio;

	ath6kl_dbg(ATH6KL_DBG_BOOT,
		   "sdio removed func %d vendor 0x%x device 0x%x\n",
		   func->num, func->vendor, func->device);

	ar_sdio = sdio_get_drvdata(func);

	ath6kl_stop_txrx(ar_sdio->ar);
	cancel_work_sync(&ar_sdio->wr_async_work);

	ath6kl_core_cleanup(ar_sdio->ar);
	ath6kl_core_destroy(ar_sdio->ar);

	kfree(ar_sdio->dma_buffer);
	kfree(ar_sdio);
}

/* Supported AR6003/AR6004 SDIO ids. */
static const struct sdio_device_id ath6kl_sdio_devices[] = {
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))},
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))},
	{},
};

MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);

static struct sdio_driver ath6kl_sdio_driver = {
	.name = "ath6kl_sdio",
	.id_table = ath6kl_sdio_devices,
	.probe = ath6kl_sdio_probe,
	.remove = ath6kl_sdio_remove,
	.drv.pm = ATH6KL_SDIO_PM_OPS,
};

static int __init ath6kl_sdio_init(void)
{
	int ret;

	ret = sdio_register_driver(&ath6kl_sdio_driver);
	if (ret)
		ath6kl_err("sdio driver registration failed: %d\n", ret);

	return ret;
}

static void __exit ath6kl_sdio_exit(void)
{
	sdio_unregister_driver(&ath6kl_sdio_driver);
}

module_init(ath6kl_sdio_init);
module_exit(ath6kl_sdio_exit);

MODULE_AUTHOR("Atheros Communications, Inc.");
MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
MODULE_LICENSE("Dual BSD/GPL");

MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_PATCH_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_OTP_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_PATCH_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_FW_DIR "/" AR6004_HW_1_0_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_FW_DIR "/" AR6004_HW_1_1_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);
gpl-2.0
Ander-Alvarez/UltraPluscondor
drivers/macintosh/windfarm_pm121.c
4980
25373
/* * Windfarm PowerMac thermal control. iMac G5 iSight * * (c) Copyright 2007 Étienne Bersac <bersace@gmail.com> * * Bits & pieces from windfarm_pm81.c by (c) Copyright 2005 Benjamin * Herrenschmidt, IBM Corp. <benh@kernel.crashing.org> * * Released under the term of the GNU GPL v2. * * * * PowerMac12,1 * ============ * * * The algorithm used is the PID control algorithm, used the same way * the published Darwin code does, using the same values that are * present in the Darwin 8.10 snapshot property lists (note however * that none of the code has been re-used, it's a complete * re-implementation * * There is two models using PowerMac12,1. Model 2 is iMac G5 iSight * 17" while Model 3 is iMac G5 20". They do have both the same * controls with a tiny difference. The control-ids of hard-drive-fan * and cpu-fan is swapped. * * * Target Correction : * * controls have a target correction calculated as : * * new_min = ((((average_power * slope) >> 16) + offset) >> 16) + min_value * new_value = max(new_value, max(new_min, 0)) * * OD Fan control correction. * * # model_id: 2 * offset : -19563152 * slope : 1956315 * * # model_id: 3 * offset : -15650652 * slope : 1565065 * * HD Fan control correction. * * # model_id: 2 * offset : -15650652 * slope : 1565065 * * # model_id: 3 * offset : -19563152 * slope : 1956315 * * CPU Fan control correction. * * # model_id: 2 * offset : -25431900 * slope : 2543190 * * # model_id: 3 * offset : -15650652 * slope : 1565065 * * * Target rubber-banding : * * Some controls have a target correction which depends on another * control value. The correction is computed in the following way : * * new_min = ref_value * slope + offset * * ref_value is the value of the reference control. 
If new_min is * greater than 0, then we correct the target value using : * * new_target = max (new_target, new_min >> 16) * * * # model_id : 2 * control : cpu-fan * ref : optical-drive-fan * offset : -15650652 * slope : 1565065 * * # model_id : 3 * control : optical-drive-fan * ref : hard-drive-fan * offset : -32768000 * slope : 65536 * * * In order to have the moste efficient correction with those * dependencies, we must trigger HD loop before OD loop before CPU * loop. * * * The various control loops found in Darwin config file are: * * HD Fan control loop. * * # model_id: 2 * control : hard-drive-fan * sensor : hard-drive-temp * PID params : G_d = 0x00000000 * G_p = 0x002D70A3 * G_r = 0x00019999 * History = 2 entries * Input target = 0x370000 * Interval = 5s * * # model_id: 3 * control : hard-drive-fan * sensor : hard-drive-temp * PID params : G_d = 0x00000000 * G_p = 0x002170A3 * G_r = 0x00019999 * History = 2 entries * Input target = 0x370000 * Interval = 5s * * OD Fan control loop. * * # model_id: 2 * control : optical-drive-fan * sensor : optical-drive-temp * PID params : G_d = 0x00000000 * G_p = 0x001FAE14 * G_r = 0x00019999 * History = 2 entries * Input target = 0x320000 * Interval = 5s * * # model_id: 3 * control : optical-drive-fan * sensor : optical-drive-temp * PID params : G_d = 0x00000000 * G_p = 0x001FAE14 * G_r = 0x00019999 * History = 2 entries * Input target = 0x320000 * Interval = 5s * * GPU Fan control loop. * * # model_id: 2 * control : hard-drive-fan * sensor : gpu-temp * PID params : G_d = 0x00000000 * G_p = 0x002A6666 * G_r = 0x00019999 * History = 2 entries * Input target = 0x5A0000 * Interval = 5s * * # model_id: 3 * control : cpu-fan * sensor : gpu-temp * PID params : G_d = 0x00000000 * G_p = 0x0010CCCC * G_r = 0x00019999 * History = 2 entries * Input target = 0x500000 * Interval = 5s * * KODIAK (aka northbridge) Fan control loop. 
* * # model_id: 2 * control : optical-drive-fan * sensor : north-bridge-temp * PID params : G_d = 0x00000000 * G_p = 0x003BD70A * G_r = 0x00019999 * History = 2 entries * Input target = 0x550000 * Interval = 5s * * # model_id: 3 * control : hard-drive-fan * sensor : north-bridge-temp * PID params : G_d = 0x00000000 * G_p = 0x0030F5C2 * G_r = 0x00019999 * History = 2 entries * Input target = 0x550000 * Interval = 5s * * CPU Fan control loop. * * control : cpu-fan * sensors : cpu-temp, cpu-power * PID params : from SDB partition * * * CPU Slew control loop. * * control : cpufreq-clamp * sensor : cpu-temp * */ #undef DEBUG #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/wait.h> #include <linux/kmod.h> #include <linux/device.h> #include <linux/platform_device.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/io.h> #include <asm/sections.h> #include <asm/smu.h> #include "windfarm.h" #include "windfarm_pid.h" #define VERSION "0.3" static int pm121_mach_model; /* machine model id */ /* Controls & sensors */ static struct wf_sensor *sensor_cpu_power; static struct wf_sensor *sensor_cpu_temp; static struct wf_sensor *sensor_cpu_voltage; static struct wf_sensor *sensor_cpu_current; static struct wf_sensor *sensor_gpu_temp; static struct wf_sensor *sensor_north_bridge_temp; static struct wf_sensor *sensor_hard_drive_temp; static struct wf_sensor *sensor_optical_drive_temp; static struct wf_sensor *sensor_incoming_air_temp; /* unused ! */ enum { FAN_CPU, FAN_HD, FAN_OD, CPUFREQ, N_CONTROLS }; static struct wf_control *controls[N_CONTROLS] = {}; /* Set to kick the control loop into life */ static int pm121_all_controls_ok, pm121_all_sensors_ok, pm121_started; enum { FAILURE_FAN = 1 << 0, FAILURE_SENSOR = 1 << 1, FAILURE_OVERTEMP = 1 << 2 }; /* All sys loops. Note the HD before the OD loop in order to have it run before. 
*/ enum { LOOP_GPU, /* control = hd or cpu, but luckily, it doesn't matter */ LOOP_HD, /* control = hd */ LOOP_KODIAK, /* control = hd or od */ LOOP_OD, /* control = od */ N_LOOPS }; static const char *loop_names[N_LOOPS] = { "GPU", "HD", "KODIAK", "OD", }; #define PM121_NUM_CONFIGS 2 static unsigned int pm121_failure_state; static int pm121_readjust, pm121_skipping; static s32 average_power; struct pm121_correction { int offset; int slope; }; static struct pm121_correction corrections[N_CONTROLS][PM121_NUM_CONFIGS] = { /* FAN_OD */ { /* MODEL 2 */ { .offset = -19563152, .slope = 1956315 }, /* MODEL 3 */ { .offset = -15650652, .slope = 1565065 }, }, /* FAN_HD */ { /* MODEL 2 */ { .offset = -15650652, .slope = 1565065 }, /* MODEL 3 */ { .offset = -19563152, .slope = 1956315 }, }, /* FAN_CPU */ { /* MODEL 2 */ { .offset = -25431900, .slope = 2543190 }, /* MODEL 3 */ { .offset = -15650652, .slope = 1565065 }, }, /* CPUFREQ has no correction (and is not implemented at all) */ }; struct pm121_connection { unsigned int control_id; unsigned int ref_id; struct pm121_correction correction; }; static struct pm121_connection pm121_connections[] = { /* MODEL 2 */ { .control_id = FAN_CPU, .ref_id = FAN_OD, { .offset = -32768000, .slope = 65536 } }, /* MODEL 3 */ { .control_id = FAN_OD, .ref_id = FAN_HD, { .offset = -32768000, .slope = 65536 } }, }; /* pointer to the current model connection */ static struct pm121_connection *pm121_connection; /* * ****** System Fans Control Loop ****** * */ /* Since each loop handles only one control and we want to avoid * writing virtual control, we store the control correction with the * loop params. Some data are not set, there are common to all loop * and thus, hardcoded. */ struct pm121_sys_param { /* purely informative since we use mach_model-2 as index */ int model_id; struct wf_sensor **sensor; /* use sensor_id instead ? 
*/ s32 gp, itarget; unsigned int control_id; }; static struct pm121_sys_param pm121_sys_all_params[N_LOOPS][PM121_NUM_CONFIGS] = { /* GPU Fan control loop */ { { .model_id = 2, .sensor = &sensor_gpu_temp, .gp = 0x002A6666, .itarget = 0x5A0000, .control_id = FAN_HD, }, { .model_id = 3, .sensor = &sensor_gpu_temp, .gp = 0x0010CCCC, .itarget = 0x500000, .control_id = FAN_CPU, }, }, /* HD Fan control loop */ { { .model_id = 2, .sensor = &sensor_hard_drive_temp, .gp = 0x002D70A3, .itarget = 0x370000, .control_id = FAN_HD, }, { .model_id = 3, .sensor = &sensor_hard_drive_temp, .gp = 0x002170A3, .itarget = 0x370000, .control_id = FAN_HD, }, }, /* KODIAK Fan control loop */ { { .model_id = 2, .sensor = &sensor_north_bridge_temp, .gp = 0x003BD70A, .itarget = 0x550000, .control_id = FAN_OD, }, { .model_id = 3, .sensor = &sensor_north_bridge_temp, .gp = 0x0030F5C2, .itarget = 0x550000, .control_id = FAN_HD, }, }, /* OD Fan control loop */ { { .model_id = 2, .sensor = &sensor_optical_drive_temp, .gp = 0x001FAE14, .itarget = 0x320000, .control_id = FAN_OD, }, { .model_id = 3, .sensor = &sensor_optical_drive_temp, .gp = 0x001FAE14, .itarget = 0x320000, .control_id = FAN_OD, }, }, }; /* the hardcoded values */ #define PM121_SYS_GD 0x00000000 #define PM121_SYS_GR 0x00019999 #define PM121_SYS_HISTORY_SIZE 2 #define PM121_SYS_INTERVAL 5 /* State data used by the system fans control loop */ struct pm121_sys_state { int ticks; s32 setpoint; struct wf_pid_state pid; }; struct pm121_sys_state *pm121_sys_state[N_LOOPS] = {}; /* * ****** CPU Fans Control Loop ****** * */ #define PM121_CPU_INTERVAL 1 /* State data used by the cpu fans control loop */ struct pm121_cpu_state { int ticks; s32 setpoint; struct wf_cpu_pid_state pid; }; static struct pm121_cpu_state *pm121_cpu_state; /* * ***** Implementation ***** * */ /* correction the value using the output-low-bound correction algo */ static s32 pm121_correct(s32 new_setpoint, unsigned int control_id, s32 min) { s32 new_min; struct 
pm121_correction *correction; correction = &corrections[control_id][pm121_mach_model - 2]; new_min = (average_power * correction->slope) >> 16; new_min += correction->offset; new_min = (new_min >> 16) + min; return max3(new_setpoint, new_min, 0); } static s32 pm121_connect(unsigned int control_id, s32 setpoint) { s32 new_min, value, new_setpoint; if (pm121_connection->control_id == control_id) { controls[control_id]->ops->get_value(controls[control_id], &value); new_min = value * pm121_connection->correction.slope; new_min += pm121_connection->correction.offset; if (new_min > 0) { new_setpoint = max(setpoint, (new_min >> 16)); if (new_setpoint != setpoint) { pr_debug("pm121: %s depending on %s, " "corrected from %d to %d RPM\n", controls[control_id]->name, controls[pm121_connection->ref_id]->name, (int) setpoint, (int) new_setpoint); } } else new_setpoint = setpoint; } /* no connection */ else new_setpoint = setpoint; return new_setpoint; } /* FAN LOOPS */ static void pm121_create_sys_fans(int loop_id) { struct pm121_sys_param *param = NULL; struct wf_pid_param pid_param; struct wf_control *control = NULL; int i; /* First, locate the params for this model */ for (i = 0; i < PM121_NUM_CONFIGS; i++) { if (pm121_sys_all_params[loop_id][i].model_id == pm121_mach_model) { param = &(pm121_sys_all_params[loop_id][i]); break; } } /* No params found, put fans to max */ if (param == NULL) { printk(KERN_WARNING "pm121: %s fan config not found " " for this machine model\n", loop_names[loop_id]); goto fail; } control = controls[param->control_id]; /* Alloc & initialize state */ pm121_sys_state[loop_id] = kmalloc(sizeof(struct pm121_sys_state), GFP_KERNEL); if (pm121_sys_state[loop_id] == NULL) { printk(KERN_WARNING "pm121: Memory allocation error\n"); goto fail; } pm121_sys_state[loop_id]->ticks = 1; /* Fill PID params */ pid_param.gd = PM121_SYS_GD; pid_param.gp = param->gp; pid_param.gr = PM121_SYS_GR; pid_param.interval = PM121_SYS_INTERVAL; pid_param.history_len = 
PM121_SYS_HISTORY_SIZE; pid_param.itarget = param->itarget; pid_param.min = control->ops->get_min(control); pid_param.max = control->ops->get_max(control); wf_pid_init(&pm121_sys_state[loop_id]->pid, &pid_param); pr_debug("pm121: %s Fan control loop initialized.\n" " itarged=%d.%03d, min=%d RPM, max=%d RPM\n", loop_names[loop_id], FIX32TOPRINT(pid_param.itarget), pid_param.min, pid_param.max); return; fail: /* note that this is not optimal since another loop may still control the same control */ printk(KERN_WARNING "pm121: failed to set up %s loop " "setting \"%s\" to max speed.\n", loop_names[loop_id], control->name); if (control) wf_control_set_max(control); } static void pm121_sys_fans_tick(int loop_id) { struct pm121_sys_param *param; struct pm121_sys_state *st; struct wf_sensor *sensor; struct wf_control *control; s32 temp, new_setpoint; int rc; param = &(pm121_sys_all_params[loop_id][pm121_mach_model-2]); st = pm121_sys_state[loop_id]; sensor = *(param->sensor); control = controls[param->control_id]; if (--st->ticks != 0) { if (pm121_readjust) goto readjust; return; } st->ticks = PM121_SYS_INTERVAL; rc = sensor->ops->get_value(sensor, &temp); if (rc) { printk(KERN_WARNING "windfarm: %s sensor error %d\n", sensor->name, rc); pm121_failure_state |= FAILURE_SENSOR; return; } pr_debug("pm121: %s Fan tick ! 
%s: %d.%03d\n", loop_names[loop_id], sensor->name, FIX32TOPRINT(temp)); new_setpoint = wf_pid_run(&st->pid, temp); /* correction */ new_setpoint = pm121_correct(new_setpoint, param->control_id, st->pid.param.min); /* linked corretion */ new_setpoint = pm121_connect(param->control_id, new_setpoint); if (new_setpoint == st->setpoint) return; st->setpoint = new_setpoint; pr_debug("pm121: %s corrected setpoint: %d RPM\n", control->name, (int)new_setpoint); readjust: if (control && pm121_failure_state == 0) { rc = control->ops->set_value(control, st->setpoint); if (rc) { printk(KERN_WARNING "windfarm: %s fan error %d\n", control->name, rc); pm121_failure_state |= FAILURE_FAN; } } } /* CPU LOOP */ static void pm121_create_cpu_fans(void) { struct wf_cpu_pid_param pid_param; const struct smu_sdbp_header *hdr; struct smu_sdbp_cpupiddata *piddata; struct smu_sdbp_fvt *fvt; struct wf_control *fan_cpu; s32 tmax, tdelta, maxpow, powadj; fan_cpu = controls[FAN_CPU]; /* First, locate the PID params in SMU SBD */ hdr = smu_get_sdb_partition(SMU_SDB_CPUPIDDATA_ID, NULL); if (hdr == 0) { printk(KERN_WARNING "pm121: CPU PID fan config not found.\n"); goto fail; } piddata = (struct smu_sdbp_cpupiddata *)&hdr[1]; /* Get the FVT params for operating point 0 (the only supported one * for now) in order to get tmax */ hdr = smu_get_sdb_partition(SMU_SDB_FVT_ID, NULL); if (hdr) { fvt = (struct smu_sdbp_fvt *)&hdr[1]; tmax = ((s32)fvt->maxtemp) << 16; } else tmax = 0x5e0000; /* 94 degree default */ /* Alloc & initialize state */ pm121_cpu_state = kmalloc(sizeof(struct pm121_cpu_state), GFP_KERNEL); if (pm121_cpu_state == NULL) goto fail; pm121_cpu_state->ticks = 1; /* Fill PID params */ pid_param.interval = PM121_CPU_INTERVAL; pid_param.history_len = piddata->history_len; if (pid_param.history_len > WF_CPU_PID_MAX_HISTORY) { printk(KERN_WARNING "pm121: History size overflow on " "CPU control loop (%d)\n", piddata->history_len); pid_param.history_len = WF_CPU_PID_MAX_HISTORY; } pid_param.gd = 
piddata->gd; pid_param.gp = piddata->gp; pid_param.gr = piddata->gr / pid_param.history_len; tdelta = ((s32)piddata->target_temp_delta) << 16; maxpow = ((s32)piddata->max_power) << 16; powadj = ((s32)piddata->power_adj) << 16; pid_param.tmax = tmax; pid_param.ttarget = tmax - tdelta; pid_param.pmaxadj = maxpow - powadj; pid_param.min = fan_cpu->ops->get_min(fan_cpu); pid_param.max = fan_cpu->ops->get_max(fan_cpu); wf_cpu_pid_init(&pm121_cpu_state->pid, &pid_param); pr_debug("pm121: CPU Fan control initialized.\n"); pr_debug(" ttarged=%d.%03d, tmax=%d.%03d, min=%d RPM, max=%d RPM,\n", FIX32TOPRINT(pid_param.ttarget), FIX32TOPRINT(pid_param.tmax), pid_param.min, pid_param.max); return; fail: printk(KERN_WARNING "pm121: CPU fan config not found, max fan speed\n"); if (controls[CPUFREQ]) wf_control_set_max(controls[CPUFREQ]); if (fan_cpu) wf_control_set_max(fan_cpu); } static void pm121_cpu_fans_tick(struct pm121_cpu_state *st) { s32 new_setpoint, temp, power; struct wf_control *fan_cpu = NULL; int rc; if (--st->ticks != 0) { if (pm121_readjust) goto readjust; return; } st->ticks = PM121_CPU_INTERVAL; fan_cpu = controls[FAN_CPU]; rc = sensor_cpu_temp->ops->get_value(sensor_cpu_temp, &temp); if (rc) { printk(KERN_WARNING "pm121: CPU temp sensor error %d\n", rc); pm121_failure_state |= FAILURE_SENSOR; return; } rc = sensor_cpu_power->ops->get_value(sensor_cpu_power, &power); if (rc) { printk(KERN_WARNING "pm121: CPU power sensor error %d\n", rc); pm121_failure_state |= FAILURE_SENSOR; return; } pr_debug("pm121: CPU Fans tick ! 
CPU temp: %d.%03d°C, power: %d.%03d\n", FIX32TOPRINT(temp), FIX32TOPRINT(power)); if (temp > st->pid.param.tmax) pm121_failure_state |= FAILURE_OVERTEMP; new_setpoint = wf_cpu_pid_run(&st->pid, power, temp); /* correction */ new_setpoint = pm121_correct(new_setpoint, FAN_CPU, st->pid.param.min); /* connected correction */ new_setpoint = pm121_connect(FAN_CPU, new_setpoint); if (st->setpoint == new_setpoint) return; st->setpoint = new_setpoint; pr_debug("pm121: CPU corrected setpoint: %d RPM\n", (int)new_setpoint); readjust: if (fan_cpu && pm121_failure_state == 0) { rc = fan_cpu->ops->set_value(fan_cpu, st->setpoint); if (rc) { printk(KERN_WARNING "pm121: %s fan error %d\n", fan_cpu->name, rc); pm121_failure_state |= FAILURE_FAN; } } } /* * ****** Common ****** * */ static void pm121_tick(void) { unsigned int last_failure = pm121_failure_state; unsigned int new_failure; s32 total_power; int i; if (!pm121_started) { pr_debug("pm121: creating control loops !\n"); for (i = 0; i < N_LOOPS; i++) pm121_create_sys_fans(i); pm121_create_cpu_fans(); pm121_started = 1; } /* skipping ticks */ if (pm121_skipping && --pm121_skipping) return; /* compute average power */ total_power = 0; for (i = 0; i < pm121_cpu_state->pid.param.history_len; i++) total_power += pm121_cpu_state->pid.powers[i]; average_power = total_power / pm121_cpu_state->pid.param.history_len; pm121_failure_state = 0; for (i = 0 ; i < N_LOOPS; i++) { if (pm121_sys_state[i]) pm121_sys_fans_tick(i); } if (pm121_cpu_state) pm121_cpu_fans_tick(pm121_cpu_state); pm121_readjust = 0; new_failure = pm121_failure_state & ~last_failure; /* If entering failure mode, clamp cpufreq and ramp all * fans to full speed. 
*/ if (pm121_failure_state && !last_failure) { for (i = 0; i < N_CONTROLS; i++) { if (controls[i]) wf_control_set_max(controls[i]); } } /* If leaving failure mode, unclamp cpufreq and readjust * all fans on next iteration */ if (!pm121_failure_state && last_failure) { if (controls[CPUFREQ]) wf_control_set_min(controls[CPUFREQ]); pm121_readjust = 1; } /* Overtemp condition detected, notify and start skipping a couple * ticks to let the temperature go down */ if (new_failure & FAILURE_OVERTEMP) { wf_set_overtemp(); pm121_skipping = 2; } /* We only clear the overtemp condition if overtemp is cleared * _and_ no other failure is present. Since a sensor error will * clear the overtemp condition (can't measure temperature) at * the control loop levels, but we don't want to keep it clear * here in this case */ if (new_failure == 0 && last_failure & FAILURE_OVERTEMP) wf_clear_overtemp(); } static struct wf_control* pm121_register_control(struct wf_control *ct, const char *match, unsigned int id) { if (controls[id] == NULL && !strcmp(ct->name, match)) { if (wf_get_control(ct) == 0) controls[id] = ct; } return controls[id]; } static void pm121_new_control(struct wf_control *ct) { int all = 1; if (pm121_all_controls_ok) return; all = pm121_register_control(ct, "optical-drive-fan", FAN_OD) && all; all = pm121_register_control(ct, "hard-drive-fan", FAN_HD) && all; all = pm121_register_control(ct, "cpu-fan", FAN_CPU) && all; all = pm121_register_control(ct, "cpufreq-clamp", CPUFREQ) && all; if (all) pm121_all_controls_ok = 1; } static struct wf_sensor* pm121_register_sensor(struct wf_sensor *sensor, const char *match, struct wf_sensor **var) { if (*var == NULL && !strcmp(sensor->name, match)) { if (wf_get_sensor(sensor) == 0) *var = sensor; } return *var; } static void pm121_new_sensor(struct wf_sensor *sr) { int all = 1; if (pm121_all_sensors_ok) return; all = pm121_register_sensor(sr, "cpu-temp", &sensor_cpu_temp) && all; all = pm121_register_sensor(sr, "cpu-current", 
&sensor_cpu_current) && all; all = pm121_register_sensor(sr, "cpu-voltage", &sensor_cpu_voltage) && all; all = pm121_register_sensor(sr, "cpu-power", &sensor_cpu_power) && all; all = pm121_register_sensor(sr, "hard-drive-temp", &sensor_hard_drive_temp) && all; all = pm121_register_sensor(sr, "optical-drive-temp", &sensor_optical_drive_temp) && all; all = pm121_register_sensor(sr, "incoming-air-temp", &sensor_incoming_air_temp) && all; all = pm121_register_sensor(sr, "north-bridge-temp", &sensor_north_bridge_temp) && all; all = pm121_register_sensor(sr, "gpu-temp", &sensor_gpu_temp) && all; if (all) pm121_all_sensors_ok = 1; } static int pm121_notify(struct notifier_block *self, unsigned long event, void *data) { switch (event) { case WF_EVENT_NEW_CONTROL: pr_debug("pm121: new control %s detected\n", ((struct wf_control *)data)->name); pm121_new_control(data); break; case WF_EVENT_NEW_SENSOR: pr_debug("pm121: new sensor %s detected\n", ((struct wf_sensor *)data)->name); pm121_new_sensor(data); break; case WF_EVENT_TICK: if (pm121_all_controls_ok && pm121_all_sensors_ok) pm121_tick(); break; } return 0; } static struct notifier_block pm121_events = { .notifier_call = pm121_notify, }; static int pm121_init_pm(void) { const struct smu_sdbp_header *hdr; hdr = smu_get_sdb_partition(SMU_SDB_SENSORTREE_ID, NULL); if (hdr != 0) { struct smu_sdbp_sensortree *st = (struct smu_sdbp_sensortree *)&hdr[1]; pm121_mach_model = st->model_id; } pm121_connection = &pm121_connections[pm121_mach_model - 2]; printk(KERN_INFO "pm121: Initializing for iMac G5 iSight model ID %d\n", pm121_mach_model); return 0; } static int pm121_probe(struct platform_device *ddev) { wf_register_client(&pm121_events); return 0; } static int __devexit pm121_remove(struct platform_device *ddev) { wf_unregister_client(&pm121_events); return 0; } static struct platform_driver pm121_driver = { .probe = pm121_probe, .remove = __devexit_p(pm121_remove), .driver = { .name = "windfarm", .bus = &platform_bus_type, }, 
}; static int __init pm121_init(void) { int rc = -ENODEV; if (of_machine_is_compatible("PowerMac12,1")) rc = pm121_init_pm(); if (rc == 0) { request_module("windfarm_smu_controls"); request_module("windfarm_smu_sensors"); request_module("windfarm_smu_sat"); request_module("windfarm_lm75_sensor"); request_module("windfarm_max6690_sensor"); request_module("windfarm_cpufreq_clamp"); platform_driver_register(&pm121_driver); } return rc; } static void __exit pm121_exit(void) { platform_driver_unregister(&pm121_driver); } module_init(pm121_init); module_exit(pm121_exit); MODULE_AUTHOR("Étienne Bersac <bersace@gmail.com>"); MODULE_DESCRIPTION("Thermal control logic for iMac G5 (iSight)"); MODULE_LICENSE("GPL");
gpl-2.0
davidmueller13/android_kernel_samsung_lt03lte
drivers/ssb/driver_mipscore.c
4980
7147
/*
 * Sonics Silicon Backplane
 * Broadcom MIPS core driver
 *
 * Copyright 2005, Broadcom Corporation
 * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include <linux/ssb/ssb.h>

#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/time.h>

#include "ssb_private.h"

/* Read a 32-bit register of the MIPS core itself. */
static inline u32 mips_read32(struct ssb_mipscore *mcore,
			      u16 offset)
{
	return ssb_read32(mcore->dev, offset);
}

/* Write a 32-bit register of the MIPS core itself. */
static inline void mips_write32(struct ssb_mipscore *mcore,
				u16 offset,
				u32 value)
{
	ssb_write32(mcore->dev, offset, value);
}

/* Per-IRQ-line (1..4) mask and shift into the IPSFLAG register.
 * Index 0 is a placeholder: MIPS IRQ 0 is the shared line routed
 * through INTVEC instead. */
static const u32 ipsflag_irq_mask[] = {
	0,
	SSB_IPSFLAG_IRQ1,
	SSB_IPSFLAG_IRQ2,
	SSB_IPSFLAG_IRQ3,
	SSB_IPSFLAG_IRQ4,
};

static const u32 ipsflag_irq_shift[] = {
	0,
	SSB_IPSFLAG_IRQ1_SHIFT,
	SSB_IPSFLAG_IRQ2_SHIFT,
	SSB_IPSFLAG_IRQ3_SHIFT,
	SSB_IPSFLAG_IRQ4_SHIFT,
};

/* Return the backplane interrupt flag of a core, or 0x3f if the core
 * has no TPSFLAG register (i.e. cannot generate interrupts).
 * Note the register is intentionally read twice: once to probe for
 * presence, once to extract the flag bits. */
static inline u32 ssb_irqflag(struct ssb_device *dev)
{
	u32 tpsflag = ssb_read32(dev, SSB_TPSFLAG);
	if (tpsflag)
		return ssb_read32(dev, SSB_TPSFLAG) & SSB_TPSFLAG_BPFLAG;
	else
		/* not irq supported */
		return 0x3f;
}

/* Find the device on rdev's bus whose backplane flag equals irqflag,
 * or NULL if no core carries that flag. */
static struct ssb_device *find_device(struct ssb_device *rdev, int irqflag)
{
	struct ssb_bus *bus = rdev->bus;
	int i;
	for (i = 0; i < bus->nr_devices; i++) {
		struct ssb_device *dev;
		dev = &(bus->devices[i]);
		if (ssb_irqflag(dev) == irqflag)
			return dev;
	}
	return NULL;
}

/* Get the MIPS IRQ assignment for a specified device.
 * If unassigned, 0 is returned.
 * If disabled, 5 is returned.
 * If not supported, 6 is returned.
 */
unsigned int ssb_mips_irq(struct ssb_device *dev)
{
	struct ssb_bus *bus = dev->bus;
	struct ssb_device *mdev = bus->mipscore.dev;
	u32 irqflag;
	u32 ipsflag;
	u32 tmp;
	unsigned int irq;

	irqflag = ssb_irqflag(dev);
	if (irqflag == 0x3f)
		return 6;	/* core cannot raise interrupts at all */
	ipsflag = ssb_read32(bus->mipscore.dev, SSB_IPSFLAG);
	/* Scan dedicated lines 1..4 for this core's flag; falling out
	 * of the loop with irq == 5 means "not on a dedicated line". */
	for (irq = 1; irq <= 4; irq++) {
		tmp = ((ipsflag & ipsflag_irq_mask[irq]) >>
		       ipsflag_irq_shift[irq]);
		if (tmp == irqflag)
			break;
	}
	if (irq	== 5) {
		/* Not dedicated: it is on the shared line (0) only if its
		 * flag bit is enabled in INTVEC; otherwise it stays 5
		 * ("disabled"). */
		if ((1 << irqflag) & ssb_read32(mdev, SSB_INTVEC))
			irq = 0;
	}

	return irq;
}

/* Detach an IRQ line in the MIPS core registers: drop all sources from
 * the shared INTVEC line (irq 0) or park a dedicated line (1..4) by
 * setting its IPSFLAG field to all-ones. */
static void clear_irq(struct ssb_bus *bus, unsigned int irq)
{
	struct ssb_device *dev = bus->mipscore.dev;

	/* Clear the IRQ in the MIPScore backplane registers */
	if (irq == 0) {
		ssb_write32(dev, SSB_INTVEC, 0);
	} else {
		ssb_write32(dev, SSB_IPSFLAG,
			    ssb_read32(dev, SSB_IPSFLAG) |
			    ipsflag_irq_mask[irq]);
	}
}

/* Route a core's backplane interrupt to MIPS line 'irq' (0 = shared).
 * If the requested dedicated line is already occupied by another core,
 * that core is recursively demoted to the shared line first.
 * Also records the resulting Linux IRQ number (irq + 2) in dev->irq. */
static void set_irq(struct ssb_device *dev, unsigned int irq)
{
	unsigned int oldirq = ssb_mips_irq(dev);
	struct ssb_bus *bus = dev->bus;
	struct ssb_device *mdev = bus->mipscore.dev;
	u32 irqflag = ssb_irqflag(dev);

	BUG_ON(oldirq == 6);

	dev->irq = irq + 2;

	/* clear the old irq */
	if (oldirq == 0)
		ssb_write32(mdev, SSB_INTVEC, (~(1 << irqflag) &
					       ssb_read32(mdev, SSB_INTVEC)));
	else if (oldirq != 5)
		clear_irq(bus, oldirq);

	/* assign the new one */
	if (irq == 0) {
		ssb_write32(mdev, SSB_INTVEC, ((1 << irqflag) |
					       ssb_read32(mdev, SSB_INTVEC)));
	} else {
		u32 ipsflag = ssb_read32(mdev, SSB_IPSFLAG);
		if ((ipsflag & ipsflag_irq_mask[irq]) != ipsflag_irq_mask[irq]) {
			/* line already routes another core's flag: move that
			 * core onto the shared line before taking over */
			u32 oldipsflag = (ipsflag & ipsflag_irq_mask[irq]) >>
					 ipsflag_irq_shift[irq];
			struct ssb_device *olddev = find_device(dev, oldipsflag);
			if (olddev)
				set_irq(olddev, 0);
		}
		irqflag <<= ipsflag_irq_shift[irq];
		irqflag |= (ipsflag & ~ipsflag_irq_mask[irq]);
		ssb_write32(mdev, SSB_IPSFLAG, irqflag);
	}
	ssb_dprintk(KERN_INFO PFX
		    "set_irq: core 0x%04x, irq %d => %d\n",
		    dev->id.coreid, oldirq+2, irq+2);
}

/* Debug helper: print the routed Linux IRQ of one core, marking the
 * active line with '*'. */
static void print_irq(struct ssb_device *dev, unsigned int irq)
{
	int i;
	static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
	ssb_dprintk(KERN_INFO PFX
		    "core 0x%04x, irq :", dev->id.coreid);
	for (i = 0; i <= 6; i++) {
		ssb_dprintk(" %s%s", irq_name[i], i==irq?"*":" ");
	}
	ssb_dprintk("\n");
}

/* Debug helper: dump the IRQ routing of every core on the bus. */
static void dump_irq(struct ssb_bus *bus)
{
	int i;
	for (i = 0; i < bus->nr_devices; i++) {
		struct ssb_device *dev;
		dev = &(bus->devices[i]);
		print_irq(dev, ssb_mips_irq(dev));
	}
}

/* Probe the serial ports through whichever host core (EXTIF or
 * ChipCommon) the bus provides; zero ports if neither exists. */
static void ssb_mips_serial_init(struct ssb_mipscore *mcore)
{
	struct ssb_bus *bus = mcore->dev->bus;

	if (bus->extif.dev)
		mcore->nr_serial_ports = ssb_extif_serial_init(&bus->extif, mcore->serial_ports);
	else if (bus->chipco.dev)
		mcore->nr_serial_ports = ssb_chipco_serial_init(&bus->chipco, mcore->serial_ports);
	else
		mcore->nr_serial_ports = 0;
}

/* Record the parallel-flash mapping window and bus width.
 * With ChipCommon present the window/width come from its flash config
 * register; otherwise the legacy fixed 4 MiB window at 0x1fc00000 is
 * assumed. */
static void ssb_mips_flash_detect(struct ssb_mipscore *mcore)
{
	struct ssb_bus *bus = mcore->dev->bus;

	mcore->flash_buswidth = 2;
	if (bus->chipco.dev) {
		mcore->flash_window = 0x1c000000;
		mcore->flash_window_size = 0x02000000;
		if ((ssb_read32(bus->chipco.dev, SSB_CHIPCO_FLASH_CFG)
		               & SSB_CHIPCO_CFG_DS16) == 0)
			mcore->flash_buswidth = 1;
	} else {
		mcore->flash_window = 0x1fc00000;
		mcore->flash_window_size = 0x00400000;
	}
}

/* Return the CPU clock rate in Hz, derived from the PMU when present,
 * else from the EXTIF/ChipCommon PLL settings (0 if neither exists). */
u32 ssb_cpu_clock(struct ssb_mipscore *mcore)
{
	struct ssb_bus *bus = mcore->dev->bus;
	u32 pll_type, n, m, rate = 0;

	if (bus->chipco.capabilities & SSB_CHIPCO_CAP_PMU)
		return ssb_pmu_get_cpu_clock(&bus->chipco);

	if (bus->extif.dev) {
		ssb_extif_get_clockcontrol(&bus->extif, &pll_type, &n, &m);
	} else if (bus->chipco.dev) {
		ssb_chipco_get_clockcpu(&bus->chipco, &pll_type, &n, &m);
	} else
		return 0;

	if ((pll_type == SSB_PLLTYPE_5) || (bus->chip_id == 0x5365)) {
		/* fixed-rate parts */
		rate = 200000000;
	} else {
		rate = ssb_calc_clock_rate(pll_type, n, m);
	}

	if (pll_type == SSB_PLLTYPE_6) {
		/* this PLL type reports half the CPU rate */
		rate *= 2;
	}

	return rate;
}

/* One-time MIPS core setup: program backplane timing from the bus
 * clock, distribute IRQ lines 2..5 over the cores that want them
 * (everything else lands on the shared line), then probe serial ports
 * and the flash window. */
void ssb_mipscore_init(struct ssb_mipscore *mcore)
{
	struct ssb_bus *bus;
	struct ssb_device *dev;
	unsigned long hz, ns;
	unsigned int irq, i;

	if (!mcore->dev)
		return; /* We don't have a MIPS core */

	ssb_dprintk(KERN_INFO PFX "Initializing MIPS core...\n");

	bus = mcore->dev->bus;
	hz = ssb_clockspeed(bus);
	if (!hz)
		hz = 100000000;	/* fall back to a sane default clock */
	ns = 1000000000 / hz;

	if (bus->extif.dev)
		ssb_extif_timing_init(&bus->extif, ns);
	else if (bus->chipco.dev)
		ssb_chipco_timing_init(&bus->chipco, ns);

	/* Assign IRQs to all cores on the bus, start with irq line 2,
	 * because serial usually takes 1 */
	for (irq = 2, i = 0; i < bus->nr_devices; i++) {
		int mips_irq;
		dev = &(bus->devices[i]);
		mips_irq = ssb_mips_irq(dev);
		if (mips_irq > 4)
			dev->irq = 0;
		else
			dev->irq = mips_irq + 2;
		if (dev->irq > 5)
			continue;
		switch (dev->id.coreid) {
		case SSB_DEV_USB11_HOST:
			/* shouldn't need a separate irq line for non-4710,
			 * most of them have a proper external usb controller
			 * on the pci */
			if ((bus->chip_id == 0x4710) && (irq <= 4)) {
				set_irq(dev, irq++);
			}
			break;
		case SSB_DEV_PCI:
		case SSB_DEV_ETHERNET:
		case SSB_DEV_ETHERNET_GBIT:
		case SSB_DEV_80211:
		case SSB_DEV_USB20_HOST:
			/* These devices get their own IRQ line if available,
			 * the rest goes on IRQ0 (the shared line) */
			if (irq <= 4) {
				set_irq(dev, irq++);
				break;
			}
			/* fallthrough */
		case SSB_DEV_EXTIF:
			set_irq(dev, 0);
			break;
		}
	}
	ssb_dprintk(KERN_INFO PFX "after irq reconfiguration\n");
	dump_irq(bus);

	ssb_mips_serial_init(mcore);
	ssb_mips_flash_detect(mcore);
}
gpl-2.0
angpysha/KitKatExtendedKernel
arch/powerpc/perf/power6-pmu.c
7284
15833
/*
 * Performance counter support for POWER6 processors.
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/string.h>
#include <asm/reg.h>
#include <asm/cputable.h>

/*
 * Bits in event code for POWER6
 */
#define PM_PMC_SH	20	/* PMC number (1-based) for direct events */
#define PM_PMC_MSK	0x7
#define PM_PMC_MSKS	(PM_PMC_MSK << PM_PMC_SH)
#define PM_UNIT_SH	16	/* Unit event comes (TTMxSEL encoding) */
#define PM_UNIT_MSK	0xf
#define PM_UNIT_MSKS	(PM_UNIT_MSK << PM_UNIT_SH)
#define PM_LLAV		0x8000	/* Load lookahead match value */
#define PM_LLA		0x4000	/* Load lookahead match enable */
#define PM_BYTE_SH	12	/* Byte of event bus to use */
#define PM_BYTE_MSK	3
#define PM_SUBUNIT_SH	8	/* Subunit event comes from (NEST_SEL enc.) */
#define PM_SUBUNIT_MSK	7
#define PM_SUBUNIT_MSKS	(PM_SUBUNIT_MSK << PM_SUBUNIT_SH)
#define PM_PMCSEL_MSK	0xff	/* PMCxSEL value */
#define PM_BUSEVENT_MSK	0xf3700

/*
 * Bits in MMCR1 for POWER6
 */
#define MMCR1_TTM0SEL_SH	60
#define MMCR1_TTMSEL_SH(n)	(MMCR1_TTM0SEL_SH - (n) * 4)
#define MMCR1_TTMSEL_MSK	0xf
#define MMCR1_TTMSEL(m, n)	(((m) >> MMCR1_TTMSEL_SH(n)) & MMCR1_TTMSEL_MSK)
#define MMCR1_NESTSEL_SH	45
#define MMCR1_NESTSEL_MSK	0x7
#define MMCR1_NESTSEL(m)	(((m) >> MMCR1_NESTSEL_SH) & MMCR1_NESTSEL_MSK)
#define MMCR1_PMC1_LLA		(1ul << 44)
#define MMCR1_PMC1_LLA_VALUE	(1ul << 39)
#define MMCR1_PMC1_ADDR_SEL	(1ul << 35)
#define MMCR1_PMC1SEL_SH	24
#define MMCR1_PMCSEL_SH(n)	(MMCR1_PMC1SEL_SH - (n) * 8)
#define MMCR1_PMCSEL_MSK	0xff

/*
 * Map of which direct events on which PMCs are marked instruction events.
 * Indexed by PMCSEL value >> 1.
 * Bottom 4 bits are a map of which PMCs are interesting,
 * top 4 bits say what sort of event:
 *	0 = direct marked event,
 *	1 = byte decode event,
 *	4 = add/and event (PMC1 -> bits 0 & 4),
 *	5 = add/and event (PMC1 -> bits 1 & 5),
 *	6 = add/and event (PMC1 -> bits 2 & 6),
 *	7 = add/and event (PMC1 -> bits 3 & 7).
 */
static unsigned char direct_event_is_marked[0x60 >> 1] = {
	0,	/* 00 */
	0,	/* 02 */
	0,	/* 04 */
	0x07,	/* 06 PM_MRK_ST_CMPL, PM_MRK_ST_GPS, PM_MRK_ST_CMPL_INT */
	0x04,	/* 08 PM_MRK_DFU_FIN */
	0x06,	/* 0a PM_MRK_IFU_FIN, PM_MRK_INST_FIN */
	0,	/* 0c */
	0,	/* 0e */
	0x02,	/* 10 PM_MRK_INST_DISP */
	0x08,	/* 12 PM_MRK_LSU_DERAT_MISS */
	0,	/* 14 */
	0,	/* 16 */
	0x0c,	/* 18 PM_THRESH_TIMEO, PM_MRK_INST_FIN */
	0x0f,	/* 1a PM_MRK_INST_DISP, PM_MRK_{FXU,FPU,LSU}_FIN */
	0x01,	/* 1c PM_MRK_INST_ISSUED */
	0,	/* 1e */
	0,	/* 20 */
	0,	/* 22 */
	0,	/* 24 */
	0,	/* 26 */
	0x15,	/* 28 PM_MRK_DATA_FROM_L2MISS, PM_MRK_DATA_FROM_L3MISS */
	0,	/* 2a */
	0,	/* 2c */
	0,	/* 2e */
	0x4f,	/* 30 */
	0x7f,	/* 32 */
	0x4f,	/* 34 */
	0x5f,	/* 36 */
	0x6f,	/* 38 */
	0x4f,	/* 3a */
	0,	/* 3c */
	0x08,	/* 3e PM_MRK_INST_TIMEO */
	0x1f,	/* 40 */
	0x1f,	/* 42 */
	0x1f,	/* 44 */
	0x1f,	/* 46 */
	0x1f,	/* 48 */
	0x1f,	/* 4a */
	0x1f,	/* 4c */
	0x1f,	/* 4e */
	0,	/* 50 */
	0x05,	/* 52 PM_MRK_BR_TAKEN, PM_MRK_BR_MPRED */
	0x1c,	/* 54 PM_MRK_PTEG_FROM_L3MISS, PM_MRK_PTEG_FROM_L2MISS */
	0x02,	/* 56 PM_MRK_LD_MISS_L1 */
	0,	/* 58 */
	0,	/* 5a */
	0,	/* 5c */
	0,	/* 5e */
};

/*
 * Masks showing for each unit which bits are marked events.
 * These masks are in LE order, i.e. 0x00000001 is byte 0, bit 0.
 */
static u32 marked_bus_events[16] = {
	0x01000000,	/* direct events set 1: byte 3 bit 0 */
	0x00010000,	/* direct events set 2: byte 2 bit 0 */
	0, 0, 0, 0,	/* IDU, IFU, nest: nothing */
	0x00000088,	/* VMX set 1: byte 0 bits 3, 7 */
	0x000000c0,	/* VMX set 2: byte 0 bits 4-7 */
	0x04010000,	/* LSU set 1: byte 2 bit 0, byte 3 bit 2 */
	0xff010000u,	/* LSU set 2: byte 2 bit 0, all of byte 3 */
	0,		/* LSU set 3 */
	0x00000010,	/* VMX set 3: byte 0 bit 4 */
	0,		/* BFP set 1 */
	0x00000022,	/* BFP set 2: byte 0 bits 1, 5 */
	0, 0
};

/*
 * Returns 1 if event counts things relating to marked instructions
 * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
 */
static int power6_marked_instr_event(u64 event)
{
	int pmc, psel, ptype;
	int bit, byte, unit;
	u32 mask;

	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
	psel = (event & PM_PMCSEL_MSK) >> 1;	/* drop edge/level bit */
	if (pmc >= 5)
		return 0;

	bit = -1;
	if (psel < sizeof(direct_event_is_marked)) {
		/* Direct event: consult the table above. */
		ptype = direct_event_is_marked[psel];
		if (pmc == 0 || !(ptype & (1 << (pmc - 1))))
			return 0;
		ptype >>= 4;
		if (ptype == 0)
			return 1;
		if (ptype == 1)
			bit = 0;
		else
			bit = ptype ^ (pmc - 1);
	} else if ((psel & 0x48) == 0x40)
		bit = psel & 7;

	if (!(event & PM_BUSEVENT_MSK) || bit == -1)
		return 0;

	/* Bus event: check the per-unit marked-event mask. */
	byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
	unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
	mask = marked_bus_events[unit];
	return (mask >> (byte * 8 + bit)) & 1;
}

/*
 * Assign PMC numbers and compute MMCR1 value for a set of events
 */
static int p6_compute_mmcr(u64 event[], int n_ev,
			   unsigned int hwc[], unsigned long mmcr[])
{
	unsigned long mmcr1 = 0;
	unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS;
	int i;
	unsigned int pmc, ev, b, u, s, psel;
	unsigned int ttmset = 0;
	unsigned int pmc_inuse = 0;

	if (n_ev > 6)
		return -1;
	/* First pass: reserve the PMCs that events explicitly request. */
	for (i = 0; i < n_ev; ++i) {
		pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
		if (pmc) {
			if (pmc_inuse & (1 << (pmc - 1)))
				return -1;	/* collision! */
			pmc_inuse |= 1 << (pmc - 1);
		}
	}
	/* Second pass: place the remaining events and build MMCR1/MMCRA. */
	for (i = 0; i < n_ev; ++i) {
		ev = event[i];
		pmc = (ev >> PM_PMC_SH) & PM_PMC_MSK;
		if (pmc) {
			--pmc;
		} else {
			/* can go on any PMC; find a free one */
			for (pmc = 0; pmc < 4; ++pmc)
				if (!(pmc_inuse & (1 << pmc)))
					break;
			if (pmc >= 4)
				return -1;
			pmc_inuse |= 1 << pmc;
		}
		hwc[i] = pmc;
		psel = ev & PM_PMCSEL_MSK;
		if (ev & PM_BUSEVENT_MSK) {
			/* this event uses the event bus */
			b = (ev >> PM_BYTE_SH) & PM_BYTE_MSK;
			u = (ev >> PM_UNIT_SH) & PM_UNIT_MSK;
			/* check for conflict on this byte of event bus */
			if ((ttmset & (1 << b)) && MMCR1_TTMSEL(mmcr1, b) != u)
				return -1;
			mmcr1 |= (unsigned long)u << MMCR1_TTMSEL_SH(b);
			ttmset |= 1 << b;
			if (u == 5) {
				/* Nest events have a further mux */
				s = (ev >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK;
				if ((ttmset & 0x10) &&
				    MMCR1_NESTSEL(mmcr1) != s)
					return -1;
				ttmset |= 0x10;
				mmcr1 |= (unsigned long)s << MMCR1_NESTSEL_SH;
			}
			if (0x30 <= psel && psel <= 0x3d) {
				/* these need the PMCx_ADDR_SEL bits */
				if (b >= 2)
					mmcr1 |= MMCR1_PMC1_ADDR_SEL >> pmc;
			}
			/* bus select values are different for PMC3/4 */
			if (pmc >= 2 && (psel & 0x90) == 0x80)
				psel ^= 0x20;
		}
		if (ev & PM_LLA) {
			/* Load-lookahead matching for this PMC. */
			mmcr1 |= MMCR1_PMC1_LLA >> pmc;
			if (ev & PM_LLAV)
				mmcr1 |= MMCR1_PMC1_LLA_VALUE >> pmc;
		}
		if (power6_marked_instr_event(event[i]))
			mmcra |= MMCRA_SAMPLE_ENABLE;
		if (pmc < 4)
			mmcr1 |= (unsigned long)psel << MMCR1_PMCSEL_SH(pmc);
	}
	/* Enable interrupts only for the PMCs actually in use. */
	mmcr[0] = 0;
	if (pmc_inuse & 1)
		mmcr[0] = MMCR0_PMC1CE;
	if (pmc_inuse & 0xe)
		mmcr[0] |= MMCR0_PMCjCE;
	mmcr[1] = mmcr1;
	mmcr[2] = mmcra;
	return 0;
}

/*
 * Layout of constraint bits:
 *
 *	0-1	add field: number of uses of PMC1 (max 1)
 *	2-3, 4-5, 6-7, 8-9, 10-11: ditto for PMC2, 3, 4, 5, 6
 *	12-15	add field: number of uses of PMC1-4 (max 4)
 *	16-19	select field: unit on byte 0 of event bus
 *	20-23, 24-27, 28-31 ditto for bytes 1, 2, 3
 *	32-34	select field: nest (subunit) event selector
 */
static int p6_get_constraint(u64 event, unsigned long *maskp,
			     unsigned long *valp)
{
	int pmc, byte, sh, subunit;
	unsigned long mask = 0, value = 0;

	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
	if (pmc) {
		/* 0x500009/0x600005 are PM_RUN_INST_CMPL/PM_RUN_CYC
		 * (see event_alternatives below); only they may name PMC5/6. */
		if (pmc > 4 && !(event == 0x500009 || event == 0x600005))
			return -1;
		sh = (pmc - 1) * 2;
		mask |= 2 << sh;
		value |= 1 << sh;
	}
	if (event & PM_BUSEVENT_MSK) {
		byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
		sh = byte * 4 + (16 - PM_UNIT_SH);
		mask |= PM_UNIT_MSKS << sh;
		value |= (unsigned long)(event & PM_UNIT_MSKS) << sh;
		if ((event & PM_UNIT_MSKS) == (5 << PM_UNIT_SH)) {
			subunit = (event >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK;
			mask |= (unsigned long)PM_SUBUNIT_MSK << 32;
			value |= (unsigned long)subunit << 32;
		}
	}
	if (pmc <= 4) {
		mask |= 0x8000;	/* add field for count of PMC1-4 uses */
		value |= 0x1000;
	}
	*maskp = mask;
	*valp = value;
	return 0;
}

/* True if the event is hard-coded to one of the limited PMCs (5 or 6). */
static int p6_limited_pmc_event(u64 event)
{
	int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;

	return pmc == 5 || pmc == 6;
}

#define MAX_ALT	4	/* at most 4 alternatives for any event */

/*
 * Alternative event codes that count the same thing.  Each row is
 * sorted ascending (find_alternatives_list relies on this) and padded
 * with zeroes in unused slots.
 */
static const unsigned int event_alternatives[][MAX_ALT] = {
	{ 0x0130e8, 0x2000f6, 0x3000fc },	/* PM_PTEG_RELOAD_VALID */
	{ 0x080080, 0x10000d, 0x30000c, 0x4000f0 },	/* PM_LD_MISS_L1 */
	{ 0x080088, 0x200054, 0x3000f0 },	/* PM_ST_MISS_L1 */
	{ 0x10000a, 0x2000f4, 0x600005 },	/* PM_RUN_CYC */
	{ 0x10000b, 0x2000f5 },			/* PM_RUN_COUNT */
	{ 0x10000e, 0x400010 },			/* PM_PURR */
	{ 0x100010, 0x4000f8 },			/* PM_FLUSH */
	{ 0x10001a, 0x200010 },			/* PM_MRK_INST_DISP */
	{ 0x100026, 0x3000f8 },			/* PM_TB_BIT_TRANS */
	{ 0x100054, 0x2000f0 },			/* PM_ST_FIN */
	{ 0x100056, 0x2000fc },			/* PM_L1_ICACHE_MISS */
	{ 0x1000f0, 0x40000a },			/* PM_INST_IMC_MATCH_CMPL */
	{ 0x1000f8, 0x200008 },			/* PM_GCT_EMPTY_CYC */
	{ 0x1000fc, 0x400006 },			/* PM_LSU_DERAT_MISS_CYC */
	{ 0x20000e, 0x400007 },			/* PM_LSU_DERAT_MISS */
	{ 0x200012, 0x300012 },			/* PM_INST_DISP */
	{ 0x2000f2, 0x3000f2 },			/* PM_INST_DISP */
	{ 0x2000f8, 0x300010 },			/* PM_EXT_INT */
	{ 0x2000fe, 0x300056 },			/* PM_DATA_FROM_L2MISS */
	{ 0x2d0030, 0x30001a },			/* PM_MRK_FPU_FIN */
	{ 0x30000a, 0x400018 },			/* PM_MRK_INST_FIN */
	{ 0x3000f6, 0x40000e },			/* PM_L1_DCACHE_RELOAD_VALID */
	{ 0x3000fe, 0x400056 },			/* PM_DATA_FROM_L3MISS */
};

/*
 * This could be made more efficient with a binary search on
 * a presorted list, if necessary
 */
static int find_alternatives_list(u64 event)
{
	int i, j;
	unsigned int alt;

	for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
		/* Rows are sorted by first code, so we can stop early. */
		if (event < event_alternatives[i][0])
			return -1;
		for (j = 0; j < MAX_ALT; ++j) {
			alt = event_alternatives[i][j];
			if (!alt || event < alt)
				break;
			if (event == alt)
				return i;
		}
	}
	return -1;
}

/*
 * Fill alt[] with event codes equivalent to 'event' (including 'event'
 * itself in alt[0]), honouring the PPMU_* flags; returns the count.
 */
static int p6_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
	int i, j, nlim;
	unsigned int psel, pmc;
	unsigned int nalt = 1;
	u64 aevent;

	alt[0] = event;
	nlim = p6_limited_pmc_event(event);

	/* check the alternatives table */
	i = find_alternatives_list(event);
	if (i >= 0) {
		/* copy out alternatives from list */
		for (j = 0; j < MAX_ALT; ++j) {
			aevent = event_alternatives[i][j];
			if (!aevent)
				break;
			if (aevent != event)
				alt[nalt++] = aevent;
			nlim += p6_limited_pmc_event(aevent);
		}
	} else {
		/* Check for alternative ways of computing sum events */
		/* PMCSEL 0x32 counter N == PMCSEL 0x34 counter 5-N */
		psel = event & (PM_PMCSEL_MSK & ~1);	/* ignore edge bit */
		pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
		if (pmc && (psel == 0x32 || psel == 0x34))
			alt[nalt++] = ((event ^ 0x6) & ~PM_PMC_MSKS) |
				((5 - pmc) << PM_PMC_SH);

		/* PMCSEL 0x38 counter N == PMCSEL 0x3a counter N+/-2 */
		if (pmc && (psel == 0x38 || psel == 0x3a))
			alt[nalt++] = ((event ^ 0x2) & ~PM_PMC_MSKS) |
				((pmc > 2? pmc - 2: pmc + 2) << PM_PMC_SH);
	}

	if (flags & PPMU_ONLY_COUNT_RUN) {
		/*
		 * We're only counting in RUN state,
		 * so PM_CYC is equivalent to PM_RUN_CYC,
		 * PM_INST_CMPL === PM_RUN_INST_CMPL, PM_PURR === PM_RUN_PURR.
		 * This doesn't include alternatives that don't provide
		 * any extra flexibility in assigning PMCs (e.g.
		 * 0x10000a for PM_RUN_CYC vs. 0x1e for PM_CYC).
		 * Note that even with these additional alternatives
		 * we never end up with more than 4 alternatives for any event.
		 */
		j = nalt;
		for (i = 0; i < nalt; ++i) {
			switch (alt[i]) {
			case 0x1e:	/* PM_CYC */
				alt[j++] = 0x600005;	/* PM_RUN_CYC */
				++nlim;
				break;
			case 0x10000a:	/* PM_RUN_CYC */
				alt[j++] = 0x1e;	/* PM_CYC */
				break;
			case 2:		/* PM_INST_CMPL */
				alt[j++] = 0x500009;	/* PM_RUN_INST_CMPL */
				++nlim;
				break;
			case 0x500009:	/* PM_RUN_INST_CMPL */
				alt[j++] = 2;	/* PM_INST_CMPL */
				break;
			case 0x10000e:	/* PM_PURR */
				alt[j++] = 0x4000f4;	/* PM_RUN_PURR */
				break;
			case 0x4000f4:	/* PM_RUN_PURR */
				alt[j++] = 0x10000e;	/* PM_PURR */
				break;
			}
		}
		nalt = j;
	}

	if (!(flags & PPMU_LIMITED_PMC_OK) && nlim) {
		/* remove the limited PMC events */
		j = 0;
		for (i = 0; i < nalt; ++i) {
			if (!p6_limited_pmc_event(alt[i])) {
				alt[j] = alt[i];
				++j;
			}
		}
		nalt = j;
	} else if ((flags & PPMU_LIMITED_PMC_REQD) && nlim < nalt) {
		/* remove all but the limited PMC events */
		j = 0;
		for (i = 0; i < nalt; ++i) {
			if (p6_limited_pmc_event(alt[i])) {
				alt[j] = alt[i];
				++j;
			}
		}
		nalt = j;
	}

	return nalt;
}

static void p6_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
	/* Set PMCxSEL to 0 to disable PMCx */
	if (pmc <= 3)
		mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc));
}

/* Map of generic perf hardware event types to POWER6 raw event codes. */
static int power6_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x1e,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 2,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x280030,	/* LD_REF_L1 */
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x30000c,	/* LD_MISS_L1 */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x410a0,	/* BR_PRED */
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x400052,	/* BR_MPRED */
};

#define C(x)	PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 * The "DTLB" and "ITLB" events relate to the DERAT and IERAT.
 */
static int power6_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[C(L1D)] = {		/*	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0x280030,	0x80080		},
		[C(OP_WRITE)] = {	0x180032,	0x80088		},
		[C(OP_PREFETCH)] = {	0x810a4,	0		},
	},
	[C(L1I)] = {		/*	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0,		0x100056	},
		[C(OP_WRITE)] = {	-1,		-1		},
		[C(OP_PREFETCH)] = {	0x4008c,	0		},
	},
	[C(LL)] = {		/*	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0x150730,	0x250532	},
		[C(OP_WRITE)] = {	0x250432,	0x150432	},
		[C(OP_PREFETCH)] = {	0x810a6,	0		},
	},
	[C(DTLB)] = {		/*	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0,		0x20000e	},
		[C(OP_WRITE)] = {	-1,		-1		},
		[C(OP_PREFETCH)] = {	-1,		-1		},
	},
	[C(ITLB)] = {		/*	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0,		0x420ce		},
		[C(OP_WRITE)] = {	-1,		-1		},
		[C(OP_PREFETCH)] = {	-1,		-1		},
	},
	[C(BPU)] = {		/*	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0x430e6,	0x400052	},
		[C(OP_WRITE)] = {	-1,		-1		},
		[C(OP_PREFETCH)] = {	-1,		-1		},
	},
	[C(NODE)] = {		/*	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	-1,		-1		},
		[C(OP_WRITE)] = {	-1,		-1		},
		[C(OP_PREFETCH)] = {	-1,		-1		},
	},
};

/* POWER6 PMU description handed to the powerpc perf core. */
static struct power_pmu power6_pmu = {
	.name			= "POWER6",
	.n_counter		= 6,
	.max_alternatives	= MAX_ALT,
	.add_fields		= 0x1555,
	.test_adder		= 0x3000,
	.compute_mmcr		= p6_compute_mmcr,
	.get_constraint		= p6_get_constraint,
	.get_alternatives	= p6_get_alternatives,
	.disable_pmc		= p6_disable_pmc,
	.limited_pmc_event	= p6_limited_pmc_event,
	.flags			= PPMU_LIMITED_PMC5_6 | PPMU_ALT_SIPR,
	.n_generic		= ARRAY_SIZE(power6_generic_events),
	.generic_events		= power6_generic_events,
	.cache_events		= &power6_cache_events,
};

/* Register the PMU only when actually running on a POWER6 CPU. */
static int __init init_power6_pmu(void)
{
	if (!cur_cpu_spec->oprofile_cpu_type ||
	    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power6"))
		return -ENODEV;

	return register_power_pmu(&power6_pmu);
}

early_initcall(init_power6_pmu);
gpl-2.0
tony0924/itri
arch/powerpc/perf/power6-pmu.c
7284
15833
/*
 * Performance counter support for POWER6 processors.
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/string.h>
#include <asm/reg.h>
#include <asm/cputable.h>

/*
 * Bits in event code for POWER6
 */
#define PM_PMC_SH	20	/* PMC number (1-based) for direct events */
#define PM_PMC_MSK	0x7
#define PM_PMC_MSKS	(PM_PMC_MSK << PM_PMC_SH)
#define PM_UNIT_SH	16	/* Unit event comes (TTMxSEL encoding) */
#define PM_UNIT_MSK	0xf
#define PM_UNIT_MSKS	(PM_UNIT_MSK << PM_UNIT_SH)
#define PM_LLAV		0x8000	/* Load lookahead match value */
#define PM_LLA		0x4000	/* Load lookahead match enable */
#define PM_BYTE_SH	12	/* Byte of event bus to use */
#define PM_BYTE_MSK	3
#define PM_SUBUNIT_SH	8	/* Subunit event comes from (NEST_SEL enc.) */
#define PM_SUBUNIT_MSK	7
#define PM_SUBUNIT_MSKS	(PM_SUBUNIT_MSK << PM_SUBUNIT_SH)
#define PM_PMCSEL_MSK	0xff	/* PMCxSEL value */
#define PM_BUSEVENT_MSK	0xf3700

/*
 * Bits in MMCR1 for POWER6
 */
#define MMCR1_TTM0SEL_SH	60
#define MMCR1_TTMSEL_SH(n)	(MMCR1_TTM0SEL_SH - (n) * 4)
#define MMCR1_TTMSEL_MSK	0xf
#define MMCR1_TTMSEL(m, n)	(((m) >> MMCR1_TTMSEL_SH(n)) & MMCR1_TTMSEL_MSK)
#define MMCR1_NESTSEL_SH	45
#define MMCR1_NESTSEL_MSK	0x7
#define MMCR1_NESTSEL(m)	(((m) >> MMCR1_NESTSEL_SH) & MMCR1_NESTSEL_MSK)
#define MMCR1_PMC1_LLA		(1ul << 44)
#define MMCR1_PMC1_LLA_VALUE	(1ul << 39)
#define MMCR1_PMC1_ADDR_SEL	(1ul << 35)
#define MMCR1_PMC1SEL_SH	24
#define MMCR1_PMCSEL_SH(n)	(MMCR1_PMC1SEL_SH - (n) * 8)
#define MMCR1_PMCSEL_MSK	0xff

/*
 * Map of which direct events on which PMCs are marked instruction events.
 * Indexed by PMCSEL value >> 1.
 * Bottom 4 bits are a map of which PMCs are interesting,
 * top 4 bits say what sort of event:
 *	0 = direct marked event,
 *	1 = byte decode event,
 *	4 = add/and event (PMC1 -> bits 0 & 4),
 *	5 = add/and event (PMC1 -> bits 1 & 5),
 *	6 = add/and event (PMC1 -> bits 2 & 6),
 *	7 = add/and event (PMC1 -> bits 3 & 7).
 */
static unsigned char direct_event_is_marked[0x60 >> 1] = {
	0,	/* 00 */
	0,	/* 02 */
	0,	/* 04 */
	0x07,	/* 06 PM_MRK_ST_CMPL, PM_MRK_ST_GPS, PM_MRK_ST_CMPL_INT */
	0x04,	/* 08 PM_MRK_DFU_FIN */
	0x06,	/* 0a PM_MRK_IFU_FIN, PM_MRK_INST_FIN */
	0,	/* 0c */
	0,	/* 0e */
	0x02,	/* 10 PM_MRK_INST_DISP */
	0x08,	/* 12 PM_MRK_LSU_DERAT_MISS */
	0,	/* 14 */
	0,	/* 16 */
	0x0c,	/* 18 PM_THRESH_TIMEO, PM_MRK_INST_FIN */
	0x0f,	/* 1a PM_MRK_INST_DISP, PM_MRK_{FXU,FPU,LSU}_FIN */
	0x01,	/* 1c PM_MRK_INST_ISSUED */
	0,	/* 1e */
	0,	/* 20 */
	0,	/* 22 */
	0,	/* 24 */
	0,	/* 26 */
	0x15,	/* 28 PM_MRK_DATA_FROM_L2MISS, PM_MRK_DATA_FROM_L3MISS */
	0,	/* 2a */
	0,	/* 2c */
	0,	/* 2e */
	0x4f,	/* 30 */
	0x7f,	/* 32 */
	0x4f,	/* 34 */
	0x5f,	/* 36 */
	0x6f,	/* 38 */
	0x4f,	/* 3a */
	0,	/* 3c */
	0x08,	/* 3e PM_MRK_INST_TIMEO */
	0x1f,	/* 40 */
	0x1f,	/* 42 */
	0x1f,	/* 44 */
	0x1f,	/* 46 */
	0x1f,	/* 48 */
	0x1f,	/* 4a */
	0x1f,	/* 4c */
	0x1f,	/* 4e */
	0,	/* 50 */
	0x05,	/* 52 PM_MRK_BR_TAKEN, PM_MRK_BR_MPRED */
	0x1c,	/* 54 PM_MRK_PTEG_FROM_L3MISS, PM_MRK_PTEG_FROM_L2MISS */
	0x02,	/* 56 PM_MRK_LD_MISS_L1 */
	0,	/* 58 */
	0,	/* 5a */
	0,	/* 5c */
	0,	/* 5e */
};

/*
 * Masks showing for each unit which bits are marked events.
 * These masks are in LE order, i.e. 0x00000001 is byte 0, bit 0.
 */
static u32 marked_bus_events[16] = {
	0x01000000,	/* direct events set 1: byte 3 bit 0 */
	0x00010000,	/* direct events set 2: byte 2 bit 0 */
	0, 0, 0, 0,	/* IDU, IFU, nest: nothing */
	0x00000088,	/* VMX set 1: byte 0 bits 3, 7 */
	0x000000c0,	/* VMX set 2: byte 0 bits 4-7 */
	0x04010000,	/* LSU set 1: byte 2 bit 0, byte 3 bit 2 */
	0xff010000u,	/* LSU set 2: byte 2 bit 0, all of byte 3 */
	0,		/* LSU set 3 */
	0x00000010,	/* VMX set 3: byte 0 bit 4 */
	0,		/* BFP set 1 */
	0x00000022,	/* BFP set 2: byte 0 bits 1, 5 */
	0, 0
};

/*
 * Returns 1 if event counts things relating to marked instructions
 * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
 */
static int power6_marked_instr_event(u64 event)
{
	int pmc, psel, ptype;
	int bit, byte, unit;
	u32 mask;

	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
	psel = (event & PM_PMCSEL_MSK) >> 1;	/* drop edge/level bit */
	if (pmc >= 5)
		return 0;

	bit = -1;
	if (psel < sizeof(direct_event_is_marked)) {
		/* Direct event: consult the table above. */
		ptype = direct_event_is_marked[psel];
		if (pmc == 0 || !(ptype & (1 << (pmc - 1))))
			return 0;
		ptype >>= 4;
		if (ptype == 0)
			return 1;
		if (ptype == 1)
			bit = 0;
		else
			bit = ptype ^ (pmc - 1);
	} else if ((psel & 0x48) == 0x40)
		bit = psel & 7;

	if (!(event & PM_BUSEVENT_MSK) || bit == -1)
		return 0;

	/* Bus event: check the per-unit marked-event mask. */
	byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
	unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
	mask = marked_bus_events[unit];
	return (mask >> (byte * 8 + bit)) & 1;
}

/*
 * Assign PMC numbers and compute MMCR1 value for a set of events
 */
static int p6_compute_mmcr(u64 event[], int n_ev,
			   unsigned int hwc[], unsigned long mmcr[])
{
	unsigned long mmcr1 = 0;
	unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS;
	int i;
	unsigned int pmc, ev, b, u, s, psel;
	unsigned int ttmset = 0;
	unsigned int pmc_inuse = 0;

	if (n_ev > 6)
		return -1;
	/* First pass: reserve the PMCs that events explicitly request. */
	for (i = 0; i < n_ev; ++i) {
		pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
		if (pmc) {
			if (pmc_inuse & (1 << (pmc - 1)))
				return -1;	/* collision! */
			pmc_inuse |= 1 << (pmc - 1);
		}
	}
	/* Second pass: place the remaining events and build MMCR1/MMCRA. */
	for (i = 0; i < n_ev; ++i) {
		ev = event[i];
		pmc = (ev >> PM_PMC_SH) & PM_PMC_MSK;
		if (pmc) {
			--pmc;
		} else {
			/* can go on any PMC; find a free one */
			for (pmc = 0; pmc < 4; ++pmc)
				if (!(pmc_inuse & (1 << pmc)))
					break;
			if (pmc >= 4)
				return -1;
			pmc_inuse |= 1 << pmc;
		}
		hwc[i] = pmc;
		psel = ev & PM_PMCSEL_MSK;
		if (ev & PM_BUSEVENT_MSK) {
			/* this event uses the event bus */
			b = (ev >> PM_BYTE_SH) & PM_BYTE_MSK;
			u = (ev >> PM_UNIT_SH) & PM_UNIT_MSK;
			/* check for conflict on this byte of event bus */
			if ((ttmset & (1 << b)) && MMCR1_TTMSEL(mmcr1, b) != u)
				return -1;
			mmcr1 |= (unsigned long)u << MMCR1_TTMSEL_SH(b);
			ttmset |= 1 << b;
			if (u == 5) {
				/* Nest events have a further mux */
				s = (ev >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK;
				if ((ttmset & 0x10) &&
				    MMCR1_NESTSEL(mmcr1) != s)
					return -1;
				ttmset |= 0x10;
				mmcr1 |= (unsigned long)s << MMCR1_NESTSEL_SH;
			}
			if (0x30 <= psel && psel <= 0x3d) {
				/* these need the PMCx_ADDR_SEL bits */
				if (b >= 2)
					mmcr1 |= MMCR1_PMC1_ADDR_SEL >> pmc;
			}
			/* bus select values are different for PMC3/4 */
			if (pmc >= 2 && (psel & 0x90) == 0x80)
				psel ^= 0x20;
		}
		if (ev & PM_LLA) {
			/* Load-lookahead matching for this PMC. */
			mmcr1 |= MMCR1_PMC1_LLA >> pmc;
			if (ev & PM_LLAV)
				mmcr1 |= MMCR1_PMC1_LLA_VALUE >> pmc;
		}
		if (power6_marked_instr_event(event[i]))
			mmcra |= MMCRA_SAMPLE_ENABLE;
		if (pmc < 4)
			mmcr1 |= (unsigned long)psel << MMCR1_PMCSEL_SH(pmc);
	}
	/* Enable interrupts only for the PMCs actually in use. */
	mmcr[0] = 0;
	if (pmc_inuse & 1)
		mmcr[0] = MMCR0_PMC1CE;
	if (pmc_inuse & 0xe)
		mmcr[0] |= MMCR0_PMCjCE;
	mmcr[1] = mmcr1;
	mmcr[2] = mmcra;
	return 0;
}

/*
 * Layout of constraint bits:
 *
 *	0-1	add field: number of uses of PMC1 (max 1)
 *	2-3, 4-5, 6-7, 8-9, 10-11: ditto for PMC2, 3, 4, 5, 6
 *	12-15	add field: number of uses of PMC1-4 (max 4)
 *	16-19	select field: unit on byte 0 of event bus
 *	20-23, 24-27, 28-31 ditto for bytes 1, 2, 3
 *	32-34	select field: nest (subunit) event selector
 */
static int p6_get_constraint(u64 event, unsigned long *maskp,
			     unsigned long *valp)
{
	int pmc, byte, sh, subunit;
	unsigned long mask = 0, value = 0;

	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
	if (pmc) {
		/* 0x500009/0x600005 are PM_RUN_INST_CMPL/PM_RUN_CYC
		 * (see event_alternatives below); only they may name PMC5/6. */
		if (pmc > 4 && !(event == 0x500009 || event == 0x600005))
			return -1;
		sh = (pmc - 1) * 2;
		mask |= 2 << sh;
		value |= 1 << sh;
	}
	if (event & PM_BUSEVENT_MSK) {
		byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
		sh = byte * 4 + (16 - PM_UNIT_SH);
		mask |= PM_UNIT_MSKS << sh;
		value |= (unsigned long)(event & PM_UNIT_MSKS) << sh;
		if ((event & PM_UNIT_MSKS) == (5 << PM_UNIT_SH)) {
			subunit = (event >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK;
			mask |= (unsigned long)PM_SUBUNIT_MSK << 32;
			value |= (unsigned long)subunit << 32;
		}
	}
	if (pmc <= 4) {
		mask |= 0x8000;	/* add field for count of PMC1-4 uses */
		value |= 0x1000;
	}
	*maskp = mask;
	*valp = value;
	return 0;
}

/* True if the event is hard-coded to one of the limited PMCs (5 or 6). */
static int p6_limited_pmc_event(u64 event)
{
	int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;

	return pmc == 5 || pmc == 6;
}

#define MAX_ALT	4	/* at most 4 alternatives for any event */

/*
 * Alternative event codes that count the same thing.  Each row is
 * sorted ascending (find_alternatives_list relies on this) and padded
 * with zeroes in unused slots.
 */
static const unsigned int event_alternatives[][MAX_ALT] = {
	{ 0x0130e8, 0x2000f6, 0x3000fc },	/* PM_PTEG_RELOAD_VALID */
	{ 0x080080, 0x10000d, 0x30000c, 0x4000f0 },	/* PM_LD_MISS_L1 */
	{ 0x080088, 0x200054, 0x3000f0 },	/* PM_ST_MISS_L1 */
	{ 0x10000a, 0x2000f4, 0x600005 },	/* PM_RUN_CYC */
	{ 0x10000b, 0x2000f5 },			/* PM_RUN_COUNT */
	{ 0x10000e, 0x400010 },			/* PM_PURR */
	{ 0x100010, 0x4000f8 },			/* PM_FLUSH */
	{ 0x10001a, 0x200010 },			/* PM_MRK_INST_DISP */
	{ 0x100026, 0x3000f8 },			/* PM_TB_BIT_TRANS */
	{ 0x100054, 0x2000f0 },			/* PM_ST_FIN */
	{ 0x100056, 0x2000fc },			/* PM_L1_ICACHE_MISS */
	{ 0x1000f0, 0x40000a },			/* PM_INST_IMC_MATCH_CMPL */
	{ 0x1000f8, 0x200008 },			/* PM_GCT_EMPTY_CYC */
	{ 0x1000fc, 0x400006 },			/* PM_LSU_DERAT_MISS_CYC */
	{ 0x20000e, 0x400007 },			/* PM_LSU_DERAT_MISS */
	{ 0x200012, 0x300012 },			/* PM_INST_DISP */
	{ 0x2000f2, 0x3000f2 },			/* PM_INST_DISP */
	{ 0x2000f8, 0x300010 },			/* PM_EXT_INT */
	{ 0x2000fe, 0x300056 },			/* PM_DATA_FROM_L2MISS */
	{ 0x2d0030, 0x30001a },			/* PM_MRK_FPU_FIN */
	{ 0x30000a, 0x400018 },			/* PM_MRK_INST_FIN */
	{ 0x3000f6, 0x40000e },			/* PM_L1_DCACHE_RELOAD_VALID */
	{ 0x3000fe, 0x400056 },			/* PM_DATA_FROM_L3MISS */
};

/*
 * This could be made more efficient with a binary search on
 * a presorted list, if necessary
 */
static int find_alternatives_list(u64 event)
{
	int i, j;
	unsigned int alt;

	for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
		/* Rows are sorted by first code, so we can stop early. */
		if (event < event_alternatives[i][0])
			return -1;
		for (j = 0; j < MAX_ALT; ++j) {
			alt = event_alternatives[i][j];
			if (!alt || event < alt)
				break;
			if (event == alt)
				return i;
		}
	}
	return -1;
}

/*
 * Fill alt[] with event codes equivalent to 'event' (including 'event'
 * itself in alt[0]), honouring the PPMU_* flags; returns the count.
 */
static int p6_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
	int i, j, nlim;
	unsigned int psel, pmc;
	unsigned int nalt = 1;
	u64 aevent;

	alt[0] = event;
	nlim = p6_limited_pmc_event(event);

	/* check the alternatives table */
	i = find_alternatives_list(event);
	if (i >= 0) {
		/* copy out alternatives from list */
		for (j = 0; j < MAX_ALT; ++j) {
			aevent = event_alternatives[i][j];
			if (!aevent)
				break;
			if (aevent != event)
				alt[nalt++] = aevent;
			nlim += p6_limited_pmc_event(aevent);
		}
	} else {
		/* Check for alternative ways of computing sum events */
		/* PMCSEL 0x32 counter N == PMCSEL 0x34 counter 5-N */
		psel = event & (PM_PMCSEL_MSK & ~1);	/* ignore edge bit */
		pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
		if (pmc && (psel == 0x32 || psel == 0x34))
			alt[nalt++] = ((event ^ 0x6) & ~PM_PMC_MSKS) |
				((5 - pmc) << PM_PMC_SH);

		/* PMCSEL 0x38 counter N == PMCSEL 0x3a counter N+/-2 */
		if (pmc && (psel == 0x38 || psel == 0x3a))
			alt[nalt++] = ((event ^ 0x2) & ~PM_PMC_MSKS) |
				((pmc > 2? pmc - 2: pmc + 2) << PM_PMC_SH);
	}

	if (flags & PPMU_ONLY_COUNT_RUN) {
		/*
		 * We're only counting in RUN state,
		 * so PM_CYC is equivalent to PM_RUN_CYC,
		 * PM_INST_CMPL === PM_RUN_INST_CMPL, PM_PURR === PM_RUN_PURR.
		 * This doesn't include alternatives that don't provide
		 * any extra flexibility in assigning PMCs (e.g.
		 * 0x10000a for PM_RUN_CYC vs. 0x1e for PM_CYC).
		 * Note that even with these additional alternatives
		 * we never end up with more than 4 alternatives for any event.
		 */
		j = nalt;
		for (i = 0; i < nalt; ++i) {
			switch (alt[i]) {
			case 0x1e:	/* PM_CYC */
				alt[j++] = 0x600005;	/* PM_RUN_CYC */
				++nlim;
				break;
			case 0x10000a:	/* PM_RUN_CYC */
				alt[j++] = 0x1e;	/* PM_CYC */
				break;
			case 2:		/* PM_INST_CMPL */
				alt[j++] = 0x500009;	/* PM_RUN_INST_CMPL */
				++nlim;
				break;
			case 0x500009:	/* PM_RUN_INST_CMPL */
				alt[j++] = 2;	/* PM_INST_CMPL */
				break;
			case 0x10000e:	/* PM_PURR */
				alt[j++] = 0x4000f4;	/* PM_RUN_PURR */
				break;
			case 0x4000f4:	/* PM_RUN_PURR */
				alt[j++] = 0x10000e;	/* PM_PURR */
				break;
			}
		}
		nalt = j;
	}

	if (!(flags & PPMU_LIMITED_PMC_OK) && nlim) {
		/* remove the limited PMC events */
		j = 0;
		for (i = 0; i < nalt; ++i) {
			if (!p6_limited_pmc_event(alt[i])) {
				alt[j] = alt[i];
				++j;
			}
		}
		nalt = j;
	} else if ((flags & PPMU_LIMITED_PMC_REQD) && nlim < nalt) {
		/* remove all but the limited PMC events */
		j = 0;
		for (i = 0; i < nalt; ++i) {
			if (p6_limited_pmc_event(alt[i])) {
				alt[j] = alt[i];
				++j;
			}
		}
		nalt = j;
	}

	return nalt;
}

static void p6_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
	/* Set PMCxSEL to 0 to disable PMCx */
	if (pmc <= 3)
		mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc));
}

/* Map of generic perf hardware event types to POWER6 raw event codes. */
static int power6_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x1e,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 2,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x280030,	/* LD_REF_L1 */
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x30000c,	/* LD_MISS_L1 */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x410a0,	/* BR_PRED */
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x400052,	/* BR_MPRED */
};

#define C(x)	PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 * The "DTLB" and "ITLB" events relate to the DERAT and IERAT.
 */
static int power6_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[C(L1D)] = {		/*	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0x280030,	0x80080		},
		[C(OP_WRITE)] = {	0x180032,	0x80088		},
		[C(OP_PREFETCH)] = {	0x810a4,	0		},
	},
	[C(L1I)] = {		/*	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0,		0x100056	},
		[C(OP_WRITE)] = {	-1,		-1		},
		[C(OP_PREFETCH)] = {	0x4008c,	0		},
	},
	[C(LL)] = {		/*	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0x150730,	0x250532	},
		[C(OP_WRITE)] = {	0x250432,	0x150432	},
		[C(OP_PREFETCH)] = {	0x810a6,	0		},
	},
	[C(DTLB)] = {		/*	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0,		0x20000e	},
		[C(OP_WRITE)] = {	-1,		-1		},
		[C(OP_PREFETCH)] = {	-1,		-1		},
	},
	[C(ITLB)] = {		/*	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0,		0x420ce		},
		[C(OP_WRITE)] = {	-1,		-1		},
		[C(OP_PREFETCH)] = {	-1,		-1		},
	},
	[C(BPU)] = {		/*	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0x430e6,	0x400052	},
		[C(OP_WRITE)] = {	-1,		-1		},
		[C(OP_PREFETCH)] = {	-1,		-1		},
	},
	[C(NODE)] = {		/*	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	-1,		-1		},
		[C(OP_WRITE)] = {	-1,		-1		},
		[C(OP_PREFETCH)] = {	-1,		-1		},
	},
};

/* POWER6 PMU description handed to the powerpc perf core. */
static struct power_pmu power6_pmu = {
	.name			= "POWER6",
	.n_counter		= 6,
	.max_alternatives	= MAX_ALT,
	.add_fields		= 0x1555,
	.test_adder		= 0x3000,
	.compute_mmcr		= p6_compute_mmcr,
	.get_constraint		= p6_get_constraint,
	.get_alternatives	= p6_get_alternatives,
	.disable_pmc		= p6_disable_pmc,
	.limited_pmc_event	= p6_limited_pmc_event,
	.flags			= PPMU_LIMITED_PMC5_6 | PPMU_ALT_SIPR,
	.n_generic		= ARRAY_SIZE(power6_generic_events),
	.generic_events		= power6_generic_events,
	.cache_events		= &power6_cache_events,
};

/* Register the PMU only when actually running on a POWER6 CPU. */
static int __init init_power6_pmu(void)
{
	if (!cur_cpu_spec->oprofile_cpu_type ||
	    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power6"))
		return -ENODEV;

	return register_power_pmu(&power6_pmu);
}

early_initcall(init_power6_pmu);
gpl-2.0
g7755725/android_kernel_samsung_jf
drivers/net/wireless/iwlegacy/4965-debug.c
7540
30479
/****************************************************************************** * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, * USA * * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *****************************************************************************/ #include "common.h" #include "4965.h" static const char *fmt_value = " %-30s %10u\n"; static const char *fmt_table = " %-30s %10u %10u %10u %10u\n"; static const char *fmt_header = "%-32s current cumulative delta max\n"; static int il4965_stats_flag(struct il_priv *il, char *buf, int bufsz) { int p = 0; u32 flag; flag = le32_to_cpu(il->_4965.stats.flag); p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag); if (flag & UCODE_STATS_CLEAR_MSK) p += scnprintf(buf + p, bufsz - p, "\tStatistics have been cleared\n"); p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n", (flag & UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" : "5.2 GHz"); p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n", (flag & UCODE_STATS_NARROW_BAND_MSK) ? 
"enabled" : "disabled"); return p; } ssize_t il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct il_priv *il = file->private_data; int pos = 0; char *buf; int bufsz = sizeof(struct stats_rx_phy) * 40 + sizeof(struct stats_rx_non_phy) * 40 + sizeof(struct stats_rx_ht_phy) * 40 + 400; ssize_t ret; struct stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm; struct stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck; struct stats_rx_non_phy *general, *accum_general; struct stats_rx_non_phy *delta_general, *max_general; struct stats_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht; if (!il_is_alive(il)) return -EAGAIN; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) { IL_ERR("Can not allocate Buffer\n"); return -ENOMEM; } /* * the statistic information display here is based on * the last stats notification from uCode * might not reflect the current uCode activity */ ofdm = &il->_4965.stats.rx.ofdm; cck = &il->_4965.stats.rx.cck; general = &il->_4965.stats.rx.general; ht = &il->_4965.stats.rx.ofdm_ht; accum_ofdm = &il->_4965.accum_stats.rx.ofdm; accum_cck = &il->_4965.accum_stats.rx.cck; accum_general = &il->_4965.accum_stats.rx.general; accum_ht = &il->_4965.accum_stats.rx.ofdm_ht; delta_ofdm = &il->_4965.delta_stats.rx.ofdm; delta_cck = &il->_4965.delta_stats.rx.cck; delta_general = &il->_4965.delta_stats.rx.general; delta_ht = &il->_4965.delta_stats.rx.ofdm_ht; max_ofdm = &il->_4965.max_delta.rx.ofdm; max_cck = &il->_4965.max_delta.rx.cck; max_general = &il->_4965.max_delta.rx.general; max_ht = &il->_4965.max_delta.rx.ofdm_ht; pos += il4965_stats_flag(il, buf, bufsz); pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - OFDM:"); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:", le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt, delta_ofdm->ina_cnt, max_ofdm->ina_cnt); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:", le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt, 
delta_ofdm->fina_cnt, max_ofdm->fina_cnt); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:", le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err, delta_ofdm->plcp_err, max_ofdm->plcp_err); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:", le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err, delta_ofdm->crc32_err, max_ofdm->crc32_err); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:", le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err, delta_ofdm->overrun_err, max_ofdm->overrun_err); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:", le32_to_cpu(ofdm->early_overrun_err), accum_ofdm->early_overrun_err, delta_ofdm->early_overrun_err, max_ofdm->early_overrun_err); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:", le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good, delta_ofdm->crc32_good, max_ofdm->crc32_good); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:", le32_to_cpu(ofdm->false_alarm_cnt), accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt, max_ofdm->false_alarm_cnt); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:", le32_to_cpu(ofdm->fina_sync_err_cnt), accum_ofdm->fina_sync_err_cnt, delta_ofdm->fina_sync_err_cnt, max_ofdm->fina_sync_err_cnt); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:", le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:", le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout, delta_ofdm->fina_timeout, max_ofdm->fina_timeout); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:", le32_to_cpu(ofdm->unresponded_rts), accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts, max_ofdm->unresponded_rts); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:", le32_to_cpu(ofdm->rxe_frame_limit_overrun), 
accum_ofdm->rxe_frame_limit_overrun, delta_ofdm->rxe_frame_limit_overrun, max_ofdm->rxe_frame_limit_overrun); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:", le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:", le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:", le32_to_cpu(ofdm->sent_ba_rsp_cnt), accum_ofdm->sent_ba_rsp_cnt, delta_ofdm->sent_ba_rsp_cnt, max_ofdm->sent_ba_rsp_cnt); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:", le32_to_cpu(ofdm->dsp_self_kill), accum_ofdm->dsp_self_kill, delta_ofdm->dsp_self_kill, max_ofdm->dsp_self_kill); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:", le32_to_cpu(ofdm->mh_format_err), accum_ofdm->mh_format_err, delta_ofdm->mh_format_err, max_ofdm->mh_format_err); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "re_acq_main_rssi_sum:", le32_to_cpu(ofdm->re_acq_main_rssi_sum), accum_ofdm->re_acq_main_rssi_sum, delta_ofdm->re_acq_main_rssi_sum, max_ofdm->re_acq_main_rssi_sum); pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - CCK:"); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:", le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt, delta_cck->ina_cnt, max_cck->ina_cnt); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:", le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt, delta_cck->fina_cnt, max_cck->fina_cnt); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:", le32_to_cpu(cck->plcp_err), accum_cck->plcp_err, delta_cck->plcp_err, max_cck->plcp_err); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:", le32_to_cpu(cck->crc32_err), accum_cck->crc32_err, delta_cck->crc32_err, max_cck->crc32_err); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, 
"overrun_err:", le32_to_cpu(cck->overrun_err), accum_cck->overrun_err, delta_cck->overrun_err, max_cck->overrun_err); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:", le32_to_cpu(cck->early_overrun_err), accum_cck->early_overrun_err, delta_cck->early_overrun_err, max_cck->early_overrun_err); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:", le32_to_cpu(cck->crc32_good), accum_cck->crc32_good, delta_cck->crc32_good, max_cck->crc32_good); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:", le32_to_cpu(cck->false_alarm_cnt), accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:", le32_to_cpu(cck->fina_sync_err_cnt), accum_cck->fina_sync_err_cnt, delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:", le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout, delta_cck->sfd_timeout, max_cck->sfd_timeout); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:", le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout, delta_cck->fina_timeout, max_cck->fina_timeout); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:", le32_to_cpu(cck->unresponded_rts), accum_cck->unresponded_rts, delta_cck->unresponded_rts, max_cck->unresponded_rts); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:", le32_to_cpu(cck->rxe_frame_limit_overrun), accum_cck->rxe_frame_limit_overrun, delta_cck->rxe_frame_limit_overrun, max_cck->rxe_frame_limit_overrun); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:", le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:", le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt); pos += 
scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:", le32_to_cpu(cck->sent_ba_rsp_cnt), accum_cck->sent_ba_rsp_cnt, delta_cck->sent_ba_rsp_cnt, max_cck->sent_ba_rsp_cnt); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:", le32_to_cpu(cck->dsp_self_kill), accum_cck->dsp_self_kill, delta_cck->dsp_self_kill, max_cck->dsp_self_kill); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:", le32_to_cpu(cck->mh_format_err), accum_cck->mh_format_err, delta_cck->mh_format_err, max_cck->mh_format_err); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "re_acq_main_rssi_sum:", le32_to_cpu(cck->re_acq_main_rssi_sum), accum_cck->re_acq_main_rssi_sum, delta_cck->re_acq_main_rssi_sum, max_cck->re_acq_main_rssi_sum); pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - GENERAL:"); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_cts:", le32_to_cpu(general->bogus_cts), accum_general->bogus_cts, delta_general->bogus_cts, max_general->bogus_cts); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_ack:", le32_to_cpu(general->bogus_ack), accum_general->bogus_ack, delta_general->bogus_ack, max_general->bogus_ack); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "non_bssid_frames:", le32_to_cpu(general->non_bssid_frames), accum_general->non_bssid_frames, delta_general->non_bssid_frames, max_general->non_bssid_frames); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "filtered_frames:", le32_to_cpu(general->filtered_frames), accum_general->filtered_frames, delta_general->filtered_frames, max_general->filtered_frames); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "non_channel_beacons:", le32_to_cpu(general->non_channel_beacons), accum_general->non_channel_beacons, delta_general->non_channel_beacons, max_general->non_channel_beacons); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_beacons:", le32_to_cpu(general->channel_beacons), accum_general->channel_beacons, 
delta_general->channel_beacons, max_general->channel_beacons); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "num_missed_bcon:", le32_to_cpu(general->num_missed_bcon), accum_general->num_missed_bcon, delta_general->num_missed_bcon, max_general->num_missed_bcon); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "adc_rx_saturation_time:", le32_to_cpu(general->adc_rx_saturation_time), accum_general->adc_rx_saturation_time, delta_general->adc_rx_saturation_time, max_general->adc_rx_saturation_time); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_detect_search_tm:", le32_to_cpu(general->ina_detection_search_time), accum_general->ina_detection_search_time, delta_general->ina_detection_search_time, max_general->ina_detection_search_time); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_silence_rssi_a:", le32_to_cpu(general->beacon_silence_rssi_a), accum_general->beacon_silence_rssi_a, delta_general->beacon_silence_rssi_a, max_general->beacon_silence_rssi_a); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_silence_rssi_b:", le32_to_cpu(general->beacon_silence_rssi_b), accum_general->beacon_silence_rssi_b, delta_general->beacon_silence_rssi_b, max_general->beacon_silence_rssi_b); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_silence_rssi_c:", le32_to_cpu(general->beacon_silence_rssi_c), accum_general->beacon_silence_rssi_c, delta_general->beacon_silence_rssi_c, max_general->beacon_silence_rssi_c); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "interference_data_flag:", le32_to_cpu(general->interference_data_flag), accum_general->interference_data_flag, delta_general->interference_data_flag, max_general->interference_data_flag); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_load:", le32_to_cpu(general->channel_load), accum_general->channel_load, delta_general->channel_load, max_general->channel_load); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_false_alarms:", 
le32_to_cpu(general->dsp_false_alarms), accum_general->dsp_false_alarms, delta_general->dsp_false_alarms, max_general->dsp_false_alarms); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_a:", le32_to_cpu(general->beacon_rssi_a), accum_general->beacon_rssi_a, delta_general->beacon_rssi_a, max_general->beacon_rssi_a); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_b:", le32_to_cpu(general->beacon_rssi_b), accum_general->beacon_rssi_b, delta_general->beacon_rssi_b, max_general->beacon_rssi_b); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_c:", le32_to_cpu(general->beacon_rssi_c), accum_general->beacon_rssi_c, delta_general->beacon_rssi_c, max_general->beacon_rssi_c); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_a:", le32_to_cpu(general->beacon_energy_a), accum_general->beacon_energy_a, delta_general->beacon_energy_a, max_general->beacon_energy_a); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_b:", le32_to_cpu(general->beacon_energy_b), accum_general->beacon_energy_b, delta_general->beacon_energy_b, max_general->beacon_energy_b); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_c:", le32_to_cpu(general->beacon_energy_c), accum_general->beacon_energy_c, delta_general->beacon_energy_c, max_general->beacon_energy_c); pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - OFDM_HT:"); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:", le32_to_cpu(ht->plcp_err), accum_ht->plcp_err, delta_ht->plcp_err, max_ht->plcp_err); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:", le32_to_cpu(ht->overrun_err), accum_ht->overrun_err, delta_ht->overrun_err, max_ht->overrun_err); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:", le32_to_cpu(ht->early_overrun_err), accum_ht->early_overrun_err, delta_ht->early_overrun_err, max_ht->early_overrun_err); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, 
"crc32_good:", le32_to_cpu(ht->crc32_good), accum_ht->crc32_good, delta_ht->crc32_good, max_ht->crc32_good); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:", le32_to_cpu(ht->crc32_err), accum_ht->crc32_err, delta_ht->crc32_err, max_ht->crc32_err); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:", le32_to_cpu(ht->mh_format_err), accum_ht->mh_format_err, delta_ht->mh_format_err, max_ht->mh_format_err); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_crc32_good:", le32_to_cpu(ht->agg_crc32_good), accum_ht->agg_crc32_good, delta_ht->agg_crc32_good, max_ht->agg_crc32_good); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_mpdu_cnt:", le32_to_cpu(ht->agg_mpdu_cnt), accum_ht->agg_mpdu_cnt, delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_cnt:", le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt, delta_ht->agg_cnt, max_ht->agg_cnt); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "unsupport_mcs:", le32_to_cpu(ht->unsupport_mcs), accum_ht->unsupport_mcs, delta_ht->unsupport_mcs, max_ht->unsupport_mcs); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } ssize_t il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct il_priv *il = file->private_data; int pos = 0; char *buf; int bufsz = (sizeof(struct stats_tx) * 48) + 250; ssize_t ret; struct stats_tx *tx, *accum_tx, *delta_tx, *max_tx; if (!il_is_alive(il)) return -EAGAIN; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) { IL_ERR("Can not allocate Buffer\n"); return -ENOMEM; } /* the statistic information display here is based on * the last stats notification from uCode * might not reflect the current uCode activity */ tx = &il->_4965.stats.tx; accum_tx = &il->_4965.accum_stats.tx; delta_tx = &il->_4965.delta_stats.tx; max_tx = &il->_4965.max_delta.tx; pos += il4965_stats_flag(il, buf, bufsz); pos += scnprintf(buf + pos, bufsz - pos, 
fmt_header, "Statistics_Tx:"); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "preamble:", le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt, delta_tx->preamble_cnt, max_tx->preamble_cnt); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_detected_cnt:", le32_to_cpu(tx->rx_detected_cnt), accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_defer_cnt:", le32_to_cpu(tx->bt_prio_defer_cnt), accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt, max_tx->bt_prio_defer_cnt); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_kill_cnt:", le32_to_cpu(tx->bt_prio_kill_cnt), accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt, max_tx->bt_prio_kill_cnt); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "few_bytes_cnt:", le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt, delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "cts_timeout:", le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout, delta_tx->cts_timeout, max_tx->cts_timeout); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "ack_timeout:", le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout, delta_tx->ack_timeout, max_tx->ack_timeout); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "expected_ack_cnt:", le32_to_cpu(tx->expected_ack_cnt), accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt, max_tx->expected_ack_cnt); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "actual_ack_cnt:", le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt, delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "dump_msdu_cnt:", le32_to_cpu(tx->dump_msdu_cnt), accum_tx->dump_msdu_cnt, delta_tx->dump_msdu_cnt, max_tx->dump_msdu_cnt); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "abort_nxt_frame_mismatch:", le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt), 
accum_tx->burst_abort_next_frame_mismatch_cnt, delta_tx->burst_abort_next_frame_mismatch_cnt, max_tx->burst_abort_next_frame_mismatch_cnt); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "abort_missing_nxt_frame:", le32_to_cpu(tx->burst_abort_missing_next_frame_cnt), accum_tx->burst_abort_missing_next_frame_cnt, delta_tx->burst_abort_missing_next_frame_cnt, max_tx->burst_abort_missing_next_frame_cnt); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "cts_timeout_collision:", le32_to_cpu(tx->cts_timeout_collision), accum_tx->cts_timeout_collision, delta_tx->cts_timeout_collision, max_tx->cts_timeout_collision); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "ack_ba_timeout_collision:", le32_to_cpu(tx->ack_or_ba_timeout_collision), accum_tx->ack_or_ba_timeout_collision, delta_tx->ack_or_ba_timeout_collision, max_tx->ack_or_ba_timeout_collision); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "agg ba_timeout:", le32_to_cpu(tx->agg.ba_timeout), accum_tx->agg.ba_timeout, delta_tx->agg.ba_timeout, max_tx->agg.ba_timeout); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "agg ba_resched_frames:", le32_to_cpu(tx->agg.ba_reschedule_frames), accum_tx->agg.ba_reschedule_frames, delta_tx->agg.ba_reschedule_frames, max_tx->agg.ba_reschedule_frames); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "agg scd_query_agg_frame:", le32_to_cpu(tx->agg.scd_query_agg_frame_cnt), accum_tx->agg.scd_query_agg_frame_cnt, delta_tx->agg.scd_query_agg_frame_cnt, max_tx->agg.scd_query_agg_frame_cnt); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "agg scd_query_no_agg:", le32_to_cpu(tx->agg.scd_query_no_agg), accum_tx->agg.scd_query_no_agg, delta_tx->agg.scd_query_no_agg, max_tx->agg.scd_query_no_agg); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "agg scd_query_agg:", le32_to_cpu(tx->agg.scd_query_agg), accum_tx->agg.scd_query_agg, delta_tx->agg.scd_query_agg, max_tx->agg.scd_query_agg); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "agg 
scd_query_mismatch:", le32_to_cpu(tx->agg.scd_query_mismatch), accum_tx->agg.scd_query_mismatch, delta_tx->agg.scd_query_mismatch, max_tx->agg.scd_query_mismatch); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "agg frame_not_ready:", le32_to_cpu(tx->agg.frame_not_ready), accum_tx->agg.frame_not_ready, delta_tx->agg.frame_not_ready, max_tx->agg.frame_not_ready); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "agg underrun:", le32_to_cpu(tx->agg.underrun), accum_tx->agg.underrun, delta_tx->agg.underrun, max_tx->agg.underrun); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "agg bt_prio_kill:", le32_to_cpu(tx->agg.bt_prio_kill), accum_tx->agg.bt_prio_kill, delta_tx->agg.bt_prio_kill, max_tx->agg.bt_prio_kill); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "agg rx_ba_rsp_cnt:", le32_to_cpu(tx->agg.rx_ba_rsp_cnt), accum_tx->agg.rx_ba_rsp_cnt, delta_tx->agg.rx_ba_rsp_cnt, max_tx->agg.rx_ba_rsp_cnt); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } ssize_t il4965_ucode_general_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct il_priv *il = file->private_data; int pos = 0; char *buf; int bufsz = sizeof(struct stats_general) * 10 + 300; ssize_t ret; struct stats_general_common *general, *accum_general; struct stats_general_common *delta_general, *max_general; struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg; struct stats_div *div, *accum_div, *delta_div, *max_div; if (!il_is_alive(il)) return -EAGAIN; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) { IL_ERR("Can not allocate Buffer\n"); return -ENOMEM; } /* the statistic information display here is based on * the last stats notification from uCode * might not reflect the current uCode activity */ general = &il->_4965.stats.general.common; dbg = &il->_4965.stats.general.common.dbg; div = &il->_4965.stats.general.common.div; accum_general = &il->_4965.accum_stats.general.common; accum_dbg = 
&il->_4965.accum_stats.general.common.dbg; accum_div = &il->_4965.accum_stats.general.common.div; delta_general = &il->_4965.delta_stats.general.common; max_general = &il->_4965.max_delta.general.common; delta_dbg = &il->_4965.delta_stats.general.common.dbg; max_dbg = &il->_4965.max_delta.general.common.dbg; delta_div = &il->_4965.delta_stats.general.common.div; max_div = &il->_4965.max_delta.general.common.div; pos += il4965_stats_flag(il, buf, bufsz); pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_General:"); pos += scnprintf(buf + pos, bufsz - pos, fmt_value, "temperature:", le32_to_cpu(general->temperature)); pos += scnprintf(buf + pos, bufsz - pos, fmt_value, "ttl_timestamp:", le32_to_cpu(general->ttl_timestamp)); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_check:", le32_to_cpu(dbg->burst_check), accum_dbg->burst_check, delta_dbg->burst_check, max_dbg->burst_check); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_count:", le32_to_cpu(dbg->burst_count), accum_dbg->burst_count, delta_dbg->burst_count, max_dbg->burst_count); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "wait_for_silence_timeout_count:", le32_to_cpu(dbg->wait_for_silence_timeout_cnt), accum_dbg->wait_for_silence_timeout_cnt, delta_dbg->wait_for_silence_timeout_cnt, max_dbg->wait_for_silence_timeout_cnt); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "sleep_time:", le32_to_cpu(general->sleep_time), accum_general->sleep_time, delta_general->sleep_time, max_general->sleep_time); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_out:", le32_to_cpu(general->slots_out), accum_general->slots_out, delta_general->slots_out, max_general->slots_out); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_idle:", le32_to_cpu(general->slots_idle), accum_general->slots_idle, delta_general->slots_idle, max_general->slots_idle); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_a:", le32_to_cpu(div->tx_on_a), accum_div->tx_on_a, 
delta_div->tx_on_a, max_div->tx_on_a); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_b:", le32_to_cpu(div->tx_on_b), accum_div->tx_on_b, delta_div->tx_on_b, max_div->tx_on_b); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "exec_time:", le32_to_cpu(div->exec_time), accum_div->exec_time, delta_div->exec_time, max_div->exec_time); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "probe_time:", le32_to_cpu(div->probe_time), accum_div->probe_time, delta_div->probe_time, max_div->probe_time); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_enable_counter:", le32_to_cpu(general->rx_enable_counter), accum_general->rx_enable_counter, delta_general->rx_enable_counter, max_general->rx_enable_counter); pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "num_of_sos_states:", le32_to_cpu(general->num_of_sos_states), accum_general->num_of_sos_states, delta_general->num_of_sos_states, max_general->num_of_sos_states); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } const struct il_debugfs_ops il4965_debugfs_ops = { .rx_stats_read = il4965_ucode_rx_stats_read, .tx_stats_read = il4965_ucode_tx_stats_read, .general_stats_read = il4965_ucode_general_stats_read, };
gpl-2.0
tarunkapadia93/gk_a6k
drivers/scsi/sr_ioctl.c
8052
15538
#include <linux/kernel.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/blkdev.h> #include <linux/module.h> #include <linux/blkpg.h> #include <linux/cdrom.h> #include <linux/delay.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/uaccess.h> #include <scsi/scsi.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_ioctl.h> #include <scsi/scsi_cmnd.h> #include "sr.h" #if 0 #define DEBUG #endif /* The sr_is_xa() seems to trigger firmware bugs with some drives :-( * It is off by default and can be turned on with this module parameter */ static int xa_test = 0; module_param(xa_test, int, S_IRUGO | S_IWUSR); /* primitive to determine whether we need to have GFP_DMA set based on * the status of the unchecked_isa_dma flag in the host structure */ #define SR_GFP_DMA(cd) (((cd)->device->host->unchecked_isa_dma) ? GFP_DMA : 0) static int sr_read_tochdr(struct cdrom_device_info *cdi, struct cdrom_tochdr *tochdr) { struct scsi_cd *cd = cdi->handle; struct packet_command cgc; int result; unsigned char *buffer; buffer = kmalloc(32, GFP_KERNEL | SR_GFP_DMA(cd)); if (!buffer) return -ENOMEM; memset(&cgc, 0, sizeof(struct packet_command)); cgc.timeout = IOCTL_TIMEOUT; cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP; cgc.cmd[8] = 12; /* LSB of length */ cgc.buffer = buffer; cgc.buflen = 12; cgc.quiet = 1; cgc.data_direction = DMA_FROM_DEVICE; result = sr_do_ioctl(cd, &cgc); tochdr->cdth_trk0 = buffer[2]; tochdr->cdth_trk1 = buffer[3]; kfree(buffer); return result; } static int sr_read_tocentry(struct cdrom_device_info *cdi, struct cdrom_tocentry *tocentry) { struct scsi_cd *cd = cdi->handle; struct packet_command cgc; int result; unsigned char *buffer; buffer = kmalloc(32, GFP_KERNEL | SR_GFP_DMA(cd)); if (!buffer) return -ENOMEM; memset(&cgc, 0, sizeof(struct packet_command)); cgc.timeout = IOCTL_TIMEOUT; cgc.cmd[0] = 
GPCMD_READ_TOC_PMA_ATIP; cgc.cmd[1] |= (tocentry->cdte_format == CDROM_MSF) ? 0x02 : 0; cgc.cmd[6] = tocentry->cdte_track; cgc.cmd[8] = 12; /* LSB of length */ cgc.buffer = buffer; cgc.buflen = 12; cgc.data_direction = DMA_FROM_DEVICE; result = sr_do_ioctl(cd, &cgc); tocentry->cdte_ctrl = buffer[5] & 0xf; tocentry->cdte_adr = buffer[5] >> 4; tocentry->cdte_datamode = (tocentry->cdte_ctrl & 0x04) ? 1 : 0; if (tocentry->cdte_format == CDROM_MSF) { tocentry->cdte_addr.msf.minute = buffer[9]; tocentry->cdte_addr.msf.second = buffer[10]; tocentry->cdte_addr.msf.frame = buffer[11]; } else tocentry->cdte_addr.lba = (((((buffer[8] << 8) + buffer[9]) << 8) + buffer[10]) << 8) + buffer[11]; kfree(buffer); return result; } #define IOCTL_RETRIES 3 /* ATAPI drives don't have a SCMD_PLAYAUDIO_TI command. When these drives are emulating a SCSI device via the idescsi module, they need to have CDROMPLAYTRKIND commands translated into CDROMPLAYMSF commands for them */ static int sr_fake_playtrkind(struct cdrom_device_info *cdi, struct cdrom_ti *ti) { struct cdrom_tocentry trk0_te, trk1_te; struct cdrom_tochdr tochdr; struct packet_command cgc; int ntracks, ret; ret = sr_read_tochdr(cdi, &tochdr); if (ret) return ret; ntracks = tochdr.cdth_trk1 - tochdr.cdth_trk0 + 1; if (ti->cdti_trk1 == ntracks) ti->cdti_trk1 = CDROM_LEADOUT; else if (ti->cdti_trk1 != CDROM_LEADOUT) ti->cdti_trk1 ++; trk0_te.cdte_track = ti->cdti_trk0; trk0_te.cdte_format = CDROM_MSF; trk1_te.cdte_track = ti->cdti_trk1; trk1_te.cdte_format = CDROM_MSF; ret = sr_read_tocentry(cdi, &trk0_te); if (ret) return ret; ret = sr_read_tocentry(cdi, &trk1_te); if (ret) return ret; memset(&cgc, 0, sizeof(struct packet_command)); cgc.cmd[0] = GPCMD_PLAY_AUDIO_MSF; cgc.cmd[3] = trk0_te.cdte_addr.msf.minute; cgc.cmd[4] = trk0_te.cdte_addr.msf.second; cgc.cmd[5] = trk0_te.cdte_addr.msf.frame; cgc.cmd[6] = trk1_te.cdte_addr.msf.minute; cgc.cmd[7] = trk1_te.cdte_addr.msf.second; cgc.cmd[8] = trk1_te.cdte_addr.msf.frame; 
cgc.data_direction = DMA_NONE; cgc.timeout = IOCTL_TIMEOUT; return sr_do_ioctl(cdi->handle, &cgc); } static int sr_play_trkind(struct cdrom_device_info *cdi, struct cdrom_ti *ti) { struct scsi_cd *cd = cdi->handle; struct packet_command cgc; int result; memset(&cgc, 0, sizeof(struct packet_command)); cgc.timeout = IOCTL_TIMEOUT; cgc.cmd[0] = GPCMD_PLAYAUDIO_TI; cgc.cmd[4] = ti->cdti_trk0; cgc.cmd[5] = ti->cdti_ind0; cgc.cmd[7] = ti->cdti_trk1; cgc.cmd[8] = ti->cdti_ind1; cgc.data_direction = DMA_NONE; result = sr_do_ioctl(cd, &cgc); if (result == -EDRIVE_CANT_DO_THIS) result = sr_fake_playtrkind(cdi, ti); return result; } /* We do our own retries because we want to know what the specific error code is. Normally the UNIT_ATTENTION code will automatically clear after one error */ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) { struct scsi_device *SDev; struct scsi_sense_hdr sshdr; int result, err = 0, retries = 0; struct request_sense *sense = cgc->sense; SDev = cd->device; if (!sense) { sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); if (!sense) { err = -ENOMEM; goto out; } } retry: if (!scsi_block_when_processing_errors(SDev)) { err = -ENODEV; goto out; } memset(sense, 0, sizeof(*sense)); result = scsi_execute(SDev, cgc->cmd, cgc->data_direction, cgc->buffer, cgc->buflen, (char *)sense, cgc->timeout, IOCTL_RETRIES, 0, NULL); scsi_normalize_sense((char *)sense, sizeof(*sense), &sshdr); /* Minimal error checking. Ignore cases we know about, and report the rest. 
*/ if (driver_byte(result) != 0) { switch (sshdr.sense_key) { case UNIT_ATTENTION: SDev->changed = 1; if (!cgc->quiet) printk(KERN_INFO "%s: disc change detected.\n", cd->cdi.name); if (retries++ < 10) goto retry; err = -ENOMEDIUM; break; case NOT_READY: /* This happens if there is no disc in drive */ if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) { /* sense: Logical unit is in process of becoming ready */ if (!cgc->quiet) printk(KERN_INFO "%s: CDROM not ready yet.\n", cd->cdi.name); if (retries++ < 10) { /* sleep 2 sec and try again */ ssleep(2); goto retry; } else { /* 20 secs are enough? */ err = -ENOMEDIUM; break; } } if (!cgc->quiet) printk(KERN_INFO "%s: CDROM not ready. Make sure there is a disc in the drive.\n", cd->cdi.name); #ifdef DEBUG scsi_print_sense_hdr("sr", &sshdr); #endif err = -ENOMEDIUM; break; case ILLEGAL_REQUEST: err = -EIO; if (sshdr.asc == 0x20 && sshdr.ascq == 0x00) /* sense: Invalid command operation code */ err = -EDRIVE_CANT_DO_THIS; #ifdef DEBUG __scsi_print_command(cgc->cmd); scsi_print_sense_hdr("sr", &sshdr); #endif break; default: printk(KERN_ERR "%s: CDROM (ioctl) error, command: ", cd->cdi.name); __scsi_print_command(cgc->cmd); scsi_print_sense_hdr("sr", &sshdr); err = -EIO; } } /* Wake up a process waiting for device */ out: if (!cgc->sense) kfree(sense); cgc->stat = err; return err; } /* ---------------------------------------------------------------------- */ /* interface to cdrom.c */ int sr_tray_move(struct cdrom_device_info *cdi, int pos) { Scsi_CD *cd = cdi->handle; struct packet_command cgc; memset(&cgc, 0, sizeof(struct packet_command)); cgc.cmd[0] = GPCMD_START_STOP_UNIT; cgc.cmd[4] = (pos == 0) ? 0x03 /* close */ : 0x02 /* eject */ ; cgc.data_direction = DMA_NONE; cgc.timeout = IOCTL_TIMEOUT; return sr_do_ioctl(cd, &cgc); } int sr_lock_door(struct cdrom_device_info *cdi, int lock) { Scsi_CD *cd = cdi->handle; return scsi_set_medium_removal(cd->device, lock ? 
SCSI_REMOVAL_PREVENT : SCSI_REMOVAL_ALLOW); } int sr_drive_status(struct cdrom_device_info *cdi, int slot) { struct scsi_cd *cd = cdi->handle; struct scsi_sense_hdr sshdr; struct media_event_desc med; if (CDSL_CURRENT != slot) { /* we have no changer support */ return -EINVAL; } if (!scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr)) return CDS_DISC_OK; /* SK/ASC/ASCQ of 2/4/1 means "unit is becoming ready" */ if (scsi_sense_valid(&sshdr) && sshdr.sense_key == NOT_READY && sshdr.asc == 0x04 && sshdr.ascq == 0x01) return CDS_DRIVE_NOT_READY; if (!cdrom_get_media_event(cdi, &med)) { if (med.media_present) return CDS_DISC_OK; else if (med.door_open) return CDS_TRAY_OPEN; else return CDS_NO_DISC; } /* * SK/ASC/ASCQ of 2/4/2 means "initialization required" * Using CD_TRAY_OPEN results in an START_STOP_UNIT to close * the tray, which resolves the initialization requirement. */ if (scsi_sense_valid(&sshdr) && sshdr.sense_key == NOT_READY && sshdr.asc == 0x04 && sshdr.ascq == 0x02) return CDS_TRAY_OPEN; /* * 0x04 is format in progress .. but there must be a disc present! */ if (sshdr.sense_key == NOT_READY && sshdr.asc == 0x04) return CDS_DISC_OK; /* * If not using Mt Fuji extended media tray reports, * just return TRAY_OPEN since ATAPI doesn't provide * any other way to detect this... */ if (scsi_sense_valid(&sshdr) && /* 0x3a is medium not present */ sshdr.asc == 0x3a) return CDS_NO_DISC; else return CDS_TRAY_OPEN; return CDS_DRIVE_NOT_READY; } int sr_disk_status(struct cdrom_device_info *cdi) { Scsi_CD *cd = cdi->handle; struct cdrom_tochdr toc_h; struct cdrom_tocentry toc_e; int i, rc, have_datatracks = 0; /* look for data tracks */ rc = sr_read_tochdr(cdi, &toc_h); if (rc) return (rc == -ENOMEDIUM) ? 
CDS_NO_DISC : CDS_NO_INFO; for (i = toc_h.cdth_trk0; i <= toc_h.cdth_trk1; i++) { toc_e.cdte_track = i; toc_e.cdte_format = CDROM_LBA; if (sr_read_tocentry(cdi, &toc_e)) return CDS_NO_INFO; if (toc_e.cdte_ctrl & CDROM_DATA_TRACK) { have_datatracks = 1; break; } } if (!have_datatracks) return CDS_AUDIO; if (cd->xa_flag) return CDS_XA_2_1; else return CDS_DATA_1; } int sr_get_last_session(struct cdrom_device_info *cdi, struct cdrom_multisession *ms_info) { Scsi_CD *cd = cdi->handle; ms_info->addr.lba = cd->ms_offset; ms_info->xa_flag = cd->xa_flag || cd->ms_offset > 0; return 0; } int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn) { Scsi_CD *cd = cdi->handle; struct packet_command cgc; char *buffer = kmalloc(32, GFP_KERNEL | SR_GFP_DMA(cd)); int result; if (!buffer) return -ENOMEM; memset(&cgc, 0, sizeof(struct packet_command)); cgc.cmd[0] = GPCMD_READ_SUBCHANNEL; cgc.cmd[2] = 0x40; /* I do want the subchannel info */ cgc.cmd[3] = 0x02; /* Give me medium catalog number info */ cgc.cmd[8] = 24; cgc.buffer = buffer; cgc.buflen = 24; cgc.data_direction = DMA_FROM_DEVICE; cgc.timeout = IOCTL_TIMEOUT; result = sr_do_ioctl(cd, &cgc); memcpy(mcn->medium_catalog_number, buffer + 9, 13); mcn->medium_catalog_number[13] = 0; kfree(buffer); return result; } int sr_reset(struct cdrom_device_info *cdi) { return 0; } int sr_select_speed(struct cdrom_device_info *cdi, int speed) { Scsi_CD *cd = cdi->handle; struct packet_command cgc; if (speed == 0) speed = 0xffff; /* set to max */ else speed *= 177; /* Nx to kbyte/s */ memset(&cgc, 0, sizeof(struct packet_command)); cgc.cmd[0] = GPCMD_SET_SPEED; /* SET CD SPEED */ cgc.cmd[2] = (speed >> 8) & 0xff; /* MSB for speed (in kbytes/sec) */ cgc.cmd[3] = speed & 0xff; /* LSB */ cgc.data_direction = DMA_NONE; cgc.timeout = IOCTL_TIMEOUT; if (sr_do_ioctl(cd, &cgc)) return -EIO; return 0; } /* ----------------------------------------------------------------------- */ /* this is called by the generic cdrom driver. 
arg is a _kernel_ pointer, */ /* because the generic cdrom driver does the user access stuff for us. */ /* only cdromreadtochdr and cdromreadtocentry are left - for use with the */ /* sr_disk_status interface for the generic cdrom driver. */ int sr_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void *arg) { switch (cmd) { case CDROMREADTOCHDR: return sr_read_tochdr(cdi, arg); case CDROMREADTOCENTRY: return sr_read_tocentry(cdi, arg); case CDROMPLAYTRKIND: return sr_play_trkind(cdi, arg); default: return -EINVAL; } } /* ----------------------------------------------------------------------- * a function to read all sorts of funny cdrom sectors using the READ_CD * scsi-3 mmc command * * lba: linear block address * format: 0 = data (anything) * 1 = audio * 2 = data (mode 1) * 3 = data (mode 2) * 4 = data (mode 2 form1) * 5 = data (mode 2 form2) * blksize: 2048 | 2336 | 2340 | 2352 */ static int sr_read_cd(Scsi_CD *cd, unsigned char *dest, int lba, int format, int blksize) { struct packet_command cgc; #ifdef DEBUG printk("%s: sr_read_cd lba=%d format=%d blksize=%d\n", cd->cdi.name, lba, format, blksize); #endif memset(&cgc, 0, sizeof(struct packet_command)); cgc.cmd[0] = GPCMD_READ_CD; /* READ_CD */ cgc.cmd[1] = ((format & 7) << 2); cgc.cmd[2] = (unsigned char) (lba >> 24) & 0xff; cgc.cmd[3] = (unsigned char) (lba >> 16) & 0xff; cgc.cmd[4] = (unsigned char) (lba >> 8) & 0xff; cgc.cmd[5] = (unsigned char) lba & 0xff; cgc.cmd[8] = 1; switch (blksize) { case 2336: cgc.cmd[9] = 0x58; break; case 2340: cgc.cmd[9] = 0x78; break; case 2352: cgc.cmd[9] = 0xf8; break; default: cgc.cmd[9] = 0x10; break; } cgc.buffer = dest; cgc.buflen = blksize; cgc.data_direction = DMA_FROM_DEVICE; cgc.timeout = IOCTL_TIMEOUT; return sr_do_ioctl(cd, &cgc); } /* * read sectors with blocksizes other than 2048 */ static int sr_read_sector(Scsi_CD *cd, int lba, int blksize, unsigned char *dest) { struct packet_command cgc; int rc; /* we try the READ CD command first... 
*/ if (cd->readcd_known) { rc = sr_read_cd(cd, dest, lba, 0, blksize); if (-EDRIVE_CANT_DO_THIS != rc) return rc; cd->readcd_known = 0; printk("CDROM does'nt support READ CD (0xbe) command\n"); /* fall & retry the other way */ } /* ... if this fails, we switch the blocksize using MODE SELECT */ if (blksize != cd->device->sector_size) { if (0 != (rc = sr_set_blocklength(cd, blksize))) return rc; } #ifdef DEBUG printk("%s: sr_read_sector lba=%d blksize=%d\n", cd->cdi.name, lba, blksize); #endif memset(&cgc, 0, sizeof(struct packet_command)); cgc.cmd[0] = GPCMD_READ_10; cgc.cmd[2] = (unsigned char) (lba >> 24) & 0xff; cgc.cmd[3] = (unsigned char) (lba >> 16) & 0xff; cgc.cmd[4] = (unsigned char) (lba >> 8) & 0xff; cgc.cmd[5] = (unsigned char) lba & 0xff; cgc.cmd[8] = 1; cgc.buffer = dest; cgc.buflen = blksize; cgc.data_direction = DMA_FROM_DEVICE; cgc.timeout = IOCTL_TIMEOUT; rc = sr_do_ioctl(cd, &cgc); return rc; } /* * read a sector in raw mode to check the sector format * ret: 1 == mode2 (XA), 0 == mode1, <0 == error */ int sr_is_xa(Scsi_CD *cd) { unsigned char *raw_sector; int is_xa; if (!xa_test) return 0; raw_sector = kmalloc(2048, GFP_KERNEL | SR_GFP_DMA(cd)); if (!raw_sector) return -ENOMEM; if (0 == sr_read_sector(cd, cd->ms_offset + 16, CD_FRAMESIZE_RAW1, raw_sector)) { is_xa = (raw_sector[3] == 0x02) ? 1 : 0; } else { /* read a raw sector failed for some reason. */ is_xa = -1; } kfree(raw_sector); #ifdef DEBUG printk("%s: sr_is_xa: %d\n", cd->cdi.name, is_xa); #endif return is_xa; }
gpl-2.0
Blackburn29/PsycoKernel
arch/arm/mach-msm/board-mem-audio-amp.c
117
2093
/* arch/arm/mach-msm/htc_acoustic_alsa.c * * Copyright (C) 2012 HTC Corporation * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/device.h> #include <linux/module.h> #include <linux/ioctl.h> #include <linux/mm.h> #include <linux/gfp.h> #include <linux/uaccess.h> #include <linux/slab.h> #include <linux/switch.h> #include <linux/gpio.h> #include <mach/htc_acoustic_alsa.h> #define D(fmt, args...) printk(KERN_INFO "[AUD] htc-acoustic: "fmt, ##args) #define E(fmt, args...) printk(KERN_ERR "[AUD] htc-acoustic: "fmt, ##args) #define GPIO_AUD_RT_1V8_EN 120 static bool power_on = false; void mem_ul_amp_power_enable(bool enable) { if (enable && !power_on) { D("%s: %s\n", __func__, enable?"ture":"false"); gpio_request(GPIO_AUD_RT_1V8_EN, "AUD_RT_1V8_EN"); gpio_tlmm_config(GPIO_CFG(GPIO_AUD_RT_1V8_EN, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA), GPIO_CFG_ENABLE); gpio_free(GPIO_AUD_RT_1V8_EN); power_on = true; } else if(!enable && power_on){ D("%s: %s\n", __func__, enable?"ture":"false"); gpio_request(GPIO_AUD_RT_1V8_EN, "AUD_RT_1V8_EN"); gpio_tlmm_config(GPIO_CFG(GPIO_AUD_RT_1V8_EN, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), GPIO_CFG_ENABLE); gpio_free(GPIO_AUD_RT_1V8_EN); power_on = false; } else { D("%s: %s but do nothing\n", __func__, enable?"ture":"false"); } } static struct amp_power_ops amp_power = { .set_amp_power_enable = mem_ul_amp_power_enable, }; static int __init amp_power_init(void) { int ret = 0; D("%s", __func__); htc_amp_power_register_ops(&amp_power); return ret; } static void __exit amp_power_exit(void) { } 
arch_initcall(amp_power_init); module_exit(amp_power_exit);
gpl-2.0
johnnyslt/android_kernel_htc_msm8660
drivers/crypto/msm/qcrypto.c
117
87805
/* Qualcomm Crypto driver * * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/clk.h> #include <linux/types.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> #include <linux/crypto.h> #include <linux/kernel.h> #include <linux/rtnetlink.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/debugfs.h> #include <crypto/ctr.h> #include <crypto/des.h> #include <crypto/aes.h> #include <crypto/sha.h> #include <crypto/hash.h> #include <crypto/algapi.h> #include <crypto/aead.h> #include <crypto/authenc.h> #include <crypto/scatterwalk.h> #include <crypto/internal/hash.h> #include <mach/scm.h> #include <linux/platform_data/qcom_crypto_device.h> #include "qce.h" #define MAX_CRYPTO_DEVICE 3 #define DEBUG_MAX_FNAME 16 #define DEBUG_MAX_RW_BUF 1024 struct crypto_stat { u32 aead_sha1_aes_enc; u32 aead_sha1_aes_dec; u32 aead_sha1_des_enc; u32 aead_sha1_des_dec; u32 aead_sha1_3des_enc; u32 aead_sha1_3des_dec; u32 aead_op_success; u32 aead_op_fail; u32 ablk_cipher_aes_enc; u32 ablk_cipher_aes_dec; u32 ablk_cipher_des_enc; u32 ablk_cipher_des_dec; u32 ablk_cipher_3des_enc; u32 ablk_cipher_3des_dec; u32 ablk_cipher_op_success; u32 ablk_cipher_op_fail; u32 sha1_digest; u32 sha256_digest; u32 sha_op_success; u32 sha_op_fail; u32 sha1_hmac_digest; u32 sha256_hmac_digest; u32 sha_hmac_op_success; u32 sha_hmac_op_fail; }; static struct crypto_stat _qcrypto_stat[MAX_CRYPTO_DEVICE]; static struct dentry *_debug_dent; static char 
_debug_read_buf[DEBUG_MAX_RW_BUF]; struct crypto_priv { /* CE features supported by target device*/ struct msm_ce_hw_support platform_support; /* CE features/algorithms supported by HW engine*/ struct ce_hw_support ce_support; /* the lock protects queue and req*/ spinlock_t lock; /* qce handle */ void *qce; /* list of registered algorithms */ struct list_head alg_list; /* platform device */ struct platform_device *pdev; /* current active request */ struct crypto_async_request *req; int res; /* request queue */ struct crypto_queue queue; uint32_t ce_lock_count; struct work_struct unlock_ce_ws; struct tasklet_struct done_tasklet; }; /*------------------------------------------------------------------------- * Resource Locking Service * ------------------------------------------------------------------------*/ #define QCRYPTO_CMD_ID 1 #define QCRYPTO_CE_LOCK_CMD 1 #define QCRYPTO_CE_UNLOCK_CMD 0 #define NUM_RETRY 1000 #define CE_BUSY 55 static int qcrypto_scm_cmd(int resource, int cmd, int *response) { #ifdef CONFIG_MSM_SCM struct { int resource; int cmd; } cmd_buf; cmd_buf.resource = resource; cmd_buf.cmd = cmd; return scm_call(SCM_SVC_TZ, QCRYPTO_CMD_ID, &cmd_buf, sizeof(cmd_buf), response, sizeof(*response)); #else return 0; #endif } static void qcrypto_unlock_ce(struct work_struct *work) { int response = 0; unsigned long flags; struct crypto_priv *cp = container_of(work, struct crypto_priv, unlock_ce_ws); if (cp->ce_lock_count == 1) BUG_ON(qcrypto_scm_cmd(cp->platform_support.shared_ce_resource, QCRYPTO_CE_UNLOCK_CMD, &response) != 0); spin_lock_irqsave(&cp->lock, flags); cp->ce_lock_count--; spin_unlock_irqrestore(&cp->lock, flags); } static int qcrypto_lock_ce(struct crypto_priv *cp) { unsigned long flags; int response = -CE_BUSY; int i = 0; if (cp->ce_lock_count == 0) { do { if (qcrypto_scm_cmd( cp->platform_support.shared_ce_resource, QCRYPTO_CE_LOCK_CMD, &response)) { response = -EINVAL; break; } } while ((response == -CE_BUSY) && (i++ < NUM_RETRY)); if 
((response == -CE_BUSY) && (i >= NUM_RETRY)) return -EUSERS; if (response < 0) return -EINVAL; } spin_lock_irqsave(&cp->lock, flags); cp->ce_lock_count++; spin_unlock_irqrestore(&cp->lock, flags); return 0; } enum qcrypto_alg_type { QCRYPTO_ALG_CIPHER = 0, QCRYPTO_ALG_SHA = 1, QCRYPTO_ALG_LAST }; struct qcrypto_alg { struct list_head entry; struct crypto_alg cipher_alg; struct ahash_alg sha_alg; enum qcrypto_alg_type alg_type; struct crypto_priv *cp; }; #define QCRYPTO_MAX_KEY_SIZE 64 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ #define QCRYPTO_MAX_IV_LENGTH 16 struct qcrypto_cipher_ctx { u8 auth_key[QCRYPTO_MAX_KEY_SIZE]; u8 iv[QCRYPTO_MAX_IV_LENGTH]; u8 enc_key[QCRYPTO_MAX_KEY_SIZE]; unsigned int enc_key_len; unsigned int authsize; unsigned int auth_key_len; struct crypto_priv *cp; }; struct qcrypto_cipher_req_ctx { u8 *iv; unsigned int ivsize; int aead; struct scatterlist asg; /* Formatted associated data sg */ unsigned char *assoc; /* Pointer to formatted assoc data */ unsigned int assoclen; /* Save Unformatted assoc data length */ struct scatterlist *assoc_sg; /* Save Unformatted assoc data sg */ enum qce_cipher_alg_enum alg; enum qce_cipher_dir_enum dir; enum qce_cipher_mode_enum mode; }; #define SHA_MAX_BLOCK_SIZE SHA256_BLOCK_SIZE #define SHA_MAX_STATE_SIZE (SHA256_DIGEST_SIZE / sizeof(u32)) #define SHA_MAX_DIGEST_SIZE SHA256_DIGEST_SIZE static uint8_t _std_init_vector_sha1_uint8[] = { 0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89, 0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76, 0xC3, 0xD2, 0xE1, 0xF0 }; /* standard initialization vector for SHA-256, source: FIPS 180-2 */ static uint8_t _std_init_vector_sha256_uint8[] = { 0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85, 0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A, 0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C, 0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19 }; struct qcrypto_sha_ctx { enum qce_hash_alg_enum alg; uint32_t byte_count[4]; uint8_t digest[SHA_MAX_DIGEST_SIZE]; uint32_t diglen; uint8_t 
*tmp_tbuf; uint8_t *trailing_buf; uint8_t *in_buf; uint32_t authkey_in_len; uint32_t trailing_buf_len; uint8_t first_blk; uint8_t last_blk; uint8_t authkey[SHA_MAX_BLOCK_SIZE]; struct ahash_request *ahash_req; struct completion ahash_req_complete; struct scatterlist *sg; struct scatterlist tmp_sg; struct crypto_priv *cp; }; struct qcrypto_sha_req_ctx { union { struct sha1_state sha1_state_ctx; struct sha256_state sha256_state_ctx; }; struct scatterlist *src; uint32_t nbytes; }; static void _byte_stream_to_words(uint32_t *iv, unsigned char *b, unsigned int len) { unsigned n; n = len / sizeof(uint32_t) ; for (; n > 0; n--) { *iv = ((*b << 24) & 0xff000000) | (((*(b+1)) << 16) & 0xff0000) | (((*(b+2)) << 8) & 0xff00) | (*(b+3) & 0xff); b += sizeof(uint32_t); iv++; } n = len % sizeof(uint32_t); if (n == 3) { *iv = ((*b << 24) & 0xff000000) | (((*(b+1)) << 16) & 0xff0000) | (((*(b+2)) << 8) & 0xff00) ; } else if (n == 2) { *iv = ((*b << 24) & 0xff000000) | (((*(b+1)) << 16) & 0xff0000) ; } else if (n == 1) { *iv = ((*b << 24) & 0xff000000) ; } } static void _words_to_byte_stream(uint32_t *iv, unsigned char *b, unsigned int len) { unsigned n = len / sizeof(uint32_t); for (; n > 0; n--) { *b++ = (unsigned char) ((*iv >> 24) & 0xff); *b++ = (unsigned char) ((*iv >> 16) & 0xff); *b++ = (unsigned char) ((*iv >> 8) & 0xff); *b++ = (unsigned char) (*iv & 0xff); iv++; } n = len % sizeof(uint32_t); if (n == 3) { *b++ = (unsigned char) ((*iv >> 24) & 0xff); *b++ = (unsigned char) ((*iv >> 16) & 0xff); *b = (unsigned char) ((*iv >> 8) & 0xff); } else if (n == 2) { *b++ = (unsigned char) ((*iv >> 24) & 0xff); *b = (unsigned char) ((*iv >> 16) & 0xff); } else if (n == 1) { *b = (unsigned char) ((*iv >> 24) & 0xff); } } static void _start_qcrypto_process(struct crypto_priv *cp); static struct qcrypto_alg *_qcrypto_sha_alg_alloc(struct crypto_priv *cp, struct ahash_alg *template) { struct qcrypto_alg *q_alg; q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL); if (!q_alg) { 
pr_err("qcrypto Memory allocation of q_alg FAIL, error %ld\n", PTR_ERR(q_alg)); return ERR_PTR(-ENOMEM); } q_alg->alg_type = QCRYPTO_ALG_SHA; q_alg->sha_alg = *template; q_alg->cp = cp; return q_alg; }; static struct qcrypto_alg *_qcrypto_cipher_alg_alloc(struct crypto_priv *cp, struct crypto_alg *template) { struct qcrypto_alg *q_alg; q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL); if (!q_alg) { pr_err("qcrypto Memory allocation of q_alg FAIL, error %ld\n", PTR_ERR(q_alg)); return ERR_PTR(-ENOMEM); } q_alg->alg_type = QCRYPTO_ALG_CIPHER; q_alg->cipher_alg = *template; q_alg->cp = cp; return q_alg; }; static int _qcrypto_cipher_cra_init(struct crypto_tfm *tfm) { struct crypto_alg *alg = tfm->__crt_alg; struct qcrypto_alg *q_alg; struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm); q_alg = container_of(alg, struct qcrypto_alg, cipher_alg); /* update context with ptr to cp */ ctx->cp = q_alg->cp; /* random first IV */ get_random_bytes(ctx->iv, QCRYPTO_MAX_IV_LENGTH); return 0; }; static int _qcrypto_ahash_cra_init(struct crypto_tfm *tfm) { struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm); struct ahash_alg *alg = container_of(crypto_hash_alg_common(ahash), struct ahash_alg, halg); struct qcrypto_alg *q_alg = container_of(alg, struct qcrypto_alg, sha_alg); crypto_ahash_set_reqsize(ahash, sizeof(struct qcrypto_sha_req_ctx)); /* update context with ptr to cp */ sha_ctx->cp = q_alg->cp; sha_ctx->sg = NULL; sha_ctx->tmp_tbuf = kzalloc(SHA_MAX_BLOCK_SIZE + SHA_MAX_DIGEST_SIZE, GFP_KERNEL); if (sha_ctx->tmp_tbuf == NULL) { pr_err("qcrypto Can't Allocate mem: sha_ctx->tmp_tbuf, error %ld\n", PTR_ERR(sha_ctx->tmp_tbuf)); return -ENOMEM; } sha_ctx->trailing_buf = kzalloc(SHA_MAX_BLOCK_SIZE, GFP_KERNEL); if (sha_ctx->trailing_buf == NULL) { kfree(sha_ctx->tmp_tbuf); sha_ctx->tmp_tbuf = NULL; pr_err("qcrypto Can't Allocate mem: sha_ctx->trailing_buf, error %ld\n", PTR_ERR(sha_ctx->trailing_buf)); return 
-ENOMEM; } sha_ctx->ahash_req = NULL; return 0; }; static void _qcrypto_ahash_cra_exit(struct crypto_tfm *tfm) { struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm); kfree(sha_ctx->tmp_tbuf); sha_ctx->tmp_tbuf = NULL; kfree(sha_ctx->trailing_buf); sha_ctx->trailing_buf = NULL; if (sha_ctx->sg != NULL) { kfree(sha_ctx->sg); sha_ctx->sg = NULL; } if (sha_ctx->ahash_req != NULL) { ahash_request_free(sha_ctx->ahash_req); sha_ctx->ahash_req = NULL; } }; static void _crypto_sha_hmac_ahash_req_complete( struct crypto_async_request *req, int err); static int _qcrypto_ahash_hmac_cra_init(struct crypto_tfm *tfm) { struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm); int ret = 0; ret = _qcrypto_ahash_cra_init(tfm); if (ret) return ret; sha_ctx->ahash_req = ahash_request_alloc(ahash, GFP_KERNEL); if (sha_ctx->ahash_req == NULL) { _qcrypto_ahash_cra_exit(tfm); return -ENOMEM; } init_completion(&sha_ctx->ahash_req_complete); ahash_request_set_callback(sha_ctx->ahash_req, CRYPTO_TFM_REQ_MAY_BACKLOG, _crypto_sha_hmac_ahash_req_complete, &sha_ctx->ahash_req_complete); crypto_ahash_clear_flags(ahash, ~0); return 0; }; static int _qcrypto_cra_ablkcipher_init(struct crypto_tfm *tfm) { tfm->crt_ablkcipher.reqsize = sizeof(struct qcrypto_cipher_req_ctx); return _qcrypto_cipher_cra_init(tfm); }; static int _qcrypto_cra_aead_init(struct crypto_tfm *tfm) { tfm->crt_aead.reqsize = sizeof(struct qcrypto_cipher_req_ctx); return _qcrypto_cipher_cra_init(tfm); }; static int _disp_stats(int id) { struct crypto_stat *pstat; int len = 0; pstat = &_qcrypto_stat[id]; len = snprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1, "\nQualcomm crypto accelerator %d Statistics:\n", id + 1); len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, " ABLK AES CIPHER encryption : %d\n", pstat->ablk_cipher_aes_enc); len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, " ABLK AES CIPHER decryption : %d\n", pstat->ablk_cipher_aes_dec); 
len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, " ABLK DES CIPHER encryption : %d\n", pstat->ablk_cipher_des_enc); len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, " ABLK DES CIPHER decryption : %d\n", pstat->ablk_cipher_des_dec); len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, " ABLK 3DES CIPHER encryption : %d\n", pstat->ablk_cipher_3des_enc); len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, " ABLK 3DES CIPHER decryption : %d\n", pstat->ablk_cipher_3des_dec); len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, " ABLK CIPHER operation success: %d\n", pstat->ablk_cipher_op_success); len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, " ABLK CIPHER operation fail : %d\n", pstat->ablk_cipher_op_fail); len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, " AEAD SHA1-AES encryption : %d\n", pstat->aead_sha1_aes_enc); len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, " AEAD SHA1-AES decryption : %d\n", pstat->aead_sha1_aes_dec); len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, " AEAD SHA1-DES encryption : %d\n", pstat->aead_sha1_des_enc); len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, " AEAD SHA1-DES decryption : %d\n", pstat->aead_sha1_des_dec); len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, " AEAD SHA1-3DES encryption : %d\n", pstat->aead_sha1_3des_enc); len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, " AEAD SHA1-3DES decryption : %d\n", pstat->aead_sha1_3des_dec); len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, " AEAD operation success : %d\n", pstat->aead_op_success); len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, " AEAD operation fail : %d\n", pstat->aead_op_fail); len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, " SHA1 digest : %d\n", pstat->sha1_digest); len += snprintf(_debug_read_buf + len, 
DEBUG_MAX_RW_BUF - len - 1, " SHA256 digest : %d\n", pstat->sha256_digest); len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, " SHA operation fail : %d\n", pstat->sha_op_fail); len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, " SHA operation success : %d\n", pstat->sha_op_success); len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, " SHA1 HMAC digest : %d\n", pstat->sha1_hmac_digest); len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, " SHA256 HMAC digest : %d\n", pstat->sha256_hmac_digest); len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, " SHA HMAC operation fail : %d\n", pstat->sha_hmac_op_fail); len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, " SHA HMAC operation success : %d\n", pstat->sha_hmac_op_success); return len; } static int _qcrypto_remove(struct platform_device *pdev) { struct crypto_priv *cp; struct qcrypto_alg *q_alg; struct qcrypto_alg *n; cp = platform_get_drvdata(pdev); if (!cp) return 0; list_for_each_entry_safe(q_alg, n, &cp->alg_list, entry) { if (q_alg->alg_type == QCRYPTO_ALG_CIPHER) crypto_unregister_alg(&q_alg->cipher_alg); if (q_alg->alg_type == QCRYPTO_ALG_SHA) crypto_unregister_ahash(&q_alg->sha_alg); list_del(&q_alg->entry); kfree(q_alg); } if (cp->qce) qce_close(cp->qce); tasklet_kill(&cp->done_tasklet); kfree(cp); return 0; }; static int _qcrypto_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int len) { struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm); struct crypto_priv *cp = ctx->cp; switch (len) { case AES_KEYSIZE_128: case AES_KEYSIZE_256: break; case AES_KEYSIZE_192: if (cp->ce_support.aes_key_192) break; default: crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; }; ctx->enc_key_len = len; memcpy(ctx->enc_key, key, len); return 0; }; static int _qcrypto_setkey_des(struct crypto_ablkcipher *cipher, const u8 *key, 
unsigned int len) { struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm); u32 tmp[DES_EXPKEY_WORDS]; int ret = des_ekey(tmp, key); if (len != DES_KEY_SIZE) { crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; }; if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } ctx->enc_key_len = len; memcpy(ctx->enc_key, key, len); return 0; }; static int _qcrypto_setkey_3des(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int len) { struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm); if (len != DES3_EDE_KEY_SIZE) { crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; }; ctx->enc_key_len = len; memcpy(ctx->enc_key, key, len); return 0; }; static void req_done(unsigned long data) { struct crypto_async_request *areq; struct crypto_priv *cp = (struct crypto_priv *)data; unsigned long flags; spin_lock_irqsave(&cp->lock, flags); areq = cp->req; cp->req = NULL; spin_unlock_irqrestore(&cp->lock, flags); if (areq) areq->complete(areq, cp->res); _start_qcrypto_process(cp); }; static void _update_sha1_ctx(struct ahash_request *req) { struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx; struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); if (sha_ctx->last_blk == 1) memset(sha_state_ctx, 0x00, sizeof(struct sha1_state)); else { memset(sha_state_ctx->buffer, 0x00, SHA1_BLOCK_SIZE); memcpy(sha_state_ctx->buffer, sha_ctx->trailing_buf, sha_ctx->trailing_buf_len); _byte_stream_to_words(sha_state_ctx->state , sha_ctx->digest, SHA1_DIGEST_SIZE); } return; } static void _update_sha256_ctx(struct ahash_request *req) { struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx; struct qcrypto_sha_ctx 
*sha_ctx = crypto_tfm_ctx(req->base.tfm); if (sha_ctx->last_blk == 1) memset(sha_state_ctx, 0x00, sizeof(struct sha256_state)); else { memset(sha_state_ctx->buf, 0x00, SHA256_BLOCK_SIZE); memcpy(sha_state_ctx->buf, sha_ctx->trailing_buf, sha_ctx->trailing_buf_len); _byte_stream_to_words(sha_state_ctx->state, sha_ctx->digest, SHA256_DIGEST_SIZE); } return; } static void _qce_ahash_complete(void *cookie, unsigned char *digest, unsigned char *authdata, int ret) { struct ahash_request *areq = (struct ahash_request *) cookie; struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(areq->base.tfm); struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(areq); struct crypto_priv *cp = sha_ctx->cp; struct crypto_stat *pstat; uint32_t diglen = crypto_ahash_digestsize(ahash); uint32_t *auth32 = (uint32_t *)authdata; pstat = &_qcrypto_stat[cp->pdev->id]; #ifdef QCRYPTO_DEBUG dev_info(&cp->pdev->dev, "_qce_ahash_complete: %p ret %d\n", areq, ret); #endif if (digest) { memcpy(sha_ctx->digest, digest, diglen); memcpy(areq->result, digest, diglen); } if (authdata) { sha_ctx->byte_count[0] = auth32[0]; sha_ctx->byte_count[1] = auth32[1]; sha_ctx->byte_count[2] = auth32[2]; sha_ctx->byte_count[3] = auth32[3]; } areq->src = rctx->src; areq->nbytes = rctx->nbytes; if (sha_ctx->sg != NULL) { kfree(sha_ctx->sg); sha_ctx->sg = NULL; } if (sha_ctx->alg == QCE_HASH_SHA1) _update_sha1_ctx(areq); if (sha_ctx->alg == QCE_HASH_SHA256) _update_sha256_ctx(areq); sha_ctx->last_blk = 0; sha_ctx->first_blk = 0; if (ret) { cp->res = -ENXIO; pstat->sha_op_fail++; } else { cp->res = 0; pstat->sha_op_success++; } if (cp->platform_support.ce_shared) schedule_work(&cp->unlock_ce_ws); tasklet_schedule(&cp->done_tasklet); }; static void _qce_ablk_cipher_complete(void *cookie, unsigned char *icb, unsigned char *iv, int ret) { struct ablkcipher_request *areq = (struct ablkcipher_request *) cookie; struct crypto_ablkcipher *ablk = crypto_ablkcipher_reqtfm(areq); 
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qce_ablk_cipher_complete: %p ret %d\n",
				areq, ret);
#endif
	/* save the updated IV so chained requests continue from it */
	if (iv)
		memcpy(ctx->iv, iv, crypto_ablkcipher_ivsize(ablk));

	if (ret) {
		cp->res = -ENXIO;
		pstat->ablk_cipher_op_fail++;
	} else {
		cp->res = 0;
		pstat->ablk_cipher_op_success++;
	}
	if (cp->platform_support.ce_shared)
		schedule_work(&cp->unlock_ce_ws);
	tasklet_schedule(&cp->done_tasklet);
};

/*
 * qce completion callback for aead requests.  For CCM the formatted
 * associated-data buffer is freed and the caller's assoc sg restored;
 * for other modes the ICV is appended (encrypt) or verified against
 * the source (decrypt).
 */
static void _qce_aead_complete(void *cookie, unsigned char *icv,
				unsigned char *iv, int ret)
{
	struct aead_request *areq = (struct aead_request *) cookie;
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct qcrypto_cipher_req_ctx *rctx;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(areq);

	if (rctx->mode == QCE_MODE_CCM) {
		kzfree(rctx->assoc);
		areq->assoc = rctx->assoc_sg;
		areq->assoclen = rctx->assoclen;
		if (ret) {
			/* 0x2000000 — presumably the engine's MAC-failure
			 * status; NOTE(review): confirm against qce docs */
			if (ret == 0x2000000)
				ret = -EBADMSG;
			else
				ret = -ENXIO;
		}
	} else {
		if (ret == 0) {
			if (rctx->dir == QCE_ENCRYPT) {
				/* copy the icv to dst */
				scatterwalk_map_and_copy(icv, areq->dst,
						areq->cryptlen, ctx->authsize,
						1);
			} else {
				unsigned char tmp[SHA256_DIGESTSIZE];

				/* compare icv from src */
				scatterwalk_map_and_copy(tmp, areq->src,
					areq->cryptlen - ctx->authsize,
					ctx->authsize, 0);
				ret = memcmp(icv, tmp, ctx->authsize);
				if (ret != 0)
					ret = -EBADMSG;
			}
		} else {
			ret = -ENXIO;
		}

		if (iv)
			memcpy(ctx->iv, iv, crypto_aead_ivsize(aead));
	}

	if (ret)
		pstat->aead_op_fail++;
	else
		pstat->aead_op_success++;

	if (cp->platform_support.ce_shared)
		schedule_work(&cp->unlock_ce_ws);
	tasklet_schedule(&cp->done_tasklet);
}

/*
 * Encode msglen big-endian into the last csize bytes of the CCM B0
 * block (RFC 3610 length field).  At most 4 bytes are written.
 */
static int aead_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else
if (msglen > (1 << (8 * csize))) return -EOVERFLOW; data = cpu_to_be32(msglen); memcpy(block - csize, (u8 *)&data + 4 - csize, csize); return 0; } static int qccrypto_set_aead_ccm_nonce(struct qce_req *qreq) { struct aead_request *areq = (struct aead_request *) qreq->areq; unsigned int i = ((unsigned int)qreq->iv[0]) + 1; memcpy(&qreq->nonce[0] , qreq->iv, qreq->ivsize); /* * Format control info per RFC 3610 and * NIST Special Publication 800-38C */ qreq->nonce[0] |= (8 * ((qreq->authsize - 2) / 2)); if (areq->assoclen) qreq->nonce[0] |= 64; if (i > MAX_NONCE) return -EINVAL; return aead_ccm_set_msg_len(qreq->nonce + 16 - i, qreq->cryptlen, i); } static int qcrypto_aead_ccm_format_adata(struct qce_req *qreq, uint32_t alen, struct scatterlist *sg) { unsigned char *adata; uint32_t len, l; qreq->assoc = kzalloc((alen + 0x64), (GFP_KERNEL | __GFP_DMA)); if (!qreq->assoc) { pr_err("qcrypto Memory allocation of adata FAIL, error %ld\n", PTR_ERR(qreq->assoc)); return -ENOMEM; } adata = qreq->assoc; /* * Add control info for associated data * RFC 3610 and NIST Special Publication 800-38C */ if (alen < 65280) { *(__be16 *)adata = cpu_to_be16(alen); len = 2; } else { if ((alen >= 65280) && (alen <= 0xffffffff)) { *(__be16 *)adata = cpu_to_be16(0xfffe); *(__be32 *)&adata[2] = cpu_to_be32(alen); len = 6; } else { *(__be16 *)adata = cpu_to_be16(0xffff); *(__be32 *)&adata[6] = cpu_to_be32(alen); len = 10; } } adata += len; qreq->assoclen = ALIGN((alen + len), 16); for (l = alen; l > 0; sg = sg_next(sg)) { memcpy(adata, sg_virt(sg), sg->length); l -= sg->length; adata += sg->length; } return 0; } static void _start_qcrypto_process(struct crypto_priv *cp) { struct crypto_async_request *async_req = NULL; struct crypto_async_request *backlog = NULL; unsigned long flags; u32 type; struct qce_req qreq; int ret; struct qcrypto_cipher_req_ctx *rctx; struct qcrypto_cipher_ctx *cipher_ctx; struct qcrypto_sha_ctx *sha_ctx; struct crypto_stat *pstat; pstat = &_qcrypto_stat[cp->pdev->id]; 
again:
	/* Claim the engine: only dequeue when no request is in flight. */
	spin_lock_irqsave(&cp->lock, flags);
	if (cp->req == NULL) {
		backlog = crypto_get_backlog(&cp->queue);
		async_req = crypto_dequeue_request(&cp->queue);
		cp->req = async_req;
	}
	spin_unlock_irqrestore(&cp->lock, flags);
	if (!async_req)
		return;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	type = crypto_tfm_alg_type(async_req->tfm);
	if (type == CRYPTO_ALG_TYPE_ABLKCIPHER) {
		struct ablkcipher_request *req;
		struct crypto_ablkcipher *tfm;

		req = container_of(async_req, struct ablkcipher_request,
								base);
		cipher_ctx = crypto_tfm_ctx(async_req->tfm);
		rctx = ablkcipher_request_ctx(req);
		tfm = crypto_ablkcipher_reqtfm(req);

		qreq.op = QCE_REQ_ABLK_CIPHER;
		qreq.qce_cb = _qce_ablk_cipher_complete;
		qreq.areq = req;
		qreq.alg = rctx->alg;
		qreq.dir = rctx->dir;
		qreq.mode = rctx->mode;
		qreq.enckey = cipher_ctx->enc_key;
		qreq.encklen = cipher_ctx->enc_key_len;
		qreq.iv = req->info;
		qreq.ivsize = crypto_ablkcipher_ivsize(tfm);
		qreq.cryptlen = req->nbytes;
		qreq.use_pmem = 0;

		/* a zero-length key is only valid with hardware keys */
		if ((cipher_ctx->enc_key_len == 0) &&
				(cp->platform_support.hw_key_support == 0))
			ret = -EINVAL;
		else
			ret = qce_ablk_cipher_req(cp->qce, &qreq);
	} else {
		if (type == CRYPTO_ALG_TYPE_AHASH) {
			struct ahash_request *req;
			struct qce_sha_req sreq;

			req = container_of(async_req,
						struct ahash_request, base);
			sha_ctx = crypto_tfm_ctx(async_req->tfm);

			sreq.qce_cb = _qce_ahash_complete;
			sreq.digest = &sha_ctx->digest[0];
			sreq.src = req->src;
			sreq.auth_data[0] = sha_ctx->byte_count[0];
			sreq.auth_data[1] = sha_ctx->byte_count[1];
			sreq.auth_data[2] = sha_ctx->byte_count[2];
			sreq.auth_data[3] = sha_ctx->byte_count[3];
			sreq.first_blk = sha_ctx->first_blk;
			sreq.last_blk = sha_ctx->last_blk;
			sreq.size = req->nbytes;
			sreq.areq = req;

			/* plain hashes carry no key; HMAC variants do */
			switch (sha_ctx->alg) {
			case QCE_HASH_SHA1:
				sreq.alg = QCE_HASH_SHA1;
				sreq.authkey = NULL;
				break;
			case QCE_HASH_SHA256:
				sreq.alg = QCE_HASH_SHA256;
				sreq.authkey = NULL;
				break;
			case QCE_HASH_SHA1_HMAC:
				sreq.alg = QCE_HASH_SHA1_HMAC;
				sreq.authkey = &sha_ctx->authkey[0];
				break;
			case QCE_HASH_SHA256_HMAC:
				sreq.alg = QCE_HASH_SHA256_HMAC;
				sreq.authkey = &sha_ctx->authkey[0];
				break;
			default:
				break;
			};
			ret = qce_process_sha_req(cp->qce, &sreq);
		} else {
			struct aead_request *req = container_of(async_req,
						struct aead_request, base);
			struct crypto_aead *aead = crypto_aead_reqtfm(req);

			rctx = aead_request_ctx(req);
			cipher_ctx = crypto_tfm_ctx(async_req->tfm);

			qreq.op = QCE_REQ_AEAD;
			qreq.qce_cb = _qce_aead_complete;
			qreq.areq = req;
			qreq.alg = rctx->alg;
			qreq.dir = rctx->dir;
			qreq.mode = rctx->mode;
			qreq.iv = rctx->iv;
			qreq.enckey = cipher_ctx->enc_key;
			qreq.encklen = cipher_ctx->enc_key_len;
			qreq.authkey = cipher_ctx->auth_key;
			qreq.authklen = cipher_ctx->auth_key_len;
			qreq.authsize = crypto_aead_authsize(aead);
			qreq.ivsize = crypto_aead_ivsize(aead);

			if (qreq.mode == QCE_MODE_CCM) {
				/* on decrypt the trailing MAC is not part
				 * of the ciphertext length */
				if (qreq.dir == QCE_ENCRYPT)
					qreq.cryptlen = req->cryptlen;
				else
					qreq.cryptlen = req->cryptlen -
								qreq.authsize;
				/* Get NONCE */
				ret = qccrypto_set_aead_ccm_nonce(&qreq);
				if (ret)
					goto done;
				/* Format Associated data */
				ret = qcrypto_aead_ccm_format_adata(&qreq,
							req->assoclen,
							req->assoc);
				if (ret)
					goto done;
				/*
				 * Save the original associated data
				 * length and sg
				 */
				rctx->assoc_sg = req->assoc;
				rctx->assoclen = req->assoclen;
				rctx->assoc = qreq.assoc;
				/*
				 * update req with new formatted associated
				 * data info
				 */
				req->assoc = &rctx->asg;
				req->assoclen = qreq.assoclen;
				sg_set_buf(req->assoc, qreq.assoc,
							req->assoclen);
				sg_mark_end(req->assoc);
			}
			ret = qce_aead_req(cp->qce, &qreq);
		}
	};
done:
	if (ret) {
		/* submission failed: release the engine, complete this
		 * request with the error and try the next one */
		spin_lock_irqsave(&cp->lock, flags);
		cp->req = NULL;
		spin_unlock_irqrestore(&cp->lock, flags);

		if (type == CRYPTO_ALG_TYPE_ABLKCIPHER)
			pstat->ablk_cipher_op_fail++;
		else
			if (type == CRYPTO_ALG_TYPE_AHASH)
				pstat->sha_op_fail++;
			else
				pstat->aead_op_fail++;

		async_req->complete(async_req, ret);
		goto again;
	};
};

/*
 * Enqueue an async request and kick the processing loop.
 * Returns the crypto_enqueue_request() status (e.g. -EINPROGRESS).
 */
static int _qcrypto_queue_req(struct crypto_priv *cp,
				struct crypto_async_request *req)
{
	int ret;
	unsigned long flags;

	if (cp->platform_support.ce_shared) {
		ret =
qcrypto_lock_ce(cp);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&cp->lock, flags);
	ret = crypto_enqueue_request(&cp->queue, req);
	spin_unlock_irqrestore(&cp->lock, flags);
	_start_qcrypto_process(cp);

	return ret;
}

/*
 * The entry points below all follow the same pattern: fill the request
 * context with algorithm / direction / mode, bump the matching stat
 * counter, then queue the request for _start_qcrypto_process().
 */
static int _qcrypto_enc_aes_ecb(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qcrypto_enc_aes_ecb: %p\n", req);
#endif
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_ECB;

	pstat->ablk_cipher_aes_enc++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_enc_aes_cbc(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qcrypto_enc_aes_cbc: %p\n", req);
#endif
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CBC;

	pstat->ablk_cipher_aes_enc++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_enc_aes_ctr(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qcrypto_enc_aes_ctr: %p\n", req);
#endif
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CTR;

	pstat->ablk_cipher_aes_enc++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_enc_aes_xts(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_XTS;

	pstat->ablk_cipher_aes_enc++;
	return _qcrypto_queue_req(cp, &req->base);
};

/* CCM encrypt: validate authsize (4..16, even) and key size first. */
static int _qcrypto_aead_encrypt_aes_ccm(struct aead_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	if ((ctx->authsize > 16) || (ctx->authsize < 4) ||
			(ctx->authsize & 1))
		return  -EINVAL;
	if ((ctx->auth_key_len != AES_KEYSIZE_128) &&
		(ctx->auth_key_len != AES_KEYSIZE_256))
		return  -EINVAL;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(req);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CCM;
	rctx->iv = req->iv;

	pstat->aead_sha1_aes_enc++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_enc_des_ecb(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_DES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_ECB;

	pstat->ablk_cipher_des_enc++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_enc_des_cbc(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_DES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CBC;

	pstat->ablk_cipher_des_enc++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_enc_3des_ecb(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_3DES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_ECB;

	pstat->ablk_cipher_3des_enc++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_enc_3des_cbc(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_3DES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CBC;

	pstat->ablk_cipher_3des_enc++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_dec_aes_ecb(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qcrypto_dec_aes_ecb: %p\n", req);
#endif
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_ECB;

	pstat->ablk_cipher_aes_dec++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_dec_aes_cbc(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qcrypto_dec_aes_cbc: %p\n", req);
#endif
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_CBC;

	pstat->ablk_cipher_aes_dec++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_dec_aes_ctr(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qcrypto_dec_aes_ctr: %p\n", req);
#endif
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_AES;
	rctx->mode = QCE_MODE_CTR;

	/* Note. There is no such thing as aes/counter mode, decrypt */
	rctx->dir = QCE_ENCRYPT;

	pstat->ablk_cipher_aes_dec++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_dec_des_ecb(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_DES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_ECB;

	pstat->ablk_cipher_des_dec++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_dec_des_cbc(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_DES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_CBC;

	pstat->ablk_cipher_des_dec++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_dec_3des_ecb(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_3DES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_ECB;

	pstat->ablk_cipher_3des_dec++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_dec_3des_cbc(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_3DES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_CBC;

	pstat->ablk_cipher_3des_dec++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_dec_aes_xts(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_AES;
	rctx->mode = QCE_MODE_XTS;
	rctx->dir = QCE_DECRYPT;

	pstat->ablk_cipher_aes_dec++;
	return _qcrypto_queue_req(cp, &req->base);
};

/* CCM decrypt: same authsize / key-size validation as encrypt. */
static int _qcrypto_aead_decrypt_aes_ccm(struct aead_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	if ((ctx->authsize > 16) || (ctx->authsize < 4) ||
			(ctx->authsize & 1))
		return  -EINVAL;
	if ((ctx->auth_key_len != AES_KEYSIZE_128) &&
		(ctx->auth_key_len != AES_KEYSIZE_256))
		return  -EINVAL;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(req);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_CCM;
	rctx->iv = req->iv;

	pstat->aead_sha1_aes_dec++;
	return _qcrypto_queue_req(cp, &req->base);
}

/* Generic aead setauthsize: accepts any value as-is. */
static int _qcrypto_aead_setauthsize(struct crypto_aead *authenc,
				unsigned int authsize)
{
	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	return 0;
}

/* CCM setauthsize: only the even sizes 4..16 are valid. */
static int _qcrypto_aead_ccm_setauthsize(struct crypto_aead *authenc,
				  unsigned int authsize)
{
	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);

	switch (authsize) {
	case 4:
	case 6:
	case 8:
	case 10:
	case 12:
	case 14:
	case 16:
		break;
	default:
		return -EINVAL;
	}
	ctx->authsize = authsize;
	return 0;
}

/*
 * Split an authenc-format key blob (rtattr header with enckeylen,
 * then auth key followed by enc key) into ctx->auth_key / ctx->enc_key.
 * Returns -EINVAL (and sets BAD_KEY_LEN) on any malformed input.
 */
static int _qcrypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
	struct rtattr *rta = (struct rtattr *)key;
	struct crypto_authenc_key_param *param;

	if (!RTA_OK(rta, keylen))
		goto badkey;
	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;
	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	ctx->enc_key_len = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < ctx->enc_key_len)
		goto badkey;

	ctx->auth_key_len = keylen - ctx->enc_key_len;
	if (ctx->enc_key_len >= QCRYPTO_MAX_KEY_SIZE ||
				ctx->auth_key_len >= QCRYPTO_MAX_KEY_SIZE)
		goto badkey;
	/* auth key occupies the first auth_key_len bytes of the blob */
	memset(ctx->auth_key, 0, QCRYPTO_MAX_KEY_SIZE);
	memcpy(ctx->enc_key, key + ctx->auth_key_len, ctx->enc_key_len);
	memcpy(ctx->auth_key, key, ctx->auth_key_len);

	return 0;
badkey:
	ctx->enc_key_len = 0;
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * CCM setkey: the same key is used for both encryption and
 * authentication.  192-bit keys are only accepted when the hardware
 * advertises aes_key_192 support.
 */
static int _qcrypto_aead_ccm_setkey(struct crypto_aead *aead, const u8 *key,
			unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_priv *cp = ctx->cp;

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_256:
		break;
	case AES_KEYSIZE_192:
		if (cp->ce_support.aes_key_192)
			break;
		/* fall through: 192-bit keys unsupported on this core */
	default:
		ctx->enc_key_len = 0;
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	};
	ctx->enc_key_len = keylen;
	memcpy(ctx->enc_key, key, keylen);
	ctx->auth_key_len = keylen;
	memcpy(ctx->auth_key, key, keylen);

	return 0;
}

static int _qcrypto_aead_encrypt_aes_cbc(struct aead_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qcrypto_aead_encrypt_aes_cbc: %p\n", req);
#endif
	rctx = aead_request_ctx(req);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CBC;
	rctx->iv = req->iv;

	pstat->aead_sha1_aes_enc++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_aead_decrypt_aes_cbc(struct aead_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qcrypto_aead_decrypt_aes_cbc: %p\n", req);
#endif
	rctx = aead_request_ctx(req);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_CBC;
	rctx->iv = req->iv;

	pstat->aead_sha1_aes_dec++;
	return _qcrypto_queue_req(cp, &req->base);
}

/*
 * givencrypt: derive the generated IV from the saved ctx IV XORed with
 * the sequence number so consecutive packets never share an IV.
 */
static int _qcrypto_aead_givencrypt_aes_cbc(struct aead_givcrypt_request *req)
{
	struct aead_request *areq = &req->areq;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct qcrypto_cipher_req_ctx *rctx;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(areq);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CBC;
	rctx->iv = req->giv;	/* generated iv */

	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
	 /* avoid consecutive packets going out with same IV */
	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);

	pstat->aead_sha1_aes_enc++;
	return _qcrypto_queue_req(cp, &areq->base);
}

#ifdef QCRYPTO_AEAD_AES_CTR
static int _qcrypto_aead_encrypt_aes_ctr(struct aead_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(req);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CTR;
	rctx->iv = req->iv;

	pstat->aead_sha1_aes_enc++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_aead_decrypt_aes_ctr(struct aead_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(req);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_AES;

	/* Note. There is no such thing as aes/counter mode, decrypt */
	rctx->dir = QCE_ENCRYPT;

	rctx->mode = QCE_MODE_CTR;
	rctx->iv = req->iv;

	pstat->aead_sha1_aes_dec++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_aead_givencrypt_aes_ctr(struct aead_givcrypt_request *req)
{
	struct aead_request *areq = &req->areq;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct qcrypto_cipher_req_ctx *rctx;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(areq);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CTR;
	rctx->iv = req->giv;	/* generated iv */

	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
	 /* avoid consecutive packets going out with same IV */
	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);

	pstat->aead_sha1_aes_enc++;
	return _qcrypto_queue_req(cp, &areq->base);
};
#endif /* QCRYPTO_AEAD_AES_CTR */

static int _qcrypto_aead_encrypt_des_cbc(struct aead_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(req);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_DES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CBC;
	rctx->iv = req->iv;

	pstat->aead_sha1_des_enc++;
	return
_qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_aead_decrypt_des_cbc(struct aead_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(req);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_DES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_CBC;
	rctx->iv = req->iv;

	pstat->aead_sha1_des_dec++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_aead_givencrypt_des_cbc(struct aead_givcrypt_request *req)
{
	struct aead_request *areq = &req->areq;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct qcrypto_cipher_req_ctx *rctx;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(areq);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_DES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CBC;
	rctx->iv = req->giv;	/* generated iv */

	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
	 /* avoid consecutive packets going out with same IV */
	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);

	pstat->aead_sha1_des_enc++;
	return _qcrypto_queue_req(cp, &areq->base);
}

static int _qcrypto_aead_encrypt_3des_cbc(struct aead_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(req);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_3DES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CBC;
	rctx->iv = req->iv;

	pstat->aead_sha1_3des_enc++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_aead_decrypt_3des_cbc(struct aead_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(req);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_3DES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_CBC;
	rctx->iv = req->iv;

	pstat->aead_sha1_3des_dec++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_aead_givencrypt_3des_cbc(struct aead_givcrypt_request *req)
{
	struct aead_request *areq = &req->areq;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct qcrypto_cipher_req_ctx *rctx;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(areq);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_3DES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CBC;
	rctx->iv = req->giv;	/* generated iv */

	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
	 /* avoid consecutive packets going out with same IV */
	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);

	pstat->aead_sha1_3des_enc++;
	return _qcrypto_queue_req(cp, &areq->base);
}

/* Count the sg entries needed to cover nbytes of data. */
static int qcrypto_count_sg(struct scatterlist *sg, int nbytes)
{
	int i;

	for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
		nbytes -= sg->length;

	return i;
}

/* Reset the per-tfm hash bookkeeping for a fresh digest. */
static int _sha_init(struct qcrypto_sha_ctx *ctx)
{
	ctx->first_blk = 1;
	ctx->last_blk = 0;
	ctx->byte_count[0] = 0;
	ctx->byte_count[1] = 0;
	ctx->byte_count[2] = 0;
	ctx->byte_count[3] = 0;
	ctx->trailing_buf_len = 0;

	return 0;
};

static int _sha1_init(struct ahash_request *req)
{
	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = sha_ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	_sha_init(sha_ctx);
	sha_ctx->alg = QCE_HASH_SHA1;

	/* seed the digest with the standard SHA-1 IV */
	memset(&sha_ctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
	memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
						SHA1_DIGEST_SIZE);
	sha_ctx->diglen = SHA1_DIGEST_SIZE;
	_update_sha1_ctx(req);

	pstat->sha1_digest++;
	return 0;
};

static int _sha256_init(struct ahash_request *req)
{
	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = sha_ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	_sha_init(sha_ctx);
	sha_ctx->alg = QCE_HASH_SHA256;

	/* seed the digest with the standard SHA-256 IV */
	memset(&sha_ctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
	memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
						SHA256_DIGEST_SIZE);
	sha_ctx->diglen = SHA256_DIGEST_SIZE;
	_update_sha256_ctx(req);

	pstat->sha256_digest++;
	return 0;
};

/* Export the partial SHA-1 state in struct sha1_state layout. */
static int _sha1_export(struct ahash_request  *req, void *out)
{
	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
	struct sha1_state *out_ctx = (struct sha1_state *)out;

	out_ctx->count = sha_state_ctx->count;
	memcpy(out_ctx->state, sha_state_ctx->state, sizeof(out_ctx->state));
	memcpy(out_ctx->buffer, sha_state_ctx->buffer, SHA1_BLOCK_SIZE);

	return 0;
};

/*
 * Import a partial SHA-1 state: rebuild the request state and the
 * per-tfm byte counters / trailing buffer from struct sha1_state.
 */
static int _sha1_import(struct ahash_request  *req, const void *in)
{
	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
	struct sha1_state *in_ctx = (struct sha1_state *)in;

	sha_state_ctx->count = in_ctx->count;
	memcpy(sha_state_ctx->state, in_ctx->state, sizeof(in_ctx->state));
	memcpy(sha_state_ctx->buffer, in_ctx->buffer, SHA1_BLOCK_SIZE);
	memcpy(sha_ctx->trailing_buf, in_ctx->buffer, SHA1_BLOCK_SIZE);

	/* split the 64-bit count into whole-block and trailing parts */
	sha_ctx->byte_count[0] =  (uint32_t)(in_ctx->count & 0xFFFFFFC0);
	sha_ctx->byte_count[1] =  (uint32_t)(in_ctx->count >> 32);
	_words_to_byte_stream(in_ctx->state, sha_ctx->digest,
						sha_ctx->diglen);

	sha_ctx->trailing_buf_len = (uint32_t)(in_ctx->count &
						(SHA1_BLOCK_SIZE-1));
	if (!(in_ctx->count))
		sha_ctx->first_blk = 1;
	else
		sha_ctx->first_blk = 0;

	return 0;
}

/* Export the partial SHA-256 state in struct sha256_state layout. */
static int _sha256_export(struct ahash_request  *req, void *out)
{
	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
	struct sha256_state *out_ctx
= (struct sha256_state *)out; out_ctx->count = sha_state_ctx->count; memcpy(out_ctx->state, sha_state_ctx->state, sizeof(out_ctx->state)); memcpy(out_ctx->buf, sha_state_ctx->buf, SHA256_BLOCK_SIZE); return 0; }; static int _sha256_import(struct ahash_request *req, const void *in) { struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx; struct sha256_state *in_ctx = (struct sha256_state *)in; sha_state_ctx->count = in_ctx->count; memcpy(sha_state_ctx->state, in_ctx->state, sizeof(in_ctx->state)); memcpy(sha_state_ctx->buf, in_ctx->buf, SHA256_BLOCK_SIZE); memcpy(sha_ctx->trailing_buf, in_ctx->buf, SHA256_BLOCK_SIZE); sha_ctx->byte_count[0] = (uint32_t)(in_ctx->count & 0xFFFFFFC0); sha_ctx->byte_count[1] = (uint32_t)(in_ctx->count >> 32); _words_to_byte_stream(in_ctx->state, sha_ctx->digest, sha_ctx->diglen); sha_ctx->trailing_buf_len = (uint32_t)(in_ctx->count & (SHA256_BLOCK_SIZE-1)); if (!(in_ctx->count)) sha_ctx->first_blk = 1; else sha_ctx->first_blk = 0; return 0; } static int _sha_update(struct ahash_request *req, uint32_t sha_block_size) { struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); struct crypto_priv *cp = sha_ctx->cp; struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); uint32_t total, len, i, num_sg; uint8_t *k_src = NULL; uint32_t sha_pad_len = 0; uint32_t end_src = 0; uint32_t trailing_buf_len = 0; uint32_t nbytes, index = 0; uint32_t saved_length = 0; int ret = 0; /* check for trailing buffer from previous updates and append it */ total = req->nbytes + sha_ctx->trailing_buf_len; len = req->nbytes; if (total <= sha_block_size) { i = 0; k_src = &sha_ctx->trailing_buf[sha_ctx->trailing_buf_len]; while (len > 0) { memcpy(k_src, sg_virt(&req->src[i]), req->src[i].length); len -= req->src[i].length; k_src += req->src[i].length; i++; } sha_ctx->trailing_buf_len = total; if (sha_ctx->alg == QCE_HASH_SHA1) 
_update_sha1_ctx(req); if (sha_ctx->alg == QCE_HASH_SHA256) _update_sha256_ctx(req); return 0; } /* save the original req structure fields*/ rctx->src = req->src; rctx->nbytes = req->nbytes; memcpy(sha_ctx->tmp_tbuf, sha_ctx->trailing_buf, sha_ctx->trailing_buf_len); k_src = &sha_ctx->trailing_buf[0]; /* get new trailing buffer */ sha_pad_len = ALIGN(total, sha_block_size) - total; trailing_buf_len = sha_block_size - sha_pad_len; nbytes = total - trailing_buf_len; num_sg = qcrypto_count_sg(req->src, req->nbytes); len = sha_ctx->trailing_buf_len; i = 0; while (len < nbytes) { if ((len + req->src[i].length) > nbytes) break; len += req->src[i].length; i++; } end_src = i; if (len < nbytes) { uint32_t remnant = (nbytes - len); memcpy(k_src, (sg_virt(&req->src[i]) + remnant), (req->src[i].length - remnant)); k_src += (req->src[i].length - remnant); saved_length = req->src[i].length; index = i; req->src[i].length = remnant; i++; } while (i < num_sg) { memcpy(k_src, sg_virt(&req->src[i]), req->src[i].length); k_src += req->src[i].length; i++; } if (sha_ctx->trailing_buf_len) { num_sg = end_src + 2; sha_ctx->sg = kzalloc(num_sg * (sizeof(struct scatterlist)), GFP_KERNEL); if (sha_ctx->sg == NULL) { pr_err("qcrypto Can't Allocate mem: sha_ctx->sg, error %ld\n", PTR_ERR(sha_ctx->sg)); return -ENOMEM; } sg_set_buf(&sha_ctx->sg[0], sha_ctx->tmp_tbuf, sha_ctx->trailing_buf_len); for (i = 1; i < num_sg; i++) sg_set_buf(&sha_ctx->sg[i], sg_virt(&req->src[i-1]), req->src[i-1].length); req->src = sha_ctx->sg; sg_mark_end(&sha_ctx->sg[num_sg - 1]); } else sg_mark_end(&req->src[end_src]); req->nbytes = nbytes; if (saved_length > 0) rctx->src[index].length = saved_length; sha_ctx->trailing_buf_len = trailing_buf_len; ret = _qcrypto_queue_req(cp, &req->base); return ret; }; static int _sha1_update(struct ahash_request *req) { struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx; sha_state_ctx->count += req->nbytes; return 
_sha_update(req, SHA1_BLOCK_SIZE); } static int _sha256_update(struct ahash_request *req) { struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx; sha_state_ctx->count += req->nbytes; return _sha_update(req, SHA256_BLOCK_SIZE); } static int _sha_final(struct ahash_request *req, uint32_t sha_block_size) { struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); struct crypto_priv *cp = sha_ctx->cp; struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); int ret = 0; sha_ctx->last_blk = 1; /* save the original req structure fields*/ rctx->src = req->src; rctx->nbytes = req->nbytes; sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->trailing_buf, sha_ctx->trailing_buf_len); sg_mark_end(&sha_ctx->tmp_sg); req->src = &sha_ctx->tmp_sg; req->nbytes = sha_ctx->trailing_buf_len; ret = _qcrypto_queue_req(cp, &req->base); return ret; }; static int _sha1_final(struct ahash_request *req) { return _sha_final(req, SHA1_BLOCK_SIZE); } static int _sha256_final(struct ahash_request *req) { return _sha_final(req, SHA256_BLOCK_SIZE); } static int _sha_digest(struct ahash_request *req) { struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); struct crypto_priv *cp = sha_ctx->cp; int ret = 0; /* save the original req structure fields*/ rctx->src = req->src; rctx->nbytes = req->nbytes; sha_ctx->last_blk = 1; ret = _qcrypto_queue_req(cp, &req->base); return ret; } static int _sha1_digest(struct ahash_request *req) { _sha1_init(req); return _sha_digest(req); } static int _sha256_digest(struct ahash_request *req) { _sha256_init(req); return _sha_digest(req); } static void _crypto_sha_hmac_ahash_req_complete( struct crypto_async_request *req, int err) { struct completion *ahash_req_complete = req->data; if (err == -EINPROGRESS) return; complete(ahash_req_complete); } static int _sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int len) { struct 
qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base); int ret = 0; sha_ctx->in_buf = kzalloc(len, GFP_KERNEL); if (sha_ctx->in_buf == NULL) { pr_err("qcrypto Can't Allocate mem: sha_ctx->in_buf, error %ld\n", PTR_ERR(sha_ctx->in_buf)); return -ENOMEM; } memcpy(sha_ctx->in_buf, key, len); sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->in_buf, len); sg_mark_end(&sha_ctx->tmp_sg); ahash_request_set_crypt(sha_ctx->ahash_req, &sha_ctx->tmp_sg, &sha_ctx->authkey[0], len); ret = _sha_digest(sha_ctx->ahash_req); if (ret == -EINPROGRESS || ret == -EBUSY) { ret = wait_for_completion_interruptible( &sha_ctx->ahash_req_complete); INIT_COMPLETION(sha_ctx->ahash_req_complete); } sha_ctx->authkey_in_len = len; kfree(sha_ctx->in_buf); sha_ctx->in_buf = NULL; return ret; } static int _sha1_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int len) { struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base); if (len <= SHA1_BLOCK_SIZE) memcpy(&sha_ctx->authkey[0], key, len); else { _sha_init(sha_ctx); sha_ctx->alg = QCE_HASH_SHA1; memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE); sha_ctx->diglen = SHA1_DIGEST_SIZE; _sha_hmac_setkey(tfm, key, len); } return 0; } static int _sha256_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int len) { struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base); if (len <= SHA256_BLOCK_SIZE) memcpy(&sha_ctx->authkey[0], key, len); else { _sha_init(sha_ctx); sha_ctx->alg = QCE_HASH_SHA256; memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE); sha_ctx->diglen = SHA256_DIGEST_SIZE; _sha_hmac_setkey(tfm, key, len); } return 0; } static int _sha_hmac_init_ihash(struct ahash_request *req, uint32_t sha_block_size) { struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); int i; for (i = 0; i < sha_block_size; i++) sha_ctx->trailing_buf[i] = sha_ctx->authkey[i] ^ 0x36; sha_ctx->trailing_buf_len = sha_block_size; return 0; } static int _sha1_hmac_init(struct 
ahash_request *req) { struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); struct crypto_priv *cp = sha_ctx->cp; struct crypto_stat *pstat; int ret = 0; pstat = &_qcrypto_stat[cp->pdev->id]; pstat->sha1_hmac_digest++; _sha_init(sha_ctx); memset(&sha_ctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE); memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE); sha_ctx->diglen = SHA1_DIGEST_SIZE; _update_sha1_ctx(req); if (cp->ce_support.sha_hmac) sha_ctx->alg = QCE_HASH_SHA1_HMAC; else { sha_ctx->alg = QCE_HASH_SHA1; ret = _sha_hmac_init_ihash(req, SHA1_BLOCK_SIZE); } return ret; } static int _sha256_hmac_init(struct ahash_request *req) { struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); struct crypto_priv *cp = sha_ctx->cp; struct crypto_stat *pstat; int ret = 0; pstat = &_qcrypto_stat[cp->pdev->id]; pstat->sha256_hmac_digest++; _sha_init(sha_ctx); memset(&sha_ctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE); memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE); sha_ctx->diglen = SHA256_DIGEST_SIZE; _update_sha256_ctx(req); if (cp->ce_support.sha_hmac) sha_ctx->alg = QCE_HASH_SHA256_HMAC; else { sha_ctx->alg = QCE_HASH_SHA256; ret = _sha_hmac_init_ihash(req, SHA256_BLOCK_SIZE); } return ret; } static int _sha1_hmac_update(struct ahash_request *req) { return _sha1_update(req); } static int _sha256_hmac_update(struct ahash_request *req) { return _sha256_update(req); } static int _sha_hmac_outer_hash(struct ahash_request *req, uint32_t sha_digest_size, uint32_t sha_block_size) { struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); struct crypto_priv *cp = sha_ctx->cp; int i; for (i = 0; i < sha_block_size; i++) sha_ctx->tmp_tbuf[i] = sha_ctx->authkey[i] ^ 0x5c; /* save the original req structure fields*/ rctx->src = req->src; rctx->nbytes = req->nbytes; memcpy(&sha_ctx->tmp_tbuf[sha_block_size], &sha_ctx->digest[0], 
sha_digest_size); sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->tmp_tbuf, sha_block_size + sha_digest_size); sg_mark_end(&sha_ctx->tmp_sg); req->src = &sha_ctx->tmp_sg; req->nbytes = sha_block_size + sha_digest_size; _sha_init(sha_ctx); if (sha_ctx->alg == QCE_HASH_SHA1) { memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE); sha_ctx->diglen = SHA1_DIGEST_SIZE; } else { memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE); sha_ctx->diglen = SHA256_DIGEST_SIZE; } sha_ctx->last_blk = 1; return _qcrypto_queue_req(cp, &req->base); } static int _sha_hmac_inner_hash(struct ahash_request *req, uint32_t sha_digest_size, uint32_t sha_block_size) { struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); struct ahash_request *areq = sha_ctx->ahash_req; struct crypto_priv *cp = sha_ctx->cp; int ret = 0; sha_ctx->last_blk = 1; sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->trailing_buf, sha_ctx->trailing_buf_len); sg_mark_end(&sha_ctx->tmp_sg); ahash_request_set_crypt(areq, &sha_ctx->tmp_sg, &sha_ctx->digest[0], sha_ctx->trailing_buf_len); sha_ctx->last_blk = 1; ret = _qcrypto_queue_req(cp, &areq->base); if (ret == -EINPROGRESS || ret == -EBUSY) { ret = wait_for_completion_interruptible(&sha_ctx->ahash_req_complete); INIT_COMPLETION(sha_ctx->ahash_req_complete); } return ret; } static int _sha1_hmac_final(struct ahash_request *req) { struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); struct crypto_priv *cp = sha_ctx->cp; int ret = 0; if (cp->ce_support.sha_hmac) return _sha_final(req, SHA1_BLOCK_SIZE); else { ret = _sha_hmac_inner_hash(req, SHA1_DIGEST_SIZE, SHA1_BLOCK_SIZE); if (ret) return ret; return _sha_hmac_outer_hash(req, SHA1_DIGEST_SIZE, SHA1_BLOCK_SIZE); } } static int _sha256_hmac_final(struct ahash_request *req) { struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); struct crypto_priv *cp = sha_ctx->cp; int ret = 0; if (cp->ce_support.sha_hmac) return _sha_final(req, SHA256_BLOCK_SIZE); else { 
ret = _sha_hmac_inner_hash(req, SHA256_DIGEST_SIZE, SHA256_BLOCK_SIZE); if (ret) return ret; return _sha_hmac_outer_hash(req, SHA256_DIGEST_SIZE, SHA256_BLOCK_SIZE); } return 0; } static int _sha1_hmac_digest(struct ahash_request *req) { struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); struct crypto_priv *cp = sha_ctx->cp; struct crypto_stat *pstat; pstat = &_qcrypto_stat[cp->pdev->id]; pstat->sha1_hmac_digest++; _sha_init(sha_ctx); memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE); sha_ctx->diglen = SHA1_DIGEST_SIZE; sha_ctx->alg = QCE_HASH_SHA1_HMAC; return _sha_digest(req); } static int _sha256_hmac_digest(struct ahash_request *req) { struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); struct crypto_priv *cp = sha_ctx->cp; struct crypto_stat *pstat; pstat = &_qcrypto_stat[cp->pdev->id]; pstat->sha256_hmac_digest++; _sha_init(sha_ctx); memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE); sha_ctx->diglen = SHA256_DIGEST_SIZE; sha_ctx->alg = QCE_HASH_SHA256_HMAC; return _sha_digest(req); } static struct ahash_alg _qcrypto_ahash_algos[] = { { .init = _sha1_init, .update = _sha1_update, .final = _sha1_final, .export = _sha1_export, .import = _sha1_import, .digest = _sha1_digest, .halg = { .digestsize = SHA1_DIGEST_SIZE, .statesize = sizeof(struct sha1_state), .base = { .cra_name = "sha1", .cra_driver_name = "qcrypto-sha1", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qcrypto_sha_ctx), .cra_alignmask = 0, .cra_type = &crypto_ahash_type, .cra_module = THIS_MODULE, .cra_init = _qcrypto_ahash_cra_init, .cra_exit = _qcrypto_ahash_cra_exit, }, }, }, { .init = _sha256_init, .update = _sha256_update, .final = _sha256_final, .export = _sha256_export, .import = _sha256_import, .digest = _sha256_digest, .halg = { .digestsize = SHA256_DIGEST_SIZE, .statesize = sizeof(struct sha256_state), .base = { 
.cra_name = "sha256", .cra_driver_name = "qcrypto-sha256", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qcrypto_sha_ctx), .cra_alignmask = 0, .cra_type = &crypto_ahash_type, .cra_module = THIS_MODULE, .cra_init = _qcrypto_ahash_cra_init, .cra_exit = _qcrypto_ahash_cra_exit, }, }, }, }; static struct ahash_alg _qcrypto_sha_hmac_algos[] = { { .init = _sha1_hmac_init, .update = _sha1_hmac_update, .final = _sha1_hmac_final, .export = _sha1_export, .import = _sha1_import, .digest = _sha1_hmac_digest, .setkey = _sha1_hmac_setkey, .halg = { .digestsize = SHA1_DIGEST_SIZE, .statesize = sizeof(struct sha1_state), .base = { .cra_name = "hmac(sha1)", .cra_driver_name = "qcrypto-hmac-sha1", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qcrypto_sha_ctx), .cra_alignmask = 0, .cra_type = &crypto_ahash_type, .cra_module = THIS_MODULE, .cra_init = _qcrypto_ahash_hmac_cra_init, .cra_exit = _qcrypto_ahash_cra_exit, }, }, }, { .init = _sha256_hmac_init, .update = _sha256_hmac_update, .final = _sha256_hmac_final, .export = _sha256_export, .import = _sha256_import, .digest = _sha256_hmac_digest, .setkey = _sha256_hmac_setkey, .halg = { .digestsize = SHA256_DIGEST_SIZE, .statesize = sizeof(struct sha256_state), .base = { .cra_name = "hmac(sha256)", .cra_driver_name = "qcrypto-hmac-sha256", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qcrypto_sha_ctx), .cra_alignmask = 0, .cra_type = &crypto_ahash_type, .cra_module = THIS_MODULE, .cra_init = _qcrypto_ahash_hmac_cra_init, .cra_exit = _qcrypto_ahash_cra_exit, }, }, }, }; static struct crypto_alg _qcrypto_ablk_cipher_algos[] = { { .cra_name = "ecb(aes)", .cra_driver_name = "qcrypto-ecb-aes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 
CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = _qcrypto_cra_ablkcipher_init, .cra_u = { .ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = _qcrypto_setkey_aes, .encrypt = _qcrypto_enc_aes_ecb, .decrypt = _qcrypto_dec_aes_ecb, }, }, }, { .cra_name = "cbc(aes)", .cra_driver_name = "qcrypto-cbc-aes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = _qcrypto_cra_ablkcipher_init, .cra_u = { .ablkcipher = { .ivsize = AES_BLOCK_SIZE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = _qcrypto_setkey_aes, .encrypt = _qcrypto_enc_aes_cbc, .decrypt = _qcrypto_dec_aes_cbc, }, }, }, { .cra_name = "ctr(aes)", .cra_driver_name = "qcrypto-ctr-aes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = _qcrypto_cra_ablkcipher_init, .cra_u = { .ablkcipher = { .ivsize = AES_BLOCK_SIZE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = _qcrypto_setkey_aes, .encrypt = _qcrypto_enc_aes_ctr, .decrypt = _qcrypto_dec_aes_ctr, }, }, }, { .cra_name = "ecb(des)", .cra_driver_name = "qcrypto-ecb-des", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = _qcrypto_cra_ablkcipher_init, .cra_u = { .ablkcipher = { .min_keysize = DES_KEY_SIZE, 
.max_keysize = DES_KEY_SIZE, .setkey = _qcrypto_setkey_des, .encrypt = _qcrypto_enc_des_ecb, .decrypt = _qcrypto_dec_des_ecb, }, }, }, { .cra_name = "cbc(des)", .cra_driver_name = "qcrypto-cbc-des", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = _qcrypto_cra_ablkcipher_init, .cra_u = { .ablkcipher = { .ivsize = DES_BLOCK_SIZE, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .setkey = _qcrypto_setkey_des, .encrypt = _qcrypto_enc_des_cbc, .decrypt = _qcrypto_dec_des_cbc, }, }, }, { .cra_name = "ecb(des3_ede)", .cra_driver_name = "qcrypto-ecb-3des", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = _qcrypto_cra_ablkcipher_init, .cra_u = { .ablkcipher = { .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .setkey = _qcrypto_setkey_3des, .encrypt = _qcrypto_enc_3des_ecb, .decrypt = _qcrypto_dec_3des_ecb, }, }, }, { .cra_name = "cbc(des3_ede)", .cra_driver_name = "qcrypto-cbc-3des", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = _qcrypto_cra_ablkcipher_init, .cra_u = { .ablkcipher = { .ivsize = DES3_EDE_BLOCK_SIZE, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .setkey = _qcrypto_setkey_3des, .encrypt = _qcrypto_enc_3des_cbc, .decrypt = _qcrypto_dec_3des_cbc, }, }, }, }; static struct crypto_alg _qcrypto_ablk_cipher_xts_algo = { .cra_name = "xts(aes)", .cra_driver_name = "qcrypto-xts-aes", 
.cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = _qcrypto_cra_ablkcipher_init, .cra_u = { .ablkcipher = { .ivsize = AES_BLOCK_SIZE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = _qcrypto_setkey_aes, .encrypt = _qcrypto_enc_aes_xts, .decrypt = _qcrypto_dec_aes_xts, }, }, }; static struct crypto_alg _qcrypto_aead_sha1_hmac_algos[] = { { .cra_name = "authenc(hmac(sha1),cbc(aes))", .cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-aes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), .cra_alignmask = 0, .cra_type = &crypto_aead_type, .cra_module = THIS_MODULE, .cra_init = _qcrypto_cra_aead_init, .cra_u = { .aead = { .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, .setkey = _qcrypto_aead_setkey, .setauthsize = _qcrypto_aead_setauthsize, .encrypt = _qcrypto_aead_encrypt_aes_cbc, .decrypt = _qcrypto_aead_decrypt_aes_cbc, .givencrypt = _qcrypto_aead_givencrypt_aes_cbc, .geniv = "<built-in>", } } }, #ifdef QCRYPTO_AEAD_AES_CTR { .cra_name = "authenc(hmac(sha1),ctr(aes))", .cra_driver_name = "qcrypto-aead-hmac-sha1-ctr-aes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), .cra_alignmask = 0, .cra_type = &crypto_aead_type, .cra_module = THIS_MODULE, .cra_init = _qcrypto_cra_aead_init, .cra_u = { .aead = { .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, .setkey = _qcrypto_aead_setkey, .setauthsize = _qcrypto_aead_setauthsize, .encrypt = _qcrypto_aead_encrypt_aes_ctr, .decrypt = _qcrypto_aead_decrypt_aes_ctr, .givencrypt = _qcrypto_aead_givencrypt_aes_ctr, .geniv = "<built-in>", } } }, #endif /* 
QCRYPTO_AEAD_AES_CTR */ { .cra_name = "authenc(hmac(sha1),cbc(des))", .cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-des", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), .cra_alignmask = 0, .cra_type = &crypto_aead_type, .cra_module = THIS_MODULE, .cra_init = _qcrypto_cra_aead_init, .cra_u = { .aead = { .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, .setkey = _qcrypto_aead_setkey, .setauthsize = _qcrypto_aead_setauthsize, .encrypt = _qcrypto_aead_encrypt_des_cbc, .decrypt = _qcrypto_aead_decrypt_des_cbc, .givencrypt = _qcrypto_aead_givencrypt_des_cbc, .geniv = "<built-in>", } } }, { .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", .cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-3des", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), .cra_alignmask = 0, .cra_type = &crypto_aead_type, .cra_module = THIS_MODULE, .cra_init = _qcrypto_cra_aead_init, .cra_u = { .aead = { .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, .setkey = _qcrypto_aead_setkey, .setauthsize = _qcrypto_aead_setauthsize, .encrypt = _qcrypto_aead_encrypt_3des_cbc, .decrypt = _qcrypto_aead_decrypt_3des_cbc, .givencrypt = _qcrypto_aead_givencrypt_3des_cbc, .geniv = "<built-in>", } } }, }; static struct crypto_alg _qcrypto_aead_ccm_algo = { .cra_name = "ccm(aes)", .cra_driver_name = "qcrypto-aes-ccm", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), .cra_alignmask = 0, .cra_type = &crypto_aead_type, .cra_module = THIS_MODULE, .cra_init = _qcrypto_cra_aead_init, .cra_u = { .aead = { .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, .setkey = _qcrypto_aead_ccm_setkey, .setauthsize = _qcrypto_aead_ccm_setauthsize, .encrypt = 
_qcrypto_aead_encrypt_aes_ccm, .decrypt = _qcrypto_aead_decrypt_aes_ccm, .geniv = "<built-in>", } } }; static int _qcrypto_probe(struct platform_device *pdev) { int rc = 0; void *handle; struct crypto_priv *cp; int i; struct msm_ce_hw_support *platform_support; if (pdev->id >= MAX_CRYPTO_DEVICE) { pr_err("%s: device id %d exceeds allowed %d\n", __func__, pdev->id, MAX_CRYPTO_DEVICE); return -ENOENT; } cp = kzalloc(sizeof(*cp), GFP_KERNEL); if (!cp) { pr_err("qcrypto Memory allocation of q_alg FAIL, error %ld\n", PTR_ERR(cp)); return -ENOMEM; } /* open qce */ handle = qce_open(pdev, &rc); if (handle == NULL) { kfree(cp); platform_set_drvdata(pdev, NULL); return rc; } INIT_LIST_HEAD(&cp->alg_list); platform_set_drvdata(pdev, cp); spin_lock_init(&cp->lock); tasklet_init(&cp->done_tasklet, req_done, (unsigned long)cp); crypto_init_queue(&cp->queue, 50); cp->qce = handle; cp->pdev = pdev; qce_hw_support(cp->qce, &cp->ce_support); platform_support = (struct msm_ce_hw_support *)pdev->dev.platform_data; cp->platform_support.ce_shared = platform_support->ce_shared; cp->platform_support.shared_ce_resource = platform_support->shared_ce_resource; cp->platform_support.hw_key_support = platform_support->hw_key_support; cp->ce_lock_count = 0; cp->platform_support.sha_hmac = platform_support->sha_hmac; if (cp->platform_support.ce_shared) INIT_WORK(&cp->unlock_ce_ws, qcrypto_unlock_ce); /* register crypto cipher algorithms the device supports */ for (i = 0; i < ARRAY_SIZE(_qcrypto_ablk_cipher_algos); i++) { struct qcrypto_alg *q_alg; q_alg = _qcrypto_cipher_alg_alloc(cp, &_qcrypto_ablk_cipher_algos[i]); if (IS_ERR(q_alg)) { rc = PTR_ERR(q_alg); goto err; } rc = crypto_register_alg(&q_alg->cipher_alg); if (rc) { dev_err(&pdev->dev, "%s alg registration failed\n", q_alg->cipher_alg.cra_driver_name); kfree(q_alg); } else { list_add_tail(&q_alg->entry, &cp->alg_list); dev_info(&pdev->dev, "%s\n", q_alg->cipher_alg.cra_driver_name); } } /* register crypto cipher algorithms the device 
supports */ if (cp->ce_support.aes_xts) { struct qcrypto_alg *q_alg; q_alg = _qcrypto_cipher_alg_alloc(cp, &_qcrypto_ablk_cipher_xts_algo); if (IS_ERR(q_alg)) { rc = PTR_ERR(q_alg); goto err; } rc = crypto_register_alg(&q_alg->cipher_alg); if (rc) { dev_err(&pdev->dev, "%s alg registration failed\n", q_alg->cipher_alg.cra_driver_name); kfree(q_alg); } else { list_add_tail(&q_alg->entry, &cp->alg_list); dev_info(&pdev->dev, "%s\n", q_alg->cipher_alg.cra_driver_name); } } /* * Register crypto hash (sha1 and sha256) algorithms the * device supports */ for (i = 0; i < ARRAY_SIZE(_qcrypto_ahash_algos); i++) { struct qcrypto_alg *q_alg = NULL; q_alg = _qcrypto_sha_alg_alloc(cp, &_qcrypto_ahash_algos[i]); if (IS_ERR(q_alg)) { rc = PTR_ERR(q_alg); goto err; } rc = crypto_register_ahash(&q_alg->sha_alg); if (rc) { dev_err(&pdev->dev, "%s alg registration failed\n", q_alg->sha_alg.halg.base.cra_driver_name); kfree(q_alg); } else { list_add_tail(&q_alg->entry, &cp->alg_list); dev_info(&pdev->dev, "%s\n", q_alg->sha_alg.halg.base.cra_driver_name); } } /* register crypto aead (hmac-sha1) algorithms the device supports */ if (cp->ce_support.sha1_hmac_20 || cp->ce_support.sha1_hmac) { for (i = 0; i < ARRAY_SIZE(_qcrypto_aead_sha1_hmac_algos); i++) { struct qcrypto_alg *q_alg; q_alg = _qcrypto_cipher_alg_alloc(cp, &_qcrypto_aead_sha1_hmac_algos[i]); if (IS_ERR(q_alg)) { rc = PTR_ERR(q_alg); goto err; } rc = crypto_register_alg(&q_alg->cipher_alg); if (rc) { dev_err(&pdev->dev, "%s alg registration failed\n", q_alg->cipher_alg.cra_driver_name); kfree(q_alg); } else { list_add_tail(&q_alg->entry, &cp->alg_list); dev_info(&pdev->dev, "%s\n", q_alg->cipher_alg.cra_driver_name); } } } if ((cp->ce_support.sha_hmac) || (cp->platform_support.sha_hmac)) { /* register crypto hmac algorithms the device supports */ for (i = 0; i < ARRAY_SIZE(_qcrypto_sha_hmac_algos); i++) { struct qcrypto_alg *q_alg = NULL; q_alg = _qcrypto_sha_alg_alloc(cp, &_qcrypto_sha_hmac_algos[i]); if (IS_ERR(q_alg)) { 
rc = PTR_ERR(q_alg); goto err; } rc = crypto_register_ahash(&q_alg->sha_alg); if (rc) { dev_err(&pdev->dev, "%s alg registration failed\n", q_alg->sha_alg.halg.base.cra_driver_name); kfree(q_alg); } else { list_add_tail(&q_alg->entry, &cp->alg_list); dev_info(&pdev->dev, "%s\n", q_alg->sha_alg.halg.base.cra_driver_name); } } } /* * Register crypto cipher (aes-ccm) algorithms the * device supports */ if (cp->ce_support.aes_ccm) { struct qcrypto_alg *q_alg; q_alg = _qcrypto_cipher_alg_alloc(cp, &_qcrypto_aead_ccm_algo); if (IS_ERR(q_alg)) { rc = PTR_ERR(q_alg); goto err; } rc = crypto_register_alg(&q_alg->cipher_alg); if (rc) { dev_err(&pdev->dev, "%s alg registration failed\n", q_alg->cipher_alg.cra_driver_name); kfree(q_alg); } else { list_add_tail(&q_alg->entry, &cp->alg_list); dev_info(&pdev->dev, "%s\n", q_alg->cipher_alg.cra_driver_name); } } return 0; err: _qcrypto_remove(pdev); return rc; }; static struct platform_driver _qualcomm_crypto = { .probe = _qcrypto_probe, .remove = _qcrypto_remove, .driver = { .owner = THIS_MODULE, .name = "qcrypto", }, }; static int _debug_qcrypto[MAX_CRYPTO_DEVICE]; static int _debug_stats_open(struct inode *inode, struct file *file) { file->private_data = inode->i_private; return 0; } static ssize_t _debug_stats_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { int rc = -EINVAL; int qcrypto = *((int *) file->private_data); int len; len = _disp_stats(qcrypto); rc = simple_read_from_buffer((void __user *) buf, len, ppos, (void *) _debug_read_buf, len); return rc; } static ssize_t _debug_stats_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { int qcrypto = *((int *) file->private_data); memset((char *)&_qcrypto_stat[qcrypto], 0, sizeof(struct crypto_stat)); return count; }; static const struct file_operations _debug_stats_ops = { .open = _debug_stats_open, .read = _debug_stats_read, .write = _debug_stats_write, }; static int _qcrypto_debug_init(void) { int rc; char 
name[DEBUG_MAX_FNAME]; int i; struct dentry *dent; _debug_dent = debugfs_create_dir("qcrypto", NULL); if (IS_ERR(_debug_dent)) { pr_err("qcrypto debugfs_create_dir fail, error %ld\n", PTR_ERR(_debug_dent)); return PTR_ERR(_debug_dent); } for (i = 0; i < MAX_CRYPTO_DEVICE; i++) { snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", i+1); _debug_qcrypto[i] = i; dent = debugfs_create_file(name, 0644, _debug_dent, &_debug_qcrypto[i], &_debug_stats_ops); if (dent == NULL) { pr_err("qcrypto debugfs_create_file fail, error %ld\n", PTR_ERR(dent)); rc = PTR_ERR(dent); goto err; } } return 0; err: debugfs_remove_recursive(_debug_dent); return rc; } static int __init _qcrypto_init(void) { int rc; rc = _qcrypto_debug_init(); if (rc) return rc; return platform_driver_register(&_qualcomm_crypto); } static void __exit _qcrypto_exit(void) { pr_debug("%s Unregister QCRYPTO\n", __func__); debugfs_remove_recursive(_debug_dent); platform_driver_unregister(&_qualcomm_crypto); } module_init(_qcrypto_init); module_exit(_qcrypto_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>"); MODULE_DESCRIPTION("Qualcomm Crypto driver"); MODULE_VERSION("1.19");
gpl-2.0
logicbricks/linux-xlnx
arch/arm/mach-imx/devices/platform-imx-ssi.c
629
2927
/* * Copyright (C) 2010 Pengutronix * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de> * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ #include "../hardware.h" #include "devices-common.h" #define imx_imx_ssi_data_entry(soc, _id, _hwid, _size) \ [_id] = { \ .id = _id, \ .iobase = soc ## _SSI ## _hwid ## _BASE_ADDR, \ .iosize = _size, \ .irq = soc ## _INT_SSI ## _hwid, \ .dmatx0 = soc ## _DMA_REQ_SSI ## _hwid ## _TX0, \ .dmarx0 = soc ## _DMA_REQ_SSI ## _hwid ## _RX0, \ .dmatx1 = soc ## _DMA_REQ_SSI ## _hwid ## _TX1, \ .dmarx1 = soc ## _DMA_REQ_SSI ## _hwid ## _RX1, \ } #ifdef CONFIG_SOC_IMX21 const struct imx_imx_ssi_data imx21_imx_ssi_data[] __initconst = { #define imx21_imx_ssi_data_entry(_id, _hwid) \ imx_imx_ssi_data_entry(MX21, _id, _hwid, SZ_4K) imx21_imx_ssi_data_entry(0, 1), imx21_imx_ssi_data_entry(1, 2), }; #endif /* ifdef CONFIG_SOC_IMX21 */ #ifdef CONFIG_SOC_IMX25 const struct imx_imx_ssi_data imx25_imx_ssi_data[] __initconst = { #define imx25_imx_ssi_data_entry(_id, _hwid) \ imx_imx_ssi_data_entry(MX25, _id, _hwid, SZ_4K) imx25_imx_ssi_data_entry(0, 1), imx25_imx_ssi_data_entry(1, 2), }; #endif /* ifdef CONFIG_SOC_IMX25 */ #ifdef CONFIG_SOC_IMX27 const struct imx_imx_ssi_data imx27_imx_ssi_data[] __initconst = { #define imx27_imx_ssi_data_entry(_id, _hwid) \ imx_imx_ssi_data_entry(MX27, _id, _hwid, SZ_4K) imx27_imx_ssi_data_entry(0, 1), imx27_imx_ssi_data_entry(1, 2), }; #endif /* ifdef CONFIG_SOC_IMX27 */ #ifdef CONFIG_SOC_IMX31 const struct imx_imx_ssi_data imx31_imx_ssi_data[] __initconst = { #define imx31_imx_ssi_data_entry(_id, _hwid) \ imx_imx_ssi_data_entry(MX31, _id, _hwid, SZ_4K) imx31_imx_ssi_data_entry(0, 1), imx31_imx_ssi_data_entry(1, 2), }; #endif /* ifdef CONFIG_SOC_IMX31 */ #ifdef CONFIG_SOC_IMX35 const struct imx_imx_ssi_data imx35_imx_ssi_data[] __initconst = { #define 
imx35_imx_ssi_data_entry(_id, _hwid) \ imx_imx_ssi_data_entry(MX35, _id, _hwid, SZ_4K) imx35_imx_ssi_data_entry(0, 1), imx35_imx_ssi_data_entry(1, 2), }; #endif /* ifdef CONFIG_SOC_IMX35 */ struct platform_device *__init imx_add_imx_ssi( const struct imx_imx_ssi_data *data, const struct imx_ssi_platform_data *pdata) { struct resource res[] = { { .start = data->iobase, .end = data->iobase + data->iosize - 1, .flags = IORESOURCE_MEM, }, { .start = data->irq, .end = data->irq, .flags = IORESOURCE_IRQ, }, #define DMARES(_name) { \ .name = #_name, \ .start = data->dma ## _name, \ .end = data->dma ## _name, \ .flags = IORESOURCE_DMA, \ } DMARES(tx0), DMARES(rx0), DMARES(tx1), DMARES(rx1), }; return imx_add_platform_device("imx-ssi", data->id, res, ARRAY_SIZE(res), pdata, sizeof(*pdata)); }
gpl-2.0
HeeroLuca/ONEPONE
drivers/media/platform/msm/wfd/wfd-ioctl.c
1397
47396
/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/types.h> #include <linux/list.h> #include <linux/ioctl.h> #include <linux/mutex.h> #include <linux/init.h> #include <linux/version.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/kthread.h> #include <linux/time.h> #include <linux/slab.h> #include <mach/board.h> #include <media/v4l2-dev.h> #include <media/v4l2-device.h> #include <media/v4l2-event.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-subdev.h> #include <media/videobuf2-core.h> #include <media/videobuf2-msm-mem.h> #include "wfd-util.h" #include "mdp-subdev.h" #include "enc-subdev.h" #include "vsg-subdev.h" #define WFD_VERSION KERNEL_VERSION(0, 0, 1) #define WFD_NUM_DEVICES 2 #define WFD_DEVICE_NUMBER_BASE 38 #define WFD_DEVICE_SECURE (WFD_DEVICE_NUMBER_BASE + 1) #define DEFAULT_WFD_WIDTH 1280 #define DEFAULT_WFD_HEIGHT 720 #define VENC_INPUT_BUFFERS 4 #define MAX_EVENTS 16 struct wfd_device { struct mutex dev_lock; struct platform_device *pdev; struct v4l2_device v4l2_dev; struct video_device *pvdev; struct v4l2_subdev mdp_sdev; struct v4l2_subdev enc_sdev; struct v4l2_subdev vsg_sdev; struct ion_client *ion_client; bool secure; bool in_use; bool mdp_iommu_split_domain; }; struct mem_info { u32 fd; u32 offset; }; struct mem_info_entry { struct list_head list; unsigned long userptr; struct mem_info minfo; }; struct mem_region_pair { struct mem_region *enc; struct mem_region *mdp; struct list_head list; }; struct 
wfd_inst { struct vb2_queue vid_bufq; struct mutex lock; struct mutex vb2_lock; u32 buf_count; struct task_struct *mdp_task; void *mdp_inst; void *venc_inst; u32 height; u32 width; u32 pixelformat; struct list_head minfo_list; bool streamoff; u32 input_bufs_allocated; u32 input_buf_size; u32 out_buf_size; struct list_head input_mem_list; struct wfd_stats stats; struct completion stop_mdp_thread; struct v4l2_fh event_handler; }; struct wfd_vid_buffer { struct vb2_buffer vidbuf; }; static inline struct wfd_inst *file_to_inst(struct file *filp) { return container_of(filp->private_data, struct wfd_inst, event_handler); } static int wfd_vidbuf_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]) { struct file *priv_data = (struct file *)(q->drv_priv); struct wfd_inst *inst = file_to_inst(priv_data); int i; WFD_MSG_DBG("In %s\n", __func__); if (num_buffers == NULL || num_planes == NULL) return -EINVAL; *num_planes = 1; mutex_lock(&inst->lock); for (i = 0; i < *num_planes; ++i) { sizes[i] = inst->out_buf_size; alloc_ctxs[i] = inst; } mutex_unlock(&inst->lock); return 0; } static void wfd_vidbuf_wait_prepare(struct vb2_queue *q) { struct file *priv_data = (struct file *)(q->drv_priv); struct wfd_inst *inst = file_to_inst(priv_data); mutex_unlock(&inst->vb2_lock); } static void wfd_vidbuf_wait_finish(struct vb2_queue *q) { struct file *priv_data = (struct file *)(q->drv_priv); struct wfd_inst *inst = file_to_inst(priv_data); mutex_lock(&inst->vb2_lock); } static unsigned long wfd_enc_addr_to_mdp_addr(struct wfd_inst *inst, unsigned long addr) { struct list_head *ptr, *next; struct mem_region_pair *mpair; if (!list_empty(&inst->input_mem_list)) { list_for_each_safe(ptr, next, &inst->input_mem_list) { mpair = list_entry(ptr, struct mem_region_pair, list); if (mpair->enc->paddr == (u8 *)addr) return (unsigned long)mpair->mdp->paddr; } } return (unsigned long)NULL; } #ifdef 
CONFIG_MSM_WFD_DEBUG static void *wfd_map_kernel(struct ion_client *client, struct ion_handle *handle) { return ion_map_kernel(client, handle); } static void wfd_unmap_kernel(struct ion_client *client, struct ion_handle *handle) { ion_unmap_kernel(client, handle); } #else static void *wfd_map_kernel(struct ion_client *client, struct ion_handle *handle) { return NULL; } static void wfd_unmap_kernel(struct ion_client *client, struct ion_handle *handle) { return; } #endif static int wfd_allocate_ion_buffer(struct ion_client *client, bool secure, struct mem_region *mregion) { struct ion_handle *handle = NULL; unsigned int alloc_regions = 0, ion_flags = 0, align = 0; int rc = 0; if (secure) { alloc_regions = ION_HEAP(ION_CP_MM_HEAP_ID); ion_flags = ION_FLAG_SECURE; align = SZ_1M; } else { alloc_regions = ION_HEAP(ION_IOMMU_HEAP_ID); align = SZ_4K; } handle = ion_alloc(client, mregion->size, align, alloc_regions, ion_flags); if (IS_ERR_OR_NULL(handle)) { WFD_MSG_ERR("Failed to allocate input buffer\n"); rc = PTR_ERR(handle); goto alloc_fail; } mregion->kvaddr = secure ? 
NULL : wfd_map_kernel(client, handle); mregion->ion_handle = handle; return rc; alloc_fail: if (!IS_ERR_OR_NULL(handle)) { if (!IS_ERR_OR_NULL(mregion->kvaddr)) wfd_unmap_kernel(client, handle); ion_free(client, handle); mregion->kvaddr = NULL; mregion->paddr = NULL; mregion->ion_handle = NULL; } return rc; } /* Doesn't do iommu unmap */ static int wfd_free_ion_buffer(struct ion_client *client, struct mem_region *mregion) { if (!client || !mregion) { WFD_MSG_ERR("Failed to free ion buffer: " "Invalid client or region"); return -EINVAL; } if (!IS_ERR_OR_NULL(mregion->kvaddr)) wfd_unmap_kernel(client, mregion->ion_handle); ion_free(client, mregion->ion_handle); return 0; } static int wfd_flush_ion_buffer(struct ion_client *client, struct mem_region *mregion) { if (!client || !mregion) { WFD_MSG_ERR("Failed to flush ion buffer: " "Invalid client or region"); return -EINVAL; } else if (!mregion->ion_handle) { WFD_MSG_ERR("Failed to flush ion buffer: " "not an ion buffer"); return -EINVAL; } return msm_ion_do_cache_op(client, mregion->ion_handle, mregion->kvaddr, mregion->size, ION_IOC_INV_CACHES); } static int wfd_allocate_input_buffers(struct wfd_device *wfd_dev, struct wfd_inst *inst) { int i; struct mem_region *enc_mregion, *mdp_mregion; struct mem_region_pair *mpair; int rc; struct mdp_buf_info mdp_buf = {0}; struct mem_region_map mmap_context = {0}; mutex_lock(&inst->lock); if (inst->input_bufs_allocated) { mutex_unlock(&inst->lock); return 0; } inst->input_bufs_allocated = true; mutex_unlock(&inst->lock); for (i = 0; i < VENC_INPUT_BUFFERS; ++i) { mpair = kzalloc(sizeof(*mpair), GFP_KERNEL); enc_mregion = kzalloc(sizeof(*enc_mregion), GFP_KERNEL); mdp_mregion = kzalloc(sizeof(*enc_mregion), GFP_KERNEL); enc_mregion->size = ALIGN(inst->input_buf_size, SZ_4K); rc = wfd_allocate_ion_buffer(wfd_dev->ion_client, wfd_dev->secure, enc_mregion); if (rc) { WFD_MSG_ERR("Failed to allocate input memory\n"); goto alloc_fail; } mmap_context.mregion = enc_mregion; 
mmap_context.ion_client = wfd_dev->ion_client; rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, ENC_MMAP, &mmap_context); if (rc) { WFD_MSG_ERR("Failed to map input memory\n"); goto alloc_fail; } else if (!enc_mregion->paddr) { WFD_MSG_ERR("ENC_MMAP returned success" \ "but failed to map input memory\n"); rc = -EINVAL; goto alloc_fail; } WFD_MSG_DBG("NOTE: enc paddr = [%p->%p], kvaddr = %p\n", enc_mregion->paddr, (int8_t *) enc_mregion->paddr + enc_mregion->size, enc_mregion->kvaddr); rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, SET_INPUT_BUFFER, (void *)enc_mregion); if (rc) { WFD_MSG_ERR("Setting enc input buffer failed\n"); goto set_input_fail; } /* map the buffer from encoder to mdp */ mdp_mregion->kvaddr = enc_mregion->kvaddr; mdp_mregion->size = enc_mregion->size; mdp_mregion->offset = enc_mregion->offset; mdp_mregion->fd = enc_mregion->fd; mdp_mregion->cookie = 0; mdp_mregion->ion_handle = enc_mregion->ion_handle; memset(&mmap_context, 0, sizeof(mmap_context)); mmap_context.mregion = mdp_mregion; mmap_context.ion_client = wfd_dev->ion_client; mmap_context.cookie = inst->mdp_inst; rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl, MDP_MMAP, (void *)&mmap_context); if (rc) { WFD_MSG_ERR( "Failed to map to mdp, rc = %d, paddr = 0x%p\n", rc, mdp_mregion->paddr); mdp_mregion->kvaddr = NULL; mdp_mregion->paddr = NULL; mdp_mregion->ion_handle = NULL; goto mdp_mmap_fail; } else if (!mdp_mregion->paddr) { WFD_MSG_ERR("MDP_MMAP returned success" \ "but failed to map to MDP\n"); rc = -EINVAL; mdp_mregion->kvaddr = NULL; mdp_mregion->paddr = NULL; mdp_mregion->ion_handle = NULL; goto mdp_mmap_fail; } mdp_buf.inst = inst->mdp_inst; mdp_buf.cookie = enc_mregion; mdp_buf.kvaddr = (u32) mdp_mregion->kvaddr; mdp_buf.paddr = (u32) mdp_mregion->paddr; WFD_MSG_DBG("NOTE: mdp paddr = [%p->%p], kvaddr = %p\n", mdp_mregion->paddr, (void *) ((int)mdp_mregion->paddr + mdp_mregion->size), mdp_mregion->kvaddr); rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl, 
MDP_Q_BUFFER, (void *)&mdp_buf); if (rc) { WFD_MSG_ERR("Unable to queue the" " buffer to mdp\n"); goto mdp_q_fail; } else { wfd_stats_update(&inst->stats, WFD_STAT_EVENT_MDP_QUEUE); } INIT_LIST_HEAD(&mpair->list); mpair->enc = enc_mregion; mpair->mdp = mdp_mregion; list_add_tail(&mpair->list, &inst->input_mem_list); } rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, ALLOC_RECON_BUFFERS, NULL); if (rc) { WFD_MSG_ERR("Failed to allocate recon buffers\n"); goto recon_alloc_fail; } return rc; /* * Clean up only the buffer that we failed in setting up. * Caller will clean up the rest by calling free_input_buffers() */ mdp_q_fail: memset(&mmap_context, 0, sizeof(mmap_context)); mmap_context.mregion = mdp_mregion; mmap_context.ion_client = wfd_dev->ion_client; mmap_context.cookie = inst->mdp_inst; v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl, MDP_MUNMAP, (void *)&mmap_context); mdp_mmap_fail: v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, FREE_INPUT_BUFFER, (void *)enc_mregion); set_input_fail: memset(&mmap_context, 0, sizeof(mmap_context)); mmap_context.ion_client = wfd_dev->ion_client; mmap_context.mregion = enc_mregion; v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, ENC_MUNMAP, &mmap_context); alloc_fail: kfree(mpair); kfree(enc_mregion); kfree(mdp_mregion); recon_alloc_fail: return rc; } static void wfd_free_input_buffers(struct wfd_device *wfd_dev, struct wfd_inst *inst) { struct list_head *ptr, *next; struct mem_region_pair *mpair; int rc = 0; mutex_lock(&inst->lock); if (!inst->input_bufs_allocated) { mutex_unlock(&inst->lock); return; } inst->input_bufs_allocated = false; mutex_unlock(&inst->lock); if (!list_empty(&inst->input_mem_list)) { list_for_each_safe(ptr, next, &inst->input_mem_list) { mpair = list_entry(ptr, struct mem_region_pair, list); rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, FREE_INPUT_BUFFER, (void *)mpair->enc); if (rc) WFD_MSG_ERR("Failed to free buffers " "from encoder\n"); if (mpair->mdp->paddr) { struct 
mem_region_map temp = {0}; temp.ion_client = wfd_dev->ion_client; temp.mregion = mpair->mdp; temp.cookie = inst->mdp_inst; v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl, MDP_MUNMAP, (void *)&temp); } if (mpair->enc->paddr) { struct mem_region_map temp = {0}; temp.ion_client = wfd_dev->ion_client; temp.mregion = mpair->enc; v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, ENC_MUNMAP, &temp); } wfd_free_ion_buffer(wfd_dev->ion_client, mpair->enc); list_del(&mpair->list); kfree(mpair->enc); kfree(mpair->mdp); kfree(mpair); } } rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, FREE_RECON_BUFFERS, NULL); if (rc) WFD_MSG_ERR("Failed to free recon buffers\n"); } static struct mem_info *wfd_get_mem_info(struct wfd_inst *inst, unsigned long userptr) { struct mem_info_entry *temp; struct mem_info *ret = NULL; mutex_lock(&inst->lock); if (!list_empty(&inst->minfo_list)) { list_for_each_entry(temp, &inst->minfo_list, list) { if (temp && temp->userptr == userptr) { ret = &temp->minfo; break; } } } mutex_unlock(&inst->lock); return ret; } static void wfd_put_mem_info(struct wfd_inst *inst, struct mem_info *minfo) { struct list_head *ptr, *next; struct mem_info_entry *temp; mutex_lock(&inst->lock); if (!list_empty(&inst->minfo_list)) { list_for_each_safe(ptr, next, &inst->minfo_list) { temp = list_entry(ptr, struct mem_info_entry, list); if (temp && (&temp->minfo == minfo)) { list_del(&temp->list); kfree(temp); } } } mutex_unlock(&inst->lock); } static void wfd_unregister_out_buf(struct wfd_inst *inst, struct mem_info *minfo) { if (!minfo || !inst) { WFD_MSG_ERR("Invalid arguments\n"); return; } wfd_put_mem_info(inst, minfo); } static int wfd_vidbuf_buf_init(struct vb2_buffer *vb) { int rc = 0; struct vb2_queue *q = vb->vb2_queue; struct file *priv_data = (struct file *)(q->drv_priv); struct wfd_inst *inst = file_to_inst(priv_data); struct wfd_device *wfd_dev = (struct wfd_device *)video_drvdata(priv_data); struct mem_info *minfo = vb2_plane_cookie(vb, 0); struct 
mem_region mregion; mregion.fd = minfo->fd; mregion.offset = minfo->offset; mregion.cookie = (u32)vb; /*TODO: should be fixed in kernel 3.2*/ mregion.size = inst->out_buf_size; if (inst && !inst->vid_bufq.streaming) { rc = wfd_allocate_input_buffers(wfd_dev, inst); if (rc) { WFD_MSG_ERR("Failed to allocate input buffers\n"); goto free_input_bufs; } rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, SET_OUTPUT_BUFFER, (void *)&mregion); if (rc) { WFD_MSG_ERR("Failed to set output buffer\n"); goto free_input_bufs; } } return rc; free_input_bufs: wfd_free_input_buffers(wfd_dev, inst); return rc; } static int wfd_vidbuf_buf_prepare(struct vb2_buffer *vb) { return 0; } static int wfd_vidbuf_buf_finish(struct vb2_buffer *vb) { return 0; } static void wfd_vidbuf_buf_cleanup(struct vb2_buffer *vb) { int rc = 0; struct vb2_queue *q = vb->vb2_queue; struct file *priv_data = (struct file *)(q->drv_priv); struct wfd_device *wfd_dev = (struct wfd_device *)video_drvdata(priv_data); struct wfd_inst *inst = file_to_inst(priv_data); struct mem_info *minfo = vb2_plane_cookie(vb, 0); struct mem_region mregion; if (minfo == NULL) { WFD_MSG_DBG("not freeing buffers since allocation failed"); return; } mregion.fd = minfo->fd; mregion.offset = minfo->offset; mregion.cookie = (u32)vb; mregion.size = inst->out_buf_size; rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, FREE_OUTPUT_BUFFER, (void *)&mregion); if (rc) WFD_MSG_ERR("Failed to free output buffer\n"); wfd_unregister_out_buf(inst, minfo); } static int mdp_output_thread(void *data) { int rc = 0, no_sig_wait = 0; struct file *filp = (struct file *)data; struct wfd_inst *inst = file_to_inst(filp); struct wfd_device *wfd_dev = (struct wfd_device *)video_drvdata(filp); struct mdp_buf_info obuf_mdp = {inst->mdp_inst, 0, 0, 0}; struct mem_region *mregion; struct vsg_buf_info ibuf_vsg; while (!kthread_should_stop()) { if (rc) { WFD_MSG_DBG("%s() error in output thread\n", __func__); if (!no_sig_wait) { 
wait_for_completion(&inst->stop_mdp_thread); no_sig_wait = 1; } continue; } WFD_MSG_DBG("waiting for mdp output\n"); rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl, MDP_DQ_BUFFER, (void *)&obuf_mdp); if (rc) { if (rc != -ENOBUFS) WFD_MSG_ERR("MDP reported err %d\n", rc); WFD_MSG_ERR("Streamoff called\n"); continue; } else { wfd_stats_update(&inst->stats, WFD_STAT_EVENT_MDP_DEQUEUE); } mregion = obuf_mdp.cookie; if (!mregion) { WFD_MSG_ERR("mdp cookie is null\n"); rc = -EINVAL; continue; } ibuf_vsg.mdp_buf_info = obuf_mdp; ibuf_vsg.mdp_buf_info.inst = inst->mdp_inst; ibuf_vsg.mdp_buf_info.cookie = mregion; ibuf_vsg.mdp_buf_info.kvaddr = (u32) mregion->kvaddr; ibuf_vsg.mdp_buf_info.paddr = (u32)wfd_enc_addr_to_mdp_addr(inst, (unsigned long)mregion->paddr); rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core, ioctl, VSG_Q_BUFFER, (void *)&ibuf_vsg); if (rc) { WFD_MSG_ERR("Failed to queue frame to vsg\n"); continue; } else { wfd_stats_update(&inst->stats, WFD_STAT_EVENT_VSG_QUEUE); } } WFD_MSG_DBG("Exiting the thread\n"); return rc; } static int wfd_vidbuf_start_streaming(struct vb2_queue *q, unsigned int count) { struct file *priv_data = (struct file *)(q->drv_priv); struct wfd_device *wfd_dev = (struct wfd_device *)video_drvdata(priv_data); struct wfd_inst *inst = file_to_inst(priv_data); int rc = 0; WFD_MSG_ERR("Stream on called\n"); WFD_MSG_DBG("enc start\n"); rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, ENCODE_START, (void *)inst->venc_inst); if (rc) { WFD_MSG_ERR("Failed to start encoder\n"); goto subdev_start_fail; } WFD_MSG_DBG("vsg start\n"); rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core, ioctl, VSG_START, NULL); if (rc) { WFD_MSG_ERR("Failed to start vsg\n"); goto subdev_start_fail; } init_completion(&inst->stop_mdp_thread); inst->mdp_task = kthread_run(mdp_output_thread, priv_data, "mdp_output_thread"); if (IS_ERR(inst->mdp_task)) { rc = PTR_ERR(inst->mdp_task); goto subdev_start_fail; } WFD_MSG_DBG("mdp start\n"); rc = 
v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl, MDP_START, (void *)inst->mdp_inst); if (rc) WFD_MSG_ERR("Failed to start MDP\n"); subdev_start_fail: return rc; } static int wfd_vidbuf_stop_streaming(struct vb2_queue *q) { struct file *priv_data = (struct file *)(q->drv_priv); struct wfd_device *wfd_dev = (struct wfd_device *)video_drvdata(priv_data); struct wfd_inst *inst = file_to_inst(priv_data); int rc = 0; WFD_MSG_DBG("mdp stop\n"); rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl, MDP_STOP, (void *)inst->mdp_inst); if (rc) WFD_MSG_ERR("Failed to stop MDP\n"); rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, ENCODE_FLUSH, (void *)inst->venc_inst); if (rc) WFD_MSG_ERR("Failed to flush encoder\n"); WFD_MSG_DBG("vsg stop\n"); rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core, ioctl, VSG_STOP, NULL); if (rc) WFD_MSG_ERR("Failed to stop VSG\n"); complete(&inst->stop_mdp_thread); kthread_stop(inst->mdp_task); WFD_MSG_DBG("enc stop\n"); rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, ENCODE_STOP, (void *)inst->venc_inst); if (rc) WFD_MSG_ERR("Failed to stop encoder\n"); return rc; } static void wfd_vidbuf_buf_queue(struct vb2_buffer *vb) { int rc = 0; struct vb2_queue *q = vb->vb2_queue; struct file *priv_data = (struct file *)(q->drv_priv); struct wfd_device *wfd_dev = (struct wfd_device *)video_drvdata(priv_data); struct wfd_inst *inst = file_to_inst(priv_data); struct mem_region mregion; struct mem_info *minfo = vb2_plane_cookie(vb, 0); mregion.fd = minfo->fd; mregion.offset = minfo->offset; mregion.cookie = (u32)vb; mregion.size = inst->out_buf_size; rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, FILL_OUTPUT_BUFFER, (void *)&mregion); if (rc) { WFD_MSG_ERR("Failed to fill output buffer\n"); } } static struct vb2_ops wfd_vidbuf_ops = { .queue_setup = wfd_vidbuf_queue_setup, .wait_prepare = wfd_vidbuf_wait_prepare, .wait_finish = wfd_vidbuf_wait_finish, .buf_init = wfd_vidbuf_buf_init, .buf_prepare = wfd_vidbuf_buf_prepare, .buf_finish = 
wfd_vidbuf_buf_finish, .buf_cleanup = wfd_vidbuf_buf_cleanup, .start_streaming = wfd_vidbuf_start_streaming, .stop_streaming = wfd_vidbuf_stop_streaming, .buf_queue = wfd_vidbuf_buf_queue, }; static const struct v4l2_subdev_core_ops mdp_subdev_core_ops = { .init = mdp_init, .ioctl = mdp_ioctl, }; static const struct v4l2_subdev_ops mdp_subdev_ops = { .core = &mdp_subdev_core_ops, }; static const struct v4l2_subdev_core_ops enc_subdev_core_ops = { .init = venc_init, .load_fw = venc_load_fw, .ioctl = venc_ioctl, }; static const struct v4l2_subdev_ops enc_subdev_ops = { .core = &enc_subdev_core_ops, }; static const struct v4l2_subdev_core_ops vsg_subdev_core_ops = { .init = vsg_init, .ioctl = vsg_ioctl, }; static const struct v4l2_subdev_ops vsg_subdev_ops = { .core = &vsg_subdev_core_ops, }; static int wfdioc_querycap(struct file *filp, void *fh, struct v4l2_capability *cap) { WFD_MSG_DBG("wfdioc_querycap: E\n"); memset(cap, 0, sizeof(struct v4l2_capability)); strlcpy(cap->driver, "wifi-display", sizeof(cap->driver)); strlcpy(cap->card, "msm", sizeof(cap->card)); cap->version = WFD_VERSION; cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; WFD_MSG_DBG("wfdioc_querycap: X\n"); return 0; } static int wfdioc_g_fmt(struct file *filp, void *fh, struct v4l2_format *fmt) { struct wfd_inst *inst = file_to_inst(filp); if (!fmt) { WFD_MSG_ERR("Invalid argument\n"); return -EINVAL; } if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { WFD_MSG_ERR("Only V4L2_BUF_TYPE_VIDEO_CAPTURE is supported\n"); return -EINVAL; } mutex_lock(&inst->lock); fmt->fmt.pix.width = inst->width; fmt->fmt.pix.height = inst->height; fmt->fmt.pix.pixelformat = inst->pixelformat; fmt->fmt.pix.sizeimage = inst->out_buf_size; fmt->fmt.pix.priv = 0; mutex_unlock(&inst->lock); return 0; } static int wfdioc_s_fmt(struct file *filp, void *fh, struct v4l2_format *fmt) { int rc = 0; struct wfd_inst *inst = file_to_inst(filp); struct wfd_device *wfd_dev = video_drvdata(filp); struct mdp_prop prop; 
struct bufreq breq; if (!fmt) { WFD_MSG_ERR("Invalid argument\n"); return -EINVAL; } if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || fmt->fmt.pix.pixelformat != V4L2_PIX_FMT_H264) { WFD_MSG_ERR("Only V4L2_BUF_TYPE_VIDEO_CAPTURE and " "V4L2_PIX_FMT_H264 are supported\n"); return -EINVAL; } if (fmt->fmt.pix.width % 16) { WFD_MSG_ERR("Only 16 byte aligned widths are supported\n"); return -ENOTSUPP; } rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, SET_FORMAT, (void *)fmt); if (rc) { WFD_MSG_ERR("Failed to set format on encoder, rc = %d\n", rc); return rc; } breq.count = VENC_INPUT_BUFFERS; breq.height = fmt->fmt.pix.height; breq.width = fmt->fmt.pix.width; rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, SET_BUFFER_REQ, (void *)&breq); if (rc) { WFD_MSG_ERR("Failed to set buffer reqs on encoder\n"); return rc; } mutex_lock(&inst->lock); inst->input_buf_size = breq.size; inst->out_buf_size = fmt->fmt.pix.sizeimage; prop.height = inst->height = fmt->fmt.pix.height; prop.width = inst->width = fmt->fmt.pix.width; prop.inst = inst->mdp_inst; mutex_unlock(&inst->lock); rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl, MDP_SET_PROP, (void *)&prop); if (rc) WFD_MSG_ERR("Failed to set height/width property on mdp\n"); return rc; } static int wfdioc_reqbufs(struct file *filp, void *fh, struct v4l2_requestbuffers *b) { struct wfd_inst *inst = file_to_inst(filp); struct wfd_device *wfd_dev = video_drvdata(filp); int rc = 0; if (b->type != V4L2_CAP_VIDEO_CAPTURE || b->memory != V4L2_MEMORY_USERPTR) { WFD_MSG_ERR("Only V4L2_CAP_VIDEO_CAPTURE and " "V4L2_MEMORY_USERPTR are supported\n"); return -EINVAL; } rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, GET_BUFFER_REQ, (void *)b); if (rc) { WFD_MSG_ERR("Failed to get buf reqs from encoder\n"); return rc; } mutex_lock(&inst->lock); inst->buf_count = b->count; mutex_unlock(&inst->lock); rc = vb2_reqbufs(&inst->vid_bufq, b); return rc; } static int wfd_register_out_buf(struct wfd_inst *inst, struct v4l2_buffer 
*b) { struct mem_info_entry *minfo_entry; struct mem_info *minfo; if (!b || !inst || !b->reserved) { WFD_MSG_ERR("Invalid arguments\n"); return -EINVAL; } minfo = wfd_get_mem_info(inst, b->m.userptr); if (!minfo) { minfo_entry = kzalloc(sizeof(struct mem_info_entry), GFP_KERNEL); if (copy_from_user(&minfo_entry->minfo, (void *)b->reserved, sizeof(struct mem_info))) { WFD_MSG_ERR(" copy_from_user failed. Populate" " v4l2_buffer->reserved with meminfo\n"); return -EINVAL; } minfo_entry->userptr = b->m.userptr; mutex_lock(&inst->lock); list_add_tail(&minfo_entry->list, &inst->minfo_list); mutex_unlock(&inst->lock); } else WFD_MSG_DBG("Buffer already registered\n"); return 0; } static int wfdioc_qbuf(struct file *filp, void *fh, struct v4l2_buffer *b) { int rc = 0; struct wfd_inst *inst = file_to_inst(filp); if (!inst || !b || (b->index < 0 || b->index >= inst->buf_count)) { WFD_MSG_ERR("Invalid input parameters to QBUF IOCTL\n"); return -EINVAL; } rc = wfd_register_out_buf(inst, b); if (rc) { WFD_MSG_ERR("Failed to register buffer\n"); return rc; } mutex_lock(&inst->vb2_lock); rc = vb2_qbuf(&inst->vid_bufq, b); mutex_unlock(&inst->vb2_lock); if (rc) WFD_MSG_ERR("Failed to queue buffer\n"); else wfd_stats_update(&inst->stats, WFD_STAT_EVENT_CLIENT_QUEUE); return rc; } static int wfdioc_streamon(struct file *filp, void *fh, enum v4l2_buf_type i) { int rc = 0; struct wfd_inst *inst = file_to_inst(filp); if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE) { WFD_MSG_ERR("stream on for buffer type = %d is not " "supported.\n", i); return -EINVAL; } mutex_lock(&inst->lock); inst->streamoff = false; mutex_unlock(&inst->lock); rc = vb2_streamon(&inst->vid_bufq, i); if (rc) { WFD_MSG_ERR("videobuf_streamon failed with err = %d\n", rc); goto vidbuf_streamon_failed; } return rc; vidbuf_streamon_failed: vb2_streamoff(&inst->vid_bufq, i); return rc; } static int wfdioc_streamoff(struct file *filp, void *fh, enum v4l2_buf_type i) { struct wfd_inst *inst = file_to_inst(filp); if (i != 
V4L2_BUF_TYPE_VIDEO_CAPTURE) { WFD_MSG_ERR("stream off for buffer type = %d is not " "supported.\n", i); return -EINVAL; } mutex_lock(&inst->lock); if (inst->streamoff) { WFD_MSG_ERR("Module is already in streamoff state\n"); mutex_unlock(&inst->lock); return -EINVAL; } inst->streamoff = true; mutex_unlock(&inst->lock); WFD_MSG_DBG("Calling videobuf_streamoff\n"); vb2_streamoff(&inst->vid_bufq, i); wake_up(&inst->event_handler.wait); return 0; } static int wfdioc_dqbuf(struct file *filp, void *fh, struct v4l2_buffer *b) { struct wfd_inst *inst = file_to_inst(filp); int rc; WFD_MSG_DBG("Waiting to dequeue buffer\n"); mutex_lock(&inst->vb2_lock); rc = vb2_dqbuf(&inst->vid_bufq, b, false); mutex_unlock(&inst->vb2_lock); if (rc) WFD_MSG_ERR("Failed to dequeue buffer\n"); else wfd_stats_update(&inst->stats, WFD_STAT_EVENT_CLIENT_DEQUEUE); return rc; } static int wfdioc_g_ctrl(struct file *filp, void *fh, struct v4l2_control *a) { int rc = 0; struct wfd_device *wfd_dev = video_drvdata(filp); rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, GET_PROP, a); if (rc) WFD_MSG_ERR("Failed to get encoder property\n"); return rc; } static int wfdioc_s_ctrl(struct file *filp, void *fh, struct v4l2_control *a) { int rc = 0; struct wfd_device *wfd_dev = video_drvdata(filp); rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, SET_PROP, a); if (rc) WFD_MSG_ERR("Failed to set encoder property\n"); return rc; } static int wfdioc_g_parm(struct file *filp, void *fh, struct v4l2_streamparm *a) { int rc = 0; struct wfd_device *wfd_dev = video_drvdata(filp); struct wfd_inst *inst = file_to_inst(filp); int64_t frame_interval = 0, max_frame_interval = 0; /* both in nsecs*/ struct v4l2_qcom_frameskip frameskip, *usr_frameskip; usr_frameskip = (struct v4l2_qcom_frameskip *) a->parm.capture.extendedmode; if (!usr_frameskip) { rc = -EINVAL; goto get_parm_fail; } rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core, ioctl, VSG_GET_FRAME_INTERVAL, &frame_interval); if (rc < 0) goto get_parm_fail; 
rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core, ioctl, VSG_GET_MAX_FRAME_INTERVAL, &max_frame_interval); if (rc < 0) goto get_parm_fail; frameskip = (struct v4l2_qcom_frameskip) { .maxframeinterval = max_frame_interval, }; a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; a->parm.capture = (struct v4l2_captureparm) { .capability = V4L2_CAP_TIMEPERFRAME, .capturemode = 0, .timeperframe = (struct v4l2_fract) { .numerator = frame_interval, .denominator = NSEC_PER_SEC, }, .readbuffers = inst->buf_count, .extendedmode = (__u32)usr_frameskip, .reserved = {0} }; rc = copy_to_user((void *)a->parm.capture.extendedmode, &frameskip, sizeof(frameskip)); if (rc < 0) goto get_parm_fail; get_parm_fail: return rc; } static int wfdioc_s_parm(struct file *filp, void *fh, struct v4l2_streamparm *a) { int rc = 0; struct wfd_device *wfd_dev = video_drvdata(filp); struct wfd_inst *inst = file_to_inst(filp); struct v4l2_qcom_frameskip frameskip; int64_t frame_interval = 0, max_frame_interval = 0, frame_interval_variance = 0; void *extendedmode = NULL; enum vsg_modes vsg_mode = VSG_MODE_VFR; enum venc_framerate_modes venc_mode = VENC_MODE_VFR; if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { rc = -ENOTSUPP; goto set_parm_fail; } if (a->parm.capture.readbuffers == 0 || a->parm.capture.readbuffers == inst->buf_count) { a->parm.capture.readbuffers = inst->buf_count; } else { rc = -EINVAL; goto set_parm_fail; } extendedmode = (void *)a->parm.capture.extendedmode; if (a->parm.capture.capability & V4L2_CAP_TIMEPERFRAME) { if (a->parm.capture.timeperframe.denominator == 0) { rc = -EINVAL; goto set_parm_fail; } frame_interval = a->parm.capture.timeperframe.numerator * NSEC_PER_SEC / a->parm.capture.timeperframe.denominator; rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core, ioctl, VSG_SET_FRAME_INTERVAL, &frame_interval); if (rc) goto set_parm_fail; rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, SET_FRAMERATE, &a->parm.capture.timeperframe); if (rc) goto set_parm_fail; } if (a->parm.capture.capability & 
V4L2_CAP_QCOM_FRAMESKIP && extendedmode) { rc = copy_from_user(&frameskip, extendedmode, sizeof(frameskip)); if (rc) goto set_parm_fail; max_frame_interval = (int64_t)frameskip.maxframeinterval; frame_interval_variance = frameskip.fpsvariance; vsg_mode = VSG_MODE_VFR; venc_mode = VENC_MODE_VFR; rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core, ioctl, VSG_SET_MAX_FRAME_INTERVAL, &max_frame_interval); if (rc) goto set_parm_fail; } else { vsg_mode = VSG_MODE_CFR; venc_mode = VENC_MODE_CFR; } rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core, ioctl, VSG_SET_MODE, &vsg_mode); if (rc) { WFD_MSG_ERR("Setting FR mode for VSG failed\n"); goto set_parm_fail; } rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, SET_FRAMERATE_MODE, &venc_mode); if (rc) { WFD_MSG_ERR("Setting FR mode for VENC failed\n"); goto set_parm_fail; } if (frame_interval_variance) { rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core, ioctl, VSG_SET_FRAME_INTERVAL_VARIANCE, &frame_interval_variance); if (rc) { WFD_MSG_ERR("Setting FR variance for VSG failed\n"); goto set_parm_fail; } } set_parm_fail: return rc; } static int wfdioc_subscribe_event(struct v4l2_fh *fh, struct v4l2_event_subscription *sub) { struct wfd_inst *inst = container_of(fh, struct wfd_inst, event_handler); return v4l2_event_subscribe(&inst->event_handler, sub, MAX_EVENTS); } static int wfdioc_unsubscribe_event(struct v4l2_fh *fh, struct v4l2_event_subscription *sub) { struct wfd_inst *inst = container_of(fh, struct wfd_inst, event_handler); return v4l2_event_unsubscribe(&inst->event_handler, sub); } static const struct v4l2_ioctl_ops g_wfd_ioctl_ops = { .vidioc_querycap = wfdioc_querycap, .vidioc_s_fmt_vid_cap = wfdioc_s_fmt, .vidioc_g_fmt_vid_cap = wfdioc_g_fmt, .vidioc_reqbufs = wfdioc_reqbufs, .vidioc_qbuf = wfdioc_qbuf, .vidioc_streamon = wfdioc_streamon, .vidioc_streamoff = wfdioc_streamoff, .vidioc_dqbuf = wfdioc_dqbuf, .vidioc_g_ctrl = wfdioc_g_ctrl, .vidioc_s_ctrl = wfdioc_s_ctrl, .vidioc_g_parm = wfdioc_g_parm, .vidioc_s_parm = 
wfdioc_s_parm, .vidioc_subscribe_event = wfdioc_subscribe_event, .vidioc_unsubscribe_event = wfdioc_unsubscribe_event, }; static int wfd_set_default_properties(struct file *filp) { struct v4l2_format fmt; struct v4l2_control ctrl; struct wfd_inst *inst = file_to_inst(filp); if (!inst) { WFD_MSG_ERR("Invalid argument\n"); return -EINVAL; } mutex_lock(&inst->lock); fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; fmt.fmt.pix.height = inst->height = DEFAULT_WFD_HEIGHT; fmt.fmt.pix.width = inst->width = DEFAULT_WFD_WIDTH; fmt.fmt.pix.pixelformat = inst->pixelformat = V4L2_PIX_FMT_H264; mutex_unlock(&inst->lock); wfdioc_s_fmt(filp, filp->private_data, &fmt); ctrl.id = V4L2_CID_MPEG_VIDEO_HEADER_MODE; ctrl.value = V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_I_FRAME; wfdioc_s_ctrl(filp, filp->private_data, &ctrl); return 0; } static void venc_op_buffer_done(void *cookie, u32 status, struct vb2_buffer *buf) { struct file *filp = cookie; struct wfd_inst *inst = file_to_inst(filp); WFD_MSG_DBG("yay!! got callback\n"); mutex_lock(&inst->vb2_lock); vb2_buffer_done(buf, status ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); mutex_unlock(&inst->vb2_lock); } static void venc_ip_buffer_done(void *cookie, u32 status, struct mem_region *mregion) { struct file *filp = cookie; struct wfd_inst *inst = file_to_inst(filp); struct vsg_buf_info buf; struct mdp_buf_info mdp_buf = {0}; struct wfd_device *wfd_dev = (struct wfd_device *)video_drvdata(filp); int rc = 0; WFD_MSG_DBG("yay!! 
got ip callback\n"); mdp_buf.inst = inst->mdp_inst; mdp_buf.cookie = mregion; mdp_buf.kvaddr = (u32) mregion->kvaddr; mdp_buf.paddr = (u32)wfd_enc_addr_to_mdp_addr(inst, (unsigned long)mregion->paddr); buf.mdp_buf_info = mdp_buf; rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core, ioctl, VSG_RETURN_IP_BUFFER, (void *)&buf); if (rc) WFD_MSG_ERR("Failed to return buffer to vsg\n"); else wfd_stats_update(&inst->stats, WFD_STAT_EVENT_ENC_DEQUEUE); } static void venc_on_event(void *cookie, enum venc_event e) { struct file *filp = cookie; struct wfd_inst *inst = file_to_inst(filp); struct v4l2_event event; int type = 0; switch (e) { case VENC_EVENT_HARDWARE_ERROR: type = V4L2_EVENT_MSM_VIDC_SYS_ERROR; break; default: /* Whatever~~ */ break; } if (type) { event.id = 0; event.type = type; v4l2_event_queue_fh(&inst->event_handler, &event); } } static int vsg_release_input_frame(void *cookie, struct vsg_buf_info *buf) { struct file *filp = cookie; struct wfd_inst *inst = file_to_inst(filp); struct wfd_device *wfd_dev = (struct wfd_device *)video_drvdata(filp); int rc = 0; rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl, MDP_Q_BUFFER, buf); if (rc) WFD_MSG_ERR("Failed to Q buffer to mdp\n"); else { wfd_stats_update(&inst->stats, WFD_STAT_EVENT_MDP_QUEUE); wfd_stats_update(&inst->stats, WFD_STAT_EVENT_VSG_DEQUEUE); } return rc; } static int vsg_encode_frame(void *cookie, struct vsg_buf_info *buf) { struct file *filp = cookie; struct wfd_inst *inst = file_to_inst(filp); struct wfd_device *wfd_dev = (struct wfd_device *)video_drvdata(filp); struct venc_buf_info venc_buf; int rc = 0; if (!buf) return -EINVAL; venc_buf = (struct venc_buf_info){ .timestamp = timespec_to_ns(&buf->time), .mregion = buf->mdp_buf_info.cookie }; wfd_flush_ion_buffer(wfd_dev->ion_client, venc_buf.mregion); rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, ENCODE_FRAME, &venc_buf); if (rc) WFD_MSG_ERR("Encode failed\n"); else wfd_stats_update(&inst->stats, WFD_STAT_EVENT_ENC_QUEUE); return rc; } 
void *wfd_vb2_mem_ops_get_userptr(void *alloc_ctx, unsigned long vaddr, unsigned long size, int write) { return wfd_get_mem_info(alloc_ctx, vaddr); } void wfd_vb2_mem_ops_put_userptr(void *buf_priv) { /*TODO: Free the list*/ } void *wfd_vb2_mem_ops_cookie(void *buf_priv) { return buf_priv; } static struct vb2_mem_ops wfd_vb2_mem_ops = { .get_userptr = wfd_vb2_mem_ops_get_userptr, .put_userptr = wfd_vb2_mem_ops_put_userptr, .cookie = wfd_vb2_mem_ops_cookie, }; int wfd_initialize_vb2_queue(struct vb2_queue *q, void *priv) { q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; q->io_modes = VB2_USERPTR; q->ops = &wfd_vidbuf_ops; q->mem_ops = &wfd_vb2_mem_ops; q->drv_priv = priv; return vb2_queue_init(q); } static int wfd_open(struct file *filp) { int rc = 0; struct wfd_inst *inst = NULL; struct wfd_device *wfd_dev = NULL; struct venc_msg_ops enc_mops; struct mdp_msg_ops mdp_mops; struct vsg_msg_ops vsg_mops; WFD_MSG_DBG("wfd_open: E\n"); wfd_dev = video_drvdata(filp); if (!wfd_dev) { rc = -EINVAL; goto err_dev_busy; } mutex_lock(&wfd_dev->dev_lock); if (wfd_dev->in_use) { WFD_MSG_ERR("Device already in use.\n"); rc = -EBUSY; mutex_unlock(&wfd_dev->dev_lock); goto err_dev_busy; } wfd_dev->in_use = true; mutex_unlock(&wfd_dev->dev_lock); inst = kzalloc(sizeof(struct wfd_inst), GFP_KERNEL); if (!inst) { WFD_MSG_ERR("Could not allocate memory for " "wfd instance\n"); rc = -ENOMEM; goto err_mdp_open; } filp->private_data = &inst->event_handler; mutex_init(&inst->lock); mutex_init(&inst->vb2_lock); INIT_LIST_HEAD(&inst->input_mem_list); INIT_LIST_HEAD(&inst->minfo_list); /* Set up userspace event handlers */ v4l2_fh_init(&inst->event_handler, wfd_dev->pvdev); v4l2_fh_add(&inst->event_handler); wfd_stats_init(&inst->stats, MINOR(filp->f_dentry->d_inode->i_rdev)); mdp_mops.secure = wfd_dev->secure; mdp_mops.iommu_split_domain = wfd_dev->mdp_iommu_split_domain; rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl, MDP_OPEN, (void *)&mdp_mops); if (rc) { WFD_MSG_ERR("Failed to open mdp 
subdevice: %d\n", rc); goto err_mdp_open; } inst->mdp_inst = mdp_mops.cookie; rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, load_fw); if (rc) { WFD_MSG_ERR("Failed to load video encoder firmware: %d\n", rc); goto err_venc; } enc_mops.op_buffer_done = venc_op_buffer_done; enc_mops.ip_buffer_done = venc_ip_buffer_done; enc_mops.on_event = venc_on_event; enc_mops.cbdata = filp; enc_mops.secure = wfd_dev->secure; rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, OPEN, (void *)&enc_mops); if (rc || !enc_mops.cookie) { WFD_MSG_ERR("Failed to open encoder subdevice: %d\n", rc); goto err_venc; } inst->venc_inst = enc_mops.cookie; vsg_mops.encode_frame = vsg_encode_frame; vsg_mops.release_input_frame = vsg_release_input_frame; vsg_mops.cbdata = filp; rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core, ioctl, VSG_OPEN, &vsg_mops); if (rc) { WFD_MSG_ERR("Failed to open vsg subdevice: %d\n", rc); goto err_vsg_open; } wfd_initialize_vb2_queue(&inst->vid_bufq, filp); wfd_set_default_properties(filp); WFD_MSG_DBG("wfd_open: X\n"); return rc; err_vsg_open: v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, CLOSE, NULL); err_venc: v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl, MDP_CLOSE, (void *)inst->mdp_inst); err_mdp_open: v4l2_fh_del(&inst->event_handler); mutex_lock(&wfd_dev->dev_lock); wfd_dev->in_use = false; mutex_unlock(&wfd_dev->dev_lock); kfree(inst); err_dev_busy: return rc; } static int wfd_close(struct file *filp) { struct wfd_inst *inst; struct wfd_device *wfd_dev; int rc = 0; wfd_dev = video_drvdata(filp); WFD_MSG_DBG("wfd_close: E\n"); inst = file_to_inst(filp); if (inst) { wfdioc_streamoff(filp, NULL, V4L2_BUF_TYPE_VIDEO_CAPTURE); vb2_queue_release(&inst->vid_bufq); wfd_free_input_buffers(wfd_dev, inst); rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl, MDP_CLOSE, (void *)inst->mdp_inst); if (rc) WFD_MSG_ERR("Failed to CLOSE mdp subdevice: %d\n", rc); rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, CLOSE, (void *)inst->venc_inst); if (rc) 
WFD_MSG_ERR("Failed to CLOSE enc subdev: %d\n", rc); rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core, ioctl, VSG_CLOSE, NULL); if (rc) WFD_MSG_ERR("Failed to CLOSE vsg subdev: %d\n", rc); wfd_stats_deinit(&inst->stats); v4l2_fh_del(&inst->event_handler); mutex_destroy(&inst->lock); mutex_destroy(&inst->vb2_lock); kfree(inst); } mutex_lock(&wfd_dev->dev_lock); wfd_dev->in_use = false; mutex_unlock(&wfd_dev->dev_lock); WFD_MSG_DBG("wfd_close: X\n"); return 0; } unsigned int wfd_poll(struct file *filp, struct poll_table_struct *pt) { struct wfd_inst *inst = file_to_inst(filp); unsigned long flags = 0; bool streamoff = false; poll_wait(filp, &inst->event_handler.wait, pt); mutex_lock(&inst->lock); streamoff = inst->streamoff; mutex_unlock(&inst->lock); if (v4l2_event_pending(&inst->event_handler)) flags |= POLLPRI; if (streamoff) flags |= POLLERR; return flags; } static const struct v4l2_file_operations g_wfd_fops = { .owner = THIS_MODULE, .open = wfd_open, .release = wfd_close, .ioctl = video_ioctl2, .poll = wfd_poll, }; void release_video_device(struct video_device *pvdev) { } static int wfd_dev_setup(struct wfd_device *wfd_dev, int dev_num, struct platform_device *pdev) { int rc = 0; rc = v4l2_device_register(&pdev->dev, &wfd_dev->v4l2_dev); if (rc) { WFD_MSG_ERR("Failed to register the video device\n"); goto err_v4l2_registration; } wfd_dev->pvdev = video_device_alloc(); if (!wfd_dev->pvdev) { WFD_MSG_ERR("Failed to allocate video device\n"); goto err_video_device_alloc; } wfd_dev->pvdev->release = release_video_device; wfd_dev->pvdev->fops = &g_wfd_fops; wfd_dev->pvdev->ioctl_ops = &g_wfd_ioctl_ops; rc = video_register_device(wfd_dev->pvdev, VFL_TYPE_GRABBER, dev_num); if (rc) { WFD_MSG_ERR("Failed to register the device\n"); goto err_video_register_device; } video_set_drvdata(wfd_dev->pvdev, wfd_dev); v4l2_subdev_init(&wfd_dev->mdp_sdev, &mdp_subdev_ops); strncpy(wfd_dev->mdp_sdev.name, "wfd-mdp", V4L2_SUBDEV_NAME_SIZE); rc = 
v4l2_device_register_subdev(&wfd_dev->v4l2_dev, &wfd_dev->mdp_sdev); if (rc) { WFD_MSG_ERR("Failed to register mdp subdevice: %d\n", rc); goto err_mdp_register_subdev; } v4l2_subdev_init(&wfd_dev->enc_sdev, &enc_subdev_ops); strncpy(wfd_dev->enc_sdev.name, "wfd-venc", V4L2_SUBDEV_NAME_SIZE); rc = v4l2_device_register_subdev(&wfd_dev->v4l2_dev, &wfd_dev->enc_sdev); if (rc) { WFD_MSG_ERR("Failed to register encoder subdevice: %d\n", rc); goto err_venc_register_subdev; } rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, init, 0); if (rc) { WFD_MSG_ERR("Failed to initiate encoder device %d\n", rc); goto err_venc_init; } v4l2_subdev_init(&wfd_dev->vsg_sdev, &vsg_subdev_ops); strncpy(wfd_dev->vsg_sdev.name, "wfd-vsg", V4L2_SUBDEV_NAME_SIZE); rc = v4l2_device_register_subdev(&wfd_dev->v4l2_dev, &wfd_dev->vsg_sdev); if (rc) { WFD_MSG_ERR("Failed to register vsg subdevice: %d\n", rc); goto err_venc_init; } WFD_MSG_DBG("__wfd_probe: X\n"); return rc; err_venc_init: v4l2_device_unregister_subdev(&wfd_dev->enc_sdev); err_venc_register_subdev: v4l2_device_unregister_subdev(&wfd_dev->mdp_sdev); err_mdp_register_subdev: video_unregister_device(wfd_dev->pvdev); err_video_register_device: video_device_release(wfd_dev->pvdev); err_video_device_alloc: v4l2_device_unregister(&wfd_dev->v4l2_dev); err_v4l2_registration: return rc; } static int __devinit __wfd_probe(struct platform_device *pdev) { int rc = 0, c = 0; struct wfd_device *wfd_dev; /* Should be taken as an array*/ struct ion_client *ion_client = NULL; struct msm_wfd_platform_data *wfd_priv; WFD_MSG_DBG("__wfd_probe: E\n"); wfd_dev = kzalloc(sizeof(*wfd_dev)*WFD_NUM_DEVICES, GFP_KERNEL); if (!wfd_dev) { WFD_MSG_ERR("Could not allocate memory for " "wfd device\n"); rc = -ENOMEM; goto err_v4l2_probe; } wfd_priv = pdev->dev.platform_data; pdev->dev.platform_data = (void *) wfd_dev; ion_client = msm_ion_client_create(-1, "wfd"); rc = wfd_stats_setup(); if (rc) { WFD_MSG_ERR("No debugfs support: %d\n", rc); /* Don't treat this as a 
fatal err */ rc = 0; } if (!ion_client) { WFD_MSG_ERR("Failed to create ion client\n"); rc = -ENODEV; goto err_v4l2_probe; } for (c = 0; c < WFD_NUM_DEVICES; ++c) { rc = wfd_dev_setup(&wfd_dev[c], WFD_DEVICE_NUMBER_BASE + c, pdev); if (rc) { /* Clear out old devices */ for (--c; c >= 0; --c) { v4l2_device_unregister_subdev( &wfd_dev[c].vsg_sdev); v4l2_device_unregister_subdev( &wfd_dev[c].enc_sdev); v4l2_device_unregister_subdev( &wfd_dev[c].mdp_sdev); video_unregister_device(wfd_dev[c].pvdev); video_device_release(wfd_dev[c].pvdev); v4l2_device_unregister(&wfd_dev[c].v4l2_dev); } goto err_v4l2_probe; } /* Other device specific stuff */ mutex_init(&wfd_dev[c].dev_lock); wfd_dev[c].ion_client = ion_client; wfd_dev[c].in_use = false; if (wfd_priv && wfd_priv->wfd_check_mdp_iommu_split) { wfd_dev[c].mdp_iommu_split_domain = wfd_priv->wfd_check_mdp_iommu_split(); } switch (WFD_DEVICE_NUMBER_BASE + c) { case WFD_DEVICE_SECURE: wfd_dev[c].secure = true; break; default: break; } } WFD_MSG_DBG("__wfd_probe: X\n"); return rc; err_v4l2_probe: kfree(wfd_dev); return rc; } static int __devexit __wfd_remove(struct platform_device *pdev) { struct wfd_device *wfd_dev; int c = 0; wfd_dev = (struct wfd_device *)pdev->dev.platform_data; WFD_MSG_DBG("Inside wfd_remove\n"); if (!wfd_dev) { WFD_MSG_ERR("Error removing WFD device"); return -ENODEV; } wfd_stats_teardown(); for (c = 0; c < WFD_NUM_DEVICES; ++c) { v4l2_device_unregister_subdev(&wfd_dev[c].vsg_sdev); v4l2_device_unregister_subdev(&wfd_dev[c].enc_sdev); v4l2_device_unregister_subdev(&wfd_dev[c].mdp_sdev); video_unregister_device(wfd_dev[c].pvdev); video_device_release(wfd_dev[c].pvdev); v4l2_device_unregister(&wfd_dev[c].v4l2_dev); } kfree(wfd_dev); return 0; } static const struct of_device_id msm_wfd_dt_match[] = { {.compatible = "qcom,msm-wfd"}, {} }; MODULE_DEVICE_TABLE(of, msm_vidc_dt_match); static struct platform_driver wfd_driver = { .probe = __wfd_probe, .remove = __wfd_remove, .driver = { .name = "msm_wfd", .owner = 
THIS_MODULE, .of_match_table = msm_wfd_dt_match, } }; static int __init wfd_init(void) { int rc = 0; WFD_MSG_DBG("Calling init function of wfd driver\n"); rc = platform_driver_register(&wfd_driver); if (rc) { WFD_MSG_ERR("failed to load the driver\n"); goto err_platform_registration; } err_platform_registration: return rc; } static void __exit wfd_exit(void) { WFD_MSG_DBG("wfd_exit: X\n"); platform_driver_unregister(&wfd_driver); } module_init(wfd_init); module_exit(wfd_exit);
gpl-2.0
Ca1ne/Classic-Sense-Kernel
drivers/media/video/cpia2/cpia2_core.c
1653
77737
/**************************************************************************** * * Filename: cpia2_core.c * * Copyright 2001, STMicrolectronics, Inc. * Contact: steve.miller@st.com * * Description: * This is a USB driver for CPia2 based video cameras. * The infrastructure of this driver is based on the cpia usb driver by * Jochen Scharrlach and Johannes Erdfeldt. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* * Stripped of 2.4 stuff ready for main kernel submit by * Alan Cox <alan@lxorguk.ukuu.org.uk> * ****************************************************************************/ #include "cpia2.h" #include <linux/slab.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/firmware.h> /* #define _CPIA2_DEBUG_ */ #ifdef _CPIA2_DEBUG_ static const char *block_name[] = { "System", "VC", "VP", "IDATA" }; #endif static unsigned int debugs_on; /* default 0 - DEBUG_REG */ /****************************************************************************** * * Forward Declarations * *****************************************************************************/ static int apply_vp_patch(struct camera_data *cam); static int set_default_user_mode(struct camera_data *cam); static int set_vw_size(struct camera_data *cam, int size); static int configure_sensor(struct camera_data *cam, int reqwidth, int reqheight); static int config_sensor_410(struct camera_data *cam, int reqwidth, int reqheight); static int config_sensor_500(struct camera_data *cam, int reqwidth, int reqheight); static int set_all_properties(struct camera_data *cam); static void get_color_params(struct camera_data *cam); static void wake_system(struct camera_data *cam); static void set_lowlight_boost(struct camera_data *cam); static void reset_camera_struct(struct camera_data *cam); static int cpia2_set_high_power(struct camera_data *cam); /* Here we want the physical address of the memory. * This is used when initializing the contents of the * area and marking the pages as reserved. 
*/ static inline unsigned long kvirt_to_pa(unsigned long adr) { unsigned long kva, ret; kva = (unsigned long) page_address(vmalloc_to_page((void *)adr)); kva |= adr & (PAGE_SIZE-1); /* restore the offset */ ret = __pa(kva); return ret; } static void *rvmalloc(unsigned long size) { void *mem; unsigned long adr; /* Round it off to PAGE_SIZE */ size = PAGE_ALIGN(size); mem = vmalloc_32(size); if (!mem) return NULL; memset(mem, 0, size); /* Clear the ram out, no junk to the user */ adr = (unsigned long) mem; while ((long)size > 0) { SetPageReserved(vmalloc_to_page((void *)adr)); adr += PAGE_SIZE; size -= PAGE_SIZE; } return mem; } static void rvfree(void *mem, unsigned long size) { unsigned long adr; if (!mem) return; size = PAGE_ALIGN(size); adr = (unsigned long) mem; while ((long)size > 0) { ClearPageReserved(vmalloc_to_page((void *)adr)); adr += PAGE_SIZE; size -= PAGE_SIZE; } vfree(mem); } /****************************************************************************** * * cpia2_do_command * * Send an arbitrary command to the camera. For commands that read from * the camera, copy the buffers into the proper param structures. *****************************************************************************/ int cpia2_do_command(struct camera_data *cam, u32 command, u8 direction, u8 param) { int retval = 0; struct cpia2_command cmd; unsigned int device = cam->params.pnp_id.device_type; cmd.command = command; cmd.reg_count = 2; /* default */ cmd.direction = direction; /*** * Set up the command. 
***/ switch (command) { case CPIA2_CMD_GET_VERSION: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM; cmd.start = CPIA2_SYSTEM_DEVICE_HI; break; case CPIA2_CMD_GET_PNP_ID: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM; cmd.reg_count = 8; cmd.start = CPIA2_SYSTEM_DESCRIP_VID_HI; break; case CPIA2_CMD_GET_ASIC_TYPE: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC; cmd.start = CPIA2_VC_ASIC_ID; break; case CPIA2_CMD_GET_SENSOR: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.start = CPIA2_VP_SENSOR_FLAGS; break; case CPIA2_CMD_GET_VP_DEVICE: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.start = CPIA2_VP_DEVICEH; break; case CPIA2_CMD_SET_VP_BRIGHTNESS: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_VP_BRIGHTNESS: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; if (device == DEVICE_STV_672) cmd.start = CPIA2_VP4_EXPOSURE_TARGET; else cmd.start = CPIA2_VP5_EXPOSURE_TARGET; break; case CPIA2_CMD_SET_CONTRAST: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_CONTRAST: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; cmd.start = CPIA2_VP_YRANGE; break; case CPIA2_CMD_SET_VP_SATURATION: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_VP_SATURATION: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; if (device == DEVICE_STV_672) cmd.start = CPIA2_VP_SATURATION; else cmd.start = CPIA2_VP5_MCUVSATURATION; break; case CPIA2_CMD_SET_VP_GPIO_DATA: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_VP_GPIO_DATA: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; cmd.start = CPIA2_VP_GPIO_DATA; break; case CPIA2_CMD_SET_VP_GPIO_DIRECTION: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_VP_GPIO_DIRECTION: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; 
cmd.reg_count = 1; cmd.start = CPIA2_VP_GPIO_DIRECTION; break; case CPIA2_CMD_SET_VC_MP_GPIO_DATA: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_VC_MP_GPIO_DATA: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC; cmd.reg_count = 1; cmd.start = CPIA2_VC_MP_DATA; break; case CPIA2_CMD_SET_VC_MP_GPIO_DIRECTION: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_VC_MP_GPIO_DIRECTION: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC; cmd.reg_count = 1; cmd.start = CPIA2_VC_MP_DIR; break; case CPIA2_CMD_ENABLE_PACKET_CTRL: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM; cmd.start = CPIA2_SYSTEM_INT_PACKET_CTRL; cmd.reg_count = 1; cmd.buffer.block_data[0] = param; break; case CPIA2_CMD_SET_FLICKER_MODES: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_FLICKER_MODES: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; cmd.start = CPIA2_VP_FLICKER_MODES; break; case CPIA2_CMD_RESET_FIFO: /* clear fifo and enable stream block */ cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VC; cmd.reg_count = 2; cmd.start = 0; cmd.buffer.registers[0].index = CPIA2_VC_ST_CTRL; cmd.buffer.registers[0].value = CPIA2_VC_ST_CTRL_SRC_VC | CPIA2_VC_ST_CTRL_DST_USB | CPIA2_VC_ST_CTRL_EOF_DETECT; cmd.buffer.registers[1].index = CPIA2_VC_ST_CTRL; cmd.buffer.registers[1].value = CPIA2_VC_ST_CTRL_SRC_VC | CPIA2_VC_ST_CTRL_DST_USB | CPIA2_VC_ST_CTRL_EOF_DETECT | CPIA2_VC_ST_CTRL_FIFO_ENABLE; break; case CPIA2_CMD_SET_HI_POWER: cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_SYSTEM; cmd.reg_count = 2; cmd.buffer.registers[0].index = CPIA2_SYSTEM_SYSTEM_CONTROL; cmd.buffer.registers[1].index = CPIA2_SYSTEM_SYSTEM_CONTROL; cmd.buffer.registers[0].value = CPIA2_SYSTEM_CONTROL_CLEAR_ERR; cmd.buffer.registers[1].value = CPIA2_SYSTEM_CONTROL_HIGH_POWER; break; case CPIA2_CMD_SET_LOW_POWER: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM; 
cmd.reg_count = 1; cmd.start = CPIA2_SYSTEM_SYSTEM_CONTROL; cmd.buffer.block_data[0] = 0; break; case CPIA2_CMD_CLEAR_V2W_ERR: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM; cmd.reg_count = 1; cmd.start = CPIA2_SYSTEM_SYSTEM_CONTROL; cmd.buffer.block_data[0] = CPIA2_SYSTEM_CONTROL_CLEAR_ERR; break; case CPIA2_CMD_SET_USER_MODE: /* Then fall through */ cmd.buffer.block_data[0] = param; case CPIA2_CMD_GET_USER_MODE: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; if (device == DEVICE_STV_672) cmd.start = CPIA2_VP4_USER_MODE; else cmd.start = CPIA2_VP5_USER_MODE; break; case CPIA2_CMD_FRAMERATE_REQ: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; if (device == DEVICE_STV_672) cmd.start = CPIA2_VP4_FRAMERATE_REQUEST; else cmd.start = CPIA2_VP5_FRAMERATE_REQUEST; cmd.buffer.block_data[0] = param; break; case CPIA2_CMD_SET_WAKEUP: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_WAKEUP: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC; cmd.reg_count = 1; cmd.start = CPIA2_VC_WAKEUP; break; case CPIA2_CMD_SET_PW_CONTROL: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_PW_CONTROL: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC; cmd.reg_count = 1; cmd.start = CPIA2_VC_PW_CTRL; break; case CPIA2_CMD_GET_VP_SYSTEM_STATE: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; cmd.start = CPIA2_VP_SYSTEMSTATE; break; case CPIA2_CMD_SET_SYSTEM_CTRL: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_SYSTEM_CTRL: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM; cmd.reg_count = 1; cmd.start = CPIA2_SYSTEM_SYSTEM_CONTROL; break; case CPIA2_CMD_SET_VP_SYSTEM_CTRL: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_VP_SYSTEM_CTRL: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; cmd.start = CPIA2_VP_SYSTEMCTRL; break; case 
CPIA2_CMD_SET_VP_EXP_MODES: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_VP_EXP_MODES: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; cmd.start = CPIA2_VP_EXPOSURE_MODES; break; case CPIA2_CMD_SET_DEVICE_CONFIG: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_DEVICE_CONFIG: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; cmd.start = CPIA2_VP_DEVICE_CONFIG; break; case CPIA2_CMD_SET_SERIAL_ADDR: cmd.buffer.block_data[0] = param; cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM; cmd.reg_count = 1; cmd.start = CPIA2_SYSTEM_VP_SERIAL_ADDR; break; case CPIA2_CMD_SET_SENSOR_CR1: cmd.buffer.block_data[0] = param; cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; cmd.start = CPIA2_SENSOR_CR1; break; case CPIA2_CMD_SET_VC_CONTROL: cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_VC_CONTROL: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC; cmd.reg_count = 1; cmd.start = CPIA2_VC_VC_CTRL; break; case CPIA2_CMD_SET_TARGET_KB: cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VC; cmd.reg_count = 1; cmd.buffer.registers[0].index = CPIA2_VC_VC_TARGET_KB; cmd.buffer.registers[0].value = param; break; case CPIA2_CMD_SET_DEF_JPEG_OPT: cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VC; cmd.reg_count = 4; cmd.buffer.registers[0].index = CPIA2_VC_VC_JPEG_OPT; cmd.buffer.registers[0].value = CPIA2_VC_VC_JPEG_OPT_DOUBLE_SQUEEZE; cmd.buffer.registers[1].index = CPIA2_VC_VC_USER_SQUEEZE; cmd.buffer.registers[1].value = 20; cmd.buffer.registers[2].index = CPIA2_VC_VC_CREEP_PERIOD; cmd.buffer.registers[2].value = 2; cmd.buffer.registers[3].index = CPIA2_VC_VC_JPEG_OPT; cmd.buffer.registers[3].value = CPIA2_VC_VC_JPEG_OPT_DEFAULT; break; case CPIA2_CMD_REHASH_VP4: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; cmd.start = CPIA2_VP_REHASH_VALUES; 
cmd.buffer.block_data[0] = param; break; case CPIA2_CMD_SET_USER_EFFECTS: /* Note: Be careful with this as this register can also affect flicker modes */ cmd.buffer.block_data[0] = param; /* Then fall through */ case CPIA2_CMD_GET_USER_EFFECTS: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; if (device == DEVICE_STV_672) cmd.start = CPIA2_VP4_USER_EFFECTS; else cmd.start = CPIA2_VP5_USER_EFFECTS; break; default: LOG("DoCommand received invalid command\n"); return -EINVAL; } retval = cpia2_send_command(cam, &cmd); if (retval) { return retval; } /*** * Now copy any results from a read into the appropriate param struct. ***/ switch (command) { case CPIA2_CMD_GET_VERSION: cam->params.version.firmware_revision_hi = cmd.buffer.block_data[0]; cam->params.version.firmware_revision_lo = cmd.buffer.block_data[1]; break; case CPIA2_CMD_GET_PNP_ID: cam->params.pnp_id.vendor = (cmd.buffer.block_data[0] << 8) | cmd.buffer.block_data[1]; cam->params.pnp_id.product = (cmd.buffer.block_data[2] << 8) | cmd.buffer.block_data[3]; cam->params.pnp_id.device_revision = (cmd.buffer.block_data[4] << 8) | cmd.buffer.block_data[5]; if (cam->params.pnp_id.vendor == 0x553) { if (cam->params.pnp_id.product == 0x100) { cam->params.pnp_id.device_type = DEVICE_STV_672; } else if (cam->params.pnp_id.product == 0x140 || cam->params.pnp_id.product == 0x151) { cam->params.pnp_id.device_type = DEVICE_STV_676; } } break; case CPIA2_CMD_GET_ASIC_TYPE: cam->params.version.asic_id = cmd.buffer.block_data[0]; cam->params.version.asic_rev = cmd.buffer.block_data[1]; break; case CPIA2_CMD_GET_SENSOR: cam->params.version.sensor_flags = cmd.buffer.block_data[0]; cam->params.version.sensor_rev = cmd.buffer.block_data[1]; break; case CPIA2_CMD_GET_VP_DEVICE: cam->params.version.vp_device_hi = cmd.buffer.block_data[0]; cam->params.version.vp_device_lo = cmd.buffer.block_data[1]; break; case CPIA2_CMD_GET_VP_BRIGHTNESS: cam->params.color_params.brightness = cmd.buffer.block_data[0]; 
break; case CPIA2_CMD_GET_CONTRAST: cam->params.color_params.contrast = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_VP_SATURATION: cam->params.color_params.saturation = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_VP_GPIO_DATA: cam->params.vp_params.gpio_data = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_VP_GPIO_DIRECTION: cam->params.vp_params.gpio_direction = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_VC_MP_GPIO_DIRECTION: cam->params.vc_params.vc_mp_direction =cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_VC_MP_GPIO_DATA: cam->params.vc_params.vc_mp_data = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_FLICKER_MODES: cam->params.flicker_control.cam_register = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_WAKEUP: cam->params.vc_params.wakeup = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_PW_CONTROL: cam->params.vc_params.pw_control = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_SYSTEM_CTRL: cam->params.camera_state.system_ctrl = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_VP_SYSTEM_STATE: cam->params.vp_params.system_state = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_VP_SYSTEM_CTRL: cam->params.vp_params.system_ctrl = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_VP_EXP_MODES: cam->params.vp_params.exposure_modes = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_DEVICE_CONFIG: cam->params.vp_params.device_config = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_VC_CONTROL: cam->params.vc_params.vc_control = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_USER_MODE: cam->params.vp_params.video_mode = cmd.buffer.block_data[0]; break; case CPIA2_CMD_GET_USER_EFFECTS: cam->params.vp_params.user_effects = cmd.buffer.block_data[0]; break; default: break; } return retval; } /****************************************************************************** * * cpia2_send_command * *****************************************************************************/ int 
cpia2_send_command(struct camera_data *cam, struct cpia2_command *cmd) { u8 count; u8 start; u8 block_index; u8 *buffer; int retval; const char* dir; if (cmd->direction == TRANSFER_WRITE) { dir = "Write"; } else { dir = "Read"; } block_index = cmd->req_mode & 0x03; switch (cmd->req_mode & 0x0c) { case CAMERAACCESS_TYPE_RANDOM: count = cmd->reg_count * sizeof(struct cpia2_register); start = 0; buffer = (u8 *) & cmd->buffer; if (debugs_on & DEBUG_REG) DBG("%s Random: Register block %s\n", dir, block_name[block_index]); break; case CAMERAACCESS_TYPE_BLOCK: count = cmd->reg_count; start = cmd->start; buffer = cmd->buffer.block_data; if (debugs_on & DEBUG_REG) DBG("%s Block: Register block %s\n", dir, block_name[block_index]); break; case CAMERAACCESS_TYPE_MASK: count = cmd->reg_count * sizeof(struct cpia2_reg_mask); start = 0; buffer = (u8 *) & cmd->buffer; if (debugs_on & DEBUG_REG) DBG("%s Mask: Register block %s\n", dir, block_name[block_index]); break; case CAMERAACCESS_TYPE_REPEAT: /* For patch blocks only */ count = cmd->reg_count; start = cmd->start; buffer = cmd->buffer.block_data; if (debugs_on & DEBUG_REG) DBG("%s Repeat: Register block %s\n", dir, block_name[block_index]); break; default: LOG("%s: invalid request mode\n",__func__); return -EINVAL; } retval = cpia2_usb_transfer_cmd(cam, buffer, cmd->req_mode, start, count, cmd->direction); #ifdef _CPIA2_DEBUG_ if (debugs_on & DEBUG_REG) { int i; for (i = 0; i < cmd->reg_count; i++) { if((cmd->req_mode & 0x0c) == CAMERAACCESS_TYPE_BLOCK) KINFO("%s Block: [0x%02X] = 0x%02X\n", dir, start + i, buffer[i]); if((cmd->req_mode & 0x0c) == CAMERAACCESS_TYPE_RANDOM) KINFO("%s Random: [0x%02X] = 0x%02X\n", dir, cmd->buffer.registers[i].index, cmd->buffer.registers[i].value); } } #endif return retval; }; /************* * Functions to implement camera functionality *************/ /****************************************************************************** * * cpia2_get_version_info * 
 *****************************************************************************/
static void cpia2_get_version_info(struct camera_data *cam)
{
	/* Populate cam->params.version / pnp_id by issuing read commands;
	 * cpia2_do_command() stores each result into cam->params. */
	cpia2_do_command(cam, CPIA2_CMD_GET_VERSION, TRANSFER_READ, 0);
	cpia2_do_command(cam, CPIA2_CMD_GET_PNP_ID, TRANSFER_READ, 0);
	cpia2_do_command(cam, CPIA2_CMD_GET_ASIC_TYPE, TRANSFER_READ, 0);
	cpia2_do_command(cam, CPIA2_CMD_GET_SENSOR, TRANSFER_READ, 0);
	cpia2_do_command(cam, CPIA2_CMD_GET_VP_DEVICE, TRANSFER_READ, 0);
}

/******************************************************************************
 *
 * cpia2_reset_camera
 *
 * Called at least during the open process, sets up initial params.
 * Returns 0 or the negative error from configure_sensor()/apply_vp_patch().
 * NOTE: the ordering of register writes below follows the vendor init
 * sequence and must not be rearranged.
 *****************************************************************************/
int cpia2_reset_camera(struct camera_data *cam)
{
	u8 tmp_reg;
	int retval = 0;
	int i;
	struct cpia2_command cmd;

	/***
	 * VC setup
	 ***/
	retval = configure_sensor(cam,
				  cam->params.roi.width,
				  cam->params.roi.height);
	if (retval < 0) {
		ERR("Couldn't configure sensor, error=%d\n", retval);
		return retval;
	}

	/* Clear FIFO and route/enable stream block.
	 * Two writes to the same ST_CTRL register: first route VC->USB,
	 * then the same routing with the FIFO enable bit added. */
	cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VC;
	cmd.direction = TRANSFER_WRITE;
	cmd.reg_count = 2;
	cmd.buffer.registers[0].index = CPIA2_VC_ST_CTRL;
	cmd.buffer.registers[0].value = CPIA2_VC_ST_CTRL_SRC_VC |
		CPIA2_VC_ST_CTRL_DST_USB | CPIA2_VC_ST_CTRL_EOF_DETECT;
	cmd.buffer.registers[1].index = CPIA2_VC_ST_CTRL;
	cmd.buffer.registers[1].value = CPIA2_VC_ST_CTRL_SRC_VC |
		CPIA2_VC_ST_CTRL_DST_USB |
		CPIA2_VC_ST_CTRL_EOF_DETECT |
		CPIA2_VC_ST_CTRL_FIFO_ENABLE;
	cpia2_send_command(cam, &cmd);

	cpia2_set_high_power(cam);

	if (cam->params.pnp_id.device_type == DEVICE_STV_672) {
		/* Enable button notification */
		cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_SYSTEM;
		cmd.buffer.registers[0].index = CPIA2_SYSTEM_INT_PACKET_CTRL;
		cmd.buffer.registers[0].value =
			CPIA2_SYSTEM_INT_PACKET_CTRL_ENABLE_SW_XX;
		cmd.reg_count = 1;
		cpia2_send_command(cam, &cmd);
	}

	/* Let the hardware settle before patching */
	schedule_timeout_interruptible(msecs_to_jiffies(100));

	if (cam->params.pnp_id.device_type == DEVICE_STV_672)
		retval = apply_vp_patch(cam);

	/* wait for vp to go to sleep */
	schedule_timeout_interruptible(msecs_to_jiffies(100));

	/***
	 * If this is a 676, apply VP5 fixes before we start streaming
	 ***/
	if (cam->params.pnp_id.device_type == DEVICE_STV_676) {
		cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VP;

		/* The following writes improve the picture */
		cmd.buffer.registers[0].index = CPIA2_VP5_MYBLACK_LEVEL;
		cmd.buffer.registers[0].value = 0; /* reduce from the default
						    * rec 601 pedestal of 16 */
		cmd.buffer.registers[1].index = CPIA2_VP5_MCYRANGE;
		cmd.buffer.registers[1].value = 0x92; /* increase from 100% to
						       * (256/256 - 31) to fill
						       * available range */
		cmd.buffer.registers[2].index = CPIA2_VP5_MYCEILING;
		cmd.buffer.registers[2].value = 0xFF; /* Increase from the
						       * default rec 601 ceiling
						       * of 240 */
		cmd.buffer.registers[3].index = CPIA2_VP5_MCUVSATURATION;
		cmd.buffer.registers[3].value = 0xFF; /* Increase from the rec
						       * 601 100% level (128)
						       * to 145-192 */
		cmd.buffer.registers[4].index = CPIA2_VP5_ANTIFLKRSETUP;
		cmd.buffer.registers[4].value = 0x80; /* Inhibit the
						       * anti-flicker */

		/* The following 4 writes are a fix to allow QVGA to work at 30 fps */
		cmd.buffer.registers[5].index = CPIA2_VP_RAM_ADDR_H;
		cmd.buffer.registers[5].value = 0x01;
		cmd.buffer.registers[6].index = CPIA2_VP_RAM_ADDR_L;
		cmd.buffer.registers[6].value = 0xE3;
		cmd.buffer.registers[7].index = CPIA2_VP_RAM_DATA;
		cmd.buffer.registers[7].value = 0x02;
		cmd.buffer.registers[8].index = CPIA2_VP_RAM_DATA;
		cmd.buffer.registers[8].value = 0xFC;

		cmd.direction = TRANSFER_WRITE;
		cmd.reg_count = 9;
		cpia2_send_command(cam, &cmd);
	}

	/* Activate all settings and start the data stream */
	/* Set user mode */
	set_default_user_mode(cam);

	/* Give VP time to wake up */
	schedule_timeout_interruptible(msecs_to_jiffies(100));

	set_all_properties(cam);

	cpia2_do_command(cam, CPIA2_CMD_GET_USER_MODE, TRANSFER_READ, 0);
	DBG("After SetAllProperties(cam), user mode is 0x%0X\n",
	    cam->params.vp_params.video_mode);

	/***
	 * Set audio regulator off.  This and the code to set the compresison
	 * state are too complex to form a CPIA2_CMD_, and seem to be somewhat
	 * intertwined.  This stuff came straight from the windows driver.
	 ***/
	/* Turn AutoExposure off in VP and enable the serial bridge to the sensor */
	cpia2_do_command(cam, CPIA2_CMD_GET_VP_SYSTEM_CTRL, TRANSFER_READ, 0);
	tmp_reg = cam->params.vp_params.system_ctrl;
	/* NOTE(review): the double "tmp_reg &" is redundant (masking twice
	 * with the same value); it is kept verbatim from the vendor code. */
	cmd.buffer.registers[0].value = tmp_reg &
		(tmp_reg & (CPIA2_VP_SYSTEMCTRL_HK_CONTROL ^ 0xFF));

	cpia2_do_command(cam, CPIA2_CMD_GET_DEVICE_CONFIG, TRANSFER_READ, 0);
	cmd.buffer.registers[1].value = cam->params.vp_params.device_config |
				CPIA2_VP_DEVICE_CONFIG_SERIAL_BRIDGE;
	cmd.buffer.registers[0].index = CPIA2_VP_SYSTEMCTRL;
	cmd.buffer.registers[1].index = CPIA2_VP_DEVICE_CONFIG;
	cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VP;
	cmd.reg_count = 2;
	cmd.direction = TRANSFER_WRITE;
	cmd.start = 0;
	cpia2_send_command(cam, &cmd);

	/* Set the correct I2C address in the CPiA-2 system register */
	cpia2_do_command(cam,
			 CPIA2_CMD_SET_SERIAL_ADDR,
			 TRANSFER_WRITE,
			 CPIA2_SYSTEM_VP_SERIAL_ADDR_SENSOR);

	/* Now have sensor access - set bit to turn the audio regulator off */
	cpia2_do_command(cam,
			 CPIA2_CMD_SET_SENSOR_CR1,
			 TRANSFER_WRITE, CPIA2_SENSOR_CR1_DOWN_AUDIO_REGULATOR);

	/* Set the correct I2C address in the CPiA-2 system register */
	if (cam->params.pnp_id.device_type == DEVICE_STV_672)
		cpia2_do_command(cam,
				 CPIA2_CMD_SET_SERIAL_ADDR,
				 TRANSFER_WRITE,
				 CPIA2_SYSTEM_VP_SERIAL_ADDR_VP); // 0x88
	else
		cpia2_do_command(cam,
				 CPIA2_CMD_SET_SERIAL_ADDR,
				 TRANSFER_WRITE,
				 CPIA2_SYSTEM_VP_SERIAL_ADDR_676_VP); // 0x8a

	/* increase signal drive strength */
	if (cam->params.pnp_id.device_type == DEVICE_STV_676)
		cpia2_do_command(cam,
				 CPIA2_CMD_SET_VP_EXP_MODES,
				 TRANSFER_WRITE,
				 CPIA2_VP_EXPOSURE_MODES_COMPILE_EXP);

	/* Start autoexposure: drop the serial bridge, re-enable HK control */
	cpia2_do_command(cam, CPIA2_CMD_GET_DEVICE_CONFIG, TRANSFER_READ, 0);
	cmd.buffer.registers[0].value = cam->params.vp_params.device_config &
				  (CPIA2_VP_DEVICE_CONFIG_SERIAL_BRIDGE ^ 0xFF);

	cpia2_do_command(cam, CPIA2_CMD_GET_VP_SYSTEM_CTRL, TRANSFER_READ, 0);
	cmd.buffer.registers[1].value = cam->params.vp_params.system_ctrl |
					CPIA2_VP_SYSTEMCTRL_HK_CONTROL;

	cmd.buffer.registers[0].index = CPIA2_VP_DEVICE_CONFIG;
	cmd.buffer.registers[1].index = CPIA2_VP_SYSTEMCTRL;
	cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VP;
	cmd.reg_count = 2;
	cmd.direction = TRANSFER_WRITE;
	cpia2_send_command(cam, &cmd);

	/* Set compression state */
	cpia2_do_command(cam, CPIA2_CMD_GET_VC_CONTROL, TRANSFER_READ, 0);
	if (cam->params.compression.inhibit_htables) {
		tmp_reg = cam->params.vc_params.vc_control |
			  CPIA2_VC_VC_CTRL_INHIBIT_H_TABLES;
	} else  {
		tmp_reg = cam->params.vc_params.vc_control &
			  ~CPIA2_VC_VC_CTRL_INHIBIT_H_TABLES;
	}
	cpia2_do_command(cam, CPIA2_CMD_SET_VC_CONTROL, TRANSFER_WRITE,tmp_reg);

	/* Set target size (kb) on vc */
	cpia2_do_command(cam, CPIA2_CMD_SET_TARGET_KB,
			 TRANSFER_WRITE, cam->params.vc_params.target_kb);

	/* Wiggle VC Reset */
	/***
	 * First read and wait a bit.
	 ***/
	for (i = 0; i < 50; i++) {
		cpia2_do_command(cam, CPIA2_CMD_GET_PW_CONTROL,
				 TRANSFER_READ, 0);
	}

	/* Pulse the VC reset bit low then high */
	tmp_reg = cam->params.vc_params.pw_control;
	tmp_reg &= ~CPIA2_VC_PW_CTRL_VC_RESET_N;

	cpia2_do_command(cam, CPIA2_CMD_SET_PW_CONTROL, TRANSFER_WRITE,tmp_reg);

	tmp_reg |= CPIA2_VC_PW_CTRL_VC_RESET_N;
	cpia2_do_command(cam, CPIA2_CMD_SET_PW_CONTROL, TRANSFER_WRITE,tmp_reg);

	cpia2_do_command(cam, CPIA2_CMD_SET_DEF_JPEG_OPT, TRANSFER_WRITE, 0);

	cpia2_do_command(cam, CPIA2_CMD_GET_USER_MODE, TRANSFER_READ, 0);
	DBG("After VC RESET, user mode is 0x%0X\n",
	    cam->params.vp_params.video_mode);

	return retval;
}

/******************************************************************************
 *
 * cpia2_set_high_power
 *
 * Polls (up to 51 attempts) until the VP reports HK_ALIVE; returns -EIO and
 * records LO_POWER_MODE if the camera never wakes up.
 *****************************************************************************/
static int cpia2_set_high_power(struct camera_data *cam)
{
	int i;
	for (i = 0; i <= 50; i++) {
		/* Read system status */
		cpia2_do_command(cam,CPIA2_CMD_GET_SYSTEM_CTRL,TRANSFER_READ,0);

		/* If there is an error, clear it */
		if(cam->params.camera_state.system_ctrl &
		   CPIA2_SYSTEM_CONTROL_V2W_ERR)
			cpia2_do_command(cam, CPIA2_CMD_CLEAR_V2W_ERR,
					 TRANSFER_WRITE, 0);

		/* Try to set high power mode */
		cpia2_do_command(cam, CPIA2_CMD_SET_SYSTEM_CTRL,
				 TRANSFER_WRITE, 1);

		/* Try to read something in VP to check if everything is awake */
		cpia2_do_command(cam, CPIA2_CMD_GET_VP_SYSTEM_STATE,
				 TRANSFER_READ, 0);
		if (cam->params.vp_params.system_state &
		    CPIA2_VP_SYSTEMSTATE_HK_ALIVE) {
			break;
		} else if (i == 50) {
			/* Last attempt failed - give up */
			cam->params.camera_state.power_mode = LO_POWER_MODE;
			ERR("Camera did not wake up\n");
			return -EIO;
		}
	}

	DBG("System now in high power state\n");
	cam->params.camera_state.power_mode = HI_POWER_MODE;
	return 0;
}

/******************************************************************************
 *
 * cpia2_set_low_power
 *
 *****************************************************************************/
int cpia2_set_low_power(struct camera_data *cam)
{
	cam->params.camera_state.power_mode = LO_POWER_MODE;
	cpia2_do_command(cam, CPIA2_CMD_SET_SYSTEM_CTRL, TRANSFER_WRITE, 0);
	return 0;
}

/******************************************************************************
 *
 * apply_vp_patch
 *
 *****************************************************************************/
/* Helper: write a single byte to VP patch register 'start' using the
 * already-configured REPEAT-mode command. */
static int cpia2_send_onebyte_command(struct camera_data *cam,
				      struct cpia2_command *cmd,
				      u8 start, u8 datum)
{
	cmd->buffer.block_data[0] = datum;
	cmd->start = start;
	cmd->reg_count = 1;
	return cpia2_send_command(cam, cmd);
}

/* Upload the STV0672 VP4 firmware patch.  Bytes 0-1 of the firmware image
 * are the load address (hi/lo); the remainder is the payload, sent in
 * 64-byte chunks.  Returns 0 or the request_firmware() error. */
static int apply_vp_patch(struct camera_data *cam)
{
	const struct firmware *fw;
	const char fw_name[] = "cpia2/stv0672_vp4.bin";
	int i, ret;
	struct cpia2_command cmd;

	ret = request_firmware(&fw, fw_name, &cam->dev->dev);
	if (ret) {
		printk(KERN_ERR "cpia2: failed to load VP patch \"%s\"\n",
		       fw_name);
		return ret;
	}

	cmd.req_mode = CAMERAACCESS_TYPE_REPEAT | CAMERAACCESS_VP;
	cmd.direction = TRANSFER_WRITE;

	/* First send the start address... */
	cpia2_send_onebyte_command(cam, &cmd, 0x0A, fw->data[0]); /* hi */
	cpia2_send_onebyte_command(cam, &cmd, 0x0B, fw->data[1]); /* lo */

	/* ... followed by the data payload */
	for (i = 2; i < fw->size; i += 64) {
		cmd.start = 0x0C; /* Data */
		cmd.reg_count = min_t(int, 64, fw->size - i);
		memcpy(cmd.buffer.block_data, &fw->data[i], cmd.reg_count);
		cpia2_send_command(cam, &cmd);
	}

	/* Next send the start address... */
	cpia2_send_onebyte_command(cam, &cmd, 0x0A, fw->data[0]); /* hi */
	cpia2_send_onebyte_command(cam, &cmd, 0x0B, fw->data[1]); /* lo */

	/* ... followed by the 'goto' command */
	cpia2_send_onebyte_command(cam, &cmd, 0x0D, 1);

	release_firmware(fw);
	return 0;
}

/******************************************************************************
 *
 * set_default_user_mode
 *
 * Picks user mode (resolution class) and frame rate from the sensor type
 * and the current ROI, then programs both.  Returns 0 or -EINVAL for an
 * unknown sensor flag.
 *****************************************************************************/
static int set_default_user_mode(struct camera_data *cam)
{
	unsigned char user_mode;
	unsigned char frame_rate;
	int width = cam->params.roi.width;
	int height = cam->params.roi.height;

	switch (cam->params.version.sensor_flags) {
	case CPIA2_VP_SENSOR_FLAGS_404:
	case CPIA2_VP_SENSOR_FLAGS_407:
	case CPIA2_VP_SENSOR_FLAGS_409:
	case CPIA2_VP_SENSOR_FLAGS_410:
		/* CIF-class sensors */
		if ((width > STV_IMAGE_QCIF_COLS) ||
		    (height > STV_IMAGE_QCIF_ROWS)) {
			user_mode = CPIA2_VP_USER_MODE_CIF;
		} else {
			user_mode = CPIA2_VP_USER_MODE_QCIFDS;
		}
		frame_rate = CPIA2_VP_FRAMERATE_30;
		break;
	case CPIA2_VP_SENSOR_FLAGS_500:
		/* VGA-class sensor */
		if ((width > STV_IMAGE_CIF_COLS) ||
		    (height > STV_IMAGE_CIF_ROWS)) {
			user_mode = CPIA2_VP_USER_MODE_VGA;
		} else {
			user_mode = CPIA2_VP_USER_MODE_QVGADS;
		}
		/* STV672 + VGA sensor cannot do 30 fps */
		if (cam->params.pnp_id.device_type == DEVICE_STV_672)
			frame_rate = CPIA2_VP_FRAMERATE_15;
		else
			frame_rate = CPIA2_VP_FRAMERATE_30;
		break;
	default:
		LOG("%s: Invalid sensor flag value 0x%0X\n",__func__,
		    cam->params.version.sensor_flags);
		return -EINVAL;
	}

	DBG("Sensor flag = 0x%0x, user mode = 0x%0x, frame rate = 0x%X\n",
	    cam->params.version.sensor_flags, user_mode, frame_rate);
	cpia2_do_command(cam, CPIA2_CMD_SET_USER_MODE, TRANSFER_WRITE,
			 user_mode);
	/* Never raise the rate above a previously requested one */
	if(cam->params.vp_params.frame_rate > 0 &&
	   frame_rate > cam->params.vp_params.frame_rate)
		frame_rate = cam->params.vp_params.frame_rate;

	cpia2_set_fps(cam, frame_rate);

//	if (cam->params.pnp_id.device_type == DEVICE_STV_676)
//		cpia2_do_command(cam,
//				 CPIA2_CMD_SET_VP_SYSTEM_CTRL,
//				 TRANSFER_WRITE,
//				 CPIA2_VP_SYSTEMCTRL_HK_CONTROL |
//				 CPIA2_VP_SYSTEMCTRL_POWER_CONTROL);

	return 0;
}

/******************************************************************************
 *
 * cpia2_match_video_size
 *
 *
 return the best match, where 'best' is as always
 * the largest that is not bigger than what is requested.
 *****************************************************************************/
int cpia2_match_video_size(int width, int height)
{
	if (width >= STV_IMAGE_VGA_COLS && height >= STV_IMAGE_VGA_ROWS)
		return VIDEOSIZE_VGA;

	if (width >= STV_IMAGE_CIF_COLS && height >= STV_IMAGE_CIF_ROWS)
		return VIDEOSIZE_CIF;

	if (width >= STV_IMAGE_QVGA_COLS && height >= STV_IMAGE_QVGA_ROWS)
		return VIDEOSIZE_QVGA;

	if (width >= 288 && height >= 216)
		return VIDEOSIZE_288_216;

	if (width >= 256 && height >= 192)
		return VIDEOSIZE_256_192;

	if (width >= 224 && height >= 168)
		return VIDEOSIZE_224_168;

	if (width >= 192 && height >= 144)
		return VIDEOSIZE_192_144;

	if (width >= STV_IMAGE_QCIF_COLS && height >= STV_IMAGE_QCIF_ROWS)
		return VIDEOSIZE_QCIF;

	/* Smaller than the smallest supported size */
	return -1;
}

/******************************************************************************
 *
 * SetVideoSize
 *
 * Records the chosen VIDEOSIZE_* in vp_params and updates both the ROI and
 * the video window (cam->vw) dimensions.  Returns 0 or -EINVAL for an
 * unknown size code.
 *****************************************************************************/
static int set_vw_size(struct camera_data *cam, int size)
{
	int retval = 0;

	cam->params.vp_params.video_size = size;

	switch (size) {
	case VIDEOSIZE_VGA:
		DBG("Setting size to VGA\n");
		cam->params.roi.width = STV_IMAGE_VGA_COLS;
		cam->params.roi.height = STV_IMAGE_VGA_ROWS;
		cam->vw.width = STV_IMAGE_VGA_COLS;
		cam->vw.height = STV_IMAGE_VGA_ROWS;
		break;
	case VIDEOSIZE_CIF:
		DBG("Setting size to CIF\n");
		cam->params.roi.width = STV_IMAGE_CIF_COLS;
		cam->params.roi.height = STV_IMAGE_CIF_ROWS;
		cam->vw.width = STV_IMAGE_CIF_COLS;
		cam->vw.height = STV_IMAGE_CIF_ROWS;
		break;
	case VIDEOSIZE_QVGA:
		DBG("Setting size to QVGA\n");
		cam->params.roi.width = STV_IMAGE_QVGA_COLS;
		cam->params.roi.height = STV_IMAGE_QVGA_ROWS;
		cam->vw.width = STV_IMAGE_QVGA_COLS;
		cam->vw.height = STV_IMAGE_QVGA_ROWS;
		break;
	case VIDEOSIZE_288_216:
		cam->params.roi.width = 288;
		cam->params.roi.height = 216;
		cam->vw.width = 288;
		cam->vw.height = 216;
		break;
	case VIDEOSIZE_256_192:
		cam->vw.width = 256;
		cam->vw.height = 192;
		cam->params.roi.width = 256;
		cam->params.roi.height = 192;
		break;
	case VIDEOSIZE_224_168:
		cam->vw.width = 224;
		cam->vw.height = 168;
		cam->params.roi.width = 224;
		cam->params.roi.height = 168;
		break;
	case VIDEOSIZE_192_144:
		cam->vw.width = 192;
		cam->vw.height = 144;
		cam->params.roi.width = 192;
		cam->params.roi.height = 144;
		break;
	case VIDEOSIZE_QCIF:
		DBG("Setting size to QCIF\n");
		cam->params.roi.width = STV_IMAGE_QCIF_COLS;
		cam->params.roi.height = STV_IMAGE_QCIF_ROWS;
		cam->vw.width = STV_IMAGE_QCIF_COLS;
		cam->vw.height = STV_IMAGE_QCIF_ROWS;
		break;
	default:
		retval = -EINVAL;
	}
	return retval;
}

/******************************************************************************
 *
 * configure_sensor
 *
 * Dispatches to the 410- or 500-family setup routine based on sensor flags.
 *****************************************************************************/
static int configure_sensor(struct camera_data *cam,
			    int req_width, int req_height)
{
	int retval;

	switch (cam->params.version.sensor_flags) {
	case CPIA2_VP_SENSOR_FLAGS_404:
	case CPIA2_VP_SENSOR_FLAGS_407:
	case CPIA2_VP_SENSOR_FLAGS_409:
	case CPIA2_VP_SENSOR_FLAGS_410:
		retval = config_sensor_410(cam, req_width, req_height);
		break;
	case CPIA2_VP_SENSOR_FLAGS_500:
		retval = config_sensor_500(cam, req_width, req_height);
		break;
	default:
		return -EINVAL;
	}

	return retval;
}

/******************************************************************************
 *
 * config_sensor_410
 *
 * Programs the VC block for the CIF-class (410 family) sensors.
 * Returns the number of registers written, or -EINVAL.
 *****************************************************************************/
static int config_sensor_410(struct camera_data *cam,
			    int req_width, int req_height)
{
	struct cpia2_command cmd;
	int i = 0;
	int image_size;
	int image_type;
	int width = req_width;
	int height = req_height;

	/***
	 * Make sure size doesn't exceed CIF.
	 
	***/
	if (width > STV_IMAGE_CIF_COLS)
		width = STV_IMAGE_CIF_COLS;
	if (height > STV_IMAGE_CIF_ROWS)
		height = STV_IMAGE_CIF_ROWS;

	image_size = cpia2_match_video_size(width, height);

	DBG("Config 410: width = %d, height = %d\n", width, height);
	DBG("Image size returned is %d\n", image_size);
	if (image_size >= 0) {
		set_vw_size(cam, image_size);
		width = cam->params.roi.width;
		height = cam->params.roi.height;

		DBG("After set_vw_size(), width = %d, height = %d\n",
		    width, height);
		/* Classify into one of the three hardware pipeline modes */
		if (width <= 176 && height <= 144) {
			DBG("image type = VIDEOSIZE_QCIF\n");
			image_type = VIDEOSIZE_QCIF;
		}
		else if (width <= 320 && height <= 240) {
			DBG("image type = VIDEOSIZE_QVGA\n");
			image_type = VIDEOSIZE_QVGA;
		}
		else {
			DBG("image type = VIDEOSIZE_CIF\n");
			image_type = VIDEOSIZE_CIF;
		}
	} else {
		ERR("ConfigSensor410 failed\n");
		return -EINVAL;
	}

	cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VC;
	cmd.direction = TRANSFER_WRITE;

	/* VC Format */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_FORMAT;
	if (image_type == VIDEOSIZE_CIF) {
		cmd.buffer.registers[i++].value =
		    (u8) (CPIA2_VC_VC_FORMAT_UFIRST |
			  CPIA2_VC_VC_FORMAT_SHORTLINE);
	} else {
		cmd.buffer.registers[i++].value =
		    (u8) CPIA2_VC_VC_FORMAT_UFIRST;
	}

	/* VC Clocks - divider/scaling choice differs per chip (672 vs 676) */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_CLOCKS;
	if (image_type == VIDEOSIZE_QCIF) {
		if (cam->params.pnp_id.device_type == DEVICE_STV_672) {
			cmd.buffer.registers[i++].value=
				(u8)(CPIA2_VC_VC_672_CLOCKS_CIF_DIV_BY_3 |
				     CPIA2_VC_VC_672_CLOCKS_SCALING |
				     CPIA2_VC_VC_CLOCKS_LOGDIV2);
			DBG("VC_Clocks (0xc4) should be B\n");
		}
		else {
			cmd.buffer.registers[i++].value=
				(u8)(CPIA2_VC_VC_676_CLOCKS_CIF_DIV_BY_3 |
				     CPIA2_VC_VC_CLOCKS_LOGDIV2);
		}
	} else {
		if (cam->params.pnp_id.device_type == DEVICE_STV_672) {
			cmd.buffer.registers[i++].value =
			    (u8) (CPIA2_VC_VC_672_CLOCKS_CIF_DIV_BY_3 |
				  CPIA2_VC_VC_CLOCKS_LOGDIV0);
		}
		else {
			cmd.buffer.registers[i++].value =
			    (u8) (CPIA2_VC_VC_676_CLOCKS_CIF_DIV_BY_3 |
				  CPIA2_VC_VC_676_CLOCKS_SCALING |
				  CPIA2_VC_VC_CLOCKS_LOGDIV0);
		}
	}
	DBG("VC_Clocks (0xc4) = 0x%0X\n", cmd.buffer.registers[i-1].value);

	/* Input reqWidth from VC */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_IHSIZE_LO;
	if (image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value =
		    (u8) (STV_IMAGE_QCIF_COLS / 4);
	else
		cmd.buffer.registers[i++].value =
		    (u8) (STV_IMAGE_CIF_COLS / 4);

	/* Timings */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_XLIM_HI;
	if (image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value = (u8) 0;
	else
		cmd.buffer.registers[i++].value = (u8) 1;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_XLIM_LO;
	if (image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value = (u8) 208;
	else
		cmd.buffer.registers[i++].value = (u8) 160;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_YLIM_HI;
	if (image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value = (u8) 0;
	else
		cmd.buffer.registers[i++].value = (u8) 1;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_YLIM_LO;
	if (image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value = (u8) 160;
	else
		cmd.buffer.registers[i++].value = (u8) 64;

	/* Output Image Size (units of 4 pixels) */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_OHSIZE;
	cmd.buffer.registers[i++].value = cam->params.roi.width / 4;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_OVSIZE;
	cmd.buffer.registers[i++].value = cam->params.roi.height / 4;

	/* Cropping - center the ROI inside the sensor frame */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_HCROP;
	if (image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value =
		    (u8) (((STV_IMAGE_QCIF_COLS / 4) - (width / 4)) / 2);
	else
		cmd.buffer.registers[i++].value =
		    (u8) (((STV_IMAGE_CIF_COLS / 4) - (width / 4)) / 2);

	cmd.buffer.registers[i].index = CPIA2_VC_VC_VCROP;
	if (image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value =
		    (u8) (((STV_IMAGE_QCIF_ROWS / 4) - (height / 4)) / 2);
	else
		cmd.buffer.registers[i++].value =
		    (u8) (((STV_IMAGE_CIF_ROWS / 4) - (height / 4)) / 2);

	/* Scaling registers (defaults) */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_HPHASE;
	cmd.buffer.registers[i++].value = (u8) 0;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_VPHASE;
	cmd.buffer.registers[i++].value = (u8) 0;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_HISPAN;
	cmd.buffer.registers[i++].value = (u8) 31;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_VISPAN;
	cmd.buffer.registers[i++].value = (u8) 31;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_HICROP;
	cmd.buffer.registers[i++].value = (u8) 0;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_VICROP;
	cmd.buffer.registers[i++].value = (u8) 0;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_HFRACT;
	cmd.buffer.registers[i++].value = (u8) 0x81;	/* = 8/1 = 8 (HIBYTE/LOBYTE) */

	cmd.buffer.registers[i].index = CPIA2_VC_VC_VFRACT;
	cmd.buffer.registers[i++].value = (u8) 0x81;	/* = 8/1 = 8 (HIBYTE/LOBYTE) */

	cmd.reg_count = i;

	cpia2_send_command(cam, &cmd);

	return i;
}

/******************************************************************************
 *
 * config_sensor_500(cam)
 *
 * Programs the VC block for the VGA-class (500 family) sensor.
 * Returns the number of registers written, or -EINVAL.
 *****************************************************************************/
static int config_sensor_500(struct camera_data *cam,
			     int req_width, int req_height)
{
	struct cpia2_command cmd;
	int i = 0;
	int image_size = VIDEOSIZE_CIF;
	int image_type = VIDEOSIZE_VGA;
	int width = req_width;
	int height = req_height;
	unsigned int device = cam->params.pnp_id.device_type;

	image_size = cpia2_match_video_size(width, height);

	/* Classify the request into one of the four pipeline modes */
	if (width > STV_IMAGE_CIF_COLS || height > STV_IMAGE_CIF_ROWS)
		image_type = VIDEOSIZE_VGA;
	else if (width > STV_IMAGE_QVGA_COLS || height > STV_IMAGE_QVGA_ROWS)
		image_type = VIDEOSIZE_CIF;
	else if (width > STV_IMAGE_QCIF_COLS || height > STV_IMAGE_QCIF_ROWS)
		image_type = VIDEOSIZE_QVGA;
	else
		image_type = VIDEOSIZE_QCIF;

	if (image_size >= 0) {
		set_vw_size(cam, image_size);
		width = cam->params.roi.width;
		height = cam->params.roi.height;
	} else {
		ERR("ConfigSensor500 failed\n");
		return -EINVAL;
	}

	DBG("image_size = %d, width = %d, height = %d, type = %d\n",
	    image_size, width, height, image_type);

	cmd.req_mode = CAMERAACCESS_TYPE_RANDOM | CAMERAACCESS_VC;
	cmd.direction = TRANSFER_WRITE;
	i = 0;

	/* VC 
Format */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_FORMAT;
	cmd.buffer.registers[i].value = (u8) CPIA2_VC_VC_FORMAT_UFIRST;
	if (image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i].value |=
		    (u8) CPIA2_VC_VC_FORMAT_DECIMATING;
	i++;

	/* VC Clocks - divider/scaling differs per chip (672 vs 676) */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_CLOCKS;
	if (device == DEVICE_STV_672) {
		if (image_type == VIDEOSIZE_VGA)
			cmd.buffer.registers[i].value =
				(u8)CPIA2_VC_VC_CLOCKS_LOGDIV1;
		else
			cmd.buffer.registers[i].value =
				(u8)(CPIA2_VC_VC_672_CLOCKS_SCALING |
				     CPIA2_VC_VC_CLOCKS_LOGDIV3);
	} else {
		if (image_type == VIDEOSIZE_VGA)
			cmd.buffer.registers[i].value =
				(u8)CPIA2_VC_VC_CLOCKS_LOGDIV0;
		else
			cmd.buffer.registers[i].value =
				(u8)(CPIA2_VC_VC_676_CLOCKS_SCALING |
				     CPIA2_VC_VC_CLOCKS_LOGDIV2);
	}
	i++;
	DBG("VC_CLOCKS = 0x%X\n", cmd.buffer.registers[i-1].value);

	/* Input width from VP (units of 4 pixels) */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_IHSIZE_LO;
	if (image_type == VIDEOSIZE_VGA)
		cmd.buffer.registers[i].value =
		    (u8) (STV_IMAGE_VGA_COLS / 4);
	else
		cmd.buffer.registers[i].value =
		    (u8) (STV_IMAGE_QVGA_COLS / 4);
	i++;
	DBG("Input width = %d\n", cmd.buffer.registers[i-1].value);

	/* Timings */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_XLIM_HI;
	if (image_type == VIDEOSIZE_VGA)
		cmd.buffer.registers[i++].value = (u8) 2;
	else
		cmd.buffer.registers[i++].value = (u8) 1;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_XLIM_LO;
	if (image_type == VIDEOSIZE_VGA)
		cmd.buffer.registers[i++].value = (u8) 250;
	else if (image_type == VIDEOSIZE_QVGA)
		cmd.buffer.registers[i++].value = (u8) 125;
	else
		cmd.buffer.registers[i++].value = (u8) 160;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_YLIM_HI;
	if (image_type == VIDEOSIZE_VGA)
		cmd.buffer.registers[i++].value = (u8) 2;
	else
		cmd.buffer.registers[i++].value = (u8) 1;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_YLIM_LO;
	if (image_type == VIDEOSIZE_VGA)
		cmd.buffer.registers[i++].value = (u8) 12;
	else if (image_type == VIDEOSIZE_QVGA)
		cmd.buffer.registers[i++].value = (u8) 64;
	else
		cmd.buffer.registers[i++].value = (u8) 6;

	/* Output Image Size (units of 4 pixels).  QCIF scales down from
	 * a CIF-sized output, hence the CIF constants there. */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_OHSIZE;
	if (image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value = STV_IMAGE_CIF_COLS / 4;
	else
		cmd.buffer.registers[i++].value = width / 4;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_OVSIZE;
	if (image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value = STV_IMAGE_CIF_ROWS / 4;
	else
		cmd.buffer.registers[i++].value = height / 4;

	/* Cropping - center the ROI inside the selected frame */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_HCROP;
	if (image_type == VIDEOSIZE_VGA)
		cmd.buffer.registers[i++].value =
		    (u8) (((STV_IMAGE_VGA_COLS / 4) - (width / 4)) / 2);
	else if (image_type == VIDEOSIZE_QVGA)
		cmd.buffer.registers[i++].value =
		    (u8) (((STV_IMAGE_QVGA_COLS / 4) - (width / 4)) / 2);
	else if (image_type == VIDEOSIZE_CIF)
		cmd.buffer.registers[i++].value =
		    (u8) (((STV_IMAGE_CIF_COLS / 4) - (width / 4)) / 2);
	else /*if (image_type == VIDEOSIZE_QCIF)*/
		cmd.buffer.registers[i++].value =
		    (u8) (((STV_IMAGE_QCIF_COLS / 4) - (width / 4)) / 2);

	cmd.buffer.registers[i].index = CPIA2_VC_VC_VCROP;
	if (image_type == VIDEOSIZE_VGA)
		cmd.buffer.registers[i++].value =
		    (u8) (((STV_IMAGE_VGA_ROWS / 4) - (height / 4)) / 2);
	else if (image_type == VIDEOSIZE_QVGA)
		cmd.buffer.registers[i++].value =
		    (u8) (((STV_IMAGE_QVGA_ROWS / 4) - (height / 4)) / 2);
	else if (image_type == VIDEOSIZE_CIF)
		cmd.buffer.registers[i++].value =
		    (u8) (((STV_IMAGE_CIF_ROWS / 4) - (height / 4)) / 2);
	else /*if (image_type == VIDEOSIZE_QCIF)*/
		cmd.buffer.registers[i++].value =
		    (u8) (((STV_IMAGE_QCIF_ROWS / 4) - (height / 4)) / 2);

	/* Scaling registers (defaults; CIF/QCIF use the scaler) */
	cmd.buffer.registers[i].index = CPIA2_VC_VC_HPHASE;
	if (image_type == VIDEOSIZE_CIF || image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value = (u8) 36;
	else
		cmd.buffer.registers[i++].value = (u8) 0;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_VPHASE;
	if (image_type == VIDEOSIZE_CIF || image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value = (u8) 32;
	else
		cmd.buffer.registers[i++].value = (u8) 0;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_HISPAN;
	if (image_type == VIDEOSIZE_CIF || image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value = (u8) 26;
	else
		cmd.buffer.registers[i++].value = (u8) 31;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_VISPAN;
	if (image_type == VIDEOSIZE_CIF || image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value = (u8) 21;
	else
		cmd.buffer.registers[i++].value = (u8) 31;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_HICROP;
	cmd.buffer.registers[i++].value = (u8) 0;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_VICROP;
	cmd.buffer.registers[i++].value = (u8) 0;

	cmd.buffer.registers[i].index = CPIA2_VC_VC_HFRACT;
	if (image_type == VIDEOSIZE_CIF || image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value = (u8) 0x2B;	/* 2/11 */
	else
		cmd.buffer.registers[i++].value = (u8) 0x81;	/* 8/1 */

	cmd.buffer.registers[i].index = CPIA2_VC_VC_VFRACT;
	if (image_type == VIDEOSIZE_CIF || image_type == VIDEOSIZE_QCIF)
		cmd.buffer.registers[i++].value = (u8) 0x13;	/* 1/3 */
	else
		cmd.buffer.registers[i++].value = (u8) 0x81;	/* 8/1 */

	cmd.reg_count = i;

	cpia2_send_command(cam, &cmd);

	return i;
}

/******************************************************************************
 *
 * setallproperties
 *
 * This sets all user changeable properties to the values in cam->params.
 *****************************************************************************/
static int set_all_properties(struct camera_data *cam)
{
	/**
	 * Don't set target_kb here, it will be set later.
	 * framerate and user_mode were already set (set_default_user_mode).
	 
	 **/

	cpia2_set_color_params(cam);
	cpia2_usb_change_streaming_alternate(cam,
					  cam->params.camera_state.stream_mode);
	cpia2_do_command(cam, CPIA2_CMD_SET_USER_EFFECTS, TRANSFER_WRITE,
			 cam->params.vp_params.user_effects);
	cpia2_set_flicker_mode(cam,
			       cam->params.flicker_control.flicker_mode_req);
	cpia2_do_command(cam, CPIA2_CMD_SET_VC_MP_GPIO_DIRECTION,
			 TRANSFER_WRITE, cam->params.vp_params.gpio_direction);
	cpia2_do_command(cam, CPIA2_CMD_SET_VC_MP_GPIO_DATA, TRANSFER_WRITE,
			 cam->params.vp_params.gpio_data);

	wake_system(cam);

	set_lowlight_boost(cam);

	return 0;
}

/******************************************************************************
 *
 * cpia2_save_camera_state
 *
 * Reads the user-changeable settings back from the camera into cam->params
 * so they can be restored later.
 *****************************************************************************/
void cpia2_save_camera_state(struct camera_data *cam)
{
	get_color_params(cam);
	cpia2_do_command(cam, CPIA2_CMD_GET_USER_EFFECTS, TRANSFER_READ, 0);
	cpia2_do_command(cam, CPIA2_CMD_GET_VC_MP_GPIO_DIRECTION, TRANSFER_READ,
			 0);
	cpia2_do_command(cam, CPIA2_CMD_GET_VC_MP_GPIO_DATA, TRANSFER_READ, 0);
	/* Don't get framerate or target_kb. Trust the values we already have */
}

/******************************************************************************
 *
 * get_color_params
 *
 *****************************************************************************/
static void get_color_params(struct camera_data *cam)
{
	cpia2_do_command(cam, CPIA2_CMD_GET_VP_BRIGHTNESS, TRANSFER_READ, 0);
	cpia2_do_command(cam, CPIA2_CMD_GET_VP_SATURATION, TRANSFER_READ, 0);
	cpia2_do_command(cam, CPIA2_CMD_GET_CONTRAST, TRANSFER_READ, 0);
}

/******************************************************************************
 *
 * cpia2_set_color_params
 *
 *****************************************************************************/
void cpia2_set_color_params(struct camera_data *cam)
{
	DBG("Setting color params\n");
	cpia2_set_brightness(cam, cam->params.color_params.brightness);
	cpia2_set_contrast(cam, cam->params.color_params.contrast);
	cpia2_set_saturation(cam, cam->params.color_params.saturation);
}

/******************************************************************************
 *
 * cpia2_set_flicker_mode
 *
 * mode is NEVER_FLICKER, FLICKER_50 or FLICKER_60.  Only supported on the
 * STV672 (returns -EINVAL otherwise).  Returns 0 or a command error.
 *****************************************************************************/
int cpia2_set_flicker_mode(struct camera_data *cam, int mode)
{
	unsigned char cam_reg;
	int err = 0;

	if(cam->params.pnp_id.device_type != DEVICE_STV_672)
		return -EINVAL;

	/* Set the appropriate bits in FLICKER_MODES, preserving the rest */
	if((err = cpia2_do_command(cam, CPIA2_CMD_GET_FLICKER_MODES,
				   TRANSFER_READ, 0)))
		return err;
	cam_reg = cam->params.flicker_control.cam_register;

	switch(mode) {
	case NEVER_FLICKER:
		cam_reg |= CPIA2_VP_FLICKER_MODES_NEVER_FLICKER;
		cam_reg &= ~CPIA2_VP_FLICKER_MODES_50HZ;
		break;
	case FLICKER_60:
		cam_reg &= ~CPIA2_VP_FLICKER_MODES_NEVER_FLICKER;
		cam_reg &= ~CPIA2_VP_FLICKER_MODES_50HZ;
		break;
	case FLICKER_50:
		cam_reg &= ~CPIA2_VP_FLICKER_MODES_NEVER_FLICKER;
		cam_reg |= CPIA2_VP_FLICKER_MODES_50HZ;
		break;
	default:
		return -EINVAL;
	}

	if((err = cpia2_do_command(cam, CPIA2_CMD_SET_FLICKER_MODES,
				   TRANSFER_WRITE, cam_reg)))
		return err;

	/* Set the appropriate bits in EXP_MODES, preserving the rest */
	if((err = cpia2_do_command(cam, CPIA2_CMD_GET_VP_EXP_MODES,
				   TRANSFER_READ, 0)))
		return err;
	cam_reg = cam->params.vp_params.exposure_modes;
	if (mode == NEVER_FLICKER) {
		cam_reg |= CPIA2_VP_EXPOSURE_MODES_INHIBIT_FLICKER;
	} else {
		cam_reg &= ~CPIA2_VP_EXPOSURE_MODES_INHIBIT_FLICKER;
	}
	if((err = cpia2_do_command(cam, CPIA2_CMD_SET_VP_EXP_MODES,
				   TRANSFER_WRITE, cam_reg)))
		return err;

	/* Re-run the VP4 patch so the new settings take effect */
	if((err = cpia2_do_command(cam, CPIA2_CMD_REHASH_VP4,
				   TRANSFER_WRITE, 1)))
		return err;

	/* Record the requested mode and, where applicable, mains frequency */
	switch(mode) {
	case NEVER_FLICKER:
		cam->params.flicker_control.flicker_mode_req = mode;
		break;
	case FLICKER_60:
		cam->params.flicker_control.flicker_mode_req = mode;
		cam->params.flicker_control.mains_frequency = 60;
		break;
	case FLICKER_50:
		cam->params.flicker_control.flicker_mode_req = mode;
		cam->params.flicker_control.mains_frequency = 50;
		break;
	default:
		err = -EINVAL;
	}

	return err;
}

/******************************************************************************
 *
 * cpia2_set_property_flip
 *
 * Sets or clears the vertical-flip bit in USER_EFFECTS (read-modify-write).
 *****************************************************************************/
void cpia2_set_property_flip(struct camera_data *cam, int prop_val)
{
	unsigned char cam_reg;

	cpia2_do_command(cam, CPIA2_CMD_GET_USER_EFFECTS, TRANSFER_READ, 0);
	cam_reg = cam->params.vp_params.user_effects;

	if (prop_val)
	{
		cam_reg |= CPIA2_VP_USER_EFFECTS_FLIP;
	}
	else
	{
		cam_reg &= ~CPIA2_VP_USER_EFFECTS_FLIP;
	}
	cpia2_do_command(cam, CPIA2_CMD_SET_USER_EFFECTS, TRANSFER_WRITE,
			 cam_reg);
}

/******************************************************************************
 *
 * cpia2_set_property_mirror
 *
 * Sets or clears the horizontal-mirror bit in USER_EFFECTS
 * (read-modify-write).
 *****************************************************************************/
void cpia2_set_property_mirror(struct camera_data *cam, int prop_val)
{
	unsigned char cam_reg;

	cpia2_do_command(cam, CPIA2_CMD_GET_USER_EFFECTS, TRANSFER_READ, 0);
	cam_reg = cam->params.vp_params.user_effects;

	if (prop_val)
	{
		cam_reg |= CPIA2_VP_USER_EFFECTS_MIRROR;
	}
	else
	{
		cam_reg &= ~CPIA2_VP_USER_EFFECTS_MIRROR;
	}
	cpia2_do_command(cam, CPIA2_CMD_SET_USER_EFFECTS, TRANSFER_WRITE,
			 cam_reg);
}

/******************************************************************************
 *
 * set_target_kb
 *
 * The new Target KB is set in cam->params.vc_params.target_kb and
 * activates on reset.  Streaming is paused around the reset.
 *****************************************************************************/
int cpia2_set_target_kb(struct camera_data *cam, unsigned char value)
{
	DBG("Requested target_kb = %d\n", value);
	if (value != cam->params.vc_params.target_kb) {

		cpia2_usb_stream_pause(cam);

		/* reset camera for new target_kb */
		cam->params.vc_params.target_kb = value;
		cpia2_reset_camera(cam);

		cpia2_usb_stream_resume(cam);
	}

	return 0;
}

/******************************************************************************
 *
 * cpia2_set_gpio
 *
 * Returns 0 or the negative error from cpia2_do_command().
 *****************************************************************************/
int cpia2_set_gpio(struct camera_data *cam, unsigned char setting)
{
	int ret;

	/* Set the microport direction (register 0x90, should be defined
	 * already) to 1 (user output), and set the microport data (0x91) to
	 * the value in the ioctl argument.
	 */
	ret = cpia2_do_command(cam,
			       CPIA2_CMD_SET_VC_MP_GPIO_DIRECTION,
			       CPIA2_VC_MP_DIR_OUTPUT,
			       255);
	if (ret < 0)
		return ret;
	cam->params.vp_params.gpio_direction = 255;

	ret = cpia2_do_command(cam,
			       CPIA2_CMD_SET_VC_MP_GPIO_DATA,
			       CPIA2_VC_MP_DIR_OUTPUT,
			       setting);
	if (ret < 0)
		return ret;
	cam->params.vp_params.gpio_data = setting;

	return 0;
}

/******************************************************************************
 *
 * cpia2_set_fps
 *
 * Validates the requested CPIA2_VP_FRAMERATE_* against the hardware combo
 * and programs it.  Returns 0, -EINVAL, or a command error.
 *****************************************************************************/
int cpia2_set_fps(struct camera_data *cam, int framerate)
{
	int retval;

	switch(framerate) {
	case CPIA2_VP_FRAMERATE_30:
	case CPIA2_VP_FRAMERATE_25:
		/* STV672 + VGA sensor cannot run this fast */
		if(cam->params.pnp_id.device_type == DEVICE_STV_672 &&
		   cam->params.version.sensor_flags ==
		   CPIA2_VP_SENSOR_FLAGS_500) {
			return -EINVAL;
		}
		/* Fall through */
	case CPIA2_VP_FRAMERATE_15:
	case CPIA2_VP_FRAMERATE_12_5:
	case CPIA2_VP_FRAMERATE_7_5:
	case CPIA2_VP_FRAMERATE_6_25:
		break;
	default:
		return -EINVAL;
	}

	if (cam->params.pnp_id.device_type == DEVICE_STV_672 &&
	    framerate == CPIA2_VP_FRAMERATE_15)
		framerate = 0;	/* Work around bug in VP4 */

	retval = cpia2_do_command(cam,
				 CPIA2_CMD_FRAMERATE_REQ,
				 TRANSFER_WRITE,
				 framerate);

	if(retval == 0)
		cam->params.vp_params.frame_rate = framerate;

	return retval;
}

/******************************************************************************
 *
 * cpia2_set_brightness
 *
 *****************************************************************************/
void cpia2_set_brightness(struct camera_data *cam, unsigned char value)
{
	/***
	 * Don't let the register be set to zero - bug in VP4 - flash of full
	 * brightness
	 ***/
	if (cam->params.pnp_id.device_type == DEVICE_STV_672 && value == 0)
		value++;
	DBG("Setting brightness to %d (0x%0x)\n", value, value);
	cpia2_do_command(cam,CPIA2_CMD_SET_VP_BRIGHTNESS, TRANSFER_WRITE,value);
}

/******************************************************************************
 *
 * cpia2_set_contrast
 *
*****************************************************************************/ void cpia2_set_contrast(struct camera_data *cam, unsigned char value) { DBG("Setting contrast to %d (0x%0x)\n", value, value); cam->params.color_params.contrast = value; cpia2_do_command(cam, CPIA2_CMD_SET_CONTRAST, TRANSFER_WRITE, value); } /****************************************************************************** * * cpia2_set_saturation * *****************************************************************************/ void cpia2_set_saturation(struct camera_data *cam, unsigned char value) { DBG("Setting saturation to %d (0x%0x)\n", value, value); cam->params.color_params.saturation = value; cpia2_do_command(cam,CPIA2_CMD_SET_VP_SATURATION, TRANSFER_WRITE,value); } /****************************************************************************** * * wake_system * *****************************************************************************/ static void wake_system(struct camera_data *cam) { cpia2_do_command(cam, CPIA2_CMD_SET_WAKEUP, TRANSFER_WRITE, 0); } /****************************************************************************** * * set_lowlight_boost * * Valid for STV500 sensor only *****************************************************************************/ static void set_lowlight_boost(struct camera_data *cam) { struct cpia2_command cmd; if (cam->params.pnp_id.device_type != DEVICE_STV_672 || cam->params.version.sensor_flags != CPIA2_VP_SENSOR_FLAGS_500) return; cmd.direction = TRANSFER_WRITE; cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 3; cmd.start = CPIA2_VP_RAM_ADDR_H; cmd.buffer.block_data[0] = 0; /* High byte of address to write to */ cmd.buffer.block_data[1] = 0x59; /* Low byte of address to write to */ cmd.buffer.block_data[2] = 0; /* High byte of data to write */ cpia2_send_command(cam, &cmd); if (cam->params.vp_params.lowlight_boost) { cmd.buffer.block_data[0] = 0x02; /* Low byte data to write */ } else { cmd.buffer.block_data[0] = 
0x06; } cmd.start = CPIA2_VP_RAM_DATA; cmd.reg_count = 1; cpia2_send_command(cam, &cmd); /* Rehash the VP4 values */ cpia2_do_command(cam, CPIA2_CMD_REHASH_VP4, TRANSFER_WRITE, 1); } /****************************************************************************** * * cpia2_set_format * * Assumes that new size is already set in param struct. *****************************************************************************/ void cpia2_set_format(struct camera_data *cam) { cam->flush = true; cpia2_usb_stream_pause(cam); /* reset camera to new size */ cpia2_set_low_power(cam); cpia2_reset_camera(cam); cam->flush = false; cpia2_dbg_dump_registers(cam); cpia2_usb_stream_resume(cam); } /****************************************************************************** * * cpia2_dbg_dump_registers * *****************************************************************************/ void cpia2_dbg_dump_registers(struct camera_data *cam) { #ifdef _CPIA2_DEBUG_ struct cpia2_command cmd; if (!(debugs_on & DEBUG_DUMP_REGS)) return; cmd.direction = TRANSFER_READ; /* Start with bank 0 (SYSTEM) */ cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM; cmd.reg_count = 3; cmd.start = 0; cpia2_send_command(cam, &cmd); printk(KERN_DEBUG "System Device Hi = 0x%X\n", cmd.buffer.block_data[0]); printk(KERN_DEBUG "System Device Lo = 0x%X\n", cmd.buffer.block_data[1]); printk(KERN_DEBUG "System_system control = 0x%X\n", cmd.buffer.block_data[2]); /* Bank 1 (VC) */ cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC; cmd.reg_count = 4; cmd.start = 0x80; cpia2_send_command(cam, &cmd); printk(KERN_DEBUG "ASIC_ID = 0x%X\n", cmd.buffer.block_data[0]); printk(KERN_DEBUG "ASIC_REV = 0x%X\n", cmd.buffer.block_data[1]); printk(KERN_DEBUG "PW_CONTRL = 0x%X\n", cmd.buffer.block_data[2]); printk(KERN_DEBUG "WAKEUP = 0x%X\n", cmd.buffer.block_data[3]); cmd.start = 0xA0; /* ST_CTRL */ cmd.reg_count = 1; cpia2_send_command(cam, &cmd); printk(KERN_DEBUG "Stream ctrl = 0x%X\n", cmd.buffer.block_data[0]); 
cmd.start = 0xA4; /* Stream status */ cpia2_send_command(cam, &cmd); printk(KERN_DEBUG "Stream status = 0x%X\n", cmd.buffer.block_data[0]); cmd.start = 0xA8; /* USB status */ cmd.reg_count = 3; cpia2_send_command(cam, &cmd); printk(KERN_DEBUG "USB_CTRL = 0x%X\n", cmd.buffer.block_data[0]); printk(KERN_DEBUG "USB_STRM = 0x%X\n", cmd.buffer.block_data[1]); printk(KERN_DEBUG "USB_STATUS = 0x%X\n", cmd.buffer.block_data[2]); cmd.start = 0xAF; /* USB settings */ cmd.reg_count = 1; cpia2_send_command(cam, &cmd); printk(KERN_DEBUG "USB settings = 0x%X\n", cmd.buffer.block_data[0]); cmd.start = 0xC0; /* VC stuff */ cmd.reg_count = 26; cpia2_send_command(cam, &cmd); printk(KERN_DEBUG "VC Control = 0x%0X\n", cmd.buffer.block_data[0]); printk(KERN_DEBUG "VC Format = 0x%0X\n", cmd.buffer.block_data[3]); printk(KERN_DEBUG "VC Clocks = 0x%0X\n", cmd.buffer.block_data[4]); printk(KERN_DEBUG "VC IHSize = 0x%0X\n", cmd.buffer.block_data[5]); printk(KERN_DEBUG "VC Xlim Hi = 0x%0X\n", cmd.buffer.block_data[6]); printk(KERN_DEBUG "VC XLim Lo = 0x%0X\n", cmd.buffer.block_data[7]); printk(KERN_DEBUG "VC YLim Hi = 0x%0X\n", cmd.buffer.block_data[8]); printk(KERN_DEBUG "VC YLim Lo = 0x%0X\n", cmd.buffer.block_data[9]); printk(KERN_DEBUG "VC OHSize = 0x%0X\n", cmd.buffer.block_data[10]); printk(KERN_DEBUG "VC OVSize = 0x%0X\n", cmd.buffer.block_data[11]); printk(KERN_DEBUG "VC HCrop = 0x%0X\n", cmd.buffer.block_data[12]); printk(KERN_DEBUG "VC VCrop = 0x%0X\n", cmd.buffer.block_data[13]); printk(KERN_DEBUG "VC HPhase = 0x%0X\n", cmd.buffer.block_data[14]); printk(KERN_DEBUG "VC VPhase = 0x%0X\n", cmd.buffer.block_data[15]); printk(KERN_DEBUG "VC HIspan = 0x%0X\n", cmd.buffer.block_data[16]); printk(KERN_DEBUG "VC VIspan = 0x%0X\n", cmd.buffer.block_data[17]); printk(KERN_DEBUG "VC HiCrop = 0x%0X\n", cmd.buffer.block_data[18]); printk(KERN_DEBUG "VC ViCrop = 0x%0X\n", cmd.buffer.block_data[19]); printk(KERN_DEBUG "VC HiFract = 0x%0X\n", cmd.buffer.block_data[20]); printk(KERN_DEBUG "VC 
ViFract = 0x%0X\n", cmd.buffer.block_data[21]); printk(KERN_DEBUG "VC JPeg Opt = 0x%0X\n", cmd.buffer.block_data[22]); printk(KERN_DEBUG "VC Creep Per = 0x%0X\n", cmd.buffer.block_data[23]); printk(KERN_DEBUG "VC User Sq. = 0x%0X\n", cmd.buffer.block_data[24]); printk(KERN_DEBUG "VC Target KB = 0x%0X\n", cmd.buffer.block_data[25]); /*** VP ***/ cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 14; cmd.start = 0; cpia2_send_command(cam, &cmd); printk(KERN_DEBUG "VP Dev Hi = 0x%0X\n", cmd.buffer.block_data[0]); printk(KERN_DEBUG "VP Dev Lo = 0x%0X\n", cmd.buffer.block_data[1]); printk(KERN_DEBUG "VP Sys State = 0x%0X\n", cmd.buffer.block_data[2]); printk(KERN_DEBUG "VP Sys Ctrl = 0x%0X\n", cmd.buffer.block_data[3]); printk(KERN_DEBUG "VP Sensor flg = 0x%0X\n", cmd.buffer.block_data[5]); printk(KERN_DEBUG "VP Sensor Rev = 0x%0X\n", cmd.buffer.block_data[6]); printk(KERN_DEBUG "VP Dev Config = 0x%0X\n", cmd.buffer.block_data[7]); printk(KERN_DEBUG "VP GPIO_DIR = 0x%0X\n", cmd.buffer.block_data[8]); printk(KERN_DEBUG "VP GPIO_DATA = 0x%0X\n", cmd.buffer.block_data[9]); printk(KERN_DEBUG "VP Ram ADDR H = 0x%0X\n", cmd.buffer.block_data[10]); printk(KERN_DEBUG "VP Ram ADDR L = 0x%0X\n", cmd.buffer.block_data[11]); printk(KERN_DEBUG "VP RAM Data = 0x%0X\n", cmd.buffer.block_data[12]); printk(KERN_DEBUG "Do Call = 0x%0X\n", cmd.buffer.block_data[13]); if (cam->params.pnp_id.device_type == DEVICE_STV_672) { cmd.reg_count = 9; cmd.start = 0x0E; cpia2_send_command(cam, &cmd); printk(KERN_DEBUG "VP Clock Ctrl = 0x%0X\n", cmd.buffer.block_data[0]); printk(KERN_DEBUG "VP Patch Rev = 0x%0X\n", cmd.buffer.block_data[1]); printk(KERN_DEBUG "VP Vid Mode = 0x%0X\n", cmd.buffer.block_data[2]); printk(KERN_DEBUG "VP Framerate = 0x%0X\n", cmd.buffer.block_data[3]); printk(KERN_DEBUG "VP UserEffect = 0x%0X\n", cmd.buffer.block_data[4]); printk(KERN_DEBUG "VP White Bal = 0x%0X\n", cmd.buffer.block_data[5]); printk(KERN_DEBUG "VP WB thresh = 0x%0X\n", 
cmd.buffer.block_data[6]); printk(KERN_DEBUG "VP Exp Modes = 0x%0X\n", cmd.buffer.block_data[7]); printk(KERN_DEBUG "VP Exp Target = 0x%0X\n", cmd.buffer.block_data[8]); cmd.reg_count = 1; cmd.start = 0x1B; cpia2_send_command(cam, &cmd); printk(KERN_DEBUG "VP FlickerMds = 0x%0X\n", cmd.buffer.block_data[0]); } else { cmd.reg_count = 8 ; cmd.start = 0x0E; cpia2_send_command(cam, &cmd); printk(KERN_DEBUG "VP Clock Ctrl = 0x%0X\n", cmd.buffer.block_data[0]); printk(KERN_DEBUG "VP Patch Rev = 0x%0X\n", cmd.buffer.block_data[1]); printk(KERN_DEBUG "VP Vid Mode = 0x%0X\n", cmd.buffer.block_data[5]); printk(KERN_DEBUG "VP Framerate = 0x%0X\n", cmd.buffer.block_data[6]); printk(KERN_DEBUG "VP UserEffect = 0x%0X\n", cmd.buffer.block_data[7]); cmd.reg_count = 1; cmd.start = CPIA2_VP5_EXPOSURE_TARGET; cpia2_send_command(cam, &cmd); printk(KERN_DEBUG "VP5 Exp Target= 0x%0X\n", cmd.buffer.block_data[0]); cmd.reg_count = 4; cmd.start = 0x3A; cpia2_send_command(cam, &cmd); printk(KERN_DEBUG "VP5 MY Black = 0x%0X\n", cmd.buffer.block_data[0]); printk(KERN_DEBUG "VP5 MCY Range = 0x%0X\n", cmd.buffer.block_data[1]); printk(KERN_DEBUG "VP5 MYCEILING = 0x%0X\n", cmd.buffer.block_data[2]); printk(KERN_DEBUG "VP5 MCUV Sat = 0x%0X\n", cmd.buffer.block_data[3]); } #endif } /****************************************************************************** * * reset_camera_struct * * Sets all values to the defaults *****************************************************************************/ static void reset_camera_struct(struct camera_data *cam) { /*** * The following parameter values are the defaults from the register map. 
***/ cam->params.color_params.brightness = DEFAULT_BRIGHTNESS; cam->params.color_params.contrast = DEFAULT_CONTRAST; cam->params.color_params.saturation = DEFAULT_SATURATION; cam->params.vp_params.lowlight_boost = 0; /* FlickerModes */ cam->params.flicker_control.flicker_mode_req = NEVER_FLICKER; cam->params.flicker_control.mains_frequency = 60; /* jpeg params */ cam->params.compression.jpeg_options = CPIA2_VC_VC_JPEG_OPT_DEFAULT; cam->params.compression.creep_period = 2; cam->params.compression.user_squeeze = 20; cam->params.compression.inhibit_htables = false; /* gpio params */ cam->params.vp_params.gpio_direction = 0; /* write, the default safe mode */ cam->params.vp_params.gpio_data = 0; /* Target kb params */ cam->params.vc_params.target_kb = DEFAULT_TARGET_KB; /*** * Set Sensor FPS as fast as possible. ***/ if(cam->params.pnp_id.device_type == DEVICE_STV_672) { if(cam->params.version.sensor_flags == CPIA2_VP_SENSOR_FLAGS_500) cam->params.vp_params.frame_rate = CPIA2_VP_FRAMERATE_15; else cam->params.vp_params.frame_rate = CPIA2_VP_FRAMERATE_30; } else { cam->params.vp_params.frame_rate = CPIA2_VP_FRAMERATE_30; } /*** * Set default video mode as large as possible : * for vga sensor set to vga, for cif sensor set to CIF. ***/ if (cam->params.version.sensor_flags == CPIA2_VP_SENSOR_FLAGS_500) { cam->sensor_type = CPIA2_SENSOR_500; cam->video_size = VIDEOSIZE_VGA; cam->params.roi.width = STV_IMAGE_VGA_COLS; cam->params.roi.height = STV_IMAGE_VGA_ROWS; } else { cam->sensor_type = CPIA2_SENSOR_410; cam->video_size = VIDEOSIZE_CIF; cam->params.roi.width = STV_IMAGE_CIF_COLS; cam->params.roi.height = STV_IMAGE_CIF_ROWS; } /*** * Fill in the v4l structures. video_cap is filled in inside the VIDIOCCAP * Ioctl. Here, just do the window and picture stucts. ***/ cam->vp.palette = (u16) VIDEO_PALETTE_RGB24; /* Is this right? 
*/ cam->vp.brightness = (u16) cam->params.color_params.brightness * 256; cam->vp.colour = (u16) cam->params.color_params.saturation * 256; cam->vp.contrast = (u16) cam->params.color_params.contrast * 256; cam->vw.x = 0; cam->vw.y = 0; cam->vw.width = cam->params.roi.width; cam->vw.height = cam->params.roi.height; cam->vw.flags = 0; cam->vw.clipcount = 0; return; } /****************************************************************************** * * cpia2_init_camera_struct * * Initializes camera struct, does not call reset to fill in defaults. *****************************************************************************/ struct camera_data *cpia2_init_camera_struct(void) { struct camera_data *cam; cam = kzalloc(sizeof(*cam), GFP_KERNEL); if (!cam) { ERR("couldn't kmalloc cpia2 struct\n"); return NULL; } cam->present = 1; mutex_init(&cam->busy_lock); init_waitqueue_head(&cam->wq_stream); return cam; } /****************************************************************************** * * cpia2_init_camera * * Initializes camera. *****************************************************************************/ int cpia2_init_camera(struct camera_data *cam) { DBG("Start\n"); cam->mmapped = false; /* Get sensor and asic types before reset. */ cpia2_set_high_power(cam); cpia2_get_version_info(cam); if (cam->params.version.asic_id != CPIA2_ASIC_672) { ERR("Device IO error (asicID has incorrect value of 0x%X\n", cam->params.version.asic_id); return -ENODEV; } /* Set GPIO direction and data to a safe state. 
*/ cpia2_do_command(cam, CPIA2_CMD_SET_VC_MP_GPIO_DIRECTION, TRANSFER_WRITE, 0); cpia2_do_command(cam, CPIA2_CMD_SET_VC_MP_GPIO_DATA, TRANSFER_WRITE, 0); /* resetting struct requires version info for sensor and asic types */ reset_camera_struct(cam); cpia2_set_low_power(cam); DBG("End\n"); return 0; } /****************************************************************************** * * cpia2_allocate_buffers * *****************************************************************************/ int cpia2_allocate_buffers(struct camera_data *cam) { int i; if(!cam->buffers) { u32 size = cam->num_frames*sizeof(struct framebuf); cam->buffers = kmalloc(size, GFP_KERNEL); if(!cam->buffers) { ERR("couldn't kmalloc frame buffer structures\n"); return -ENOMEM; } } if(!cam->frame_buffer) { cam->frame_buffer = rvmalloc(cam->frame_size*cam->num_frames); if (!cam->frame_buffer) { ERR("couldn't vmalloc frame buffer data area\n"); kfree(cam->buffers); cam->buffers = NULL; return -ENOMEM; } } for(i=0; i<cam->num_frames-1; ++i) { cam->buffers[i].next = &cam->buffers[i+1]; cam->buffers[i].data = cam->frame_buffer +i*cam->frame_size; cam->buffers[i].status = FRAME_EMPTY; cam->buffers[i].length = 0; cam->buffers[i].max_length = 0; cam->buffers[i].num = i; } cam->buffers[i].next = cam->buffers; cam->buffers[i].data = cam->frame_buffer +i*cam->frame_size; cam->buffers[i].status = FRAME_EMPTY; cam->buffers[i].length = 0; cam->buffers[i].max_length = 0; cam->buffers[i].num = i; cam->curbuff = cam->buffers; cam->workbuff = cam->curbuff->next; DBG("buffers=%p, curbuff=%p, workbuff=%p\n", cam->buffers, cam->curbuff, cam->workbuff); return 0; } /****************************************************************************** * * cpia2_free_buffers * *****************************************************************************/ void cpia2_free_buffers(struct camera_data *cam) { if(cam->buffers) { kfree(cam->buffers); cam->buffers = NULL; } if(cam->frame_buffer) { rvfree(cam->frame_buffer, 
cam->frame_size*cam->num_frames); cam->frame_buffer = NULL; } } /****************************************************************************** * * cpia2_read * *****************************************************************************/ long cpia2_read(struct camera_data *cam, char __user *buf, unsigned long count, int noblock) { struct framebuf *frame; if (!count) { return 0; } if (!buf) { ERR("%s: buffer NULL\n",__func__); return -EINVAL; } if (!cam) { ERR("%s: Internal error, camera_data NULL!\n",__func__); return -EINVAL; } /* make this _really_ smp and multithread-safe */ if (mutex_lock_interruptible(&cam->busy_lock)) return -ERESTARTSYS; if (!cam->present) { LOG("%s: camera removed\n",__func__); mutex_unlock(&cam->busy_lock); return 0; /* EOF */ } if(!cam->streaming) { /* Start streaming */ cpia2_usb_stream_start(cam, cam->params.camera_state.stream_mode); } /* Copy cam->curbuff in case it changes while we're processing */ frame = cam->curbuff; if (noblock && frame->status != FRAME_READY) { mutex_unlock(&cam->busy_lock); return -EAGAIN; } if(frame->status != FRAME_READY) { mutex_unlock(&cam->busy_lock); wait_event_interruptible(cam->wq_stream, !cam->present || (frame = cam->curbuff)->status == FRAME_READY); if (signal_pending(current)) return -ERESTARTSYS; /* make this _really_ smp and multithread-safe */ if (mutex_lock_interruptible(&cam->busy_lock)) { return -ERESTARTSYS; } if(!cam->present) { mutex_unlock(&cam->busy_lock); return 0; } } /* copy data to user space */ if (frame->length > count) { mutex_unlock(&cam->busy_lock); return -EFAULT; } if (copy_to_user(buf, frame->data, frame->length)) { mutex_unlock(&cam->busy_lock); return -EFAULT; } count = frame->length; frame->status = FRAME_EMPTY; mutex_unlock(&cam->busy_lock); return count; } /****************************************************************************** * * cpia2_poll * *****************************************************************************/ unsigned int cpia2_poll(struct 
camera_data *cam, struct file *filp, poll_table *wait) { unsigned int status=0; if(!cam) { ERR("%s: Internal error, camera_data not found!\n",__func__); return POLLERR; } mutex_lock(&cam->busy_lock); if(!cam->present) { mutex_unlock(&cam->busy_lock); return POLLHUP; } if(!cam->streaming) { /* Start streaming */ cpia2_usb_stream_start(cam, cam->params.camera_state.stream_mode); } mutex_unlock(&cam->busy_lock); poll_wait(filp, &cam->wq_stream, wait); mutex_lock(&cam->busy_lock); if(!cam->present) status = POLLHUP; else if(cam->curbuff->status == FRAME_READY) status = POLLIN | POLLRDNORM; mutex_unlock(&cam->busy_lock); return status; } /****************************************************************************** * * cpia2_remap_buffer * *****************************************************************************/ int cpia2_remap_buffer(struct camera_data *cam, struct vm_area_struct *vma) { const char *adr = (const char *)vma->vm_start; unsigned long size = vma->vm_end-vma->vm_start; unsigned long start_offset = vma->vm_pgoff << PAGE_SHIFT; unsigned long start = (unsigned long) adr; unsigned long page, pos; if (!cam) return -ENODEV; DBG("mmap offset:%ld size:%ld\n", start_offset, size); /* make this _really_ smp-safe */ if (mutex_lock_interruptible(&cam->busy_lock)) return -ERESTARTSYS; if (!cam->present) { mutex_unlock(&cam->busy_lock); return -ENODEV; } if (size > cam->frame_size*cam->num_frames || (start_offset % cam->frame_size) != 0 || (start_offset+size > cam->frame_size*cam->num_frames)) { mutex_unlock(&cam->busy_lock); return -EINVAL; } pos = ((unsigned long) (cam->frame_buffer)) + start_offset; while (size > 0) { page = kvirt_to_pa(pos); if (remap_pfn_range(vma, start, page >> PAGE_SHIFT, PAGE_SIZE, PAGE_SHARED)) { mutex_unlock(&cam->busy_lock); return -EAGAIN; } start += PAGE_SIZE; pos += PAGE_SIZE; if (size > PAGE_SIZE) size -= PAGE_SIZE; else size = 0; } cam->mmapped = true; mutex_unlock(&cam->busy_lock); return 0; }
gpl-2.0
SiddheshK15/android_kernel_cyanogen_msm8916
arch/mips/kernel/cpu-probe.c
1909
25797
/* * Processor capabilities determination functions. * * Copyright (C) xxxx the Anonymous * Copyright (C) 1994 - 2006 Ralf Baechle * Copyright (C) 2003, 2004 Maciej W. Rozycki * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/ptrace.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/export.h> #include <asm/bugs.h> #include <asm/cpu.h> #include <asm/fpu.h> #include <asm/mipsregs.h> #include <asm/watch.h> #include <asm/elf.h> #include <asm/spram.h> #include <asm/uaccess.h> static int __cpuinitdata mips_fpu_disabled; static int __init fpu_disable(char *s) { cpu_data[0].options &= ~MIPS_CPU_FPU; mips_fpu_disabled = 1; return 1; } __setup("nofpu", fpu_disable); int __cpuinitdata mips_dsp_disabled; static int __init dsp_disable(char *s) { cpu_data[0].ases &= ~(MIPS_ASE_DSP | MIPS_ASE_DSP2P); mips_dsp_disabled = 1; return 1; } __setup("nodsp", dsp_disable); static inline void check_errata(void) { struct cpuinfo_mips *c = &current_cpu_data; switch (c->cputype) { case CPU_34K: /* * Erratum "RPS May Cause Incorrect Instruction Execution" * This code only handles VPE0, any SMP/SMTC/RTOS code * making use of VPE1 will be responsable for that VPE. */ if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2) write_c0_config7(read_c0_config7() | MIPS_CONF7_RPS); break; default: break; } } void __init check_bugs32(void) { check_errata(); } /* * Probe whether cpu has config register by trying to play with * alternate cache bit and see whether it matters. * It's used by cpu_probe to distinguish between R3000A and R3081. 
*/ static inline int cpu_has_confreg(void) { #ifdef CONFIG_CPU_R3000 extern unsigned long r3k_cache_size(unsigned long); unsigned long size1, size2; unsigned long cfg = read_c0_conf(); size1 = r3k_cache_size(ST0_ISC); write_c0_conf(cfg ^ R30XX_CONF_AC); size2 = r3k_cache_size(ST0_ISC); write_c0_conf(cfg); return size1 != size2; #else return 0; #endif } static inline void set_elf_platform(int cpu, const char *plat) { if (cpu == 0) __elf_platform = plat; } /* * Get the FPU Implementation/Revision. */ static inline unsigned long cpu_get_fpu_id(void) { unsigned long tmp, fpu_id; tmp = read_c0_status(); __enable_fpu(); fpu_id = read_32bit_cp1_register(CP1_REVISION); write_c0_status(tmp); return fpu_id; } /* * Check the CPU has an FPU the official way. */ static inline int __cpu_has_fpu(void) { return ((cpu_get_fpu_id() & 0xff00) != FPIR_IMP_NONE); } static inline void cpu_probe_vmbits(struct cpuinfo_mips *c) { #ifdef __NEED_VMBITS_PROBE write_c0_entryhi(0x3fffffffffffe000ULL); back_to_back_c0_hazard(); c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL); #endif } static void __cpuinit set_isa(struct cpuinfo_mips *c, unsigned int isa) { switch (isa) { case MIPS_CPU_ISA_M64R2: c->isa_level |= MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2; case MIPS_CPU_ISA_M64R1: c->isa_level |= MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1; case MIPS_CPU_ISA_V: c->isa_level |= MIPS_CPU_ISA_V; case MIPS_CPU_ISA_IV: c->isa_level |= MIPS_CPU_ISA_IV; case MIPS_CPU_ISA_III: c->isa_level |= MIPS_CPU_ISA_I | MIPS_CPU_ISA_II | MIPS_CPU_ISA_III; break; case MIPS_CPU_ISA_M32R2: c->isa_level |= MIPS_CPU_ISA_M32R2; case MIPS_CPU_ISA_M32R1: c->isa_level |= MIPS_CPU_ISA_M32R1; case MIPS_CPU_ISA_II: c->isa_level |= MIPS_CPU_ISA_II; case MIPS_CPU_ISA_I: c->isa_level |= MIPS_CPU_ISA_I; break; } } static char unknown_isa[] __cpuinitdata = KERN_ERR \ "Unsupported ISA type, c0.config0: %d."; static inline unsigned int decode_config0(struct cpuinfo_mips *c) { unsigned int config0; int isa; config0 = 
read_c0_config(); if (((config0 & MIPS_CONF_MT) >> 7) == 1) c->options |= MIPS_CPU_TLB; isa = (config0 & MIPS_CONF_AT) >> 13; switch (isa) { case 0: switch ((config0 & MIPS_CONF_AR) >> 10) { case 0: set_isa(c, MIPS_CPU_ISA_M32R1); break; case 1: set_isa(c, MIPS_CPU_ISA_M32R2); break; default: goto unknown; } break; case 2: switch ((config0 & MIPS_CONF_AR) >> 10) { case 0: set_isa(c, MIPS_CPU_ISA_M64R1); break; case 1: set_isa(c, MIPS_CPU_ISA_M64R2); break; default: goto unknown; } break; default: goto unknown; } return config0 & MIPS_CONF_M; unknown: panic(unknown_isa, config0); } static inline unsigned int decode_config1(struct cpuinfo_mips *c) { unsigned int config1; config1 = read_c0_config1(); if (config1 & MIPS_CONF1_MD) c->ases |= MIPS_ASE_MDMX; if (config1 & MIPS_CONF1_WR) c->options |= MIPS_CPU_WATCH; if (config1 & MIPS_CONF1_CA) c->ases |= MIPS_ASE_MIPS16; if (config1 & MIPS_CONF1_EP) c->options |= MIPS_CPU_EJTAG; if (config1 & MIPS_CONF1_FP) { c->options |= MIPS_CPU_FPU; c->options |= MIPS_CPU_32FPR; } if (cpu_has_tlb) c->tlbsize = ((config1 & MIPS_CONF1_TLBS) >> 25) + 1; return config1 & MIPS_CONF_M; } static inline unsigned int decode_config2(struct cpuinfo_mips *c) { unsigned int config2; config2 = read_c0_config2(); if (config2 & MIPS_CONF2_SL) c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT; return config2 & MIPS_CONF_M; } static inline unsigned int decode_config3(struct cpuinfo_mips *c) { unsigned int config3; config3 = read_c0_config3(); if (config3 & MIPS_CONF3_SM) { c->ases |= MIPS_ASE_SMARTMIPS; c->options |= MIPS_CPU_RIXI; } if (config3 & MIPS_CONF3_RXI) c->options |= MIPS_CPU_RIXI; if (config3 & MIPS_CONF3_DSP) c->ases |= MIPS_ASE_DSP; if (config3 & MIPS_CONF3_DSP2P) c->ases |= MIPS_ASE_DSP2P; if (config3 & MIPS_CONF3_VINT) c->options |= MIPS_CPU_VINT; if (config3 & MIPS_CONF3_VEIC) c->options |= MIPS_CPU_VEIC; if (config3 & MIPS_CONF3_MT) c->ases |= MIPS_ASE_MIPSMT; if (config3 & MIPS_CONF3_ULRI) c->options |= MIPS_CPU_ULRI; if (config3 & 
MIPS_CONF3_ISA) c->options |= MIPS_CPU_MICROMIPS; #ifdef CONFIG_CPU_MICROMIPS write_c0_config3(read_c0_config3() | MIPS_CONF3_ISA_OE); #endif if (config3 & MIPS_CONF3_VZ) c->ases |= MIPS_ASE_VZ; return config3 & MIPS_CONF_M; } static inline unsigned int decode_config4(struct cpuinfo_mips *c) { unsigned int config4; config4 = read_c0_config4(); if ((config4 & MIPS_CONF4_MMUEXTDEF) == MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT && cpu_has_tlb) c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40; c->kscratch_mask = (config4 >> 16) & 0xff; return config4 & MIPS_CONF_M; } static void __cpuinit decode_configs(struct cpuinfo_mips *c) { int ok; /* MIPS32 or MIPS64 compliant CPU. */ c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER | MIPS_CPU_DIVEC | MIPS_CPU_LLSC | MIPS_CPU_MCHECK; c->scache.flags = MIPS_CACHE_NOT_PRESENT; ok = decode_config0(c); /* Read Config registers. */ BUG_ON(!ok); /* Arch spec violation! */ if (ok) ok = decode_config1(c); if (ok) ok = decode_config2(c); if (ok) ok = decode_config3(c); if (ok) ok = decode_config4(c); mips_probe_watch_registers(c); if (cpu_has_mips_r2) c->core = read_c0_ebase() & 0x3ff; } #define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE \ | MIPS_CPU_COUNTER) static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) { switch (c->processor_id & 0xff00) { case PRID_IMP_R2000: c->cputype = CPU_R2000; __cpu_name[cpu] = "R2000"; set_isa(c, MIPS_CPU_ISA_I); c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE | MIPS_CPU_NOFPUEX; if (__cpu_has_fpu()) c->options |= MIPS_CPU_FPU; c->tlbsize = 64; break; case PRID_IMP_R3000: if ((c->processor_id & 0xff) == PRID_REV_R3000A) { if (cpu_has_confreg()) { c->cputype = CPU_R3081E; __cpu_name[cpu] = "R3081"; } else { c->cputype = CPU_R3000A; __cpu_name[cpu] = "R3000A"; } } else { c->cputype = CPU_R3000; __cpu_name[cpu] = "R3000"; } set_isa(c, MIPS_CPU_ISA_I); c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE | MIPS_CPU_NOFPUEX; if (__cpu_has_fpu()) c->options |= 
MIPS_CPU_FPU; c->tlbsize = 64; break; case PRID_IMP_R4000: if (read_c0_config() & CONF_SC) { if ((c->processor_id & 0xff) >= PRID_REV_R4400) { c->cputype = CPU_R4400PC; __cpu_name[cpu] = "R4400PC"; } else { c->cputype = CPU_R4000PC; __cpu_name[cpu] = "R4000PC"; } } else { if ((c->processor_id & 0xff) >= PRID_REV_R4400) { c->cputype = CPU_R4400SC; __cpu_name[cpu] = "R4400SC"; } else { c->cputype = CPU_R4000SC; __cpu_name[cpu] = "R4000SC"; } } set_isa(c, MIPS_CPU_ISA_III); c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | MIPS_CPU_WATCH | MIPS_CPU_VCE | MIPS_CPU_LLSC; c->tlbsize = 48; break; case PRID_IMP_VR41XX: set_isa(c, MIPS_CPU_ISA_III); c->options = R4K_OPTS; c->tlbsize = 32; switch (c->processor_id & 0xf0) { case PRID_REV_VR4111: c->cputype = CPU_VR4111; __cpu_name[cpu] = "NEC VR4111"; break; case PRID_REV_VR4121: c->cputype = CPU_VR4121; __cpu_name[cpu] = "NEC VR4121"; break; case PRID_REV_VR4122: if ((c->processor_id & 0xf) < 0x3) { c->cputype = CPU_VR4122; __cpu_name[cpu] = "NEC VR4122"; } else { c->cputype = CPU_VR4181A; __cpu_name[cpu] = "NEC VR4181A"; } break; case PRID_REV_VR4130: if ((c->processor_id & 0xf) < 0x4) { c->cputype = CPU_VR4131; __cpu_name[cpu] = "NEC VR4131"; } else { c->cputype = CPU_VR4133; c->options |= MIPS_CPU_LLSC; __cpu_name[cpu] = "NEC VR4133"; } break; default: printk(KERN_INFO "Unexpected CPU of NEC VR4100 series\n"); c->cputype = CPU_VR41XX; __cpu_name[cpu] = "NEC Vr41xx"; break; } break; case PRID_IMP_R4300: c->cputype = CPU_R4300; __cpu_name[cpu] = "R4300"; set_isa(c, MIPS_CPU_ISA_III); c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | MIPS_CPU_LLSC; c->tlbsize = 32; break; case PRID_IMP_R4600: c->cputype = CPU_R4600; __cpu_name[cpu] = "R4600"; set_isa(c, MIPS_CPU_ISA_III); c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | MIPS_CPU_LLSC; c->tlbsize = 48; break; #if 0 case PRID_IMP_R4650: /* * This processor doesn't have an MMU, so it's not * "real easy" to run Linux on it. 
It is left purely * for documentation. Commented out because it shares * it's c0_prid id number with the TX3900. */ c->cputype = CPU_R4650; __cpu_name[cpu] = "R4650"; set_isa(c, MIPS_CPU_ISA_III); c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC; c->tlbsize = 48; break; #endif case PRID_IMP_TX39: set_isa(c, MIPS_CPU_ISA_I); c->options = MIPS_CPU_TLB | MIPS_CPU_TX39_CACHE; if ((c->processor_id & 0xf0) == (PRID_REV_TX3927 & 0xf0)) { c->cputype = CPU_TX3927; __cpu_name[cpu] = "TX3927"; c->tlbsize = 64; } else { switch (c->processor_id & 0xff) { case PRID_REV_TX3912: c->cputype = CPU_TX3912; __cpu_name[cpu] = "TX3912"; c->tlbsize = 32; break; case PRID_REV_TX3922: c->cputype = CPU_TX3922; __cpu_name[cpu] = "TX3922"; c->tlbsize = 64; break; } } break; case PRID_IMP_R4700: c->cputype = CPU_R4700; __cpu_name[cpu] = "R4700"; set_isa(c, MIPS_CPU_ISA_III); c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | MIPS_CPU_LLSC; c->tlbsize = 48; break; case PRID_IMP_TX49: c->cputype = CPU_TX49XX; __cpu_name[cpu] = "R49XX"; set_isa(c, MIPS_CPU_ISA_III); c->options = R4K_OPTS | MIPS_CPU_LLSC; if (!(c->processor_id & 0x08)) c->options |= MIPS_CPU_FPU | MIPS_CPU_32FPR; c->tlbsize = 48; break; case PRID_IMP_R5000: c->cputype = CPU_R5000; __cpu_name[cpu] = "R5000"; set_isa(c, MIPS_CPU_ISA_IV); c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | MIPS_CPU_LLSC; c->tlbsize = 48; break; case PRID_IMP_R5432: c->cputype = CPU_R5432; __cpu_name[cpu] = "R5432"; set_isa(c, MIPS_CPU_ISA_IV); c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | MIPS_CPU_WATCH | MIPS_CPU_LLSC; c->tlbsize = 48; break; case PRID_IMP_R5500: c->cputype = CPU_R5500; __cpu_name[cpu] = "R5500"; set_isa(c, MIPS_CPU_ISA_IV); c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | MIPS_CPU_WATCH | MIPS_CPU_LLSC; c->tlbsize = 48; break; case PRID_IMP_NEVADA: c->cputype = CPU_NEVADA; __cpu_name[cpu] = "Nevada"; set_isa(c, MIPS_CPU_ISA_IV); c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | MIPS_CPU_DIVEC | 
MIPS_CPU_LLSC; c->tlbsize = 48; break; case PRID_IMP_R6000: c->cputype = CPU_R6000; __cpu_name[cpu] = "R6000"; set_isa(c, MIPS_CPU_ISA_II); c->options = MIPS_CPU_TLB | MIPS_CPU_FPU | MIPS_CPU_LLSC; c->tlbsize = 32; break; case PRID_IMP_R6000A: c->cputype = CPU_R6000A; __cpu_name[cpu] = "R6000A"; set_isa(c, MIPS_CPU_ISA_II); c->options = MIPS_CPU_TLB | MIPS_CPU_FPU | MIPS_CPU_LLSC; c->tlbsize = 32; break; case PRID_IMP_RM7000: c->cputype = CPU_RM7000; __cpu_name[cpu] = "RM7000"; set_isa(c, MIPS_CPU_ISA_IV); c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | MIPS_CPU_LLSC; /* * Undocumented RM7000: Bit 29 in the info register of * the RM7000 v2.0 indicates if the TLB has 48 or 64 * entries. * * 29 1 => 64 entry JTLB * 0 => 48 entry JTLB */ c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48; break; case PRID_IMP_RM9000: c->cputype = CPU_RM9000; __cpu_name[cpu] = "RM9000"; set_isa(c, MIPS_CPU_ISA_IV); c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | MIPS_CPU_LLSC; /* * Bit 29 in the info register of the RM9000 * indicates if the TLB has 48 or 64 entries. * * 29 1 => 64 entry JTLB * 0 => 48 entry JTLB */ c->tlbsize = (read_c0_info() & (1 << 29)) ? 
64 : 48; break; case PRID_IMP_R8000: c->cputype = CPU_R8000; __cpu_name[cpu] = "RM8000"; set_isa(c, MIPS_CPU_ISA_IV); c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_FPU | MIPS_CPU_32FPR | MIPS_CPU_LLSC; c->tlbsize = 384; /* has weird TLB: 3-way x 128 */ break; case PRID_IMP_R10000: c->cputype = CPU_R10000; __cpu_name[cpu] = "R10000"; set_isa(c, MIPS_CPU_ISA_IV); c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX | MIPS_CPU_FPU | MIPS_CPU_32FPR | MIPS_CPU_COUNTER | MIPS_CPU_WATCH | MIPS_CPU_LLSC; c->tlbsize = 64; break; case PRID_IMP_R12000: c->cputype = CPU_R12000; __cpu_name[cpu] = "R12000"; set_isa(c, MIPS_CPU_ISA_IV); c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX | MIPS_CPU_FPU | MIPS_CPU_32FPR | MIPS_CPU_COUNTER | MIPS_CPU_WATCH | MIPS_CPU_LLSC; c->tlbsize = 64; break; case PRID_IMP_R14000: c->cputype = CPU_R14000; __cpu_name[cpu] = "R14000"; set_isa(c, MIPS_CPU_ISA_IV); c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX | MIPS_CPU_FPU | MIPS_CPU_32FPR | MIPS_CPU_COUNTER | MIPS_CPU_WATCH | MIPS_CPU_LLSC; c->tlbsize = 64; break; case PRID_IMP_LOONGSON2: c->cputype = CPU_LOONGSON2; __cpu_name[cpu] = "ICT Loongson-2"; switch (c->processor_id & PRID_REV_MASK) { case PRID_REV_LOONGSON2E: set_elf_platform(cpu, "loongson2e"); break; case PRID_REV_LOONGSON2F: set_elf_platform(cpu, "loongson2f"); break; } set_isa(c, MIPS_CPU_ISA_III); c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC | MIPS_CPU_32FPR; c->tlbsize = 64; break; case PRID_IMP_LOONGSON1: decode_configs(c); c->cputype = CPU_LOONGSON1; switch (c->processor_id & PRID_REV_MASK) { case PRID_REV_LOONGSON1B: __cpu_name[cpu] = "Loongson 1B"; break; } break; } } static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) { decode_configs(c); switch (c->processor_id & 0xff00) { case PRID_IMP_4KC: c->cputype = CPU_4KC; __cpu_name[cpu] = "MIPS 4Kc"; break; case PRID_IMP_4KEC: case PRID_IMP_4KECR2: c->cputype = CPU_4KEC; __cpu_name[cpu] = "MIPS 4KEc"; break; 
case PRID_IMP_4KSC: case PRID_IMP_4KSD: c->cputype = CPU_4KSC; __cpu_name[cpu] = "MIPS 4KSc"; break; case PRID_IMP_5KC: c->cputype = CPU_5KC; __cpu_name[cpu] = "MIPS 5Kc"; break; case PRID_IMP_5KE: c->cputype = CPU_5KE; __cpu_name[cpu] = "MIPS 5KE"; break; case PRID_IMP_20KC: c->cputype = CPU_20KC; __cpu_name[cpu] = "MIPS 20Kc"; break; case PRID_IMP_24K: c->cputype = CPU_24K; __cpu_name[cpu] = "MIPS 24Kc"; break; case PRID_IMP_24KE: c->cputype = CPU_24K; __cpu_name[cpu] = "MIPS 24KEc"; break; case PRID_IMP_25KF: c->cputype = CPU_25KF; __cpu_name[cpu] = "MIPS 25Kc"; break; case PRID_IMP_34K: c->cputype = CPU_34K; __cpu_name[cpu] = "MIPS 34Kc"; break; case PRID_IMP_74K: c->cputype = CPU_74K; __cpu_name[cpu] = "MIPS 74Kc"; break; case PRID_IMP_M14KC: c->cputype = CPU_M14KC; __cpu_name[cpu] = "MIPS M14Kc"; break; case PRID_IMP_M14KEC: c->cputype = CPU_M14KEC; __cpu_name[cpu] = "MIPS M14KEc"; break; case PRID_IMP_1004K: c->cputype = CPU_1004K; __cpu_name[cpu] = "MIPS 1004Kc"; break; case PRID_IMP_1074K: c->cputype = CPU_74K; __cpu_name[cpu] = "MIPS 1074Kc"; break; } spram_config(); } static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu) { decode_configs(c); switch (c->processor_id & 0xff00) { case PRID_IMP_AU1_REV1: case PRID_IMP_AU1_REV2: c->cputype = CPU_ALCHEMY; switch ((c->processor_id >> 24) & 0xff) { case 0: __cpu_name[cpu] = "Au1000"; break; case 1: __cpu_name[cpu] = "Au1500"; break; case 2: __cpu_name[cpu] = "Au1100"; break; case 3: __cpu_name[cpu] = "Au1550"; break; case 4: __cpu_name[cpu] = "Au1200"; if ((c->processor_id & 0xff) == 2) __cpu_name[cpu] = "Au1250"; break; case 5: __cpu_name[cpu] = "Au1210"; break; default: __cpu_name[cpu] = "Au1xxx"; break; } break; } } static inline void cpu_probe_sibyte(struct cpuinfo_mips *c, unsigned int cpu) { decode_configs(c); switch (c->processor_id & 0xff00) { case PRID_IMP_SB1: c->cputype = CPU_SB1; __cpu_name[cpu] = "SiByte SB1"; /* FPU in pass1 is known to have issues. 
*/ if ((c->processor_id & 0xff) < 0x02) c->options &= ~(MIPS_CPU_FPU | MIPS_CPU_32FPR); break; case PRID_IMP_SB1A: c->cputype = CPU_SB1A; __cpu_name[cpu] = "SiByte SB1A"; break; } } static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c, unsigned int cpu) { decode_configs(c); switch (c->processor_id & 0xff00) { case PRID_IMP_SR71000: c->cputype = CPU_SR71000; __cpu_name[cpu] = "Sandcraft SR71000"; c->scache.ways = 8; c->tlbsize = 64; break; } } static inline void cpu_probe_nxp(struct cpuinfo_mips *c, unsigned int cpu) { decode_configs(c); switch (c->processor_id & 0xff00) { case PRID_IMP_PR4450: c->cputype = CPU_PR4450; __cpu_name[cpu] = "Philips PR4450"; set_isa(c, MIPS_CPU_ISA_M32R1); break; } } static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu) { decode_configs(c); switch (c->processor_id & 0xff00) { case PRID_IMP_BMIPS32_REV4: case PRID_IMP_BMIPS32_REV8: c->cputype = CPU_BMIPS32; __cpu_name[cpu] = "Broadcom BMIPS32"; set_elf_platform(cpu, "bmips32"); break; case PRID_IMP_BMIPS3300: case PRID_IMP_BMIPS3300_ALT: case PRID_IMP_BMIPS3300_BUG: c->cputype = CPU_BMIPS3300; __cpu_name[cpu] = "Broadcom BMIPS3300"; set_elf_platform(cpu, "bmips3300"); break; case PRID_IMP_BMIPS43XX: { int rev = c->processor_id & 0xff; if (rev >= PRID_REV_BMIPS4380_LO && rev <= PRID_REV_BMIPS4380_HI) { c->cputype = CPU_BMIPS4380; __cpu_name[cpu] = "Broadcom BMIPS4380"; set_elf_platform(cpu, "bmips4380"); } else { c->cputype = CPU_BMIPS4350; __cpu_name[cpu] = "Broadcom BMIPS4350"; set_elf_platform(cpu, "bmips4350"); } break; } case PRID_IMP_BMIPS5000: c->cputype = CPU_BMIPS5000; __cpu_name[cpu] = "Broadcom BMIPS5000"; set_elf_platform(cpu, "bmips5000"); c->options |= MIPS_CPU_ULRI; break; } } static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu) { decode_configs(c); switch (c->processor_id & 0xff00) { case PRID_IMP_CAVIUM_CN38XX: case PRID_IMP_CAVIUM_CN31XX: case PRID_IMP_CAVIUM_CN30XX: c->cputype = CPU_CAVIUM_OCTEON; 
__cpu_name[cpu] = "Cavium Octeon"; goto platform; case PRID_IMP_CAVIUM_CN58XX: case PRID_IMP_CAVIUM_CN56XX: case PRID_IMP_CAVIUM_CN50XX: case PRID_IMP_CAVIUM_CN52XX: c->cputype = CPU_CAVIUM_OCTEON_PLUS; __cpu_name[cpu] = "Cavium Octeon+"; platform: set_elf_platform(cpu, "octeon"); break; case PRID_IMP_CAVIUM_CN61XX: case PRID_IMP_CAVIUM_CN63XX: case PRID_IMP_CAVIUM_CN66XX: case PRID_IMP_CAVIUM_CN68XX: c->cputype = CPU_CAVIUM_OCTEON2; __cpu_name[cpu] = "Cavium Octeon II"; set_elf_platform(cpu, "octeon2"); break; default: printk(KERN_INFO "Unknown Octeon chip!\n"); c->cputype = CPU_UNKNOWN; break; } } static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu) { decode_configs(c); /* JZRISC does not implement the CP0 counter. */ c->options &= ~MIPS_CPU_COUNTER; switch (c->processor_id & 0xff00) { case PRID_IMP_JZRISC: c->cputype = CPU_JZRISC; __cpu_name[cpu] = "Ingenic JZRISC"; break; default: panic("Unknown Ingenic Processor ID!"); break; } } static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu) { decode_configs(c); if ((c->processor_id & 0xff00) == PRID_IMP_NETLOGIC_AU13XX) { c->cputype = CPU_ALCHEMY; __cpu_name[cpu] = "Au1300"; /* following stuff is not for Alchemy */ return; } c->options = (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_COUNTER | MIPS_CPU_DIVEC | MIPS_CPU_WATCH | MIPS_CPU_EJTAG | MIPS_CPU_LLSC); switch (c->processor_id & 0xff00) { case PRID_IMP_NETLOGIC_XLP8XX: case PRID_IMP_NETLOGIC_XLP3XX: c->cputype = CPU_XLP; __cpu_name[cpu] = "Netlogic XLP"; break; case PRID_IMP_NETLOGIC_XLR732: case PRID_IMP_NETLOGIC_XLR716: case PRID_IMP_NETLOGIC_XLR532: case PRID_IMP_NETLOGIC_XLR308: case PRID_IMP_NETLOGIC_XLR532C: case PRID_IMP_NETLOGIC_XLR516C: case PRID_IMP_NETLOGIC_XLR508C: case PRID_IMP_NETLOGIC_XLR308C: c->cputype = CPU_XLR; __cpu_name[cpu] = "Netlogic XLR"; break; case PRID_IMP_NETLOGIC_XLS608: case PRID_IMP_NETLOGIC_XLS408: case PRID_IMP_NETLOGIC_XLS404: case PRID_IMP_NETLOGIC_XLS208: case PRID_IMP_NETLOGIC_XLS204: 
case PRID_IMP_NETLOGIC_XLS108: case PRID_IMP_NETLOGIC_XLS104: case PRID_IMP_NETLOGIC_XLS616B: case PRID_IMP_NETLOGIC_XLS608B: case PRID_IMP_NETLOGIC_XLS416B: case PRID_IMP_NETLOGIC_XLS412B: case PRID_IMP_NETLOGIC_XLS408B: case PRID_IMP_NETLOGIC_XLS404B: c->cputype = CPU_XLR; __cpu_name[cpu] = "Netlogic XLS"; break; default: pr_info("Unknown Netlogic chip id [%02x]!\n", c->processor_id); c->cputype = CPU_XLR; break; } if (c->cputype == CPU_XLP) { set_isa(c, MIPS_CPU_ISA_M64R2); c->options |= (MIPS_CPU_FPU | MIPS_CPU_ULRI | MIPS_CPU_MCHECK); /* This will be updated again after all threads are woken up */ c->tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1; } else { set_isa(c, MIPS_CPU_ISA_M64R1); c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1; } } #ifdef CONFIG_64BIT /* For use by uaccess.h */ u64 __ua_limit; EXPORT_SYMBOL(__ua_limit); #endif const char *__cpu_name[NR_CPUS]; const char *__elf_platform; __cpuinit void cpu_probe(void) { struct cpuinfo_mips *c = &current_cpu_data; unsigned int cpu = smp_processor_id(); c->processor_id = PRID_IMP_UNKNOWN; c->fpu_id = FPIR_IMP_NONE; c->cputype = CPU_UNKNOWN; c->processor_id = read_c0_prid(); switch (c->processor_id & 0xff0000) { case PRID_COMP_LEGACY: cpu_probe_legacy(c, cpu); break; case PRID_COMP_MIPS: cpu_probe_mips(c, cpu); break; case PRID_COMP_ALCHEMY: cpu_probe_alchemy(c, cpu); break; case PRID_COMP_SIBYTE: cpu_probe_sibyte(c, cpu); break; case PRID_COMP_BROADCOM: cpu_probe_broadcom(c, cpu); break; case PRID_COMP_SANDCRAFT: cpu_probe_sandcraft(c, cpu); break; case PRID_COMP_NXP: cpu_probe_nxp(c, cpu); break; case PRID_COMP_CAVIUM: cpu_probe_cavium(c, cpu); break; case PRID_COMP_INGENIC: cpu_probe_ingenic(c, cpu); break; case PRID_COMP_NETLOGIC: cpu_probe_netlogic(c, cpu); break; } BUG_ON(!__cpu_name[cpu]); BUG_ON(c->cputype == CPU_UNKNOWN); /* * Platform code can force the cpu type to optimize code * generation. 
In that case be sure the cpu type is correctly * manually setup otherwise it could trigger some nasty bugs. */ BUG_ON(current_cpu_type() != c->cputype); if (mips_fpu_disabled) c->options &= ~MIPS_CPU_FPU; if (mips_dsp_disabled) c->ases &= ~(MIPS_ASE_DSP | MIPS_ASE_DSP2P); if (c->options & MIPS_CPU_FPU) { c->fpu_id = cpu_get_fpu_id(); if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { if (c->fpu_id & MIPS_FPIR_3D) c->ases |= MIPS_ASE_MIPS3D; } } if (cpu_has_mips_r2) { c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1; /* R2 has Performance Counter Interrupt indicator */ c->options |= MIPS_CPU_PCI; } else c->srsets = 1; cpu_probe_vmbits(c); #ifdef CONFIG_64BIT if (cpu == 0) __ua_limit = ~((1ull << cpu_vmbits) - 1); #endif } __cpuinit void cpu_report(void) { struct cpuinfo_mips *c = &current_cpu_data; printk(KERN_INFO "CPU revision is: %08x (%s)\n", c->processor_id, cpu_name_string()); if (c->options & MIPS_CPU_FPU) printk(KERN_INFO "FPU revision is: %08x\n", c->fpu_id); }
gpl-2.0
lujji/JXD-7800b-KK-kernel
drivers/media/video/c-qcam.c
2677
21453
/* * Video4Linux Colour QuickCam driver * Copyright 1997-2000 Philip Blundell <philb@gnu.org> * * Module parameters: * * parport=auto -- probe all parports (default) * parport=0 -- parport0 becomes qcam1 * parport=2,0,1 -- parports 2,0,1 are tried in that order * * probe=0 -- do no probing, assume camera is present * probe=1 -- use IEEE-1284 autoprobe data only (default) * probe=2 -- probe aggressively for cameras * * force_rgb=1 -- force data format to RGB (default is BGR) * * The parport parameter controls which parports will be scanned. * Scanning all parports causes some printers to print a garbage page. * -- March 14, 1999 Billy Donahue <billy@escape.com> * * Fixed data format to BGR, added force_rgb parameter. Added missing * parport_unregister_driver() on module removal. * -- May 28, 2000 Claudio Matsuoka <claudio@conectiva.com> */ #include <linux/module.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/parport.h> #include <linux/sched.h> #include <linux/mutex.h> #include <linux/jiffies.h> #include <linux/version.h> #include <linux/videodev2.h> #include <asm/uaccess.h> #include <media/v4l2-device.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> struct qcam { struct v4l2_device v4l2_dev; struct video_device vdev; struct pardevice *pdev; struct parport *pport; int width, height; int ccd_width, ccd_height; int mode; int contrast, brightness, whitebal; int top, left; unsigned int bidirectional; struct mutex lock; }; /* cameras maximum */ #define MAX_CAMS 4 /* The three possible QuickCam modes */ #define QC_MILLIONS 0x18 #define QC_BILLIONS 0x10 #define QC_THOUSANDS 0x08 /* with VIDEC compression (not supported) */ /* The three possible decimations */ #define QC_DECIMATION_1 0 #define QC_DECIMATION_2 2 #define QC_DECIMATION_4 4 #define BANNER "Colour QuickCam for Video4Linux v0.06" static int parport[MAX_CAMS] = { 
[1 ... MAX_CAMS-1] = -1 }; static int probe = 2; static int force_rgb; static int video_nr = -1; /* FIXME: parport=auto would never have worked, surely? --RR */ MODULE_PARM_DESC(parport, "parport=<auto|n[,n]...> for port detection method\n" "probe=<0|1|2> for camera detection method\n" "force_rgb=<0|1> for RGB data format (default BGR)"); module_param_array(parport, int, NULL, 0); module_param(probe, int, 0); module_param(force_rgb, bool, 0); module_param(video_nr, int, 0); static struct qcam *qcams[MAX_CAMS]; static unsigned int num_cams; static inline void qcam_set_ack(struct qcam *qcam, unsigned int i) { /* note: the QC specs refer to the PCAck pin by voltage, not software level. PC ports have builtin inverters. */ parport_frob_control(qcam->pport, 8, i ? 8 : 0); } static inline unsigned int qcam_ready1(struct qcam *qcam) { return (parport_read_status(qcam->pport) & 0x8) ? 1 : 0; } static inline unsigned int qcam_ready2(struct qcam *qcam) { return (parport_read_data(qcam->pport) & 0x1) ? 1 : 0; } static unsigned int qcam_await_ready1(struct qcam *qcam, int value) { struct v4l2_device *v4l2_dev = &qcam->v4l2_dev; unsigned long oldjiffies = jiffies; unsigned int i; for (oldjiffies = jiffies; time_before(jiffies, oldjiffies + msecs_to_jiffies(40));) if (qcam_ready1(qcam) == value) return 0; /* If the camera didn't respond within 1/25 second, poll slowly for a while. */ for (i = 0; i < 50; i++) { if (qcam_ready1(qcam) == value) return 0; msleep_interruptible(100); } /* Probably somebody pulled the plug out. Not much we can do. 
*/ v4l2_err(v4l2_dev, "ready1 timeout (%d) %x %x\n", value, parport_read_status(qcam->pport), parport_read_control(qcam->pport)); return 1; } static unsigned int qcam_await_ready2(struct qcam *qcam, int value) { struct v4l2_device *v4l2_dev = &qcam->v4l2_dev; unsigned long oldjiffies = jiffies; unsigned int i; for (oldjiffies = jiffies; time_before(jiffies, oldjiffies + msecs_to_jiffies(40));) if (qcam_ready2(qcam) == value) return 0; /* If the camera didn't respond within 1/25 second, poll slowly for a while. */ for (i = 0; i < 50; i++) { if (qcam_ready2(qcam) == value) return 0; msleep_interruptible(100); } /* Probably somebody pulled the plug out. Not much we can do. */ v4l2_err(v4l2_dev, "ready2 timeout (%d) %x %x %x\n", value, parport_read_status(qcam->pport), parport_read_control(qcam->pport), parport_read_data(qcam->pport)); return 1; } static int qcam_read_data(struct qcam *qcam) { unsigned int idata; qcam_set_ack(qcam, 0); if (qcam_await_ready1(qcam, 1)) return -1; idata = parport_read_status(qcam->pport) & 0xf0; qcam_set_ack(qcam, 1); if (qcam_await_ready1(qcam, 0)) return -1; idata |= parport_read_status(qcam->pport) >> 4; return idata; } static int qcam_write_data(struct qcam *qcam, unsigned int data) { struct v4l2_device *v4l2_dev = &qcam->v4l2_dev; unsigned int idata; parport_write_data(qcam->pport, data); idata = qcam_read_data(qcam); if (data != idata) { v4l2_warn(v4l2_dev, "sent %x but received %x\n", data, idata); return 1; } return 0; } static inline int qcam_set(struct qcam *qcam, unsigned int cmd, unsigned int data) { if (qcam_write_data(qcam, cmd)) return -1; if (qcam_write_data(qcam, data)) return -1; return 0; } static inline int qcam_get(struct qcam *qcam, unsigned int cmd) { if (qcam_write_data(qcam, cmd)) return -1; return qcam_read_data(qcam); } static int qc_detect(struct qcam *qcam) { unsigned int stat, ostat, i, count = 0; /* The probe routine below is not very reliable. The IEEE-1284 probe takes precedence. 
*/ /* XXX Currently parport provides no way to distinguish between "the IEEE probe was not done" and "the probe was done, but no device was found". Fix this one day. */ if (qcam->pport->probe_info[0].class == PARPORT_CLASS_MEDIA && qcam->pport->probe_info[0].model && !strcmp(qcam->pdev->port->probe_info[0].model, "Color QuickCam 2.0")) { printk(KERN_DEBUG "QuickCam: Found by IEEE1284 probe.\n"); return 1; } if (probe < 2) return 0; parport_write_control(qcam->pport, 0xc); /* look for a heartbeat */ ostat = stat = parport_read_status(qcam->pport); for (i = 0; i < 250; i++) { mdelay(1); stat = parport_read_status(qcam->pport); if (ostat != stat) { if (++count >= 3) return 1; ostat = stat; } } /* Reset the camera and try again */ parport_write_control(qcam->pport, 0xc); parport_write_control(qcam->pport, 0x8); mdelay(1); parport_write_control(qcam->pport, 0xc); mdelay(1); count = 0; ostat = stat = parport_read_status(qcam->pport); for (i = 0; i < 250; i++) { mdelay(1); stat = parport_read_status(qcam->pport); if (ostat != stat) { if (++count >= 3) return 1; ostat = stat; } } /* no (or flatline) camera, give up */ return 0; } static void qc_reset(struct qcam *qcam) { parport_write_control(qcam->pport, 0xc); parport_write_control(qcam->pport, 0x8); mdelay(1); parport_write_control(qcam->pport, 0xc); mdelay(1); } /* Reset the QuickCam and program for brightness, contrast, * white-balance, and resolution. */ static void qc_setup(struct qcam *qcam) { qc_reset(qcam); /* Set the brightness. */ qcam_set(qcam, 11, qcam->brightness); /* Set the height and width. These refer to the actual CCD area *before* applying the selected decimation. */ qcam_set(qcam, 17, qcam->ccd_height); qcam_set(qcam, 19, qcam->ccd_width / 2); /* Set top and left. */ qcam_set(qcam, 0xd, qcam->top); qcam_set(qcam, 0xf, qcam->left); /* Set contrast and white balance. */ qcam_set(qcam, 0x19, qcam->contrast); qcam_set(qcam, 0x1f, qcam->whitebal); /* Set the speed. 
*/ qcam_set(qcam, 45, 2); } /* Read some bytes from the camera and put them in the buffer. nbytes should be a multiple of 3, because bidirectional mode gives us three bytes at a time. */ static unsigned int qcam_read_bytes(struct qcam *qcam, unsigned char *buf, unsigned int nbytes) { unsigned int bytes = 0; qcam_set_ack(qcam, 0); if (qcam->bidirectional) { /* It's a bidirectional port */ while (bytes < nbytes) { unsigned int lo1, hi1, lo2, hi2; unsigned char r, g, b; if (qcam_await_ready2(qcam, 1)) return bytes; lo1 = parport_read_data(qcam->pport) >> 1; hi1 = ((parport_read_status(qcam->pport) >> 3) & 0x1f) ^ 0x10; qcam_set_ack(qcam, 1); if (qcam_await_ready2(qcam, 0)) return bytes; lo2 = parport_read_data(qcam->pport) >> 1; hi2 = ((parport_read_status(qcam->pport) >> 3) & 0x1f) ^ 0x10; qcam_set_ack(qcam, 0); r = lo1 | ((hi1 & 1) << 7); g = ((hi1 & 0x1e) << 3) | ((hi2 & 0x1e) >> 1); b = lo2 | ((hi2 & 1) << 7); if (force_rgb) { buf[bytes++] = r; buf[bytes++] = g; buf[bytes++] = b; } else { buf[bytes++] = b; buf[bytes++] = g; buf[bytes++] = r; } } } else { /* It's a unidirectional port */ int i = 0, n = bytes; unsigned char rgb[3]; while (bytes < nbytes) { unsigned int hi, lo; if (qcam_await_ready1(qcam, 1)) return bytes; hi = (parport_read_status(qcam->pport) & 0xf0); qcam_set_ack(qcam, 1); if (qcam_await_ready1(qcam, 0)) return bytes; lo = (parport_read_status(qcam->pport) & 0xf0); qcam_set_ack(qcam, 0); /* flip some bits */ rgb[(i = bytes++ % 3)] = (hi | (lo >> 4)) ^ 0x88; if (i >= 2) { get_fragment: if (force_rgb) { buf[n++] = rgb[0]; buf[n++] = rgb[1]; buf[n++] = rgb[2]; } else { buf[n++] = rgb[2]; buf[n++] = rgb[1]; buf[n++] = rgb[0]; } } } if (i) { i = 0; goto get_fragment; } } return bytes; } #define BUFSZ 150 static long qc_capture(struct qcam *qcam, char __user *buf, unsigned long len) { struct v4l2_device *v4l2_dev = &qcam->v4l2_dev; unsigned lines, pixelsperline, bitsperxfer; unsigned int is_bi_dir = qcam->bidirectional; size_t wantlen, outptr = 0; char 
tmpbuf[BUFSZ]; if (!access_ok(VERIFY_WRITE, buf, len)) return -EFAULT; /* Wait for camera to become ready */ for (;;) { int i = qcam_get(qcam, 41); if (i == -1) { qc_setup(qcam); return -EIO; } if ((i & 0x80) == 0) break; schedule(); } if (qcam_set(qcam, 7, (qcam->mode | (is_bi_dir ? 1 : 0)) + 1)) return -EIO; lines = qcam->height; pixelsperline = qcam->width; bitsperxfer = (is_bi_dir) ? 24 : 8; if (is_bi_dir) { /* Turn the port around */ parport_data_reverse(qcam->pport); mdelay(3); qcam_set_ack(qcam, 0); if (qcam_await_ready1(qcam, 1)) { qc_setup(qcam); return -EIO; } qcam_set_ack(qcam, 1); if (qcam_await_ready1(qcam, 0)) { qc_setup(qcam); return -EIO; } } wantlen = lines * pixelsperline * 24 / 8; while (wantlen) { size_t t, s; s = (wantlen > BUFSZ) ? BUFSZ : wantlen; t = qcam_read_bytes(qcam, tmpbuf, s); if (outptr < len) { size_t sz = len - outptr; if (sz > t) sz = t; if (__copy_to_user(buf + outptr, tmpbuf, sz)) break; outptr += sz; } wantlen -= t; if (t < s) break; cond_resched(); } len = outptr; if (wantlen) { v4l2_err(v4l2_dev, "short read.\n"); if (is_bi_dir) parport_data_forward(qcam->pport); qc_setup(qcam); return len; } if (is_bi_dir) { int l; do { l = qcam_read_bytes(qcam, tmpbuf, 3); cond_resched(); } while (l && (tmpbuf[0] == 0x7e || tmpbuf[1] == 0x7e || tmpbuf[2] == 0x7e)); if (force_rgb) { if (tmpbuf[0] != 0xe || tmpbuf[1] != 0x0 || tmpbuf[2] != 0xf) v4l2_err(v4l2_dev, "bad EOF\n"); } else { if (tmpbuf[0] != 0xf || tmpbuf[1] != 0x0 || tmpbuf[2] != 0xe) v4l2_err(v4l2_dev, "bad EOF\n"); } qcam_set_ack(qcam, 0); if (qcam_await_ready1(qcam, 1)) { v4l2_err(v4l2_dev, "no ack after EOF\n"); parport_data_forward(qcam->pport); qc_setup(qcam); return len; } parport_data_forward(qcam->pport); mdelay(3); qcam_set_ack(qcam, 1); if (qcam_await_ready1(qcam, 0)) { v4l2_err(v4l2_dev, "no ack to port turnaround\n"); qc_setup(qcam); return len; } } else { int l; do { l = qcam_read_bytes(qcam, tmpbuf, 1); cond_resched(); } while (l && tmpbuf[0] == 0x7e); l = 
qcam_read_bytes(qcam, tmpbuf + 1, 2); if (force_rgb) { if (tmpbuf[0] != 0xe || tmpbuf[1] != 0x0 || tmpbuf[2] != 0xf) v4l2_err(v4l2_dev, "bad EOF\n"); } else { if (tmpbuf[0] != 0xf || tmpbuf[1] != 0x0 || tmpbuf[2] != 0xe) v4l2_err(v4l2_dev, "bad EOF\n"); } } qcam_write_data(qcam, 0); return len; } /* * Video4linux interfacing */ static int qcam_querycap(struct file *file, void *priv, struct v4l2_capability *vcap) { struct qcam *qcam = video_drvdata(file); strlcpy(vcap->driver, qcam->v4l2_dev.name, sizeof(vcap->driver)); strlcpy(vcap->card, "Color Quickcam", sizeof(vcap->card)); strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info)); vcap->version = KERNEL_VERSION(0, 0, 3); vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE; return 0; } static int qcam_enum_input(struct file *file, void *fh, struct v4l2_input *vin) { if (vin->index > 0) return -EINVAL; strlcpy(vin->name, "Camera", sizeof(vin->name)); vin->type = V4L2_INPUT_TYPE_CAMERA; vin->audioset = 0; vin->tuner = 0; vin->std = 0; vin->status = 0; return 0; } static int qcam_g_input(struct file *file, void *fh, unsigned int *inp) { *inp = 0; return 0; } static int qcam_s_input(struct file *file, void *fh, unsigned int inp) { return (inp > 0) ? 
-EINVAL : 0; } static int qcam_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *qc) { switch (qc->id) { case V4L2_CID_BRIGHTNESS: return v4l2_ctrl_query_fill(qc, 0, 255, 1, 240); case V4L2_CID_CONTRAST: return v4l2_ctrl_query_fill(qc, 0, 255, 1, 192); case V4L2_CID_GAMMA: return v4l2_ctrl_query_fill(qc, 0, 255, 1, 128); } return -EINVAL; } static int qcam_g_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct qcam *qcam = video_drvdata(file); int ret = 0; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: ctrl->value = qcam->brightness; break; case V4L2_CID_CONTRAST: ctrl->value = qcam->contrast; break; case V4L2_CID_GAMMA: ctrl->value = qcam->whitebal; break; default: ret = -EINVAL; break; } return ret; } static int qcam_s_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct qcam *qcam = video_drvdata(file); int ret = 0; mutex_lock(&qcam->lock); switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: qcam->brightness = ctrl->value; break; case V4L2_CID_CONTRAST: qcam->contrast = ctrl->value; break; case V4L2_CID_GAMMA: qcam->whitebal = ctrl->value; break; default: ret = -EINVAL; break; } if (ret == 0) { parport_claim_or_block(qcam->pdev); qc_setup(qcam); parport_release(qcam->pdev); } mutex_unlock(&qcam->lock); return ret; } static int qcam_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt) { struct qcam *qcam = video_drvdata(file); struct v4l2_pix_format *pix = &fmt->fmt.pix; pix->width = qcam->width; pix->height = qcam->height; pix->pixelformat = V4L2_PIX_FMT_RGB24; pix->field = V4L2_FIELD_NONE; pix->bytesperline = 3 * qcam->width; pix->sizeimage = 3 * qcam->width * qcam->height; /* Just a guess */ pix->colorspace = V4L2_COLORSPACE_SRGB; return 0; } static int qcam_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt) { struct v4l2_pix_format *pix = &fmt->fmt.pix; if (pix->height < 60 || pix->width < 80) { pix->height = 60; pix->width = 80; } else if (pix->height < 120 || pix->width < 160) { 
pix->height = 120; pix->width = 160; } else { pix->height = 240; pix->width = 320; } pix->pixelformat = V4L2_PIX_FMT_RGB24; pix->field = V4L2_FIELD_NONE; pix->bytesperline = 3 * pix->width; pix->sizeimage = 3 * pix->width * pix->height; /* Just a guess */ pix->colorspace = V4L2_COLORSPACE_SRGB; return 0; } static int qcam_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt) { struct qcam *qcam = video_drvdata(file); struct v4l2_pix_format *pix = &fmt->fmt.pix; int ret = qcam_try_fmt_vid_cap(file, fh, fmt); if (ret) return ret; switch (pix->height) { case 60: qcam->mode = QC_DECIMATION_4; break; case 120: qcam->mode = QC_DECIMATION_2; break; default: qcam->mode = QC_DECIMATION_1; break; } mutex_lock(&qcam->lock); qcam->mode |= QC_MILLIONS; qcam->height = pix->height; qcam->width = pix->width; parport_claim_or_block(qcam->pdev); qc_setup(qcam); parport_release(qcam->pdev); mutex_unlock(&qcam->lock); return 0; } static int qcam_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *fmt) { static struct v4l2_fmtdesc formats[] = { { 0, 0, 0, "RGB 8:8:8", V4L2_PIX_FMT_RGB24, { 0, 0, 0, 0 } }, }; enum v4l2_buf_type type = fmt->type; if (fmt->index > 0) return -EINVAL; *fmt = formats[fmt->index]; fmt->type = type; return 0; } static ssize_t qcam_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct qcam *qcam = video_drvdata(file); int len; mutex_lock(&qcam->lock); parport_claim_or_block(qcam->pdev); /* Probably should have a semaphore against multiple users */ len = qc_capture(qcam, buf, count); parport_release(qcam->pdev); mutex_unlock(&qcam->lock); return len; } static const struct v4l2_file_operations qcam_fops = { .owner = THIS_MODULE, .unlocked_ioctl = video_ioctl2, .read = qcam_read, }; static const struct v4l2_ioctl_ops qcam_ioctl_ops = { .vidioc_querycap = qcam_querycap, .vidioc_g_input = qcam_g_input, .vidioc_s_input = qcam_s_input, .vidioc_enum_input = qcam_enum_input, .vidioc_queryctrl = qcam_queryctrl, 
.vidioc_g_ctrl = qcam_g_ctrl, .vidioc_s_ctrl = qcam_s_ctrl, .vidioc_enum_fmt_vid_cap = qcam_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = qcam_g_fmt_vid_cap, .vidioc_s_fmt_vid_cap = qcam_s_fmt_vid_cap, .vidioc_try_fmt_vid_cap = qcam_try_fmt_vid_cap, }; /* Initialize the QuickCam driver control structure. */ static struct qcam *qcam_init(struct parport *port) { struct qcam *qcam; struct v4l2_device *v4l2_dev; qcam = kzalloc(sizeof(*qcam), GFP_KERNEL); if (qcam == NULL) return NULL; v4l2_dev = &qcam->v4l2_dev; strlcpy(v4l2_dev->name, "c-qcam", sizeof(v4l2_dev->name)); if (v4l2_device_register(NULL, v4l2_dev) < 0) { v4l2_err(v4l2_dev, "Could not register v4l2_device\n"); return NULL; } qcam->pport = port; qcam->pdev = parport_register_device(port, "c-qcam", NULL, NULL, NULL, 0, NULL); qcam->bidirectional = (qcam->pport->modes & PARPORT_MODE_TRISTATE) ? 1 : 0; if (qcam->pdev == NULL) { v4l2_err(v4l2_dev, "couldn't register for %s.\n", port->name); kfree(qcam); return NULL; } strlcpy(qcam->vdev.name, "Colour QuickCam", sizeof(qcam->vdev.name)); qcam->vdev.v4l2_dev = v4l2_dev; qcam->vdev.fops = &qcam_fops; qcam->vdev.ioctl_ops = &qcam_ioctl_ops; qcam->vdev.release = video_device_release_empty; video_set_drvdata(&qcam->vdev, qcam); mutex_init(&qcam->lock); qcam->width = qcam->ccd_width = 320; qcam->height = qcam->ccd_height = 240; qcam->mode = QC_MILLIONS | QC_DECIMATION_1; qcam->contrast = 192; qcam->brightness = 240; qcam->whitebal = 128; qcam->top = 1; qcam->left = 14; return qcam; } static int init_cqcam(struct parport *port) { struct qcam *qcam; struct v4l2_device *v4l2_dev; if (parport[0] != -1) { /* The user gave specific instructions */ int i, found = 0; for (i = 0; i < MAX_CAMS && parport[i] != -1; i++) { if (parport[0] == port->number) found = 1; } if (!found) return -ENODEV; } if (num_cams == MAX_CAMS) return -ENOSPC; qcam = qcam_init(port); if (qcam == NULL) return -ENODEV; v4l2_dev = &qcam->v4l2_dev; parport_claim_or_block(qcam->pdev); qc_reset(qcam); if (probe 
&& qc_detect(qcam) == 0) { parport_release(qcam->pdev); parport_unregister_device(qcam->pdev); kfree(qcam); return -ENODEV; } qc_setup(qcam); parport_release(qcam->pdev); if (video_register_device(&qcam->vdev, VFL_TYPE_GRABBER, video_nr) < 0) { v4l2_err(v4l2_dev, "Unable to register Colour QuickCam on %s\n", qcam->pport->name); parport_unregister_device(qcam->pdev); kfree(qcam); return -ENODEV; } v4l2_info(v4l2_dev, "%s: Colour QuickCam found on %s\n", video_device_node_name(&qcam->vdev), qcam->pport->name); qcams[num_cams++] = qcam; return 0; } static void close_cqcam(struct qcam *qcam) { video_unregister_device(&qcam->vdev); parport_unregister_device(qcam->pdev); kfree(qcam); } static void cq_attach(struct parport *port) { init_cqcam(port); } static void cq_detach(struct parport *port) { /* Write this some day. */ } static struct parport_driver cqcam_driver = { .name = "cqcam", .attach = cq_attach, .detach = cq_detach, }; static int __init cqcam_init(void) { printk(KERN_INFO BANNER "\n"); return parport_register_driver(&cqcam_driver); } static void __exit cqcam_cleanup(void) { unsigned int i; for (i = 0; i < num_cams; i++) close_cqcam(qcams[i]); parport_unregister_driver(&cqcam_driver); } MODULE_AUTHOR("Philip Blundell <philb@gnu.org>"); MODULE_DESCRIPTION(BANNER); MODULE_LICENSE("GPL"); module_init(cqcam_init); module_exit(cqcam_cleanup);
gpl-2.0
redyk/omap3630-3.0.8
drivers/net/wimax/i2400m/control.c
2933
43832
/* * Intel Wireless WiMAX Connection 2400m * Miscellaneous control functions for managing the device * * * Copyright (C) 2007-2008 Intel Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * * Intel Corporation <linux-wimax@intel.com> * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * - Initial implementation * * This is a collection of functions used to control the device (plus * a few helpers). 
* * There are utilities for handling TLV buffers, hooks on the device's * reports to act on device changes of state [i2400m_report_hook()], * on acks to commands [i2400m_msg_ack_hook()], a helper for sending * commands to the device and blocking until a reply arrives * [i2400m_msg_to_dev()], a few high level commands for manipulating * the device state, powersving mode and configuration plus the * routines to setup the device once communication is stablished with * it [i2400m_dev_initialize()]. * * ROADMAP * * i2400m_dev_initialize() Called by i2400m_dev_start() * i2400m_set_init_config() * i2400m_cmd_get_state() * i2400m_dev_shutdown() Called by i2400m_dev_stop() * i2400m_reset() * * i2400m_{cmd,get,set}_*() * i2400m_msg_to_dev() * i2400m_msg_check_status() * * i2400m_report_hook() Called on reception of an event * i2400m_report_state_hook() * i2400m_tlv_buffer_walk() * i2400m_tlv_match() * i2400m_report_tlv_system_state() * i2400m_report_tlv_rf_switches_status() * i2400m_report_tlv_media_status() * i2400m_cmd_enter_powersave() * * i2400m_msg_ack_hook() Called on reception of a reply to a * command, get or set */ #include <stdarg.h> #include "i2400m.h" #include <linux/kernel.h> #include <linux/slab.h> #include <linux/wimax/i2400m.h> #define D_SUBMODULE control #include "debug-levels.h" static int i2400m_idle_mode_disabled;/* 0 (idle mode enabled) by default */ module_param_named(idle_mode_disabled, i2400m_idle_mode_disabled, int, 0644); MODULE_PARM_DESC(idle_mode_disabled, "If true, the device will not enable idle mode negotiation " "with the base station (when connected) to save power."); /* 0 (power saving enabled) by default */ static int i2400m_power_save_disabled; module_param_named(power_save_disabled, i2400m_power_save_disabled, int, 0644); MODULE_PARM_DESC(power_save_disabled, "If true, the driver will not tell the device to enter " "power saving mode when it reports it is ready for it. 
" "False by default (so the device is told to do power " "saving)."); static int i2400m_passive_mode; /* 0 (passive mode disabled) by default */ module_param_named(passive_mode, i2400m_passive_mode, int, 0644); MODULE_PARM_DESC(passive_mode, "If true, the driver will not do any device setup " "and leave it up to user space, who must be properly " "setup."); /* * Return if a TLV is of a give type and size * * @tlv_hdr: pointer to the TLV * @tlv_type: type of the TLV we are looking for * @tlv_size: expected size of the TLV we are looking for (if -1, * don't check the size). This includes the header * Returns: 0 if the TLV matches * < 0 if it doesn't match at all * > 0 total TLV + payload size, if the type matches, but not * the size */ static ssize_t i2400m_tlv_match(const struct i2400m_tlv_hdr *tlv, enum i2400m_tlv tlv_type, ssize_t tlv_size) { if (le16_to_cpu(tlv->type) != tlv_type) /* Not our type? skip */ return -1; if (tlv_size != -1 && le16_to_cpu(tlv->length) + sizeof(*tlv) != tlv_size) { size_t size = le16_to_cpu(tlv->length) + sizeof(*tlv); printk(KERN_WARNING "W: tlv type 0x%x mismatched because of " "size (got %zu vs %zu expected)\n", tlv_type, size, tlv_size); return size; } return 0; } /* * Given a buffer of TLVs, iterate over them * * @i2400m: device instance * @tlv_buf: pointer to the beginning of the TLV buffer * @buf_size: buffer size in bytes * @tlv_pos: seek position; this is assumed to be a pointer returned * by i2400m_tlv_buffer_walk() [and thus, validated]. The * TLV returned will be the one following this one. * * Usage: * * tlv_itr = NULL; * while (tlv_itr = i2400m_tlv_buffer_walk(i2400m, buf, size, tlv_itr)) { * ... * // Do stuff with tlv_itr, DON'T MODIFY IT * ... 
* } */ static const struct i2400m_tlv_hdr *i2400m_tlv_buffer_walk( struct i2400m *i2400m, const void *tlv_buf, size_t buf_size, const struct i2400m_tlv_hdr *tlv_pos) { struct device *dev = i2400m_dev(i2400m); const struct i2400m_tlv_hdr *tlv_top = tlv_buf + buf_size; size_t offset, length, avail_size; unsigned type; if (tlv_pos == NULL) /* Take the first one? */ tlv_pos = tlv_buf; else /* Nope, the next one */ tlv_pos = (void *) tlv_pos + le16_to_cpu(tlv_pos->length) + sizeof(*tlv_pos); if (tlv_pos == tlv_top) { /* buffer done */ tlv_pos = NULL; goto error_beyond_end; } if (tlv_pos > tlv_top) { tlv_pos = NULL; WARN_ON(1); goto error_beyond_end; } offset = (void *) tlv_pos - (void *) tlv_buf; avail_size = buf_size - offset; if (avail_size < sizeof(*tlv_pos)) { dev_err(dev, "HW BUG? tlv_buf %p [%zu bytes], tlv @%zu: " "short header\n", tlv_buf, buf_size, offset); goto error_short_header; } type = le16_to_cpu(tlv_pos->type); length = le16_to_cpu(tlv_pos->length); if (avail_size < sizeof(*tlv_pos) + length) { dev_err(dev, "HW BUG? tlv_buf %p [%zu bytes], " "tlv type 0x%04x @%zu: " "short data (%zu bytes vs %zu needed)\n", tlv_buf, buf_size, type, offset, avail_size, sizeof(*tlv_pos) + length); goto error_short_header; } error_short_header: error_beyond_end: return tlv_pos; } /* * Find a TLV in a buffer of sequential TLVs * * @i2400m: device descriptor * @tlv_hdr: pointer to the first TLV in the sequence * @size: size of the buffer in bytes; all TLVs are assumed to fit * fully in the buffer (otherwise we'll complain). * @tlv_type: type of the TLV we are looking for * @tlv_size: expected size of the TLV we are looking for (if -1, * don't check the size). This includes the header * * Returns: NULL if the TLV is not found, otherwise a pointer to * it. If the sizes don't match, an error is printed and NULL * returned. 
*/ static const struct i2400m_tlv_hdr *i2400m_tlv_find( struct i2400m *i2400m, const struct i2400m_tlv_hdr *tlv_hdr, size_t size, enum i2400m_tlv tlv_type, ssize_t tlv_size) { ssize_t match; struct device *dev = i2400m_dev(i2400m); const struct i2400m_tlv_hdr *tlv = NULL; while ((tlv = i2400m_tlv_buffer_walk(i2400m, tlv_hdr, size, tlv))) { match = i2400m_tlv_match(tlv, tlv_type, tlv_size); if (match == 0) /* found it :) */ break; if (match > 0) dev_warn(dev, "TLV type 0x%04x found with size " "mismatch (%zu vs %zu needed)\n", tlv_type, match, tlv_size); } return tlv; } static const struct { char *msg; int errno; } ms_to_errno[I2400M_MS_MAX] = { [I2400M_MS_DONE_OK] = { "", 0 }, [I2400M_MS_DONE_IN_PROGRESS] = { "", 0 }, [I2400M_MS_INVALID_OP] = { "invalid opcode", -ENOSYS }, [I2400M_MS_BAD_STATE] = { "invalid state", -EILSEQ }, [I2400M_MS_ILLEGAL_VALUE] = { "illegal value", -EINVAL }, [I2400M_MS_MISSING_PARAMS] = { "missing parameters", -ENOMSG }, [I2400M_MS_VERSION_ERROR] = { "bad version", -EIO }, [I2400M_MS_ACCESSIBILITY_ERROR] = { "accesibility error", -EIO }, [I2400M_MS_BUSY] = { "busy", -EBUSY }, [I2400M_MS_CORRUPTED_TLV] = { "corrupted TLV", -EILSEQ }, [I2400M_MS_UNINITIALIZED] = { "not unitialized", -EILSEQ }, [I2400M_MS_UNKNOWN_ERROR] = { "unknown error", -EIO }, [I2400M_MS_PRODUCTION_ERROR] = { "production error", -EIO }, [I2400M_MS_NO_RF] = { "no RF", -EIO }, [I2400M_MS_NOT_READY_FOR_POWERSAVE] = { "not ready for powersave", -EACCES }, [I2400M_MS_THERMAL_CRITICAL] = { "thermal critical", -EL3HLT }, }; /* * i2400m_msg_check_status - translate a message's status code * * @i2400m: device descriptor * @l3l4_hdr: message header * @strbuf: buffer to place a formatted error message (unless NULL). * @strbuf_size: max amount of available space; larger messages will * be truncated. * * Returns: errno code corresponding to the status code in @l3l4_hdr * and a message in @strbuf describing the error. 
*/ int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *l3l4_hdr, char *strbuf, size_t strbuf_size) { int result; enum i2400m_ms status = le16_to_cpu(l3l4_hdr->status); const char *str; if (status == 0) return 0; if (status >= ARRAY_SIZE(ms_to_errno)) { str = "unknown status code"; result = -EBADR; } else { str = ms_to_errno[status].msg; result = ms_to_errno[status].errno; } if (strbuf) snprintf(strbuf, strbuf_size, "%s (%d)", str, status); return result; } /* * Act on a TLV System State reported by the device * * @i2400m: device descriptor * @ss: validated System State TLV */ static void i2400m_report_tlv_system_state(struct i2400m *i2400m, const struct i2400m_tlv_system_state *ss) { struct device *dev = i2400m_dev(i2400m); struct wimax_dev *wimax_dev = &i2400m->wimax_dev; enum i2400m_system_state i2400m_state = le32_to_cpu(ss->state); d_fnstart(3, dev, "(i2400m %p ss %p [%u])\n", i2400m, ss, i2400m_state); if (i2400m->state != i2400m_state) { i2400m->state = i2400m_state; wake_up_all(&i2400m->state_wq); } switch (i2400m_state) { case I2400M_SS_UNINITIALIZED: case I2400M_SS_INIT: case I2400M_SS_CONFIG: case I2400M_SS_PRODUCTION: wimax_state_change(wimax_dev, WIMAX_ST_UNINITIALIZED); break; case I2400M_SS_RF_OFF: case I2400M_SS_RF_SHUTDOWN: wimax_state_change(wimax_dev, WIMAX_ST_RADIO_OFF); break; case I2400M_SS_READY: case I2400M_SS_STANDBY: case I2400M_SS_SLEEPACTIVE: wimax_state_change(wimax_dev, WIMAX_ST_READY); break; case I2400M_SS_CONNECTING: case I2400M_SS_WIMAX_CONNECTED: wimax_state_change(wimax_dev, WIMAX_ST_READY); break; case I2400M_SS_SCAN: case I2400M_SS_OUT_OF_ZONE: wimax_state_change(wimax_dev, WIMAX_ST_SCANNING); break; case I2400M_SS_IDLE: d_printf(1, dev, "entering BS-negotiated idle mode\n"); case I2400M_SS_DISCONNECTING: case I2400M_SS_DATA_PATH_CONNECTED: wimax_state_change(wimax_dev, WIMAX_ST_CONNECTED); break; default: /* Huh? just in case, shut it down */ dev_err(dev, "HW BUG? 
unknown state %u: shutting down\n", i2400m_state); i2400m_reset(i2400m, I2400M_RT_WARM); break; } d_fnend(3, dev, "(i2400m %p ss %p [%u]) = void\n", i2400m, ss, i2400m_state); } /* * Parse and act on a TLV Media Status sent by the device * * @i2400m: device descriptor * @ms: validated Media Status TLV * * This will set the carrier up on down based on the device's link * report. This is done asides of what the WiMAX stack does based on * the device's state as sometimes we need to do a link-renew (the BS * wants us to renew a DHCP lease, for example). * * In fact, doc says that every time we get a link-up, we should do a * DHCP negotiation... */ static void i2400m_report_tlv_media_status(struct i2400m *i2400m, const struct i2400m_tlv_media_status *ms) { struct device *dev = i2400m_dev(i2400m); struct wimax_dev *wimax_dev = &i2400m->wimax_dev; struct net_device *net_dev = wimax_dev->net_dev; enum i2400m_media_status status = le32_to_cpu(ms->media_status); d_fnstart(3, dev, "(i2400m %p ms %p [%u])\n", i2400m, ms, status); switch (status) { case I2400M_MEDIA_STATUS_LINK_UP: netif_carrier_on(net_dev); break; case I2400M_MEDIA_STATUS_LINK_DOWN: netif_carrier_off(net_dev); break; /* * This is the network telling us we need to retrain the DHCP * lease -- so far, we are trusting the WiMAX Network Service * in user space to pick this up and poke the DHCP client. */ case I2400M_MEDIA_STATUS_LINK_RENEW: netif_carrier_on(net_dev); break; default: dev_err(dev, "HW BUG? unknown media status %u\n", status); } d_fnend(3, dev, "(i2400m %p ms %p [%u]) = void\n", i2400m, ms, status); } /* * Process a TLV from a 'state report' * * @i2400m: device descriptor * @tlv: pointer to the TLV header; it has been already validated for * consistent size. * @tag: for error messages * * Act on the TLVs from a 'state report'. 
*/ static void i2400m_report_state_parse_tlv(struct i2400m *i2400m, const struct i2400m_tlv_hdr *tlv, const char *tag) { struct device *dev = i2400m_dev(i2400m); const struct i2400m_tlv_media_status *ms; const struct i2400m_tlv_system_state *ss; const struct i2400m_tlv_rf_switches_status *rfss; if (0 == i2400m_tlv_match(tlv, I2400M_TLV_SYSTEM_STATE, sizeof(*ss))) { ss = container_of(tlv, typeof(*ss), hdr); d_printf(2, dev, "%s: system state TLV " "found (0x%04x), state 0x%08x\n", tag, I2400M_TLV_SYSTEM_STATE, le32_to_cpu(ss->state)); i2400m_report_tlv_system_state(i2400m, ss); } if (0 == i2400m_tlv_match(tlv, I2400M_TLV_RF_STATUS, sizeof(*rfss))) { rfss = container_of(tlv, typeof(*rfss), hdr); d_printf(2, dev, "%s: RF status TLV " "found (0x%04x), sw 0x%02x hw 0x%02x\n", tag, I2400M_TLV_RF_STATUS, le32_to_cpu(rfss->sw_rf_switch), le32_to_cpu(rfss->hw_rf_switch)); i2400m_report_tlv_rf_switches_status(i2400m, rfss); } if (0 == i2400m_tlv_match(tlv, I2400M_TLV_MEDIA_STATUS, sizeof(*ms))) { ms = container_of(tlv, typeof(*ms), hdr); d_printf(2, dev, "%s: Media Status TLV: %u\n", tag, le32_to_cpu(ms->media_status)); i2400m_report_tlv_media_status(i2400m, ms); } } /* * Parse a 'state report' and extract information * * @i2400m: device descriptor * @l3l4_hdr: pointer to message; it has been already validated for * consistent size. * @size: size of the message (header + payload). The header length * declaration is assumed to be congruent with @size (as in * sizeof(*l3l4_hdr) + l3l4_hdr->length == size) * * Walk over the TLVs in a report state and act on them. 
*/ static void i2400m_report_state_hook(struct i2400m *i2400m, const struct i2400m_l3l4_hdr *l3l4_hdr, size_t size, const char *tag) { struct device *dev = i2400m_dev(i2400m); const struct i2400m_tlv_hdr *tlv; size_t tlv_size = le16_to_cpu(l3l4_hdr->length); d_fnstart(4, dev, "(i2400m %p, l3l4_hdr %p, size %zu, %s)\n", i2400m, l3l4_hdr, size, tag); tlv = NULL; while ((tlv = i2400m_tlv_buffer_walk(i2400m, &l3l4_hdr->pl, tlv_size, tlv))) i2400m_report_state_parse_tlv(i2400m, tlv, tag); d_fnend(4, dev, "(i2400m %p, l3l4_hdr %p, size %zu, %s) = void\n", i2400m, l3l4_hdr, size, tag); } /* * i2400m_report_hook - (maybe) act on a report * * @i2400m: device descriptor * @l3l4_hdr: pointer to message; it has been already validated for * consistent size. * @size: size of the message (header + payload). The header length * declaration is assumed to be congruent with @size (as in * sizeof(*l3l4_hdr) + l3l4_hdr->length == size) * * Extract information we might need (like carrien on/off) from a * device report. */ void i2400m_report_hook(struct i2400m *i2400m, const struct i2400m_l3l4_hdr *l3l4_hdr, size_t size) { struct device *dev = i2400m_dev(i2400m); unsigned msg_type; d_fnstart(3, dev, "(i2400m %p l3l4_hdr %p size %zu)\n", i2400m, l3l4_hdr, size); /* Chew on the message, we might need some information from * here */ msg_type = le16_to_cpu(l3l4_hdr->type); switch (msg_type) { case I2400M_MT_REPORT_STATE: /* carrier detection... */ i2400m_report_state_hook(i2400m, l3l4_hdr, size, "REPORT STATE"); break; /* If the device is ready for power save, then ask it to do * it. 
*/ case I2400M_MT_REPORT_POWERSAVE_READY: /* zzzzz */ if (l3l4_hdr->status == cpu_to_le16(I2400M_MS_DONE_OK)) { if (i2400m_power_save_disabled) d_printf(1, dev, "ready for powersave, " "not requesting (disabled by module " "parameter)\n"); else { d_printf(1, dev, "ready for powersave, " "requesting\n"); i2400m_cmd_enter_powersave(i2400m); } } break; } d_fnend(3, dev, "(i2400m %p l3l4_hdr %p size %zu) = void\n", i2400m, l3l4_hdr, size); } /* * i2400m_msg_ack_hook - process cmd/set/get ack for internal status * * @i2400m: device descriptor * @l3l4_hdr: pointer to message; it has been already validated for * consistent size. * @size: size of the message * * Extract information we might need from acks to commands and act on * it. This is akin to i2400m_report_hook(). Note most of this * processing should be done in the function that calls the * command. This is here for some cases where it can't happen... */ static void i2400m_msg_ack_hook(struct i2400m *i2400m, const struct i2400m_l3l4_hdr *l3l4_hdr, size_t size) { int result; struct device *dev = i2400m_dev(i2400m); unsigned ack_type, ack_status; char strerr[32]; /* Chew on the message, we might need some information from * here */ ack_type = le16_to_cpu(l3l4_hdr->type); ack_status = le16_to_cpu(l3l4_hdr->status); switch (ack_type) { case I2400M_MT_CMD_ENTER_POWERSAVE: /* This is just left here for the sake of example, as * the processing is done somewhere else. */ if (0) { result = i2400m_msg_check_status( l3l4_hdr, strerr, sizeof(strerr)); if (result >= 0) d_printf(1, dev, "ready for power save: %zd\n", size); } break; } } /* * i2400m_msg_size_check() - verify message size and header are congruent * * It is ok if the total message size is larger than the expected * size, as there can be padding. 
*/ int i2400m_msg_size_check(struct i2400m *i2400m, const struct i2400m_l3l4_hdr *l3l4_hdr, size_t msg_size) { int result; struct device *dev = i2400m_dev(i2400m); size_t expected_size; d_fnstart(4, dev, "(i2400m %p l3l4_hdr %p msg_size %zu)\n", i2400m, l3l4_hdr, msg_size); if (msg_size < sizeof(*l3l4_hdr)) { dev_err(dev, "bad size for message header " "(expected at least %zu, got %zu)\n", (size_t) sizeof(*l3l4_hdr), msg_size); result = -EIO; goto error_hdr_size; } expected_size = le16_to_cpu(l3l4_hdr->length) + sizeof(*l3l4_hdr); if (msg_size < expected_size) { dev_err(dev, "bad size for message code 0x%04x (expected %zu, " "got %zu)\n", le16_to_cpu(l3l4_hdr->type), expected_size, msg_size); result = -EIO; } else result = 0; error_hdr_size: d_fnend(4, dev, "(i2400m %p l3l4_hdr %p msg_size %zu) = %d\n", i2400m, l3l4_hdr, msg_size, result); return result; } /* * Cancel a wait for a command ACK * * @i2400m: device descriptor * @code: [negative] errno code to cancel with (don't use * -EINPROGRESS) * * If there is an ack already filled out, free it. */ void i2400m_msg_to_dev_cancel_wait(struct i2400m *i2400m, int code) { struct sk_buff *ack_skb; unsigned long flags; spin_lock_irqsave(&i2400m->rx_lock, flags); ack_skb = i2400m->ack_skb; if (ack_skb && !IS_ERR(ack_skb)) kfree_skb(ack_skb); i2400m->ack_skb = ERR_PTR(code); spin_unlock_irqrestore(&i2400m->rx_lock, flags); } /** * i2400m_msg_to_dev - Send a control message to the device and get a response * * @i2400m: device descriptor * * @msg_skb: an skb * * * @buf: pointer to the buffer containing the message to be sent; it * has to start with a &struct i2400M_l3l4_hdr and then * followed by the payload. Once this function returns, the * buffer can be reused. * * @buf_len: buffer size * * Returns: * * Pointer to skb containing the ack message. You need to check the * pointer with IS_ERR(), as it might be an error code. 
Error codes * could happen because: * * - the message wasn't formatted correctly * - couldn't send the message * - failed waiting for a response * - the ack message wasn't formatted correctly * * The returned skb has been allocated with wimax_msg_to_user_alloc(), * it contains the response in a netlink attribute and is ready to be * passed up to user space with wimax_msg_to_user_send(). To access * the payload and its length, use wimax_msg_{data,len}() on the skb. * * The skb has to be freed with kfree_skb() once done. * * Description: * * This function delivers a message/command to the device and waits * for an ack to be received. The format is described in * linux/wimax/i2400m.h. In summary, a command/get/set is followed by an * ack. * * This function will not check the ack status, that's left up to the * caller. Once done with the ack skb, it has to be kfree_skb()ed. * * The i2400m handles only one message at the same time, thus we need * the mutex to exclude other players. * * We write the message and then wait for an answer to come back. The * RX path intercepts control messages and handles them in * i2400m_rx_ctl(). Reports (notifications) are (maybe) processed * locally and then forwarded (as needed) to user space on the WiMAX * stack message pipe. Acks are saved and passed back to us through an * skb in i2400m->ack_skb which is ready to be given to generic * netlink if need be. 
*/ struct sk_buff *i2400m_msg_to_dev(struct i2400m *i2400m, const void *buf, size_t buf_len) { int result; struct device *dev = i2400m_dev(i2400m); const struct i2400m_l3l4_hdr *msg_l3l4_hdr; struct sk_buff *ack_skb; const struct i2400m_l3l4_hdr *ack_l3l4_hdr; size_t ack_len; int ack_timeout; unsigned msg_type; unsigned long flags; d_fnstart(3, dev, "(i2400m %p buf %p len %zu)\n", i2400m, buf, buf_len); rmb(); /* Make sure we see what i2400m_dev_reset_handle() */ if (i2400m->boot_mode) return ERR_PTR(-EL3RST); msg_l3l4_hdr = buf; /* Check msg & payload consistency */ result = i2400m_msg_size_check(i2400m, msg_l3l4_hdr, buf_len); if (result < 0) goto error_bad_msg; msg_type = le16_to_cpu(msg_l3l4_hdr->type); d_printf(1, dev, "CMD/GET/SET 0x%04x %zu bytes\n", msg_type, buf_len); d_dump(2, dev, buf, buf_len); /* Setup the completion, ack_skb ("we are waiting") and send * the message to the device */ mutex_lock(&i2400m->msg_mutex); spin_lock_irqsave(&i2400m->rx_lock, flags); i2400m->ack_skb = ERR_PTR(-EINPROGRESS); spin_unlock_irqrestore(&i2400m->rx_lock, flags); init_completion(&i2400m->msg_completion); result = i2400m_tx(i2400m, buf, buf_len, I2400M_PT_CTRL); if (result < 0) { dev_err(dev, "can't send message 0x%04x: %d\n", le16_to_cpu(msg_l3l4_hdr->type), result); goto error_tx; } /* Some commands take longer to execute because of crypto ops, * so we give them some more leeway on timeout */ switch (msg_type) { case I2400M_MT_GET_TLS_OPERATION_RESULT: case I2400M_MT_CMD_SEND_EAP_RESPONSE: ack_timeout = 5 * HZ; break; default: ack_timeout = HZ; } if (unlikely(i2400m->trace_msg_from_user)) wimax_msg(&i2400m->wimax_dev, "echo", buf, buf_len, GFP_KERNEL); /* The RX path in rx.c will put any response for this message * in i2400m->ack_skb and wake us up. If we cancel the wait, * we need to change the value of i2400m->ack_skb to something * not -EINPROGRESS so RX knows there is no one waiting. 
*/ result = wait_for_completion_interruptible_timeout( &i2400m->msg_completion, ack_timeout); if (result == 0) { dev_err(dev, "timeout waiting for reply to message 0x%04x\n", msg_type); result = -ETIMEDOUT; i2400m_msg_to_dev_cancel_wait(i2400m, result); goto error_wait_for_completion; } else if (result < 0) { dev_err(dev, "error waiting for reply to message 0x%04x: %d\n", msg_type, result); i2400m_msg_to_dev_cancel_wait(i2400m, result); goto error_wait_for_completion; } /* Pull out the ack data from i2400m->ack_skb -- see if it is * an error and act accordingly */ spin_lock_irqsave(&i2400m->rx_lock, flags); ack_skb = i2400m->ack_skb; if (IS_ERR(ack_skb)) result = PTR_ERR(ack_skb); else result = 0; i2400m->ack_skb = NULL; spin_unlock_irqrestore(&i2400m->rx_lock, flags); if (result < 0) goto error_ack_status; ack_l3l4_hdr = wimax_msg_data_len(ack_skb, &ack_len); /* Check the ack and deliver it if it is ok */ if (unlikely(i2400m->trace_msg_from_user)) wimax_msg(&i2400m->wimax_dev, "echo", ack_l3l4_hdr, ack_len, GFP_KERNEL); result = i2400m_msg_size_check(i2400m, ack_l3l4_hdr, ack_len); if (result < 0) { dev_err(dev, "HW BUG? reply to message 0x%04x: %d\n", msg_type, result); goto error_bad_ack_len; } if (msg_type != le16_to_cpu(ack_l3l4_hdr->type)) { dev_err(dev, "HW BUG? 
bad reply 0x%04x to message 0x%04x\n", le16_to_cpu(ack_l3l4_hdr->type), msg_type); result = -EIO; goto error_bad_ack_type; } i2400m_msg_ack_hook(i2400m, ack_l3l4_hdr, ack_len); mutex_unlock(&i2400m->msg_mutex); d_fnend(3, dev, "(i2400m %p buf %p len %zu) = %p\n", i2400m, buf, buf_len, ack_skb); return ack_skb; error_bad_ack_type: error_bad_ack_len: kfree_skb(ack_skb); error_ack_status: error_wait_for_completion: error_tx: mutex_unlock(&i2400m->msg_mutex); error_bad_msg: d_fnend(3, dev, "(i2400m %p buf %p len %zu) = %d\n", i2400m, buf, buf_len, result); return ERR_PTR(result); } /* * Definitions for the Enter Power Save command * * The Enter Power Save command requests the device to go into power * saving mode. The device will ack or nak the command depending on it * being ready for it. If it acks, we tell the USB subsystem to * * As well, the device might request to go into power saving mode by * sending a report (REPORT_POWERSAVE_READY), in which case, we issue * this command. The hookups in the RX coder allow */ enum { I2400M_WAKEUP_ENABLED = 0x01, I2400M_WAKEUP_DISABLED = 0x02, I2400M_TLV_TYPE_WAKEUP_MODE = 144, }; struct i2400m_cmd_enter_power_save { struct i2400m_l3l4_hdr hdr; struct i2400m_tlv_hdr tlv; __le32 val; } __packed; /* * Request entering power save * * This command is (mainly) executed when the device indicates that it * is ready to go into powersave mode via a REPORT_POWERSAVE_READY. 
*/ int i2400m_cmd_enter_powersave(struct i2400m *i2400m) { int result; struct device *dev = i2400m_dev(i2400m); struct sk_buff *ack_skb; struct i2400m_cmd_enter_power_save *cmd; char strerr[32]; result = -ENOMEM; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) goto error_alloc; cmd->hdr.type = cpu_to_le16(I2400M_MT_CMD_ENTER_POWERSAVE); cmd->hdr.length = cpu_to_le16(sizeof(*cmd) - sizeof(cmd->hdr)); cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION); cmd->tlv.type = cpu_to_le16(I2400M_TLV_TYPE_WAKEUP_MODE); cmd->tlv.length = cpu_to_le16(sizeof(cmd->val)); cmd->val = cpu_to_le32(I2400M_WAKEUP_ENABLED); ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd)); result = PTR_ERR(ack_skb); if (IS_ERR(ack_skb)) { dev_err(dev, "Failed to issue 'Enter power save' command: %d\n", result); goto error_msg_to_dev; } result = i2400m_msg_check_status(wimax_msg_data(ack_skb), strerr, sizeof(strerr)); if (result == -EACCES) d_printf(1, dev, "Cannot enter power save mode\n"); else if (result < 0) dev_err(dev, "'Enter power save' (0x%04x) command failed: " "%d - %s\n", I2400M_MT_CMD_ENTER_POWERSAVE, result, strerr); else d_printf(1, dev, "device ready to power save\n"); kfree_skb(ack_skb); error_msg_to_dev: kfree(cmd); error_alloc: return result; } EXPORT_SYMBOL_GPL(i2400m_cmd_enter_powersave); /* * Definitions for getting device information */ enum { I2400M_TLV_DETAILED_DEVICE_INFO = 140 }; /** * i2400m_get_device_info - Query the device for detailed device information * * @i2400m: device descriptor * * Returns: an skb whose skb->data points to a 'struct * i2400m_tlv_detailed_device_info'. When done, kfree_skb() it. The * skb is *guaranteed* to contain the whole TLV data structure. * * On error, IS_ERR(skb) is true and ERR_PTR(skb) is the error * code. 
*/ struct sk_buff *i2400m_get_device_info(struct i2400m *i2400m) { int result; struct device *dev = i2400m_dev(i2400m); struct sk_buff *ack_skb; struct i2400m_l3l4_hdr *cmd; const struct i2400m_l3l4_hdr *ack; size_t ack_len; const struct i2400m_tlv_hdr *tlv; const struct i2400m_tlv_detailed_device_info *ddi; char strerr[32]; ack_skb = ERR_PTR(-ENOMEM); cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) goto error_alloc; cmd->type = cpu_to_le16(I2400M_MT_GET_DEVICE_INFO); cmd->length = 0; cmd->version = cpu_to_le16(I2400M_L3L4_VERSION); ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd)); if (IS_ERR(ack_skb)) { dev_err(dev, "Failed to issue 'get device info' command: %ld\n", PTR_ERR(ack_skb)); goto error_msg_to_dev; } ack = wimax_msg_data_len(ack_skb, &ack_len); result = i2400m_msg_check_status(ack, strerr, sizeof(strerr)); if (result < 0) { dev_err(dev, "'get device info' (0x%04x) command failed: " "%d - %s\n", I2400M_MT_GET_DEVICE_INFO, result, strerr); goto error_cmd_failed; } tlv = i2400m_tlv_find(i2400m, ack->pl, ack_len - sizeof(*ack), I2400M_TLV_DETAILED_DEVICE_INFO, sizeof(*ddi)); if (tlv == NULL) { dev_err(dev, "GET DEVICE INFO: " "detailed device info TLV not found (0x%04x)\n", I2400M_TLV_DETAILED_DEVICE_INFO); result = -EIO; goto error_no_tlv; } skb_pull(ack_skb, (void *) tlv - (void *) ack_skb->data); error_msg_to_dev: kfree(cmd); error_alloc: return ack_skb; error_no_tlv: error_cmd_failed: kfree_skb(ack_skb); kfree(cmd); return ERR_PTR(result); } /* Firmware interface versions we support */ enum { I2400M_HDIv_MAJOR = 9, I2400M_HDIv_MINOR = 1, I2400M_HDIv_MINOR_2 = 2, }; /** * i2400m_firmware_check - check firmware versions are compatible with * the driver * * @i2400m: device descriptor * * Returns: 0 if ok, < 0 errno code an error and a message in the * kernel log. * * Long function, but quite simple; first chunk launches the command * and double checks the reply for the right TLV. Then we process the * TLV (where the meat is). 
* * Once we process the TLV that gives us the firmware's interface * version, we encode it and save it in i2400m->fw_version for future * reference. */ int i2400m_firmware_check(struct i2400m *i2400m) { int result; struct device *dev = i2400m_dev(i2400m); struct sk_buff *ack_skb; struct i2400m_l3l4_hdr *cmd; const struct i2400m_l3l4_hdr *ack; size_t ack_len; const struct i2400m_tlv_hdr *tlv; const struct i2400m_tlv_l4_message_versions *l4mv; char strerr[32]; unsigned major, minor, branch; result = -ENOMEM; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) goto error_alloc; cmd->type = cpu_to_le16(I2400M_MT_GET_LM_VERSION); cmd->length = 0; cmd->version = cpu_to_le16(I2400M_L3L4_VERSION); ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd)); if (IS_ERR(ack_skb)) { result = PTR_ERR(ack_skb); dev_err(dev, "Failed to issue 'get lm version' command: %-d\n", result); goto error_msg_to_dev; } ack = wimax_msg_data_len(ack_skb, &ack_len); result = i2400m_msg_check_status(ack, strerr, sizeof(strerr)); if (result < 0) { dev_err(dev, "'get lm version' (0x%04x) command failed: " "%d - %s\n", I2400M_MT_GET_LM_VERSION, result, strerr); goto error_cmd_failed; } tlv = i2400m_tlv_find(i2400m, ack->pl, ack_len - sizeof(*ack), I2400M_TLV_L4_MESSAGE_VERSIONS, sizeof(*l4mv)); if (tlv == NULL) { dev_err(dev, "get lm version: TLV not found (0x%04x)\n", I2400M_TLV_L4_MESSAGE_VERSIONS); result = -EIO; goto error_no_tlv; } l4mv = container_of(tlv, typeof(*l4mv), hdr); major = le16_to_cpu(l4mv->major); minor = le16_to_cpu(l4mv->minor); branch = le16_to_cpu(l4mv->branch); result = -EINVAL; if (major != I2400M_HDIv_MAJOR) { dev_err(dev, "unsupported major fw version " "%u.%u.%u\n", major, minor, branch); goto error_bad_major; } result = 0; if (minor < I2400M_HDIv_MINOR_2 && minor > I2400M_HDIv_MINOR) dev_warn(dev, "untested minor fw version %u.%u.%u\n", major, minor, branch); /* Yes, we ignore the branch -- we don't have to track it */ i2400m->fw_version = major << 16 | minor; 
dev_info(dev, "firmware interface version %u.%u.%u\n", major, minor, branch); error_bad_major: error_no_tlv: error_cmd_failed: kfree_skb(ack_skb); error_msg_to_dev: kfree(cmd); error_alloc: return result; } /* * Send an DoExitIdle command to the device to ask it to go out of * basestation-idle mode. * * @i2400m: device descriptor * * This starts a renegotiation with the basestation that might involve * another crypto handshake with user space. * * Returns: 0 if ok, < 0 errno code on error. */ int i2400m_cmd_exit_idle(struct i2400m *i2400m) { int result; struct device *dev = i2400m_dev(i2400m); struct sk_buff *ack_skb; struct i2400m_l3l4_hdr *cmd; char strerr[32]; result = -ENOMEM; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) goto error_alloc; cmd->type = cpu_to_le16(I2400M_MT_CMD_EXIT_IDLE); cmd->length = 0; cmd->version = cpu_to_le16(I2400M_L3L4_VERSION); ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd)); result = PTR_ERR(ack_skb); if (IS_ERR(ack_skb)) { dev_err(dev, "Failed to issue 'exit idle' command: %d\n", result); goto error_msg_to_dev; } result = i2400m_msg_check_status(wimax_msg_data(ack_skb), strerr, sizeof(strerr)); kfree_skb(ack_skb); error_msg_to_dev: kfree(cmd); error_alloc: return result; } /* * Query the device for its state, update the WiMAX stack's idea of it * * @i2400m: device descriptor * * Returns: 0 if ok, < 0 errno code on error. * * Executes a 'Get State' command and parses the returned * TLVs. * * Because this is almost identical to a 'Report State', we use * i2400m_report_state_hook() to parse the answer. This will set the * carrier state, as well as the RF Kill switches state. 
*/ static int i2400m_cmd_get_state(struct i2400m *i2400m) { int result; struct device *dev = i2400m_dev(i2400m); struct sk_buff *ack_skb; struct i2400m_l3l4_hdr *cmd; const struct i2400m_l3l4_hdr *ack; size_t ack_len; char strerr[32]; result = -ENOMEM; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) goto error_alloc; cmd->type = cpu_to_le16(I2400M_MT_GET_STATE); cmd->length = 0; cmd->version = cpu_to_le16(I2400M_L3L4_VERSION); ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd)); if (IS_ERR(ack_skb)) { dev_err(dev, "Failed to issue 'get state' command: %ld\n", PTR_ERR(ack_skb)); result = PTR_ERR(ack_skb); goto error_msg_to_dev; } ack = wimax_msg_data_len(ack_skb, &ack_len); result = i2400m_msg_check_status(ack, strerr, sizeof(strerr)); if (result < 0) { dev_err(dev, "'get state' (0x%04x) command failed: " "%d - %s\n", I2400M_MT_GET_STATE, result, strerr); goto error_cmd_failed; } i2400m_report_state_hook(i2400m, ack, ack_len - sizeof(*ack), "GET STATE"); result = 0; kfree_skb(ack_skb); error_cmd_failed: error_msg_to_dev: kfree(cmd); error_alloc: return result; } /** * Set basic configuration settings * * @i2400m: device descriptor * @args: array of pointers to the TLV headers to send for * configuration (each followed by its payload). * TLV headers and payloads must be properly initialized, with the * right endianess (LE). * @arg_size: number of pointers in the @args array */ static int i2400m_set_init_config(struct i2400m *i2400m, const struct i2400m_tlv_hdr **arg, size_t args) { int result; struct device *dev = i2400m_dev(i2400m); struct sk_buff *ack_skb; struct i2400m_l3l4_hdr *cmd; char strerr[32]; unsigned argc, argsize, tlv_size; const struct i2400m_tlv_hdr *tlv_hdr; void *buf, *itr; d_fnstart(3, dev, "(i2400m %p arg %p args %zu)\n", i2400m, arg, args); result = 0; if (args == 0) goto none; /* Compute the size of all the TLVs, so we can alloc a * contiguous command block to copy them. 
*/ argsize = 0; for (argc = 0; argc < args; argc++) { tlv_hdr = arg[argc]; argsize += sizeof(*tlv_hdr) + le16_to_cpu(tlv_hdr->length); } WARN_ON(argc >= 9); /* As per hw spec */ /* Alloc the space for the command and TLVs*/ result = -ENOMEM; buf = kzalloc(sizeof(*cmd) + argsize, GFP_KERNEL); if (buf == NULL) goto error_alloc; cmd = buf; cmd->type = cpu_to_le16(I2400M_MT_SET_INIT_CONFIG); cmd->length = cpu_to_le16(argsize); cmd->version = cpu_to_le16(I2400M_L3L4_VERSION); /* Copy the TLVs */ itr = buf + sizeof(*cmd); for (argc = 0; argc < args; argc++) { tlv_hdr = arg[argc]; tlv_size = sizeof(*tlv_hdr) + le16_to_cpu(tlv_hdr->length); memcpy(itr, tlv_hdr, tlv_size); itr += tlv_size; } /* Send the message! */ ack_skb = i2400m_msg_to_dev(i2400m, buf, sizeof(*cmd) + argsize); result = PTR_ERR(ack_skb); if (IS_ERR(ack_skb)) { dev_err(dev, "Failed to issue 'init config' command: %d\n", result); goto error_msg_to_dev; } result = i2400m_msg_check_status(wimax_msg_data(ack_skb), strerr, sizeof(strerr)); if (result < 0) dev_err(dev, "'init config' (0x%04x) command failed: %d - %s\n", I2400M_MT_SET_INIT_CONFIG, result, strerr); kfree_skb(ack_skb); error_msg_to_dev: kfree(buf); error_alloc: none: d_fnend(3, dev, "(i2400m %p arg %p args %zu) = %d\n", i2400m, arg, args, result); return result; } /** * i2400m_set_idle_timeout - Set the device's idle mode timeout * * @i2400m: i2400m device descriptor * * @msecs: milliseconds for the timeout to enter idle mode. Between * 100 to 300000 (5m); 0 to disable. In increments of 100. * * After this @msecs of the link being idle (no data being sent or * received), the device will negotiate with the basestation entering * idle mode for saving power. The connection is maintained, but * getting out of it (done in tx.c) will require some negotiation, * possible crypto re-handshake and a possible DHCP re-lease. * * Only available if fw_version >= 0x00090002. * * Returns: 0 if ok, < 0 errno code on error. 
*/ int i2400m_set_idle_timeout(struct i2400m *i2400m, unsigned msecs) { int result; struct device *dev = i2400m_dev(i2400m); struct sk_buff *ack_skb; struct { struct i2400m_l3l4_hdr hdr; struct i2400m_tlv_config_idle_timeout cit; } *cmd; const struct i2400m_l3l4_hdr *ack; size_t ack_len; char strerr[32]; result = -ENOSYS; if (i2400m_le_v1_3(i2400m)) goto error_alloc; result = -ENOMEM; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) goto error_alloc; cmd->hdr.type = cpu_to_le16(I2400M_MT_GET_STATE); cmd->hdr.length = cpu_to_le16(sizeof(*cmd) - sizeof(cmd->hdr)); cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION); cmd->cit.hdr.type = cpu_to_le16(I2400M_TLV_CONFIG_IDLE_TIMEOUT); cmd->cit.hdr.length = cpu_to_le16(sizeof(cmd->cit.timeout)); cmd->cit.timeout = cpu_to_le32(msecs); ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd)); if (IS_ERR(ack_skb)) { dev_err(dev, "Failed to issue 'set idle timeout' command: " "%ld\n", PTR_ERR(ack_skb)); result = PTR_ERR(ack_skb); goto error_msg_to_dev; } ack = wimax_msg_data_len(ack_skb, &ack_len); result = i2400m_msg_check_status(ack, strerr, sizeof(strerr)); if (result < 0) { dev_err(dev, "'set idle timeout' (0x%04x) command failed: " "%d - %s\n", I2400M_MT_GET_STATE, result, strerr); goto error_cmd_failed; } result = 0; kfree_skb(ack_skb); error_cmd_failed: error_msg_to_dev: kfree(cmd); error_alloc: return result; } /** * i2400m_dev_initialize - Initialize the device once communications are ready * * @i2400m: device descriptor * * Returns: 0 if ok, < 0 errno code on error. * * Configures the device to work the way we like it. * * At the point of this call, the device is registered with the WiMAX * and netdev stacks, firmware is uploaded and we can talk to the * device normally. 
*/ int i2400m_dev_initialize(struct i2400m *i2400m) { int result; struct device *dev = i2400m_dev(i2400m); struct i2400m_tlv_config_idle_parameters idle_params; struct i2400m_tlv_config_idle_timeout idle_timeout; struct i2400m_tlv_config_d2h_data_format df; struct i2400m_tlv_config_dl_host_reorder dlhr; const struct i2400m_tlv_hdr *args[9]; unsigned argc = 0; d_fnstart(3, dev, "(i2400m %p)\n", i2400m); if (i2400m_passive_mode) goto out_passive; /* Disable idle mode? (enabled by default) */ if (i2400m_idle_mode_disabled) { if (i2400m_le_v1_3(i2400m)) { idle_params.hdr.type = cpu_to_le16(I2400M_TLV_CONFIG_IDLE_PARAMETERS); idle_params.hdr.length = cpu_to_le16( sizeof(idle_params) - sizeof(idle_params.hdr)); idle_params.idle_timeout = 0; idle_params.idle_paging_interval = 0; args[argc++] = &idle_params.hdr; } else { idle_timeout.hdr.type = cpu_to_le16(I2400M_TLV_CONFIG_IDLE_TIMEOUT); idle_timeout.hdr.length = cpu_to_le16( sizeof(idle_timeout) - sizeof(idle_timeout.hdr)); idle_timeout.timeout = 0; args[argc++] = &idle_timeout.hdr; } } if (i2400m_ge_v1_4(i2400m)) { /* Enable extended RX data format? */ df.hdr.type = cpu_to_le16(I2400M_TLV_CONFIG_D2H_DATA_FORMAT); df.hdr.length = cpu_to_le16( sizeof(df) - sizeof(df.hdr)); df.format = 1; args[argc++] = &df.hdr; /* Enable RX data reordering? * (switch flipped in rx.c:i2400m_rx_setup() after fw upload) */ if (i2400m->rx_reorder) { dlhr.hdr.type = cpu_to_le16(I2400M_TLV_CONFIG_DL_HOST_REORDER); dlhr.hdr.length = cpu_to_le16( sizeof(dlhr) - sizeof(dlhr.hdr)); dlhr.reorder = 1; args[argc++] = &dlhr.hdr; } } result = i2400m_set_init_config(i2400m, args, argc); if (result < 0) goto error; out_passive: /* * Update state: Here it just calls a get state; parsing the * result (System State TLV and RF Status TLV [done in the rx * path hooks]) will set the hardware and software RF-Kill * status. 
*/ result = i2400m_cmd_get_state(i2400m); error: if (result < 0) dev_err(dev, "failed to initialize the device: %d\n", result); d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result); return result; } /** * i2400m_dev_shutdown - Shutdown a running device * * @i2400m: device descriptor * * Release resources acquired during the running of the device; in * theory, should also tell the device to go to sleep, switch off the * radio, all that, but at this point, in most cases (driver * disconnection, reset handling) we can't even talk to the device. */ void i2400m_dev_shutdown(struct i2400m *i2400m) { struct device *dev = i2400m_dev(i2400m); d_fnstart(3, dev, "(i2400m %p)\n", i2400m); d_fnend(3, dev, "(i2400m %p) = void\n", i2400m); }
gpl-2.0
alianmohammad/gem5-linux-kernel
net/802/stp.c
4981
2684
/* * STP SAP demux * * Copyright (c) 2008 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. */ #include <linux/mutex.h> #include <linux/skbuff.h> #include <linux/etherdevice.h> #include <linux/llc.h> #include <linux/slab.h> #include <linux/module.h> #include <net/llc.h> #include <net/llc_pdu.h> #include <net/stp.h> /* 01:80:c2:00:00:20 - 01:80:c2:00:00:2F */ #define GARP_ADDR_MIN 0x20 #define GARP_ADDR_MAX 0x2F #define GARP_ADDR_RANGE (GARP_ADDR_MAX - GARP_ADDR_MIN) static const struct stp_proto __rcu *garp_protos[GARP_ADDR_RANGE + 1] __read_mostly; static const struct stp_proto __rcu *stp_proto __read_mostly; static struct llc_sap *sap __read_mostly; static unsigned int sap_registered; static DEFINE_MUTEX(stp_proto_mutex); /* Called under rcu_read_lock from LLC */ static int stp_pdu_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { const struct ethhdr *eh = eth_hdr(skb); const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); const struct stp_proto *proto; if (pdu->ssap != LLC_SAP_BSPAN || pdu->dsap != LLC_SAP_BSPAN || pdu->ctrl_1 != LLC_PDU_TYPE_U) goto err; if (eh->h_dest[5] >= GARP_ADDR_MIN && eh->h_dest[5] <= GARP_ADDR_MAX) { proto = rcu_dereference(garp_protos[eh->h_dest[5] - GARP_ADDR_MIN]); if (proto && !ether_addr_equal(eh->h_dest, proto->group_address)) goto err; } else proto = rcu_dereference(stp_proto); if (!proto) goto err; proto->rcv(proto, skb, dev); return 0; err: kfree_skb(skb); return 0; } int stp_proto_register(const struct stp_proto *proto) { int err = 0; mutex_lock(&stp_proto_mutex); if (sap_registered++ == 0) { sap = llc_sap_open(LLC_SAP_BSPAN, stp_pdu_rcv); if (!sap) { err = -ENOMEM; goto out; } } if (is_zero_ether_addr(proto->group_address)) rcu_assign_pointer(stp_proto, proto); else 
rcu_assign_pointer(garp_protos[proto->group_address[5] - GARP_ADDR_MIN], proto); out: mutex_unlock(&stp_proto_mutex); return err; } EXPORT_SYMBOL_GPL(stp_proto_register); void stp_proto_unregister(const struct stp_proto *proto) { mutex_lock(&stp_proto_mutex); if (is_zero_ether_addr(proto->group_address)) RCU_INIT_POINTER(stp_proto, NULL); else RCU_INIT_POINTER(garp_protos[proto->group_address[5] - GARP_ADDR_MIN], NULL); synchronize_rcu(); if (--sap_registered == 0) llc_sap_put(sap); mutex_unlock(&stp_proto_mutex); } EXPORT_SYMBOL_GPL(stp_proto_unregister); MODULE_LICENSE("GPL");
gpl-2.0