repo_name
string
path
string
copies
string
size
string
content
string
license
string
crdroid-devices/android_kernel_lge_hammerhead
drivers/leds/leds-locomo.c
9353
2370
/* * linux/drivers/leds/leds-locomo.c * * Copyright (C) 2005 John Lenz <lenz@cs.wisc.edu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> #include <linux/leds.h> #include <mach/hardware.h> #include <asm/hardware/locomo.h> static void locomoled_brightness_set(struct led_classdev *led_cdev, enum led_brightness value, int offset) { struct locomo_dev *locomo_dev = LOCOMO_DEV(led_cdev->dev->parent); unsigned long flags; local_irq_save(flags); if (value) locomo_writel(LOCOMO_LPT_TOFH, locomo_dev->mapbase + offset); else locomo_writel(LOCOMO_LPT_TOFL, locomo_dev->mapbase + offset); local_irq_restore(flags); } static void locomoled_brightness_set0(struct led_classdev *led_cdev, enum led_brightness value) { locomoled_brightness_set(led_cdev, value, LOCOMO_LPT0); } static void locomoled_brightness_set1(struct led_classdev *led_cdev, enum led_brightness value) { locomoled_brightness_set(led_cdev, value, LOCOMO_LPT1); } static struct led_classdev locomo_led0 = { .name = "locomo:amber:charge", .default_trigger = "main-battery-charging", .brightness_set = locomoled_brightness_set0, }; static struct led_classdev locomo_led1 = { .name = "locomo:green:mail", .default_trigger = "nand-disk", .brightness_set = locomoled_brightness_set1, }; static int locomoled_probe(struct locomo_dev *ldev) { int ret; ret = led_classdev_register(&ldev->dev, &locomo_led0); if (ret < 0) return ret; ret = led_classdev_register(&ldev->dev, &locomo_led1); if (ret < 0) led_classdev_unregister(&locomo_led0); return ret; } static int locomoled_remove(struct locomo_dev *dev) { led_classdev_unregister(&locomo_led0); led_classdev_unregister(&locomo_led1); return 0; } static struct locomo_driver locomoled_driver = { .drv = { .name = "locomoled" }, .devid = LOCOMO_DEVID_LED, 
.probe = locomoled_probe, .remove = locomoled_remove, }; static int __init locomoled_init(void) { return locomo_driver_register(&locomoled_driver); } module_init(locomoled_init); MODULE_AUTHOR("John Lenz <lenz@cs.wisc.edu>"); MODULE_DESCRIPTION("Locomo LED driver"); MODULE_LICENSE("GPL");
gpl-2.0
Guazi/kernelwip
drivers/leds/leds-locomo.c
9353
2370
/* * linux/drivers/leds/leds-locomo.c * * Copyright (C) 2005 John Lenz <lenz@cs.wisc.edu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> #include <linux/leds.h> #include <mach/hardware.h> #include <asm/hardware/locomo.h> static void locomoled_brightness_set(struct led_classdev *led_cdev, enum led_brightness value, int offset) { struct locomo_dev *locomo_dev = LOCOMO_DEV(led_cdev->dev->parent); unsigned long flags; local_irq_save(flags); if (value) locomo_writel(LOCOMO_LPT_TOFH, locomo_dev->mapbase + offset); else locomo_writel(LOCOMO_LPT_TOFL, locomo_dev->mapbase + offset); local_irq_restore(flags); } static void locomoled_brightness_set0(struct led_classdev *led_cdev, enum led_brightness value) { locomoled_brightness_set(led_cdev, value, LOCOMO_LPT0); } static void locomoled_brightness_set1(struct led_classdev *led_cdev, enum led_brightness value) { locomoled_brightness_set(led_cdev, value, LOCOMO_LPT1); } static struct led_classdev locomo_led0 = { .name = "locomo:amber:charge", .default_trigger = "main-battery-charging", .brightness_set = locomoled_brightness_set0, }; static struct led_classdev locomo_led1 = { .name = "locomo:green:mail", .default_trigger = "nand-disk", .brightness_set = locomoled_brightness_set1, }; static int locomoled_probe(struct locomo_dev *ldev) { int ret; ret = led_classdev_register(&ldev->dev, &locomo_led0); if (ret < 0) return ret; ret = led_classdev_register(&ldev->dev, &locomo_led1); if (ret < 0) led_classdev_unregister(&locomo_led0); return ret; } static int locomoled_remove(struct locomo_dev *dev) { led_classdev_unregister(&locomo_led0); led_classdev_unregister(&locomo_led1); return 0; } static struct locomo_driver locomoled_driver = { .drv = { .name = "locomoled" }, .devid = LOCOMO_DEVID_LED, 
.probe = locomoled_probe, .remove = locomoled_remove, }; static int __init locomoled_init(void) { return locomo_driver_register(&locomoled_driver); } module_init(locomoled_init); MODULE_AUTHOR("John Lenz <lenz@cs.wisc.edu>"); MODULE_DESCRIPTION("Locomo LED driver"); MODULE_LICENSE("GPL");
gpl-2.0
santod/NuK3rn3l_htc_m7_GPE-5.0.x
drivers/sh/maple/maple.c
9865
21950
/*
 * NOTE(review): this file was flattened by an extraction pipeline - the
 * original newlines are gone, so many statements run together on a few
 * very long lines (one string literal is even split mid-token below).
 * Code is left byte-identical; this header only summarises what the
 * visible code does:
 *  - registers a "maple" bus_type, a parent bus device, driver
 *    register/unregister helpers, and a fallback driver
 *    (maple_unsupported_device) for ports with no matching driver;
 *  - maple_add_packet() queues commands on maple_waitq under
 *    maple_wlist_lock; maple_send() drains them into a DMA command
 *    block (maple_build_block) and starts the hardware via
 *    __raw_writel() on the MAPLE_* MMIO registers;
 *  - VBLANK and DMA-done IRQs only schedule workqueue bottom halves
 *    (maple_vblank_handler / maple_dma_handler), which parse response
 *    codes and attach/detach devices (maple_attach_driver /
 *    maple_detach_driver) and drive the port/subunit scan state
 *    (checked[]/empty[]/scanning/fullscan);
 *  - maple_bus_init() wires everything up with goto-based unwinding.
 * The exact sequencing of scanning/started flags and DMA restarts is
 * order-critical - presumably tuned against flaky hardware (see the
 * in-code comments) - so no restyle was attempted here.
 */
/* * Core maple bus functionality * * Copyright (C) 2007 - 2009 Adrian McMenamin * Copyright (C) 2001 - 2008 Paul Mundt * Copyright (C) 2000 - 2001 YAEGASHI Takeshi * Copyright (C) 2001 M. R. Brown * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/maple.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/module.h> #include <asm/cacheflush.h> #include <asm/dma.h> #include <asm/io.h> #include <mach/dma.h> #include <mach/sysasic.h> MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>"); MODULE_DESCRIPTION("Maple bus driver for Dreamcast"); MODULE_LICENSE("GPL v2"); MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}"); static void maple_dma_handler(struct work_struct *work); static void maple_vblank_handler(struct work_struct *work); static DECLARE_WORK(maple_dma_process, maple_dma_handler); static DECLARE_WORK(maple_vblank_process, maple_vblank_handler); static LIST_HEAD(maple_waitq); static LIST_HEAD(maple_sentq); /* mutex to protect queue of waiting packets */ static DEFINE_MUTEX(maple_wlist_lock); static struct maple_driver maple_unsupported_device; static struct device maple_bus; static int subdevice_map[MAPLE_PORTS]; static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr; static unsigned long maple_pnp_time; static int started, scanning, fullscan; static struct kmem_cache *maple_queue_cache; struct maple_device_specify { int port; int unit; }; static bool checked[MAPLE_PORTS]; static bool empty[MAPLE_PORTS]; static struct maple_device *baseunits[MAPLE_PORTS]; /** * maple_driver_register - register a maple driver * @drv: maple driver to be registered. * * Registers the passed in @drv, while updating the bus type. 
* Devices with matching function IDs will be automatically probed. */ int maple_driver_register(struct maple_driver *drv) { if (!drv) return -EINVAL; drv->drv.bus = &maple_bus_type; return driver_register(&drv->drv); } EXPORT_SYMBOL_GPL(maple_driver_register); /** * maple_driver_unregister - unregister a maple driver. * @drv: maple driver to unregister. * * Cleans up after maple_driver_register(). To be invoked in the exit * path of any module drivers. */ void maple_driver_unregister(struct maple_driver *drv) { driver_unregister(&drv->drv); } EXPORT_SYMBOL_GPL(maple_driver_unregister); /* set hardware registers to enable next round of dma */ static void maple_dma_reset(void) { __raw_writel(MAPLE_MAGIC, MAPLE_RESET); /* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */ __raw_writel(1, MAPLE_TRIGTYPE); /* * Maple system register * bits 31 - 16 timeout in units of 20nsec * bit 12 hard trigger - set 0 to keep responding to VBLANK * bits 9 - 8 set 00 for 2 Mbps, 01 for 1 Mbps * bits 3 - 0 delay (in 1.3ms) between VBLANK and start of DMA * max delay is 11 */ __raw_writel(MAPLE_2MBPS | MAPLE_TIMEOUT(0xFFFF), MAPLE_SPEED); __raw_writel(virt_to_phys(maple_sendbuf), MAPLE_DMAADDR); __raw_writel(1, MAPLE_ENABLE); } /** * maple_getcond_callback - setup handling MAPLE_COMMAND_GETCOND * @dev: device responding * @callback: handler callback * @interval: interval in jiffies between callbacks * @function: the function code for the device */ void maple_getcond_callback(struct maple_device *dev, void (*callback) (struct mapleq *mq), unsigned long interval, unsigned long function) { dev->callback = callback; dev->interval = interval; dev->function = cpu_to_be32(function); dev->when = jiffies; } EXPORT_SYMBOL_GPL(maple_getcond_callback); static int maple_dma_done(void) { return (__raw_readl(MAPLE_STATE) & 1) == 0; } static void maple_release_device(struct device *dev) { struct maple_device *mdev; struct mapleq *mq; mdev = to_maple_dev(dev); mq = mdev->mq; 
kmem_cache_free(maple_queue_cache, mq->recvbuf); kfree(mq); kfree(mdev); } /** * maple_add_packet - add a single instruction to the maple bus queue * @mdev: maple device * @function: function on device being queried * @command: maple command to add * @length: length of command string (in 32 bit words) * @data: remainder of command string */ int maple_add_packet(struct maple_device *mdev, u32 function, u32 command, size_t length, void *data) { int ret = 0; void *sendbuf = NULL; if (length) { sendbuf = kzalloc(length * 4, GFP_KERNEL); if (!sendbuf) { ret = -ENOMEM; goto out; } ((__be32 *)sendbuf)[0] = cpu_to_be32(function); } mdev->mq->command = command; mdev->mq->length = length; if (length > 1) memcpy(sendbuf + 4, data, (length - 1) * 4); mdev->mq->sendbuf = sendbuf; mutex_lock(&maple_wlist_lock); list_add_tail(&mdev->mq->list, &maple_waitq); mutex_unlock(&maple_wlist_lock); out: return ret; } EXPORT_SYMBOL_GPL(maple_add_packet); static struct mapleq *maple_allocq(struct maple_device *mdev) { struct mapleq *mq; mq = kzalloc(sizeof(*mq), GFP_KERNEL); if (!mq) goto failed_nomem; INIT_LIST_HEAD(&mq->list); mq->dev = mdev; mq->recvbuf = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL); if (!mq->recvbuf) goto failed_p2; mq->recvbuf->buf = &((mq->recvbuf->bufx)[0]); return mq; failed_p2: kfree(mq); failed_nomem: dev_err(&mdev->dev, "could not allocate memory for device (%d, %d)\n", mdev->port, mdev->unit); return NULL; } static struct maple_device *maple_alloc_dev(int port, int unit) { struct maple_device *mdev; /* zero this out to avoid kobj subsystem * thinking it has already been registered */ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); if (!mdev) return NULL; mdev->port = port; mdev->unit = unit; mdev->mq = maple_allocq(mdev); if (!mdev->mq) { kfree(mdev); return NULL; } mdev->dev.bus = &maple_bus_type; mdev->dev.parent = &maple_bus; init_waitqueue_head(&mdev->maple_wait); return mdev; } static void maple_free_dev(struct maple_device *mdev) { 
kmem_cache_free(maple_queue_cache, mdev->mq->recvbuf); kfree(mdev->mq); kfree(mdev); } /* process the command queue into a maple command block * terminating command has bit 32 of first long set to 0 */ static void maple_build_block(struct mapleq *mq) { int port, unit, from, to, len; unsigned long *lsendbuf = mq->sendbuf; port = mq->dev->port & 3; unit = mq->dev->unit; len = mq->length; from = port << 6; to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20); *maple_lastptr &= 0x7fffffff; maple_lastptr = maple_sendptr; *maple_sendptr++ = (port << 16) | len | 0x80000000; *maple_sendptr++ = virt_to_phys(mq->recvbuf->buf); *maple_sendptr++ = mq->command | (to << 8) | (from << 16) | (len << 24); while (len-- > 0) *maple_sendptr++ = *lsendbuf++; } /* build up command queue */ static void maple_send(void) { int i, maple_packets = 0; struct mapleq *mq, *nmq; if (!maple_dma_done()) return; /* disable DMA */ __raw_writel(0, MAPLE_ENABLE); if (!list_empty(&maple_sentq)) goto finish; mutex_lock(&maple_wlist_lock); if (list_empty(&maple_waitq)) { mutex_unlock(&maple_wlist_lock); goto finish; } maple_lastptr = maple_sendbuf; maple_sendptr = maple_sendbuf; list_for_each_entry_safe(mq, nmq, &maple_waitq, list) { maple_build_block(mq); list_del_init(&mq->list); list_add_tail(&mq->list, &maple_sentq); if (maple_packets++ > MAPLE_MAXPACKETS) break; } mutex_unlock(&maple_wlist_lock); if (maple_packets > 0) { for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++) dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE, PAGE_SIZE, DMA_BIDIRECTIONAL); } finish: maple_dma_reset(); } /* check if there is a driver registered likely to match this device */ static int maple_check_matching_driver(struct device_driver *driver, void *devptr) { struct maple_driver *maple_drv; struct maple_device *mdev; mdev = devptr; maple_drv = to_maple_driver(driver); if (mdev->devinfo.function & cpu_to_be32(maple_drv->function)) return 1; return 0; } static void maple_detach_driver(struct maple_device *mdev) { 
device_unregister(&mdev->dev); } /* process initial MAPLE_COMMAND_DEVINFO for each device or port */ static void maple_attach_driver(struct maple_device *mdev) { char *p, *recvbuf; unsigned long function; int matched, error; recvbuf = mdev->mq->recvbuf->buf; /* copy the data as individual elements in * case of memory optimisation */ memcpy(&mdev->devinfo.function, recvbuf + 4, 4); memcpy(&mdev->devinfo.function_data[0], recvbuf + 8, 12); memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1); memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1); memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30); memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2); memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2); memcpy(mdev->product_name, mdev->devinfo.product_name, 30); mdev->product_name[30] = '\0'; memcpy(mdev->product_licence, mdev->devinfo.product_licence, 60); mdev->product_licence[60] = '\0'; for (p = mdev->product_name + 29; mdev->product_name <= p; p--) if (*p == ' ') *p = '\0'; else break; for (p = mdev->product_licence + 59; mdev->product_licence <= p; p--) if (*p == ' ') *p = '\0'; else break; function = be32_to_cpu(mdev->devinfo.function); dev_info(&mdev->dev, "detected %s: function 0x%lX: at (%d, %d)\n", mdev->product_name, function, mdev->port, mdev->unit); if (function > 0x200) { /* Do this silently - as not a real device */ function = 0; mdev->driver = &maple_unsupported_device; dev_set_name(&mdev->dev, "%d:0.port", mdev->port); } else { matched = bus_for_each_drv(&maple_bus_type, NULL, mdev, maple_check_matching_driver); if (matched == 0) { /* Driver does not exist yet */ dev_info(&mdev->dev, "no driver found\n"); mdev->driver = &maple_unsupported_device; } dev_set_name(&mdev->dev, "%d:0%d.%lX", mdev->port, mdev->unit, function); } mdev->function = function; mdev->dev.release = &maple_release_device; atomic_set(&mdev->busy, 0); error = device_register(&mdev->dev); if (error) { dev_warn(&mdev->dev, "could not register device at" " (%d, %d), with 
error 0x%X\n", mdev->unit, mdev->port, error); maple_free_dev(mdev); mdev = NULL; return; } } /* * if device has been registered for the given * port and unit then return 1 - allows identification * of which devices need to be attached or detached */ static int check_maple_device(struct device *device, void *portptr) { struct maple_device_specify *ds; struct maple_device *mdev; ds = portptr; mdev = to_maple_dev(device); if (mdev->port == ds->port && mdev->unit == ds->unit) return 1; return 0; } static int setup_maple_commands(struct device *device, void *ignored) { int add; struct maple_device *mdev = to_maple_dev(device); if (mdev->interval > 0 && atomic_read(&mdev->busy) == 0 && time_after(jiffies, mdev->when)) { /* bounce if we cannot add */ add = maple_add_packet(mdev, be32_to_cpu(mdev->devinfo.function), MAPLE_COMMAND_GETCOND, 1, NULL); if (!add) mdev->when = jiffies + mdev->interval; } else { if (time_after(jiffies, maple_pnp_time)) /* Ensure we don't have block reads and devinfo * calls interfering with one another - so flag the * device as busy */ if (atomic_read(&mdev->busy) == 0) { atomic_set(&mdev->busy, 1); maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO, 0, NULL); } } return 0; } /* VBLANK bottom half - implemented via workqueue */ static void maple_vblank_handler(struct work_struct *work) { int x, locking; struct maple_device *mdev; if (!maple_dma_done()) return; __raw_writel(0, MAPLE_ENABLE); if (!list_empty(&maple_sentq)) goto finish; /* * Set up essential commands - to fetch data and * check devices are still present */ bus_for_each_dev(&maple_bus_type, NULL, NULL, setup_maple_commands); if (time_after(jiffies, maple_pnp_time)) { /* * Scan the empty ports - bus is flakey and may have * mis-reported emptyness */ for (x = 0; x < MAPLE_PORTS; x++) { if (checked[x] && empty[x]) { mdev = baseunits[x]; if (!mdev) break; atomic_set(&mdev->busy, 1); locking = maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO, 0, NULL); if (!locking) break; } } maple_pnp_time 
= jiffies + MAPLE_PNP_INTERVAL; } finish: maple_send(); } /* handle devices added via hotplugs - placing them on queue for DEVINFO */ static void maple_map_subunits(struct maple_device *mdev, int submask) { int retval, k, devcheck; struct maple_device *mdev_add; struct maple_device_specify ds; ds.port = mdev->port; for (k = 0; k < 5; k++) { ds.unit = k + 1; retval = bus_for_each_dev(&maple_bus_type, NULL, &ds, check_maple_device); if (retval) { submask = submask >> 1; continue; } devcheck = submask & 0x01; if (devcheck) { mdev_add = maple_alloc_dev(mdev->port, k + 1); if (!mdev_add) return; atomic_set(&mdev_add->busy, 1); maple_add_packet(mdev_add, 0, MAPLE_COMMAND_DEVINFO, 0, NULL); /* mark that we are checking sub devices */ scanning = 1; } submask = submask >> 1; } } /* mark a device as removed */ static void maple_clean_submap(struct maple_device *mdev) { int killbit; killbit = (mdev->unit > 0 ? (1 << (mdev->unit - 1)) & 0x1f : 0x20); killbit = ~killbit; killbit &= 0xFF; subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit; } /* handle empty port or hotplug removal */ static void maple_response_none(struct maple_device *mdev) { maple_clean_submap(mdev); if (likely(mdev->unit != 0)) { /* * Block devices play up * and give the impression they have * been removed even when still in place or * trip the mtd layer when they have * really gone - this code traps that eventuality * and ensures we aren't overloaded with useless * error messages */ if (mdev->can_unload) { if (!mdev->can_unload(mdev)) { atomic_set(&mdev->busy, 2); wake_up(&mdev->maple_wait); return; } } dev_info(&mdev->dev, "detaching device at (%d, %d)\n", mdev->port, mdev->unit); maple_detach_driver(mdev); return; } else { if (!started || !fullscan) { if (checked[mdev->port] == false) { checked[mdev->port] = true; empty[mdev->port] = true; dev_info(&mdev->dev, "no devices" " to port %d\n", mdev->port); } return; } } /* Some hardware devices generate false detach messages on unit 0 */ 
atomic_set(&mdev->busy, 0); } /* preprocess hotplugs or scans */ static void maple_response_devinfo(struct maple_device *mdev, char *recvbuf) { char submask; if (!started || (scanning == 2) || !fullscan) { if ((mdev->unit == 0) && (checked[mdev->port] == false)) { checked[mdev->port] = true; maple_attach_driver(mdev); } else { if (mdev->unit != 0) maple_attach_driver(mdev); if (mdev->unit == 0) { empty[mdev->port] = false; maple_attach_driver(mdev); } } } if (mdev->unit == 0) { submask = recvbuf[2] & 0x1F; if (submask ^ subdevice_map[mdev->port]) { maple_map_subunits(mdev, submask); subdevice_map[mdev->port] = submask; } } } static void maple_response_fileerr(struct maple_device *mdev, void *recvbuf) { if (mdev->fileerr_handler) { mdev->fileerr_handler(mdev, recvbuf); return; } else dev_warn(&mdev->dev, "device at (%d, %d) reports" "file error 0x%X\n", mdev->port, mdev->unit, ((int *)recvbuf)[1]); } static void maple_port_rescan(void) { int i; struct maple_device *mdev; fullscan = 1; for (i = 0; i < MAPLE_PORTS; i++) { if (checked[i] == false) { fullscan = 0; mdev = baseunits[i]; maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO, 0, NULL); } } } /* maple dma end bottom half - implemented via workqueue */ static void maple_dma_handler(struct work_struct *work) { struct mapleq *mq, *nmq; struct maple_device *mdev; char *recvbuf; enum maple_code code; if (!maple_dma_done()) return; __raw_writel(0, MAPLE_ENABLE); if (!list_empty(&maple_sentq)) { list_for_each_entry_safe(mq, nmq, &maple_sentq, list) { mdev = mq->dev; recvbuf = mq->recvbuf->buf; dma_cache_sync(&mdev->dev, recvbuf, 0x400, DMA_FROM_DEVICE); code = recvbuf[0]; kfree(mq->sendbuf); list_del_init(&mq->list); switch (code) { case MAPLE_RESPONSE_NONE: maple_response_none(mdev); break; case MAPLE_RESPONSE_DEVINFO: maple_response_devinfo(mdev, recvbuf); atomic_set(&mdev->busy, 0); break; case MAPLE_RESPONSE_DATATRF: if (mdev->callback) mdev->callback(mq); atomic_set(&mdev->busy, 0); wake_up(&mdev->maple_wait); 
break; case MAPLE_RESPONSE_FILEERR: maple_response_fileerr(mdev, recvbuf); atomic_set(&mdev->busy, 0); wake_up(&mdev->maple_wait); break; case MAPLE_RESPONSE_AGAIN: case MAPLE_RESPONSE_BADCMD: case MAPLE_RESPONSE_BADFUNC: dev_warn(&mdev->dev, "non-fatal error" " 0x%X at (%d, %d)\n", code, mdev->port, mdev->unit); atomic_set(&mdev->busy, 0); break; case MAPLE_RESPONSE_ALLINFO: dev_notice(&mdev->dev, "extended" " device information request for (%d, %d)" " but call is not supported\n", mdev->port, mdev->unit); atomic_set(&mdev->busy, 0); break; case MAPLE_RESPONSE_OK: atomic_set(&mdev->busy, 0); wake_up(&mdev->maple_wait); break; default: break; } } /* if scanning is 1 then we have subdevices to check */ if (scanning == 1) { maple_send(); scanning = 2; } else scanning = 0; /*check if we have actually tested all ports yet */ if (!fullscan) maple_port_rescan(); /* mark that we have been through the first scan */ started = 1; } maple_send(); } static irqreturn_t maple_dma_interrupt(int irq, void *dev_id) { /* Load everything into the bottom half */ schedule_work(&maple_dma_process); return IRQ_HANDLED; } static irqreturn_t maple_vblank_interrupt(int irq, void *dev_id) { schedule_work(&maple_vblank_process); return IRQ_HANDLED; } static int maple_set_dma_interrupt_handler(void) { return request_irq(HW_EVENT_MAPLE_DMA, maple_dma_interrupt, IRQF_SHARED, "maple bus DMA", &maple_unsupported_device); } static int maple_set_vblank_interrupt_handler(void) { return request_irq(HW_EVENT_VSYNC, maple_vblank_interrupt, IRQF_SHARED, "maple bus VBLANK", &maple_unsupported_device); } static int maple_get_dma_buffer(void) { maple_sendbuf = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO, MAPLE_DMA_PAGES); if (!maple_sendbuf) return -ENOMEM; return 0; } static int maple_match_bus_driver(struct device *devptr, struct device_driver *drvptr) { struct maple_driver *maple_drv = to_maple_driver(drvptr); struct maple_device *maple_dev = to_maple_dev(devptr); /* Trap empty port case */ if 
(maple_dev->devinfo.function == 0xFFFFFFFF) return 0; else if (maple_dev->devinfo.function & cpu_to_be32(maple_drv->function)) return 1; return 0; } static int maple_bus_uevent(struct device *dev, struct kobj_uevent_env *env) { return 0; } static void maple_bus_release(struct device *dev) { } static struct maple_driver maple_unsupported_device = { .drv = { .name = "maple_unsupported_device", .bus = &maple_bus_type, }, }; /* * maple_bus_type - core maple bus structure */ struct bus_type maple_bus_type = { .name = "maple", .match = maple_match_bus_driver, .uevent = maple_bus_uevent, }; EXPORT_SYMBOL_GPL(maple_bus_type); static struct device maple_bus = { .init_name = "maple", .release = maple_bus_release, }; static int __init maple_bus_init(void) { int retval, i; struct maple_device *mdev[MAPLE_PORTS]; __raw_writel(0, MAPLE_ENABLE); retval = device_register(&maple_bus); if (retval) goto cleanup; retval = bus_register(&maple_bus_type); if (retval) goto cleanup_device; retval = driver_register(&maple_unsupported_device.drv); if (retval) goto cleanup_bus; /* allocate memory for maple bus dma */ retval = maple_get_dma_buffer(); if (retval) { dev_err(&maple_bus, "failed to allocate DMA buffers\n"); goto cleanup_basic; } /* set up DMA interrupt handler */ retval = maple_set_dma_interrupt_handler(); if (retval) { dev_err(&maple_bus, "bus failed to grab maple " "DMA IRQ\n"); goto cleanup_dma; } /* set up VBLANK interrupt handler */ retval = maple_set_vblank_interrupt_handler(); if (retval) { dev_err(&maple_bus, "bus failed to grab VBLANK IRQ\n"); goto cleanup_irq; } maple_queue_cache = KMEM_CACHE(maple_buffer, SLAB_HWCACHE_ALIGN); if (!maple_queue_cache) goto cleanup_bothirqs; INIT_LIST_HEAD(&maple_waitq); INIT_LIST_HEAD(&maple_sentq); /* setup maple ports */ for (i = 0; i < MAPLE_PORTS; i++) { checked[i] = false; empty[i] = false; mdev[i] = maple_alloc_dev(i, 0); if (!mdev[i]) { while (i-- > 0) maple_free_dev(mdev[i]); goto cleanup_cache; } baseunits[i] = mdev[i]; 
atomic_set(&mdev[i]->busy, 1); maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL); subdevice_map[i] = 0; } maple_pnp_time = jiffies + HZ; /* prepare initial queue */ maple_send(); dev_info(&maple_bus, "bus core now registered\n"); return 0; cleanup_cache: kmem_cache_destroy(maple_queue_cache); cleanup_bothirqs: free_irq(HW_EVENT_VSYNC, 0); cleanup_irq: free_irq(HW_EVENT_MAPLE_DMA, 0); cleanup_dma: free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES); cleanup_basic: driver_unregister(&maple_unsupported_device.drv); cleanup_bus: bus_unregister(&maple_bus_type); cleanup_device: device_unregister(&maple_bus); cleanup: printk(KERN_ERR "Maple bus registration failed\n"); return retval; } /* Push init to later to ensure hardware gets detected */ fs_initcall(maple_bus_init);
gpl-2.0
Buckmarble/elite_kernel_grouper
net/appletalk/dev.c
12681
1232
/* * Moved here from drivers/net/net_init.c, which is: * Written 1993,1994,1995 by Donald Becker. */ #include <linux/errno.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/if_ltalk.h> static void ltalk_setup(struct net_device *dev) { /* Fill in the fields of the device structure with localtalk-generic values. */ dev->type = ARPHRD_LOCALTLK; dev->hard_header_len = LTALK_HLEN; dev->mtu = LTALK_MTU; dev->addr_len = LTALK_ALEN; dev->tx_queue_len = 10; dev->broadcast[0] = 0xFF; dev->flags = IFF_BROADCAST|IFF_MULTICAST|IFF_NOARP; } /** * alloc_ltalkdev - Allocates and sets up an localtalk device * @sizeof_priv: Size of additional driver-private structure to be allocated * for this localtalk device * * Fill in the fields of the device structure with localtalk-generic * values. Basically does everything except registering the device. * * Constructs a new net device, complete with a private data area of * size @sizeof_priv. A 32-byte (not bit) alignment is enforced for * this private data area. */ struct net_device *alloc_ltalkdev(int sizeof_priv) { return alloc_netdev(sizeof_priv, "lt%d", ltalk_setup); } EXPORT_SYMBOL(alloc_ltalkdev);
gpl-2.0
drakaz/gaosp_kernel
arch/arm/plat-orion/time.c
138
4407
/*
 * NOTE(review): source flattened by the extraction pipeline - original
 * newlines lost.  Code left byte-identical; comments only.  Visible
 * structure: timer 0 is programmed free-running (0xffffffff reload,
 * interrupt masked) and exposed as a clocksource whose read() inverts
 * TIMER0_VAL; timer 1 backs the clock_event_device, with
 * orion_clkevt_next_event() arming one-shot mode (reload disabled) and
 * orion_clkevt_mode() switching periodic mode at ticks_per_jiffy.  The
 * IRQ handler ACKs via BRIDGE_CAUSE before calling the event handler.
 * Register read-modify-write sequences run under local_irq_save(), so
 * the exact statement order matters - no restyle attempted.
 */
/* * arch/arm/plat-orion/time.c * * Marvell Orion SoC timer handling. * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. * * Timer 0 is used as free-running clocksource, while timer 1 is * used as clock_event_device. */ #include <linux/kernel.h> #include <linux/clockchips.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <asm/mach/time.h> #include <mach/hardware.h> /* * Number of timer ticks per jiffy. */ static u32 ticks_per_jiffy; /* * Timer block registers. */ #define TIMER_CTRL (TIMER_VIRT_BASE + 0x0000) #define TIMER0_EN 0x0001 #define TIMER0_RELOAD_EN 0x0002 #define TIMER1_EN 0x0004 #define TIMER1_RELOAD_EN 0x0008 #define TIMER0_RELOAD (TIMER_VIRT_BASE + 0x0010) #define TIMER0_VAL (TIMER_VIRT_BASE + 0x0014) #define TIMER1_RELOAD (TIMER_VIRT_BASE + 0x0018) #define TIMER1_VAL (TIMER_VIRT_BASE + 0x001c) /* * Clocksource handling. */ static cycle_t orion_clksrc_read(void) { return 0xffffffff - readl(TIMER0_VAL); } static struct clocksource orion_clksrc = { .name = "orion_clocksource", .shift = 20, .rating = 300, .read = orion_clksrc_read, .mask = CLOCKSOURCE_MASK(32), .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; /* * Clockevent handling. */ static int orion_clkevt_next_event(unsigned long delta, struct clock_event_device *dev) { unsigned long flags; u32 u; if (delta == 0) return -ETIME; local_irq_save(flags); /* * Clear and enable clockevent timer interrupt. */ writel(BRIDGE_INT_TIMER1_CLR, BRIDGE_CAUSE); u = readl(BRIDGE_MASK); u |= BRIDGE_INT_TIMER1; writel(u, BRIDGE_MASK); /* * Setup new clockevent timer value. */ writel(delta, TIMER1_VAL); /* * Enable the timer. 
*/ u = readl(TIMER_CTRL); u = (u & ~TIMER1_RELOAD_EN) | TIMER1_EN; writel(u, TIMER_CTRL); local_irq_restore(flags); return 0; } static void orion_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev) { unsigned long flags; u32 u; local_irq_save(flags); if (mode == CLOCK_EVT_MODE_PERIODIC) { /* * Setup timer to fire at 1/HZ intervals. */ writel(ticks_per_jiffy - 1, TIMER1_RELOAD); writel(ticks_per_jiffy - 1, TIMER1_VAL); /* * Enable timer interrupt. */ u = readl(BRIDGE_MASK); writel(u | BRIDGE_INT_TIMER1, BRIDGE_MASK); /* * Enable timer. */ u = readl(TIMER_CTRL); writel(u | TIMER1_EN | TIMER1_RELOAD_EN, TIMER_CTRL); } else { /* * Disable timer. */ u = readl(TIMER_CTRL); writel(u & ~TIMER1_EN, TIMER_CTRL); /* * Disable timer interrupt. */ u = readl(BRIDGE_MASK); writel(u & ~BRIDGE_INT_TIMER1, BRIDGE_MASK); /* * ACK pending timer interrupt. */ writel(BRIDGE_INT_TIMER1_CLR, BRIDGE_CAUSE); } local_irq_restore(flags); } static struct clock_event_device orion_clkevt = { .name = "orion_tick", .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, .shift = 32, .rating = 300, .set_next_event = orion_clkevt_next_event, .set_mode = orion_clkevt_mode, }; static irqreturn_t orion_timer_interrupt(int irq, void *dev_id) { /* * ACK timer interrupt and call event handler. */ writel(BRIDGE_INT_TIMER1_CLR, BRIDGE_CAUSE); orion_clkevt.event_handler(&orion_clkevt); return IRQ_HANDLED; } static struct irqaction orion_timer_irq = { .name = "orion_tick", .flags = IRQF_DISABLED | IRQF_TIMER, .handler = orion_timer_interrupt }; void __init orion_time_init(unsigned int irq, unsigned int tclk) { u32 u; ticks_per_jiffy = (tclk + HZ/2) / HZ; /* * Setup free-running clocksource timer (interrupts * disabled.) 
*/ writel(0xffffffff, TIMER0_VAL); writel(0xffffffff, TIMER0_RELOAD); u = readl(BRIDGE_MASK); writel(u & ~BRIDGE_INT_TIMER0, BRIDGE_MASK); u = readl(TIMER_CTRL); writel(u | TIMER0_EN | TIMER0_RELOAD_EN, TIMER_CTRL); orion_clksrc.mult = clocksource_hz2mult(tclk, orion_clksrc.shift); clocksource_register(&orion_clksrc); /* * Setup clockevent timer (interrupt-driven.) */ setup_irq(irq, &orion_timer_irq); orion_clkevt.mult = div_sc(tclk, NSEC_PER_SEC, orion_clkevt.shift); orion_clkevt.max_delta_ns = clockevent_delta2ns(0xfffffffe, &orion_clkevt); orion_clkevt.min_delta_ns = clockevent_delta2ns(1, &orion_clkevt); orion_clkevt.cpumask = cpumask_of(0); clockevents_register_device(&orion_clkevt); }
gpl-2.0
vcgato29/cygwin
gdb/features/mips-linux.c
138
5487
/* THIS FILE IS GENERATED. -*- buffer-read-only: t -*- vi:set ro: Original: mips-linux.xml */ #include "defs.h" #include "osabi.h" #include "target-descriptions.h" struct target_desc *tdesc_mips_linux; static void initialize_tdesc_mips_linux (void) { struct target_desc *result = allocate_target_description (); struct tdesc_feature *feature; set_tdesc_architecture (result, bfd_scan_arch ("mips")); set_tdesc_osabi (result, osabi_from_tdesc_string ("GNU/Linux")); feature = tdesc_create_feature (result, "org.gnu.gdb.mips.cpu"); tdesc_create_reg (feature, "r0", 0, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r1", 1, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r2", 2, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r3", 3, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r4", 4, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r5", 5, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r6", 6, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r7", 7, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r8", 8, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r9", 9, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r10", 10, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r11", 11, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r12", 12, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r13", 13, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r14", 14, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r15", 15, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r16", 16, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r17", 17, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r18", 18, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r19", 19, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r20", 20, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r21", 21, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r22", 22, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r23", 23, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r24", 24, 1, NULL, 32, "int"); 
tdesc_create_reg (feature, "r25", 25, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r26", 26, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r27", 27, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r28", 28, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r29", 29, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r30", 30, 1, NULL, 32, "int"); tdesc_create_reg (feature, "r31", 31, 1, NULL, 32, "int"); tdesc_create_reg (feature, "lo", 33, 1, NULL, 32, "int"); tdesc_create_reg (feature, "hi", 34, 1, NULL, 32, "int"); tdesc_create_reg (feature, "pc", 37, 1, NULL, 32, "int"); feature = tdesc_create_feature (result, "org.gnu.gdb.mips.cp0"); tdesc_create_reg (feature, "status", 32, 1, NULL, 32, "int"); tdesc_create_reg (feature, "badvaddr", 35, 1, NULL, 32, "int"); tdesc_create_reg (feature, "cause", 36, 1, NULL, 32, "int"); feature = tdesc_create_feature (result, "org.gnu.gdb.mips.fpu"); tdesc_create_reg (feature, "f0", 38, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f1", 39, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f2", 40, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f3", 41, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f4", 42, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f5", 43, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f6", 44, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f7", 45, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f8", 46, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f9", 47, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f10", 48, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f11", 49, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f12", 50, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f13", 51, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f14", 52, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f15", 53, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f16", 
54, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f17", 55, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f18", 56, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f19", 57, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f20", 58, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f21", 59, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f22", 60, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f23", 61, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f24", 62, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f25", 63, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f26", 64, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f27", 65, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f28", 66, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f29", 67, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f30", 68, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "f31", 69, 1, NULL, 32, "ieee_single"); tdesc_create_reg (feature, "fcsr", 70, 1, "float", 32, "int"); tdesc_create_reg (feature, "fir", 71, 1, "float", 32, "int"); feature = tdesc_create_feature (result, "org.gnu.gdb.mips.linux"); tdesc_create_reg (feature, "restart", 72, 1, "system", 32, "int"); tdesc_mips_linux = result; }
gpl-2.0
Evervolv/android_kernel_htc_leo
kernel/compat.c
138
29357
/* * linux/kernel/compat.c * * Kernel compatibililty routines for e.g. 32 bit syscall support * on 64 bit kernels. * * Copyright (C) 2002-2003 Stephen Rothwell, IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <linux/compat.h> #include <linux/errno.h> #include <linux/time.h> #include <linux/signal.h> #include <linux/sched.h> /* for MAX_SCHEDULE_TIMEOUT */ #include <linux/syscalls.h> #include <linux/unistd.h> #include <linux/security.h> #include <linux/timex.h> #include <linux/migrate.h> #include <linux/posix-timers.h> #include <linux/times.h> #include <linux/ptrace.h> #include <linux/module.h> #include <asm/uaccess.h> /* * Note that the native side is already converted to a timespec, because * that's what we want anyway. */ static int compat_get_timeval(struct timespec *o, struct compat_timeval __user *i) { long usec; if (get_user(o->tv_sec, &i->tv_sec) || get_user(usec, &i->tv_usec)) return -EFAULT; o->tv_nsec = usec * 1000; return 0; } static int compat_put_timeval(struct compat_timeval __user *o, struct timeval *i) { return (put_user(i->tv_sec, &o->tv_sec) || put_user(i->tv_usec, &o->tv_usec)) ? -EFAULT : 0; } asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv, struct timezone __user *tz) { if (tv) { struct timeval ktv; do_gettimeofday(&ktv); if (compat_put_timeval(tv, &ktv)) return -EFAULT; } if (tz) { if (copy_to_user(tz, &sys_tz, sizeof(sys_tz))) return -EFAULT; } return 0; } asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv, struct timezone __user *tz) { struct timespec kts; struct timezone ktz; if (tv) { if (compat_get_timeval(&kts, tv)) return -EFAULT; } if (tz) { if (copy_from_user(&ktz, tz, sizeof(ktz))) return -EFAULT; } return do_sys_settimeofday(tv ? &kts : NULL, tz ? 
&ktz : NULL); } int get_compat_timespec(struct timespec *ts, const struct compat_timespec __user *cts) { return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) || __get_user(ts->tv_sec, &cts->tv_sec) || __get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0; } int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user *cts) { return (!access_ok(VERIFY_WRITE, cts, sizeof(*cts)) || __put_user(ts->tv_sec, &cts->tv_sec) || __put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0; } static long compat_nanosleep_restart(struct restart_block *restart) { struct compat_timespec __user *rmtp; struct timespec rmt; mm_segment_t oldfs; long ret; restart->nanosleep.rmtp = (struct timespec __user *) &rmt; oldfs = get_fs(); set_fs(KERNEL_DS); ret = hrtimer_nanosleep_restart(restart); set_fs(oldfs); if (ret) { rmtp = restart->nanosleep.compat_rmtp; if (rmtp && put_compat_timespec(&rmt, rmtp)) return -EFAULT; } return ret; } asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp, struct compat_timespec __user *rmtp) { struct timespec tu, rmt; mm_segment_t oldfs; long ret; if (get_compat_timespec(&tu, rqtp)) return -EFAULT; if (!timespec_valid(&tu)) return -EINVAL; oldfs = get_fs(); set_fs(KERNEL_DS); ret = hrtimer_nanosleep(&tu, rmtp ? 
(struct timespec __user *)&rmt : NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC); set_fs(oldfs); if (ret) { struct restart_block *restart = &current_thread_info()->restart_block; restart->fn = compat_nanosleep_restart; restart->nanosleep.compat_rmtp = rmtp; if (rmtp && put_compat_timespec(&rmt, rmtp)) return -EFAULT; } return ret; } static inline long get_compat_itimerval(struct itimerval *o, struct compat_itimerval __user *i) { return (!access_ok(VERIFY_READ, i, sizeof(*i)) || (__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) | __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) | __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) | __get_user(o->it_value.tv_usec, &i->it_value.tv_usec))); } static inline long put_compat_itimerval(struct compat_itimerval __user *o, struct itimerval *i) { return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) || (__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) | __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) | __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) | __put_user(i->it_value.tv_usec, &o->it_value.tv_usec))); } asmlinkage long compat_sys_getitimer(int which, struct compat_itimerval __user *it) { struct itimerval kit; int error; error = do_getitimer(which, &kit); if (!error && put_compat_itimerval(it, &kit)) error = -EFAULT; return error; } asmlinkage long compat_sys_setitimer(int which, struct compat_itimerval __user *in, struct compat_itimerval __user *out) { struct itimerval kin, kout; int error; if (in) { if (get_compat_itimerval(&kin, in)) return -EFAULT; } else memset(&kin, 0, sizeof(kin)); error = do_setitimer(which, &kin, out ? 
&kout : NULL); if (error || !out) return error; if (put_compat_itimerval(out, &kout)) return -EFAULT; return 0; } static compat_clock_t clock_t_to_compat_clock_t(clock_t x) { return compat_jiffies_to_clock_t(clock_t_to_jiffies(x)); } asmlinkage long compat_sys_times(struct compat_tms __user *tbuf) { if (tbuf) { struct tms tms; struct compat_tms tmp; do_sys_times(&tms); /* Convert our struct tms to the compat version. */ tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime); tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime); tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime); tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime); if (copy_to_user(tbuf, &tmp, sizeof(tmp))) return -EFAULT; } force_successful_syscall_return(); return compat_jiffies_to_clock_t(jiffies); } /* * Assumption: old_sigset_t and compat_old_sigset_t are both * types that can be passed to put_user()/get_user(). */ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set) { old_sigset_t s; long ret; mm_segment_t old_fs = get_fs(); set_fs(KERNEL_DS); ret = sys_sigpending((old_sigset_t __user *) &s); set_fs(old_fs); if (ret == 0) ret = put_user(s, set); return ret; } asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set, compat_old_sigset_t __user *oset) { old_sigset_t s; long ret; mm_segment_t old_fs; if (set && get_user(s, set)) return -EFAULT; old_fs = get_fs(); set_fs(KERNEL_DS); ret = sys_sigprocmask(how, set ? (old_sigset_t __user *) &s : NULL, oset ? 
(old_sigset_t __user *) &s : NULL); set_fs(old_fs); if (ret == 0) if (oset) ret = put_user(s, oset); return ret; } asmlinkage long compat_sys_setrlimit(unsigned int resource, struct compat_rlimit __user *rlim) { struct rlimit r; int ret; mm_segment_t old_fs = get_fs (); if (resource >= RLIM_NLIMITS) return -EINVAL; if (!access_ok(VERIFY_READ, rlim, sizeof(*rlim)) || __get_user(r.rlim_cur, &rlim->rlim_cur) || __get_user(r.rlim_max, &rlim->rlim_max)) return -EFAULT; if (r.rlim_cur == COMPAT_RLIM_INFINITY) r.rlim_cur = RLIM_INFINITY; if (r.rlim_max == COMPAT_RLIM_INFINITY) r.rlim_max = RLIM_INFINITY; set_fs(KERNEL_DS); ret = sys_setrlimit(resource, (struct rlimit __user *) &r); set_fs(old_fs); return ret; } #ifdef COMPAT_RLIM_OLD_INFINITY asmlinkage long compat_sys_old_getrlimit(unsigned int resource, struct compat_rlimit __user *rlim) { struct rlimit r; int ret; mm_segment_t old_fs = get_fs(); set_fs(KERNEL_DS); ret = sys_old_getrlimit(resource, &r); set_fs(old_fs); if (!ret) { if (r.rlim_cur > COMPAT_RLIM_OLD_INFINITY) r.rlim_cur = COMPAT_RLIM_INFINITY; if (r.rlim_max > COMPAT_RLIM_OLD_INFINITY) r.rlim_max = COMPAT_RLIM_INFINITY; if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) || __put_user(r.rlim_cur, &rlim->rlim_cur) || __put_user(r.rlim_max, &rlim->rlim_max)) return -EFAULT; } return ret; } #endif asmlinkage long compat_sys_getrlimit (unsigned int resource, struct compat_rlimit __user *rlim) { struct rlimit r; int ret; mm_segment_t old_fs = get_fs(); set_fs(KERNEL_DS); ret = sys_getrlimit(resource, (struct rlimit __user *) &r); set_fs(old_fs); if (!ret) { if (r.rlim_cur > COMPAT_RLIM_INFINITY) r.rlim_cur = COMPAT_RLIM_INFINITY; if (r.rlim_max > COMPAT_RLIM_INFINITY) r.rlim_max = COMPAT_RLIM_INFINITY; if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) || __put_user(r.rlim_cur, &rlim->rlim_cur) || __put_user(r.rlim_max, &rlim->rlim_max)) return -EFAULT; } return ret; } int put_compat_rusage(const struct rusage *r, struct compat_rusage __user *ru) { if 
(!access_ok(VERIFY_WRITE, ru, sizeof(*ru)) || __put_user(r->ru_utime.tv_sec, &ru->ru_utime.tv_sec) || __put_user(r->ru_utime.tv_usec, &ru->ru_utime.tv_usec) || __put_user(r->ru_stime.tv_sec, &ru->ru_stime.tv_sec) || __put_user(r->ru_stime.tv_usec, &ru->ru_stime.tv_usec) || __put_user(r->ru_maxrss, &ru->ru_maxrss) || __put_user(r->ru_ixrss, &ru->ru_ixrss) || __put_user(r->ru_idrss, &ru->ru_idrss) || __put_user(r->ru_isrss, &ru->ru_isrss) || __put_user(r->ru_minflt, &ru->ru_minflt) || __put_user(r->ru_majflt, &ru->ru_majflt) || __put_user(r->ru_nswap, &ru->ru_nswap) || __put_user(r->ru_inblock, &ru->ru_inblock) || __put_user(r->ru_oublock, &ru->ru_oublock) || __put_user(r->ru_msgsnd, &ru->ru_msgsnd) || __put_user(r->ru_msgrcv, &ru->ru_msgrcv) || __put_user(r->ru_nsignals, &ru->ru_nsignals) || __put_user(r->ru_nvcsw, &ru->ru_nvcsw) || __put_user(r->ru_nivcsw, &ru->ru_nivcsw)) return -EFAULT; return 0; } asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru) { struct rusage r; int ret; mm_segment_t old_fs = get_fs(); set_fs(KERNEL_DS); ret = sys_getrusage(who, (struct rusage __user *) &r); set_fs(old_fs); if (ret) return ret; if (put_compat_rusage(&r, ru)) return -EFAULT; return 0; } asmlinkage long compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options, struct compat_rusage __user *ru) { if (!ru) { return sys_wait4(pid, stat_addr, options, NULL); } else { struct rusage r; int ret; unsigned int status; mm_segment_t old_fs = get_fs(); set_fs (KERNEL_DS); ret = sys_wait4(pid, (stat_addr ? 
(unsigned int __user *) &status : NULL), options, (struct rusage __user *) &r); set_fs (old_fs); if (ret > 0) { if (put_compat_rusage(&r, ru)) return -EFAULT; if (stat_addr && put_user(status, stat_addr)) return -EFAULT; } return ret; } } asmlinkage long compat_sys_waitid(int which, compat_pid_t pid, struct compat_siginfo __user *uinfo, int options, struct compat_rusage __user *uru) { siginfo_t info; struct rusage ru; long ret; mm_segment_t old_fs = get_fs(); memset(&info, 0, sizeof(info)); set_fs(KERNEL_DS); ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options, uru ? (struct rusage __user *)&ru : NULL); set_fs(old_fs); if ((ret < 0) || (info.si_signo == 0)) return ret; if (uru) { ret = put_compat_rusage(&ru, uru); if (ret) return ret; } BUG_ON(info.si_code & __SI_MASK); info.si_code |= __SI_CHLD; return copy_siginfo_to_user32(uinfo, &info); } static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr, unsigned len, struct cpumask *new_mask) { unsigned long *k; if (len < cpumask_size()) memset(new_mask, 0, cpumask_size()); else if (len > cpumask_size()) len = cpumask_size(); k = cpumask_bits(new_mask); return compat_get_bitmap(k, user_mask_ptr, len * 8); } asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid, unsigned int len, compat_ulong_t __user *user_mask_ptr) { cpumask_var_t new_mask; int retval; if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) return -ENOMEM; retval = compat_get_user_cpu_mask(user_mask_ptr, len, new_mask); if (retval) goto out; retval = sched_setaffinity(pid, new_mask); out: free_cpumask_var(new_mask); return retval; } asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len, compat_ulong_t __user *user_mask_ptr) { int ret; cpumask_var_t mask; if ((len * BITS_PER_BYTE) < nr_cpu_ids) return -EINVAL; if (len & (sizeof(compat_ulong_t)-1)) return -EINVAL; if (!alloc_cpumask_var(&mask, GFP_KERNEL)) return -ENOMEM; ret = sched_getaffinity(pid, mask); if (ret == 0) { size_t retlen = 
min_t(size_t, len, cpumask_size()); if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8)) ret = -EFAULT; else ret = retlen; } free_cpumask_var(mask); return ret; } int get_compat_itimerspec(struct itimerspec *dst, const struct compat_itimerspec __user *src) { if (get_compat_timespec(&dst->it_interval, &src->it_interval) || get_compat_timespec(&dst->it_value, &src->it_value)) return -EFAULT; return 0; } int put_compat_itimerspec(struct compat_itimerspec __user *dst, const struct itimerspec *src) { if (put_compat_timespec(&src->it_interval, &dst->it_interval) || put_compat_timespec(&src->it_value, &dst->it_value)) return -EFAULT; return 0; } long compat_sys_timer_create(clockid_t which_clock, struct compat_sigevent __user *timer_event_spec, timer_t __user *created_timer_id) { struct sigevent __user *event = NULL; if (timer_event_spec) { struct sigevent kevent; event = compat_alloc_user_space(sizeof(*event)); if (get_compat_sigevent(&kevent, timer_event_spec) || copy_to_user(event, &kevent, sizeof(*event))) return -EFAULT; } return sys_timer_create(which_clock, event, created_timer_id); } long compat_sys_timer_settime(timer_t timer_id, int flags, struct compat_itimerspec __user *new, struct compat_itimerspec __user *old) { long err; mm_segment_t oldfs; struct itimerspec newts, oldts; if (!new) return -EINVAL; if (get_compat_itimerspec(&newts, new)) return -EFAULT; oldfs = get_fs(); set_fs(KERNEL_DS); err = sys_timer_settime(timer_id, flags, (struct itimerspec __user *) &newts, (struct itimerspec __user *) &oldts); set_fs(oldfs); if (!err && old && put_compat_itimerspec(old, &oldts)) return -EFAULT; return err; } long compat_sys_timer_gettime(timer_t timer_id, struct compat_itimerspec __user *setting) { long err; mm_segment_t oldfs; struct itimerspec ts; oldfs = get_fs(); set_fs(KERNEL_DS); err = sys_timer_gettime(timer_id, (struct itimerspec __user *) &ts); set_fs(oldfs); if (!err && put_compat_itimerspec(setting, &ts)) return -EFAULT; return err; } 
long compat_sys_clock_settime(clockid_t which_clock, struct compat_timespec __user *tp) { long err; mm_segment_t oldfs; struct timespec ts; if (get_compat_timespec(&ts, tp)) return -EFAULT; oldfs = get_fs(); set_fs(KERNEL_DS); err = sys_clock_settime(which_clock, (struct timespec __user *) &ts); set_fs(oldfs); return err; } long compat_sys_clock_gettime(clockid_t which_clock, struct compat_timespec __user *tp) { long err; mm_segment_t oldfs; struct timespec ts; oldfs = get_fs(); set_fs(KERNEL_DS); err = sys_clock_gettime(which_clock, (struct timespec __user *) &ts); set_fs(oldfs); if (!err && put_compat_timespec(&ts, tp)) return -EFAULT; return err; } long compat_sys_clock_getres(clockid_t which_clock, struct compat_timespec __user *tp) { long err; mm_segment_t oldfs; struct timespec ts; oldfs = get_fs(); set_fs(KERNEL_DS); err = sys_clock_getres(which_clock, (struct timespec __user *) &ts); set_fs(oldfs); if (!err && tp && put_compat_timespec(&ts, tp)) return -EFAULT; return err; } static long compat_clock_nanosleep_restart(struct restart_block *restart) { long err; mm_segment_t oldfs; struct timespec tu; struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp; restart->nanosleep.rmtp = (struct timespec __user *) &tu; oldfs = get_fs(); set_fs(KERNEL_DS); err = clock_nanosleep_restart(restart); set_fs(oldfs); if ((err == -ERESTART_RESTARTBLOCK) && rmtp && put_compat_timespec(&tu, rmtp)) return -EFAULT; if (err == -ERESTART_RESTARTBLOCK) { restart->fn = compat_clock_nanosleep_restart; restart->nanosleep.compat_rmtp = rmtp; } return err; } long compat_sys_clock_nanosleep(clockid_t which_clock, int flags, struct compat_timespec __user *rqtp, struct compat_timespec __user *rmtp) { long err; mm_segment_t oldfs; struct timespec in, out; struct restart_block *restart; if (get_compat_timespec(&in, rqtp)) return -EFAULT; oldfs = get_fs(); set_fs(KERNEL_DS); err = sys_clock_nanosleep(which_clock, flags, (struct timespec __user *) &in, (struct timespec __user *) &out); 
set_fs(oldfs); if ((err == -ERESTART_RESTARTBLOCK) && rmtp && put_compat_timespec(&out, rmtp)) return -EFAULT; if (err == -ERESTART_RESTARTBLOCK) { restart = &current_thread_info()->restart_block; restart->fn = compat_clock_nanosleep_restart; restart->nanosleep.compat_rmtp = rmtp; } return err; } /* * We currently only need the following fields from the sigevent * structure: sigev_value, sigev_signo, sig_notify and (sometimes * sigev_notify_thread_id). The others are handled in user mode. * We also assume that copying sigev_value.sival_int is sufficient * to keep all the bits of sigev_value.sival_ptr intact. */ int get_compat_sigevent(struct sigevent *event, const struct compat_sigevent __user *u_event) { memset(event, 0, sizeof(*event)); return (!access_ok(VERIFY_READ, u_event, sizeof(*u_event)) || __get_user(event->sigev_value.sival_int, &u_event->sigev_value.sival_int) || __get_user(event->sigev_signo, &u_event->sigev_signo) || __get_user(event->sigev_notify, &u_event->sigev_notify) || __get_user(event->sigev_notify_thread_id, &u_event->sigev_notify_thread_id)) ? -EFAULT : 0; } long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, unsigned long bitmap_size) { int i, j; unsigned long m; compat_ulong_t um; unsigned long nr_compat_longs; /* align bitmap up to nearest compat_long_t boundary */ bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG); if (!access_ok(VERIFY_READ, umask, bitmap_size / 8)) return -EFAULT; nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size); for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) { m = 0; for (j = 0; j < sizeof(m)/sizeof(um); j++) { /* * We dont want to read past the end of the userspace * bitmap. We must however ensure the end of the * kernel bitmap is zeroed. 
*/ if (nr_compat_longs-- > 0) { if (__get_user(um, umask)) return -EFAULT; } else { um = 0; } umask++; m |= (long)um << (j * BITS_PER_COMPAT_LONG); } *mask++ = m; } return 0; } long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, unsigned long bitmap_size) { int i, j; unsigned long m; compat_ulong_t um; unsigned long nr_compat_longs; /* align bitmap up to nearest compat_long_t boundary */ bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG); if (!access_ok(VERIFY_WRITE, umask, bitmap_size / 8)) return -EFAULT; nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size); for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) { m = *mask++; for (j = 0; j < sizeof(m)/sizeof(um); j++) { um = m; /* * We dont want to write past the end of the userspace * bitmap. */ if (nr_compat_longs-- > 0) { if (__put_user(um, umask)) return -EFAULT; } umask++; m >>= 4*sizeof(um); m >>= 4*sizeof(um); } } return 0; } void sigset_from_compat (sigset_t *set, compat_sigset_t *compat) { switch (_NSIG_WORDS) { case 4: set->sig[3] = compat->sig[6] | (((long)compat->sig[7]) << 32 ); case 3: set->sig[2] = compat->sig[4] | (((long)compat->sig[5]) << 32 ); case 2: set->sig[1] = compat->sig[2] | (((long)compat->sig[3]) << 32 ); case 1: set->sig[0] = compat->sig[0] | (((long)compat->sig[1]) << 32 ); } } asmlinkage long compat_sys_rt_sigtimedwait (compat_sigset_t __user *uthese, struct compat_siginfo __user *uinfo, struct compat_timespec __user *uts, compat_size_t sigsetsize) { compat_sigset_t s32; sigset_t s; int sig; struct timespec t; siginfo_t info; long ret, timeout = 0; if (sigsetsize != sizeof(sigset_t)) return -EINVAL; if (copy_from_user(&s32, uthese, sizeof(compat_sigset_t))) return -EFAULT; sigset_from_compat(&s, &s32); sigdelsetmask(&s,sigmask(SIGKILL)|sigmask(SIGSTOP)); signotset(&s); if (uts) { if (get_compat_timespec (&t, uts)) return -EFAULT; if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 || t.tv_sec < 0) return -EINVAL; } spin_lock_irq(&current->sighand->siglock); sig = 
dequeue_signal(current, &s, &info); if (!sig) { timeout = MAX_SCHEDULE_TIMEOUT; if (uts) timeout = timespec_to_jiffies(&t) +(t.tv_sec || t.tv_nsec); if (timeout) { current->real_blocked = current->blocked; sigandsets(&current->blocked, &current->blocked, &s); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); timeout = schedule_timeout_interruptible(timeout); spin_lock_irq(&current->sighand->siglock); sig = dequeue_signal(current, &s, &info); current->blocked = current->real_blocked; siginitset(&current->real_blocked, 0); recalc_sigpending(); } } spin_unlock_irq(&current->sighand->siglock); if (sig) { ret = sig; if (uinfo) { if (copy_siginfo_to_user32(uinfo, &info)) ret = -EFAULT; } }else { ret = timeout?-EINTR:-EAGAIN; } return ret; } asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig, struct compat_siginfo __user *uinfo) { siginfo_t info; if (copy_siginfo_from_user32(&info, uinfo)) return -EFAULT; return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); } #ifdef __ARCH_WANT_COMPAT_SYS_TIME /* compat_time_t is a 32 bit "long" and needs to get converted. */ asmlinkage long compat_sys_time(compat_time_t __user * tloc) { compat_time_t i; struct timeval tv; do_gettimeofday(&tv); i = tv.tv_sec; if (tloc) { if (put_user(i,tloc)) return -EFAULT; } force_successful_syscall_return(); return i; } asmlinkage long compat_sys_stime(compat_time_t __user *tptr) { struct timespec tv; int err; if (get_user(tv.tv_sec, tptr)) return -EFAULT; tv.tv_nsec = 0; err = security_settime(&tv, NULL); if (err) return err; do_settimeofday(&tv); return 0; } #endif /* __ARCH_WANT_COMPAT_SYS_TIME */ #ifdef __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, compat_size_t sigsetsize) { sigset_t newset; compat_sigset_t newset32; /* XXX: Don't preclude handling different sized sigset_t's. 
*/ if (sigsetsize != sizeof(sigset_t)) return -EINVAL; if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t))) return -EFAULT; sigset_from_compat(&newset, &newset32); sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP)); spin_lock_irq(&current->sighand->siglock); current->saved_sigmask = current->blocked; current->blocked = newset; recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); current->state = TASK_INTERRUPTIBLE; schedule(); set_restore_sigmask(); return -ERESTARTNOHAND; } #endif /* __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND */ asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp) { struct timex txc; int ret; memset(&txc, 0, sizeof(struct timex)); if (!access_ok(VERIFY_READ, utp, sizeof(struct compat_timex)) || __get_user(txc.modes, &utp->modes) || __get_user(txc.offset, &utp->offset) || __get_user(txc.freq, &utp->freq) || __get_user(txc.maxerror, &utp->maxerror) || __get_user(txc.esterror, &utp->esterror) || __get_user(txc.status, &utp->status) || __get_user(txc.constant, &utp->constant) || __get_user(txc.precision, &utp->precision) || __get_user(txc.tolerance, &utp->tolerance) || __get_user(txc.time.tv_sec, &utp->time.tv_sec) || __get_user(txc.time.tv_usec, &utp->time.tv_usec) || __get_user(txc.tick, &utp->tick) || __get_user(txc.ppsfreq, &utp->ppsfreq) || __get_user(txc.jitter, &utp->jitter) || __get_user(txc.shift, &utp->shift) || __get_user(txc.stabil, &utp->stabil) || __get_user(txc.jitcnt, &utp->jitcnt) || __get_user(txc.calcnt, &utp->calcnt) || __get_user(txc.errcnt, &utp->errcnt) || __get_user(txc.stbcnt, &utp->stbcnt)) return -EFAULT; ret = do_adjtimex(&txc); if (!access_ok(VERIFY_WRITE, utp, sizeof(struct compat_timex)) || __put_user(txc.modes, &utp->modes) || __put_user(txc.offset, &utp->offset) || __put_user(txc.freq, &utp->freq) || __put_user(txc.maxerror, &utp->maxerror) || __put_user(txc.esterror, &utp->esterror) || __put_user(txc.status, &utp->status) || __put_user(txc.constant, &utp->constant) || 
__put_user(txc.precision, &utp->precision) || __put_user(txc.tolerance, &utp->tolerance) || __put_user(txc.time.tv_sec, &utp->time.tv_sec) || __put_user(txc.time.tv_usec, &utp->time.tv_usec) || __put_user(txc.tick, &utp->tick) || __put_user(txc.ppsfreq, &utp->ppsfreq) || __put_user(txc.jitter, &utp->jitter) || __put_user(txc.shift, &utp->shift) || __put_user(txc.stabil, &utp->stabil) || __put_user(txc.jitcnt, &utp->jitcnt) || __put_user(txc.calcnt, &utp->calcnt) || __put_user(txc.errcnt, &utp->errcnt) || __put_user(txc.stbcnt, &utp->stbcnt) || __put_user(txc.tai, &utp->tai)) ret = -EFAULT; return ret; } #ifdef CONFIG_NUMA asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_pages, compat_uptr_t __user *pages32, const int __user *nodes, int __user *status, int flags) { const void __user * __user *pages; int i; pages = compat_alloc_user_space(nr_pages * sizeof(void *)); for (i = 0; i < nr_pages; i++) { compat_uptr_t p; if (get_user(p, pages32 + i) || put_user(compat_ptr(p), pages + i)) return -EFAULT; } return sys_move_pages(pid, nr_pages, pages, nodes, status, flags); } asmlinkage long compat_sys_migrate_pages(compat_pid_t pid, compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes, const compat_ulong_t __user *new_nodes) { unsigned long __user *old = NULL; unsigned long __user *new = NULL; nodemask_t tmp_mask; unsigned long nr_bits; unsigned long size; nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES); size = ALIGN(nr_bits, BITS_PER_LONG) / 8; if (old_nodes) { if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits)) return -EFAULT; old = compat_alloc_user_space(new_nodes ? 
size * 2 : size); if (new_nodes) new = old + size / sizeof(unsigned long); if (copy_to_user(old, nodes_addr(tmp_mask), size)) return -EFAULT; } if (new_nodes) { if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits)) return -EFAULT; if (new == NULL) new = compat_alloc_user_space(size); if (copy_to_user(new, nodes_addr(tmp_mask), size)) return -EFAULT; } return sys_migrate_pages(pid, nr_bits + 1, old, new); } #endif struct compat_sysinfo { s32 uptime; u32 loads[3]; u32 totalram; u32 freeram; u32 sharedram; u32 bufferram; u32 totalswap; u32 freeswap; u16 procs; u16 pad; u32 totalhigh; u32 freehigh; u32 mem_unit; char _f[20-2*sizeof(u32)-sizeof(int)]; }; asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info) { struct sysinfo s; do_sysinfo(&s); /* Check to see if any memory value is too large for 32-bit and scale * down if needed */ if ((s.totalram >> 32) || (s.totalswap >> 32)) { int bitcount = 0; while (s.mem_unit < PAGE_SIZE) { s.mem_unit <<= 1; bitcount++; } s.totalram >>= bitcount; s.freeram >>= bitcount; s.sharedram >>= bitcount; s.bufferram >>= bitcount; s.totalswap >>= bitcount; s.freeswap >>= bitcount; s.totalhigh >>= bitcount; s.freehigh >>= bitcount; } if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) || __put_user (s.uptime, &info->uptime) || __put_user (s.loads[0], &info->loads[0]) || __put_user (s.loads[1], &info->loads[1]) || __put_user (s.loads[2], &info->loads[2]) || __put_user (s.totalram, &info->totalram) || __put_user (s.freeram, &info->freeram) || __put_user (s.sharedram, &info->sharedram) || __put_user (s.bufferram, &info->bufferram) || __put_user (s.totalswap, &info->totalswap) || __put_user (s.freeswap, &info->freeswap) || __put_user (s.procs, &info->procs) || __put_user (s.totalhigh, &info->totalhigh) || __put_user (s.freehigh, &info->freehigh) || __put_user (s.mem_unit, &info->mem_unit)) return -EFAULT; return 0; } /* * Allocate user-space memory for the duration of a single system call, * in order to 
marshall parameters inside a compat thunk. */ void __user *compat_alloc_user_space(unsigned long len) { void __user *ptr; /* If len would occupy more than half of the entire compat space... */ if (unlikely(len > (((compat_uptr_t)~0) >> 1))) return NULL; ptr = arch_compat_alloc_user_space(len); if (unlikely(!access_ok(VERIFY_WRITE, ptr, len))) return NULL; return ptr; } EXPORT_SYMBOL_GPL(compat_alloc_user_space);
gpl-2.0
zaventh/nexus7-kernel-grouper
sound/soc/codecs/twl6040.c
394
44379
/*
 * ALSA SoC TWL6040 codec driver
 *
 * Author: Misael Lopez Cruz <x0052729@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/i2c/twl.h>
#include <linux/mfd/twl6040.h>

#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/initval.h>
#include <sound/tlv.h>

#include "twl6040.h"

/* Rates/formats advertised by the DAI (8 kHz .. 96 kHz, 32-bit samples) */
#define TWL6040_RATES		SNDRV_PCM_RATE_8000_96000
#define TWL6040_FORMATS	(SNDRV_PCM_FMTBIT_S32_LE)

/* Headset/handsfree output gain register endpoints (raw register codes) */
#define TWL6040_OUTHS_0dB 0x00
#define TWL6040_OUTHS_M30dB 0x0F
#define TWL6040_OUTHF_0dB 0x03
#define TWL6040_OUTHF_M52dB 0x1D

/* Volume ramp direction used by the PGA ramp workers */
#define TWL6040_RAMP_NONE	0
#define TWL6040_RAMP_UP		1
#define TWL6040_RAMP_DOWN	2

/* HSGAIN packs left gain in bits [3:0] and right gain in bits [7:4] */
#define TWL6040_HSL_VOL_MASK	0x0F
#define TWL6040_HSL_VOL_SHIFT	0
#define TWL6040_HSR_VOL_MASK	0xF0
#define TWL6040_HSR_VOL_SHIFT	4
/* HFLGAIN/HFRGAIN each carry a 5-bit gain in bits [4:0] */
#define TWL6040_HF_VOL_MASK	0x1F
#define TWL6040_HF_VOL_SHIFT	0

/*
 * Per-output (headset or handsfree) ramp bookkeeping, consumed by the
 * delayed-work volume ramp handlers further down in this file.
 */
struct twl6040_output {
	u16 active;		/* non-zero once the output PGA is ramped up */
	u16 left_vol;		/* target left gain (shadowed control value) */
	u16 right_vol;		/* target right gain (shadowed control value) */
	u16 left_step;		/* ramp increment per step, left channel */
	u16 right_step;		/* ramp increment per step, right channel */
	unsigned int step_delay;	/* ms between ramp steps */
	u16 ramp;		/* TWL6040_RAMP_{NONE,UP,DOWN} */
	u16 mute;
	struct completion ramp_done;	/* signalled when a down-ramp finishes */
};

/* Headset jack reported to the sound core on plug/unplug interrupts */
struct twl6040_jack_data {
	struct snd_soc_jack *jack;
	int report;		/* event mask passed to snd_soc_jack_report() */
};

/* codec private data */
struct twl6040_data {
	int plug_irq;		/* jack plug/unplug interrupt line */
	int codec_powered;	/* tracks twl6040_power() on/off state */
	int pll;		/* TWL6040_SYSCLK_SEL_{LPPLL,HPPLL} in use */
	int non_lp;		/* count of active paths that forbid low-power mode */
	int pll_power_mode;	/* user-selected PLL (also indexes sysclk_constraints) */
	int hs_power_mode;	/* user-selected headset power mode */
	int hs_power_mode_locked;	/* set while earphone forces high-perf mode */
	unsigned int clk_in;	/* input clock rate handed to twl6040_set_pll() */
	unsigned int sysclk;	/* system clock chosen from the stream rate */
	u16 hs_left_step;	/* platform-provided default ramp steps */
	u16 hs_right_step;
	u16 hf_left_step;
	u16 hf_right_step;
	struct twl6040_jack_data hs_jack;
	struct snd_soc_codec *codec;
	struct workqueue_struct *workqueue;	/* runs the jack-detect delayed work */
	struct delayed_work delayed_work;	/* debounced jack report */
	struct mutex mutex;	/* serializes jack status read + report */
	struct twl6040_output headset;
	struct twl6040_output handsfree;
	struct workqueue_struct *hf_workqueue;	/* handsfree volume ramp */
	struct workqueue_struct *hs_workqueue;	/* headset volume ramp */
	struct delayed_work hs_delayed_work;
	struct delayed_work hf_delayed_work;
};

/*
 * twl6040 register cache & default register settings
 */
static const u8 twl6040_reg[TWL6040_CACHEREGNUM] = {
	0x00, /* not used		0x00	*/
	0x4B, /* TWL6040_ASICID (ro)	0x01	*/
	0x00, /* TWL6040_ASICREV (ro)	0x02	*/
	0x00, /* TWL6040_INTID		0x03	*/
	0x00, /* TWL6040_INTMR		0x04	*/
	0x00, /* TWL6040_NCPCTRL	0x05	*/
	0x00, /* TWL6040_LDOCTL		0x06	*/
	0x60, /* TWL6040_HPPLLCTL	0x07	*/
	0x00, /* TWL6040_LPPLLCTL	0x08	*/
	0x4A, /* TWL6040_LPPLLDIV	0x09	*/
	0x00, /* TWL6040_AMICBCTL	0x0A	*/
	0x00, /* TWL6040_DMICBCTL	0x0B	*/
	0x18, /* TWL6040_MICLCTL	0x0C	- No input selected on Left Mic */
	0x18, /* TWL6040_MICRCTL	0x0D	- No input selected on Right Mic */
	0x00, /* TWL6040_MICGAIN	0x0E	*/
	0x1B, /* TWL6040_LINEGAIN	0x0F	*/
	0x00, /* TWL6040_HSLCTL		0x10	*/
	0x00, /* TWL6040_HSRCTL		0x11	*/
	0x00, /* TWL6040_HSGAIN		0x12	*/
	0x00, /* TWL6040_EARCTL		0x13	*/
	0x00, /* TWL6040_HFLCTL		0x14	*/
	0x00, /* TWL6040_HFLGAIN	0x15	*/
	0x00, /* TWL6040_HFRCTL		0x16	*/
	0x00, /* TWL6040_HFRGAIN	0x17	*/
	0x00, /* TWL6040_VIBCTLL	0x18	*/
	0x00, /* TWL6040_VIBDATL	0x19	*/
	0x00, /* TWL6040_VIBCTLR	0x1A	*/
	0x00, /* TWL6040_VIBDATR	0x1B	*/
	0x00, /* TWL6040_HKCTL1		0x1C	*/
	0x00, /* TWL6040_HKCTL2		0x1D	*/
	0x00, /* TWL6040_GPOCTL		0x1E	*/
	0x00, /* TWL6040_ALB		0x1F	*/
	0x00, /* TWL6040_DLB		0x20	*/
	0x00, /* not used		0x21	*/
	0x00, /* not used		0x22	*/
	0x00, /* not used		0x23	*/
	0x00, /* not used		0x24	*/
	0x00, /* not used		0x25	*/
	0x00, /* not used		0x26	*/
	0x00, /* not used		0x27	*/
	0x00, /* TWL6040_TRIM1		0x28	*/
	0x00, /* TWL6040_TRIM2		0x29	*/
	0x00, /* TWL6040_TRIM3		0x2A	*/
	0x00, /* TWL6040_HSOTRIM	0x2B	*/
	0x00, /* TWL6040_HFOTRIM	0x2C	*/
	0x09, /* TWL6040_ACCCTL		0x2D	*/
	0x00, /* TWL6040_STATUS (ro)	0x2E	*/
};

/*
 * twl6040 vio/gnd registers:
 * registers under vio/gnd supply can be accessed
 * before the power-up sequence, after NRESPWRON goes high
 */
static const int twl6040_vio_reg[TWL6040_VIOREGNUM] = {
	TWL6040_REG_ASICID,
	TWL6040_REG_ASICREV,
	TWL6040_REG_INTID,
	TWL6040_REG_INTMR,
	TWL6040_REG_NCPCTL,
	TWL6040_REG_LDOCTL,
	TWL6040_REG_AMICBCTL,
	TWL6040_REG_DMICBCTL,
	TWL6040_REG_HKCTL1,
	TWL6040_REG_HKCTL2,
	TWL6040_REG_GPOCTL,
	TWL6040_REG_TRIM1,
	TWL6040_REG_TRIM2,
	TWL6040_REG_TRIM3,
	TWL6040_REG_HSOTRIM,
	TWL6040_REG_HFOTRIM,
	TWL6040_REG_ACCCTL,
	TWL6040_REG_STATUS,
};

/*
 * twl6040 vdd/vss registers:
 * registers under vdd/vss supplies can only be accessed
 * after the power-up sequence
 */
static const int twl6040_vdd_reg[TWL6040_VDDREGNUM] = {
	TWL6040_REG_HPPLLCTL,
	TWL6040_REG_LPPLLCTL,
	TWL6040_REG_LPPLLDIV,
	TWL6040_REG_MICLCTL,
	TWL6040_REG_MICRCTL,
	TWL6040_REG_MICGAIN,
	TWL6040_REG_LINEGAIN,
	TWL6040_REG_HSLCTL,
	TWL6040_REG_HSRCTL,
	TWL6040_REG_HSGAIN,
	TWL6040_REG_EARCTL,
	TWL6040_REG_HFLCTL,
	TWL6040_REG_HFLGAIN,
	TWL6040_REG_HFRCTL,
	TWL6040_REG_HFRGAIN,
	TWL6040_REG_VIBCTLL,
	TWL6040_REG_VIBDATL,
	TWL6040_REG_VIBCTLR,
	TWL6040_REG_VIBDATR,
	TWL6040_REG_ALB,
	TWL6040_REG_DLB,
};

/*
 * set of rates for each pll: low-power and high-performance
 * (indexed by priv->pll_power_mode via sysclk_constraints[] below)
 */
static unsigned int lp_rates[] = {
	8000,
	11250,
	16000,
	22500,
	32000,
	44100,
	48000,
	88200,
	96000,
};

static unsigned int hp_rates[] = {
	8000,
	16000,
	32000,
	48000,
	96000,
};

/* [0] = LPPLL rate list, [1] = HPPLL rate list */
static struct snd_pcm_hw_constraint_list sysclk_constraints[] = {
	{ .count = ARRAY_SIZE(lp_rates), .list = lp_rates, },
	{ .count = ARRAY_SIZE(hp_rates), .list = hp_rates, },
};

/*
 * read twl6040 register cache
 * Returns the cached value, or -EIO for an out-of-range register.
 */
static inline unsigned int twl6040_read_reg_cache(struct snd_soc_codec *codec,
						unsigned int reg)
{
	u8 *cache = codec->reg_cache;

	if (reg >= TWL6040_CACHEREGNUM)
		return -EIO;

	return cache[reg];
}

/*
 * write twl6040 register cache
 * Cache-only update; the hardware register is untouched.
 */
static inline void twl6040_write_reg_cache(struct snd_soc_codec *codec,
						u8 reg, u8 value)
{
	u8 *cache = codec->reg_cache;

	if (reg >= TWL6040_CACHEREGNUM)
		return;
	cache[reg] = value;
}

/*
 * read from twl6040 hardware register
 * Reads the chip via the MFD core and refreshes the cache with the
 * value read, so subsequent cache reads see the hardware state.
 */
static int twl6040_read_reg_volatile(struct snd_soc_codec *codec,
			unsigned int reg)
{
	struct twl6040 *twl6040 = codec->control_data;
	u8 value;

	if (reg >= TWL6040_CACHEREGNUM)
		return -EIO;

	value = twl6040_reg_read(twl6040, reg);
	twl6040_write_reg_cache(codec, reg, value);

	return value;
}

/*
 * write to the twl6040 register space
 * Updates the cache first, then the hardware register through the MFD
 * core; returns the MFD write result (or -EIO for a bad register).
 */
static int twl6040_write(struct snd_soc_codec *codec,
			unsigned int reg, unsigned int value)
{
	struct twl6040 *twl6040 = codec->control_data;

	if (reg >= TWL6040_CACHEREGNUM)
		return -EIO;

	twl6040_write_reg_cache(codec, reg, value);
	return twl6040_reg_write(twl6040, reg, value);
}

/*
 * Sync the cached values of the vio/gnd-supplied registers to hardware.
 * Safe to call before the full power-up sequence (see twl6040_vio_reg[]).
 */
static void twl6040_init_vio_regs(struct snd_soc_codec *codec)
{
	u8 *cache = codec->reg_cache;
	int reg, i;

	for (i = 0; i < TWL6040_VIOREGNUM; i++) {
		reg = twl6040_vio_reg[i];
		/*
		 * skip read-only registers (ASICID, ASICREV, STATUS)
		 * and registers shared among MFD children
		 */
		switch (reg) {
		case TWL6040_REG_ASICID:
		case TWL6040_REG_ASICREV:
		case TWL6040_REG_INTID:
		case TWL6040_REG_INTMR:
		case TWL6040_REG_NCPCTL:
		case TWL6040_REG_LDOCTL:
		case TWL6040_REG_GPOCTL:
		case TWL6040_REG_ACCCTL:
		case TWL6040_REG_STATUS:
			continue;
		default:
			break;
		}
		twl6040_write(codec, reg, cache[reg]);
	}
}

/*
 * Sync the cached values of the vdd/vss-supplied registers to hardware.
 * Must only be called after the power-up sequence (see twl6040_vdd_reg[]).
 */
static void twl6040_init_vdd_regs(struct snd_soc_codec *codec)
{
	u8 *cache = codec->reg_cache;
	int reg, i;

	for (i = 0; i < TWL6040_VDDREGNUM; i++) {
		reg = twl6040_vdd_reg[i];
		/* skip vibra and PLL registers */
		switch (reg) {
		case TWL6040_REG_VIBCTLL:
		case TWL6040_REG_VIBDATL:
		case TWL6040_REG_VIBCTLR:
		case TWL6040_REG_VIBDATR:
		case TWL6040_REG_HPPLLCTL:
		case TWL6040_REG_LPPLLCTL:
		case TWL6040_REG_LPPLLDIV:
			continue;
		default:
			break;
		}
		twl6040_write(codec, reg, cache[reg]);
	}
}
/* * Ramp HS PGA volume to minimise pops at stream startup and shutdown. */ static inline int twl6040_hs_ramp_step(struct snd_soc_codec *codec, unsigned int left_step, unsigned int right_step) { struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec); struct twl6040_output *headset = &priv->headset; int left_complete = 0, right_complete = 0; u8 reg, val; /* left channel */ left_step = (left_step > 0xF) ? 0xF : left_step; reg = twl6040_read_reg_cache(codec, TWL6040_REG_HSGAIN); val = (~reg & TWL6040_HSL_VOL_MASK); if (headset->ramp == TWL6040_RAMP_UP) { /* ramp step up */ if (val < headset->left_vol) { if (val + left_step > headset->left_vol) val = headset->left_vol; else val += left_step; reg &= ~TWL6040_HSL_VOL_MASK; twl6040_write(codec, TWL6040_REG_HSGAIN, (reg | (~val & TWL6040_HSL_VOL_MASK))); } else { left_complete = 1; } } else if (headset->ramp == TWL6040_RAMP_DOWN) { /* ramp step down */ if (val > 0x0) { if ((int)val - (int)left_step < 0) val = 0; else val -= left_step; reg &= ~TWL6040_HSL_VOL_MASK; twl6040_write(codec, TWL6040_REG_HSGAIN, reg | (~val & TWL6040_HSL_VOL_MASK)); } else { left_complete = 1; } } /* right channel */ right_step = (right_step > 0xF) ? 
0xF : right_step; reg = twl6040_read_reg_cache(codec, TWL6040_REG_HSGAIN); val = (~reg & TWL6040_HSR_VOL_MASK) >> TWL6040_HSR_VOL_SHIFT; if (headset->ramp == TWL6040_RAMP_UP) { /* ramp step up */ if (val < headset->right_vol) { if (val + right_step > headset->right_vol) val = headset->right_vol; else val += right_step; reg &= ~TWL6040_HSR_VOL_MASK; twl6040_write(codec, TWL6040_REG_HSGAIN, (reg | (~val << TWL6040_HSR_VOL_SHIFT))); } else { right_complete = 1; } } else if (headset->ramp == TWL6040_RAMP_DOWN) { /* ramp step down */ if (val > 0x0) { if ((int)val - (int)right_step < 0) val = 0; else val -= right_step; reg &= ~TWL6040_HSR_VOL_MASK; twl6040_write(codec, TWL6040_REG_HSGAIN, reg | (~val << TWL6040_HSR_VOL_SHIFT)); } else { right_complete = 1; } } return left_complete & right_complete; } /* * Ramp HF PGA volume to minimise pops at stream startup and shutdown. */ static inline int twl6040_hf_ramp_step(struct snd_soc_codec *codec, unsigned int left_step, unsigned int right_step) { struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec); struct twl6040_output *handsfree = &priv->handsfree; int left_complete = 0, right_complete = 0; u16 reg, val; /* left channel */ left_step = (left_step > 0x1D) ? 0x1D : left_step; reg = twl6040_read_reg_cache(codec, TWL6040_REG_HFLGAIN); reg = 0x1D - reg; val = (reg & TWL6040_HF_VOL_MASK); if (handsfree->ramp == TWL6040_RAMP_UP) { /* ramp step up */ if (val < handsfree->left_vol) { if (val + left_step > handsfree->left_vol) val = handsfree->left_vol; else val += left_step; reg &= ~TWL6040_HF_VOL_MASK; twl6040_write(codec, TWL6040_REG_HFLGAIN, reg | (0x1D - val)); } else { left_complete = 1; } } else if (handsfree->ramp == TWL6040_RAMP_DOWN) { /* ramp step down */ if (val > 0) { if ((int)val - (int)left_step < 0) val = 0; else val -= left_step; reg &= ~TWL6040_HF_VOL_MASK; twl6040_write(codec, TWL6040_REG_HFLGAIN, reg | (0x1D - val)); } else { left_complete = 1; } } /* right channel */ right_step = (right_step > 0x1D) ? 
0x1D : right_step; reg = twl6040_read_reg_cache(codec, TWL6040_REG_HFRGAIN); reg = 0x1D - reg; val = (reg & TWL6040_HF_VOL_MASK); if (handsfree->ramp == TWL6040_RAMP_UP) { /* ramp step up */ if (val < handsfree->right_vol) { if (val + right_step > handsfree->right_vol) val = handsfree->right_vol; else val += right_step; reg &= ~TWL6040_HF_VOL_MASK; twl6040_write(codec, TWL6040_REG_HFRGAIN, reg | (0x1D - val)); } else { right_complete = 1; } } else if (handsfree->ramp == TWL6040_RAMP_DOWN) { /* ramp step down */ if (val > 0) { if ((int)val - (int)right_step < 0) val = 0; else val -= right_step; reg &= ~TWL6040_HF_VOL_MASK; twl6040_write(codec, TWL6040_REG_HFRGAIN, reg | (0x1D - val)); } } return left_complete & right_complete; } /* * This work ramps both output PGAs at stream start/stop time to * minimise pop associated with DAPM power switching. */ static void twl6040_pga_hs_work(struct work_struct *work) { struct twl6040_data *priv = container_of(work, struct twl6040_data, hs_delayed_work.work); struct snd_soc_codec *codec = priv->codec; struct twl6040_output *headset = &priv->headset; unsigned int delay = headset->step_delay; int i, headset_complete; /* do we need to ramp at all ? */ if (headset->ramp == TWL6040_RAMP_NONE) return; /* HS PGA volumes have 4 bits of resolution to ramp */ for (i = 0; i <= 16; i++) { headset_complete = twl6040_hs_ramp_step(codec, headset->left_step, headset->right_step); /* ramp finished ? */ if (headset_complete) break; /* * TODO: tune: delay is longer over 0dB * as increases are larger. 
*/ if (i >= 8) schedule_timeout_interruptible(msecs_to_jiffies(delay + (delay >> 1))); else schedule_timeout_interruptible(msecs_to_jiffies(delay)); } if (headset->ramp == TWL6040_RAMP_DOWN) { headset->active = 0; complete(&headset->ramp_done); } else { headset->active = 1; } headset->ramp = TWL6040_RAMP_NONE; } static void twl6040_pga_hf_work(struct work_struct *work) { struct twl6040_data *priv = container_of(work, struct twl6040_data, hf_delayed_work.work); struct snd_soc_codec *codec = priv->codec; struct twl6040_output *handsfree = &priv->handsfree; unsigned int delay = handsfree->step_delay; int i, handsfree_complete; /* do we need to ramp at all ? */ if (handsfree->ramp == TWL6040_RAMP_NONE) return; /* HF PGA volumes have 5 bits of resolution to ramp */ for (i = 0; i <= 32; i++) { handsfree_complete = twl6040_hf_ramp_step(codec, handsfree->left_step, handsfree->right_step); /* ramp finished ? */ if (handsfree_complete) break; /* * TODO: tune: delay is longer over 0dB * as increases are larger. 
*/ if (i >= 16) schedule_timeout_interruptible(msecs_to_jiffies(delay + (delay >> 1))); else schedule_timeout_interruptible(msecs_to_jiffies(delay)); } if (handsfree->ramp == TWL6040_RAMP_DOWN) { handsfree->active = 0; complete(&handsfree->ramp_done); } else handsfree->active = 1; handsfree->ramp = TWL6040_RAMP_NONE; } static int pga_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_codec *codec = w->codec; struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec); struct twl6040_output *out; struct delayed_work *work; struct workqueue_struct *queue; switch (w->shift) { case 2: case 3: out = &priv->headset; work = &priv->hs_delayed_work; queue = priv->hs_workqueue; out->left_step = priv->hs_left_step; out->right_step = priv->hs_right_step; out->step_delay = 5; /* 5 ms between volume ramp steps */ break; case 4: out = &priv->handsfree; work = &priv->hf_delayed_work; queue = priv->hf_workqueue; out->left_step = priv->hf_left_step; out->right_step = priv->hf_right_step; out->step_delay = 5; /* 5 ms between volume ramp steps */ if (SND_SOC_DAPM_EVENT_ON(event)) priv->non_lp++; else priv->non_lp--; break; default: return -1; } switch (event) { case SND_SOC_DAPM_POST_PMU: if (out->active) break; /* don't use volume ramp for power-up */ out->left_step = out->left_vol; out->right_step = out->right_vol; if (!delayed_work_pending(work)) { out->ramp = TWL6040_RAMP_UP; queue_delayed_work(queue, work, msecs_to_jiffies(1)); } break; case SND_SOC_DAPM_PRE_PMD: if (!out->active) break; if (!delayed_work_pending(work)) { /* use volume ramp for power-down */ out->ramp = TWL6040_RAMP_DOWN; INIT_COMPLETION(out->ramp_done); queue_delayed_work(queue, work, msecs_to_jiffies(1)); wait_for_completion_timeout(&out->ramp_done, msecs_to_jiffies(2000)); } break; } return 0; } /* set headset dac and driver power mode */ static int headset_power_mode(struct snd_soc_codec *codec, int high_perf) { int hslctl, hsrctl; int mask = TWL6040_HSDRVMODEL | 
TWL6040_HSDACMODEL; hslctl = twl6040_read_reg_cache(codec, TWL6040_REG_HSLCTL); hsrctl = twl6040_read_reg_cache(codec, TWL6040_REG_HSRCTL); if (high_perf) { hslctl &= ~mask; hsrctl &= ~mask; } else { hslctl |= mask; hsrctl |= mask; } twl6040_write(codec, TWL6040_REG_HSLCTL, hslctl); twl6040_write(codec, TWL6040_REG_HSRCTL, hsrctl); return 0; } static int twl6040_hs_dac_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { msleep(1); return 0; } static int twl6040_power_mode_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_codec *codec = w->codec; struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec); int ret = 0; if (SND_SOC_DAPM_EVENT_ON(event)) { priv->non_lp++; if (!strcmp(w->name, "Earphone Driver")) { /* Earphone doesn't support low power mode */ priv->hs_power_mode_locked = 1; ret = headset_power_mode(codec, 1); } } else { priv->non_lp--; if (!strcmp(w->name, "Earphone Driver")) { priv->hs_power_mode_locked = 0; ret = headset_power_mode(codec, priv->hs_power_mode); } } msleep(1); return ret; } static void twl6040_hs_jack_report(struct snd_soc_codec *codec, struct snd_soc_jack *jack, int report) { struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec); int status; mutex_lock(&priv->mutex); /* Sync status */ status = twl6040_read_reg_volatile(codec, TWL6040_REG_STATUS); if (status & TWL6040_PLUGCOMP) snd_soc_jack_report(jack, report, report); else snd_soc_jack_report(jack, 0, report); mutex_unlock(&priv->mutex); } void twl6040_hs_jack_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack, int report) { struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec); struct twl6040_jack_data *hs_jack = &priv->hs_jack; hs_jack->jack = jack; hs_jack->report = report; twl6040_hs_jack_report(codec, hs_jack->jack, hs_jack->report); } EXPORT_SYMBOL_GPL(twl6040_hs_jack_detect); static void twl6040_accessory_work(struct work_struct *work) { struct twl6040_data *priv = 
container_of(work, struct twl6040_data, delayed_work.work); struct snd_soc_codec *codec = priv->codec; struct twl6040_jack_data *hs_jack = &priv->hs_jack; twl6040_hs_jack_report(codec, hs_jack->jack, hs_jack->report); } /* audio interrupt handler */ static irqreturn_t twl6040_audio_handler(int irq, void *data) { struct snd_soc_codec *codec = data; struct twl6040 *twl6040 = codec->control_data; struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec); u8 intid; intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID); if ((intid & TWL6040_PLUGINT) || (intid & TWL6040_UNPLUGINT)) queue_delayed_work(priv->workqueue, &priv->delayed_work, msecs_to_jiffies(200)); return IRQ_HANDLED; } static int twl6040_put_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct twl6040_data *twl6040_priv = snd_soc_codec_get_drvdata(codec); struct twl6040_output *out = NULL; struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; int ret; unsigned int reg = mc->reg; /* For HS and HF we shadow the values and only actually write * them out when active in order to ensure the amplifier comes on * as quietly as possible. 
*/ switch (reg) { case TWL6040_REG_HSGAIN: out = &twl6040_priv->headset; break; default: break; } if (out) { out->left_vol = ucontrol->value.integer.value[0]; out->right_vol = ucontrol->value.integer.value[1]; if (!out->active) return 1; } ret = snd_soc_put_volsw(kcontrol, ucontrol); if (ret < 0) return ret; return 1; } static int twl6040_get_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct twl6040_data *twl6040_priv = snd_soc_codec_get_drvdata(codec); struct twl6040_output *out = &twl6040_priv->headset; struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; unsigned int reg = mc->reg; switch (reg) { case TWL6040_REG_HSGAIN: out = &twl6040_priv->headset; ucontrol->value.integer.value[0] = out->left_vol; ucontrol->value.integer.value[1] = out->right_vol; return 0; default: break; } return snd_soc_get_volsw(kcontrol, ucontrol); } static int twl6040_put_volsw_2r_vu(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct twl6040_data *twl6040_priv = snd_soc_codec_get_drvdata(codec); struct twl6040_output *out = NULL; struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; int ret; unsigned int reg = mc->reg; /* For HS and HF we shadow the values and only actually write * them out when active in order to ensure the amplifier comes on * as quietly as possible. 
*/ switch (reg) { case TWL6040_REG_HFLGAIN: case TWL6040_REG_HFRGAIN: out = &twl6040_priv->handsfree; break; default: break; } if (out) { out->left_vol = ucontrol->value.integer.value[0]; out->right_vol = ucontrol->value.integer.value[1]; if (!out->active) return 1; } ret = snd_soc_put_volsw_2r(kcontrol, ucontrol); if (ret < 0) return ret; return 1; } static int twl6040_get_volsw_2r(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct twl6040_data *twl6040_priv = snd_soc_codec_get_drvdata(codec); struct twl6040_output *out = &twl6040_priv->handsfree; struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; unsigned int reg = mc->reg; /* If these are cached registers use the cache */ switch (reg) { case TWL6040_REG_HFLGAIN: case TWL6040_REG_HFRGAIN: out = &twl6040_priv->handsfree; ucontrol->value.integer.value[0] = out->left_vol; ucontrol->value.integer.value[1] = out->right_vol; return 0; default: break; } return snd_soc_get_volsw_2r(kcontrol, ucontrol); } /* double control with volume update */ #define SOC_TWL6040_DOUBLE_TLV(xname, xreg, shift_left, shift_right, xmax,\ xinvert, tlv_array)\ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\ SNDRV_CTL_ELEM_ACCESS_READWRITE,\ .tlv.p = (tlv_array), \ .info = snd_soc_info_volsw, .get = twl6040_get_volsw, \ .put = twl6040_put_volsw, \ .private_value = (unsigned long)&(struct soc_mixer_control) \ {.reg = xreg, .shift = shift_left, .rshift = shift_right,\ .max = xmax, .platform_max = xmax, .invert = xinvert} } /* double control with volume update */ #define SOC_TWL6040_DOUBLE_R_TLV(xname, reg_left, reg_right, xshift, xmax,\ xinvert, tlv_array)\ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \ SNDRV_CTL_ELEM_ACCESS_READWRITE | \ SNDRV_CTL_ELEM_ACCESS_VOLATILE, \ .tlv.p = (tlv_array), \ .info = 
snd_soc_info_volsw_2r, \ .get = twl6040_get_volsw_2r, .put = twl6040_put_volsw_2r_vu, \ .private_value = (unsigned long)&(struct soc_mixer_control) \ {.reg = reg_left, .rreg = reg_right, .shift = xshift, \ .rshift = xshift, .max = xmax, .invert = xinvert}, } /* * MICATT volume control: * from -6 to 0 dB in 6 dB steps */ static DECLARE_TLV_DB_SCALE(mic_preamp_tlv, -600, 600, 0); /* * MICGAIN volume control: * from 6 to 30 dB in 6 dB steps */ static DECLARE_TLV_DB_SCALE(mic_amp_tlv, 600, 600, 0); /* * AFMGAIN volume control: * from -18 to 24 dB in 6 dB steps */ static DECLARE_TLV_DB_SCALE(afm_amp_tlv, -1800, 600, 0); /* * HSGAIN volume control: * from -30 to 0 dB in 2 dB steps */ static DECLARE_TLV_DB_SCALE(hs_tlv, -3000, 200, 0); /* * HFGAIN volume control: * from -52 to 6 dB in 2 dB steps */ static DECLARE_TLV_DB_SCALE(hf_tlv, -5200, 200, 0); /* * EPGAIN volume control: * from -24 to 6 dB in 2 dB steps */ static DECLARE_TLV_DB_SCALE(ep_tlv, -2400, 200, 0); /* Left analog microphone selection */ static const char *twl6040_amicl_texts[] = {"Headset Mic", "Main Mic", "Aux/FM Left", "Off"}; /* Right analog microphone selection */ static const char *twl6040_amicr_texts[] = {"Headset Mic", "Sub Mic", "Aux/FM Right", "Off"}; static const struct soc_enum twl6040_enum[] = { SOC_ENUM_SINGLE(TWL6040_REG_MICLCTL, 3, 4, twl6040_amicl_texts), SOC_ENUM_SINGLE(TWL6040_REG_MICRCTL, 3, 4, twl6040_amicr_texts), }; static const char *twl6040_hs_texts[] = { "Off", "HS DAC", "Line-In amp" }; static const struct soc_enum twl6040_hs_enum[] = { SOC_ENUM_SINGLE(TWL6040_REG_HSLCTL, 5, ARRAY_SIZE(twl6040_hs_texts), twl6040_hs_texts), SOC_ENUM_SINGLE(TWL6040_REG_HSRCTL, 5, ARRAY_SIZE(twl6040_hs_texts), twl6040_hs_texts), }; static const char *twl6040_hf_texts[] = { "Off", "HF DAC", "Line-In amp" }; static const struct soc_enum twl6040_hf_enum[] = { SOC_ENUM_SINGLE(TWL6040_REG_HFLCTL, 2, ARRAY_SIZE(twl6040_hf_texts), twl6040_hf_texts), SOC_ENUM_SINGLE(TWL6040_REG_HFRCTL, 2, 
ARRAY_SIZE(twl6040_hf_texts), twl6040_hf_texts), }; static const struct snd_kcontrol_new amicl_control = SOC_DAPM_ENUM("Route", twl6040_enum[0]); static const struct snd_kcontrol_new amicr_control = SOC_DAPM_ENUM("Route", twl6040_enum[1]); /* Headset DAC playback switches */ static const struct snd_kcontrol_new hsl_mux_controls = SOC_DAPM_ENUM("Route", twl6040_hs_enum[0]); static const struct snd_kcontrol_new hsr_mux_controls = SOC_DAPM_ENUM("Route", twl6040_hs_enum[1]); /* Handsfree DAC playback switches */ static const struct snd_kcontrol_new hfl_mux_controls = SOC_DAPM_ENUM("Route", twl6040_hf_enum[0]); static const struct snd_kcontrol_new hfr_mux_controls = SOC_DAPM_ENUM("Route", twl6040_hf_enum[1]); static const struct snd_kcontrol_new ep_driver_switch_controls = SOC_DAPM_SINGLE("Switch", TWL6040_REG_EARCTL, 0, 1, 0); /* Headset power mode */ static const char *twl6040_power_mode_texts[] = { "Low-Power", "High-Perfomance", }; static const struct soc_enum twl6040_power_mode_enum = SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(twl6040_power_mode_texts), twl6040_power_mode_texts); static int twl6040_headset_power_get_enum(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec); ucontrol->value.enumerated.item[0] = priv->hs_power_mode; return 0; } static int twl6040_headset_power_put_enum(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec); int high_perf = ucontrol->value.enumerated.item[0]; int ret = 0; if (!priv->hs_power_mode_locked) ret = headset_power_mode(codec, high_perf); if (!ret) priv->hs_power_mode = high_perf; return ret; } static int twl6040_pll_get_enum(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct 
twl6040_data *priv = snd_soc_codec_get_drvdata(codec); ucontrol->value.enumerated.item[0] = priv->pll_power_mode; return 0; } static int twl6040_pll_put_enum(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec); priv->pll_power_mode = ucontrol->value.enumerated.item[0]; return 0; } int twl6040_get_clk_id(struct snd_soc_codec *codec) { struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec); return priv->pll_power_mode; } EXPORT_SYMBOL_GPL(twl6040_get_clk_id); static const struct snd_kcontrol_new twl6040_snd_controls[] = { /* Capture gains */ SOC_DOUBLE_TLV("Capture Preamplifier Volume", TWL6040_REG_MICGAIN, 6, 7, 1, 1, mic_preamp_tlv), SOC_DOUBLE_TLV("Capture Volume", TWL6040_REG_MICGAIN, 0, 3, 4, 0, mic_amp_tlv), /* AFM gains */ SOC_DOUBLE_TLV("Aux FM Volume", TWL6040_REG_LINEGAIN, 0, 3, 7, 0, afm_amp_tlv), /* Playback gains */ SOC_TWL6040_DOUBLE_TLV("Headset Playback Volume", TWL6040_REG_HSGAIN, 0, 4, 0xF, 1, hs_tlv), SOC_TWL6040_DOUBLE_R_TLV("Handsfree Playback Volume", TWL6040_REG_HFLGAIN, TWL6040_REG_HFRGAIN, 0, 0x1D, 1, hf_tlv), SOC_SINGLE_TLV("Earphone Playback Volume", TWL6040_REG_EARCTL, 1, 0xF, 1, ep_tlv), SOC_ENUM_EXT("Headset Power Mode", twl6040_power_mode_enum, twl6040_headset_power_get_enum, twl6040_headset_power_put_enum), SOC_ENUM_EXT("PLL Selection", twl6040_power_mode_enum, twl6040_pll_get_enum, twl6040_pll_put_enum), }; static const struct snd_soc_dapm_widget twl6040_dapm_widgets[] = { /* Inputs */ SND_SOC_DAPM_INPUT("MAINMIC"), SND_SOC_DAPM_INPUT("HSMIC"), SND_SOC_DAPM_INPUT("SUBMIC"), SND_SOC_DAPM_INPUT("AFML"), SND_SOC_DAPM_INPUT("AFMR"), /* Outputs */ SND_SOC_DAPM_OUTPUT("HSOL"), SND_SOC_DAPM_OUTPUT("HSOR"), SND_SOC_DAPM_OUTPUT("HFL"), SND_SOC_DAPM_OUTPUT("HFR"), SND_SOC_DAPM_OUTPUT("EP"), /* Analog input muxes for the capture amplifiers */ SND_SOC_DAPM_MUX("Analog Left Capture Route", SND_SOC_NOPM, 0, 
0, &amicl_control), SND_SOC_DAPM_MUX("Analog Right Capture Route", SND_SOC_NOPM, 0, 0, &amicr_control), /* Analog capture PGAs */ SND_SOC_DAPM_PGA("MicAmpL", TWL6040_REG_MICLCTL, 0, 0, NULL, 0), SND_SOC_DAPM_PGA("MicAmpR", TWL6040_REG_MICRCTL, 0, 0, NULL, 0), /* Auxiliary FM PGAs */ SND_SOC_DAPM_PGA("AFMAmpL", TWL6040_REG_MICLCTL, 1, 0, NULL, 0), SND_SOC_DAPM_PGA("AFMAmpR", TWL6040_REG_MICRCTL, 1, 0, NULL, 0), /* ADCs */ SND_SOC_DAPM_ADC("ADC Left", "Left Front Capture", TWL6040_REG_MICLCTL, 2, 0), SND_SOC_DAPM_ADC("ADC Right", "Right Front Capture", TWL6040_REG_MICRCTL, 2, 0), /* Microphone bias */ SND_SOC_DAPM_MICBIAS("Headset Mic Bias", TWL6040_REG_AMICBCTL, 0, 0), SND_SOC_DAPM_MICBIAS("Main Mic Bias", TWL6040_REG_AMICBCTL, 4, 0), SND_SOC_DAPM_MICBIAS("Digital Mic1 Bias", TWL6040_REG_DMICBCTL, 0, 0), SND_SOC_DAPM_MICBIAS("Digital Mic2 Bias", TWL6040_REG_DMICBCTL, 4, 0), /* DACs */ SND_SOC_DAPM_DAC_E("HSDAC Left", "Headset Playback", TWL6040_REG_HSLCTL, 0, 0, twl6040_hs_dac_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_DAC_E("HSDAC Right", "Headset Playback", TWL6040_REG_HSRCTL, 0, 0, twl6040_hs_dac_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_DAC_E("HFDAC Left", "Handsfree Playback", TWL6040_REG_HFLCTL, 0, 0, twl6040_power_mode_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_DAC_E("HFDAC Right", "Handsfree Playback", TWL6040_REG_HFRCTL, 0, 0, twl6040_power_mode_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_MUX("HF Left Playback", SND_SOC_NOPM, 0, 0, &hfl_mux_controls), SND_SOC_DAPM_MUX("HF Right Playback", SND_SOC_NOPM, 0, 0, &hfr_mux_controls), /* Analog playback Muxes */ SND_SOC_DAPM_MUX("HS Left Playback", SND_SOC_NOPM, 0, 0, &hsl_mux_controls), SND_SOC_DAPM_MUX("HS Right Playback", SND_SOC_NOPM, 0, 0, &hsr_mux_controls), /* Analog playback drivers */ SND_SOC_DAPM_OUT_DRV_E("Handsfree Left Driver", TWL6040_REG_HFLCTL, 4, 0, NULL, 0, pga_event, SND_SOC_DAPM_POST_PMU | 
SND_SOC_DAPM_PRE_PMD), SND_SOC_DAPM_OUT_DRV_E("Handsfree Right Driver", TWL6040_REG_HFRCTL, 4, 0, NULL, 0, pga_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), SND_SOC_DAPM_OUT_DRV_E("Headset Left Driver", TWL6040_REG_HSLCTL, 2, 0, NULL, 0, pga_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), SND_SOC_DAPM_OUT_DRV_E("Headset Right Driver", TWL6040_REG_HSRCTL, 2, 0, NULL, 0, pga_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), SND_SOC_DAPM_SWITCH_E("Earphone Driver", SND_SOC_NOPM, 0, 0, &ep_driver_switch_controls, twl6040_power_mode_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), /* Analog playback PGAs */ SND_SOC_DAPM_PGA("HFDAC Left PGA", TWL6040_REG_HFLCTL, 1, 0, NULL, 0), SND_SOC_DAPM_PGA("HFDAC Right PGA", TWL6040_REG_HFRCTL, 1, 0, NULL, 0), }; static const struct snd_soc_dapm_route intercon[] = { /* Capture path */ {"Analog Left Capture Route", "Headset Mic", "HSMIC"}, {"Analog Left Capture Route", "Main Mic", "MAINMIC"}, {"Analog Left Capture Route", "Aux/FM Left", "AFML"}, {"Analog Right Capture Route", "Headset Mic", "HSMIC"}, {"Analog Right Capture Route", "Sub Mic", "SUBMIC"}, {"Analog Right Capture Route", "Aux/FM Right", "AFMR"}, {"MicAmpL", NULL, "Analog Left Capture Route"}, {"MicAmpR", NULL, "Analog Right Capture Route"}, {"ADC Left", NULL, "MicAmpL"}, {"ADC Right", NULL, "MicAmpR"}, /* AFM path */ {"AFMAmpL", "NULL", "AFML"}, {"AFMAmpR", "NULL", "AFMR"}, {"HS Left Playback", "HS DAC", "HSDAC Left"}, {"HS Left Playback", "Line-In amp", "AFMAmpL"}, {"HS Right Playback", "HS DAC", "HSDAC Right"}, {"HS Right Playback", "Line-In amp", "AFMAmpR"}, {"Headset Left Driver", "NULL", "HS Left Playback"}, {"Headset Right Driver", "NULL", "HS Right Playback"}, {"HSOL", NULL, "Headset Left Driver"}, {"HSOR", NULL, "Headset Right Driver"}, /* Earphone playback path */ {"Earphone Driver", "Switch", "HSDAC Left"}, {"EP", NULL, "Earphone Driver"}, {"HF Left Playback", "HF DAC", "HFDAC Left"}, {"HF Left Playback", "Line-In amp", "AFMAmpL"}, {"HF 
Right Playback", "HF DAC", "HFDAC Right"}, {"HF Right Playback", "Line-In amp", "AFMAmpR"}, {"HFDAC Left PGA", NULL, "HF Left Playback"}, {"HFDAC Right PGA", NULL, "HF Right Playback"}, {"Handsfree Left Driver", "Switch", "HFDAC Left PGA"}, {"Handsfree Right Driver", "Switch", "HFDAC Right PGA"}, {"HFL", NULL, "Handsfree Left Driver"}, {"HFR", NULL, "Handsfree Right Driver"}, }; static int twl6040_add_widgets(struct snd_soc_codec *codec) { struct snd_soc_dapm_context *dapm = &codec->dapm; snd_soc_dapm_new_controls(dapm, twl6040_dapm_widgets, ARRAY_SIZE(twl6040_dapm_widgets)); snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon)); snd_soc_dapm_new_widgets(dapm); return 0; } static int twl6040_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { struct twl6040 *twl6040 = codec->control_data; struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec); int ret; switch (level) { case SND_SOC_BIAS_ON: break; case SND_SOC_BIAS_PREPARE: break; case SND_SOC_BIAS_STANDBY: if (priv->codec_powered) break; ret = twl6040_power(twl6040, 1); if (ret) return ret; priv->codec_powered = 1; /* initialize vdd/vss registers with reg_cache */ twl6040_init_vdd_regs(codec); /* Set external boost GPO */ twl6040_write(codec, TWL6040_REG_GPOCTL, 0x02); break; case SND_SOC_BIAS_OFF: if (!priv->codec_powered) break; twl6040_power(twl6040, 0); priv->codec_powered = 0; break; } codec->dapm.bias_level = level; return 0; } static int twl6040_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_codec *codec = rtd->codec; struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec); snd_pcm_hw_constraint_list(substream->runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &sysclk_constraints[priv->pll_power_mode]); return 0; } static int twl6040_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = 
substream->private_data; struct snd_soc_codec *codec = rtd->codec; struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec); int rate; rate = params_rate(params); switch (rate) { case 11250: case 22500: case 44100: case 88200: /* These rates are not supported when HPPLL is in use */ if (unlikely(priv->pll == TWL6040_SYSCLK_SEL_HPPLL)) { dev_err(codec->dev, "HPPLL does not support rate %d\n", rate); return -EINVAL; } /* Capture is not supported with 17.64MHz sysclk */ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { dev_err(codec->dev, "capture mode is not supported at %dHz\n", rate); return -EINVAL; } priv->sysclk = 17640000; break; case 8000: case 16000: case 32000: case 48000: case 96000: priv->sysclk = 19200000; break; default: dev_err(codec->dev, "unsupported rate %d\n", rate); return -EINVAL; } return 0; } static int twl6040_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_codec *codec = rtd->codec; struct twl6040 *twl6040 = codec->control_data; struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec); int ret; if (!priv->sysclk) { dev_err(codec->dev, "no mclk configured, call set_sysclk() on init\n"); return -EINVAL; } if ((priv->sysclk == 17640000) && priv->non_lp) { dev_err(codec->dev, "some enabled paths aren't supported at %dHz\n", priv->sysclk); return -EPERM; } ret = twl6040_set_pll(twl6040, priv->pll, priv->clk_in, priv->sysclk); if (ret) { dev_err(codec->dev, "Can not set PLL (%d)\n", ret); return -EPERM; } return 0; } static int twl6040_set_dai_sysclk(struct snd_soc_dai *codec_dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = codec_dai->codec; struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec); switch (clk_id) { case TWL6040_SYSCLK_SEL_LPPLL: case TWL6040_SYSCLK_SEL_HPPLL: priv->pll = clk_id; priv->clk_in = freq; break; default: dev_err(codec->dev, "unknown clk_id %d\n", clk_id); return -EINVAL; } return 0; 
} static struct snd_soc_dai_ops twl6040_dai_ops = { .startup = twl6040_startup, .hw_params = twl6040_hw_params, .prepare = twl6040_prepare, .set_sysclk = twl6040_set_dai_sysclk, }; static struct snd_soc_dai_driver twl6040_dai[] = { { .name = "twl6040-hifi", .playback = { .stream_name = "Playback", .channels_min = 1, .channels_max = 2, .rates = TWL6040_RATES, .formats = TWL6040_FORMATS, }, .capture = { .stream_name = "Capture", .channels_min = 1, .channels_max = 2, .rates = TWL6040_RATES, .formats = TWL6040_FORMATS, }, .ops = &twl6040_dai_ops, }, { .name = "twl6040-ul", .capture = { .stream_name = "Capture", .channels_min = 1, .channels_max = 2, .rates = TWL6040_RATES, .formats = TWL6040_FORMATS, }, .ops = &twl6040_dai_ops, }, { .name = "twl6040-dl1", .playback = { .stream_name = "Headset Playback", .channels_min = 1, .channels_max = 2, .rates = TWL6040_RATES, .formats = TWL6040_FORMATS, }, .ops = &twl6040_dai_ops, }, { .name = "twl6040-dl2", .playback = { .stream_name = "Handsfree Playback", .channels_min = 1, .channels_max = 2, .rates = TWL6040_RATES, .formats = TWL6040_FORMATS, }, .ops = &twl6040_dai_ops, }, { .name = "twl6040-vib", .playback = { .stream_name = "Vibra Playback", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_CONTINUOUS, .formats = TWL6040_FORMATS, }, .ops = &twl6040_dai_ops, }, }; #ifdef CONFIG_PM static int twl6040_suspend(struct snd_soc_codec *codec, pm_message_t state) { twl6040_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static int twl6040_resume(struct snd_soc_codec *codec) { twl6040_set_bias_level(codec, SND_SOC_BIAS_STANDBY); twl6040_set_bias_level(codec, codec->dapm.suspend_bias_level); return 0; } #else #define twl6040_suspend NULL #define twl6040_resume NULL #endif static int twl6040_probe(struct snd_soc_codec *codec) { struct twl6040_data *priv; struct twl4030_codec_data *pdata = dev_get_platdata(codec->dev); struct platform_device *pdev = container_of(codec->dev, struct platform_device, dev); int ret = 0; priv 
= kzalloc(sizeof(struct twl6040_data), GFP_KERNEL); if (priv == NULL) return -ENOMEM; snd_soc_codec_set_drvdata(codec, priv); priv->codec = codec; codec->control_data = dev_get_drvdata(codec->dev->parent); if (pdata && pdata->hs_left_step && pdata->hs_right_step) { priv->hs_left_step = pdata->hs_left_step; priv->hs_right_step = pdata->hs_right_step; } else { priv->hs_left_step = 1; priv->hs_right_step = 1; } if (pdata && pdata->hf_left_step && pdata->hf_right_step) { priv->hf_left_step = pdata->hf_left_step; priv->hf_right_step = pdata->hf_right_step; } else { priv->hf_left_step = 1; priv->hf_right_step = 1; } priv->plug_irq = platform_get_irq(pdev, 0); if (priv->plug_irq < 0) { dev_err(codec->dev, "invalid irq\n"); ret = -EINVAL; goto work_err; } priv->workqueue = create_singlethread_workqueue("twl6040-codec"); if (!priv->workqueue) { ret = -ENOMEM; goto work_err; } INIT_DELAYED_WORK(&priv->delayed_work, twl6040_accessory_work); mutex_init(&priv->mutex); init_completion(&priv->headset.ramp_done); init_completion(&priv->handsfree.ramp_done); priv->hf_workqueue = create_singlethread_workqueue("twl6040-hf"); if (priv->hf_workqueue == NULL) { ret = -ENOMEM; goto hfwq_err; } priv->hs_workqueue = create_singlethread_workqueue("twl6040-hs"); if (priv->hs_workqueue == NULL) { ret = -ENOMEM; goto hswq_err; } INIT_DELAYED_WORK(&priv->hs_delayed_work, twl6040_pga_hs_work); INIT_DELAYED_WORK(&priv->hf_delayed_work, twl6040_pga_hf_work); ret = request_threaded_irq(priv->plug_irq, NULL, twl6040_audio_handler, 0, "twl6040_irq_plug", codec); if (ret) { dev_err(codec->dev, "PLUG IRQ request failed: %d\n", ret); goto plugirq_err; } /* init vio registers */ twl6040_init_vio_regs(codec); /* power on device */ ret = twl6040_set_bias_level(codec, SND_SOC_BIAS_STANDBY); if (ret) goto bias_err; snd_soc_add_controls(codec, twl6040_snd_controls, ARRAY_SIZE(twl6040_snd_controls)); twl6040_add_widgets(codec); return 0; bias_err: free_irq(priv->plug_irq, codec); plugirq_err: 
destroy_workqueue(priv->hs_workqueue); hswq_err: destroy_workqueue(priv->hf_workqueue); hfwq_err: destroy_workqueue(priv->workqueue); work_err: kfree(priv); return ret; } static int twl6040_remove(struct snd_soc_codec *codec) { struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec); twl6040_set_bias_level(codec, SND_SOC_BIAS_OFF); free_irq(priv->plug_irq, codec); destroy_workqueue(priv->workqueue); destroy_workqueue(priv->hf_workqueue); destroy_workqueue(priv->hs_workqueue); kfree(priv); return 0; } static struct snd_soc_codec_driver soc_codec_dev_twl6040 = { .probe = twl6040_probe, .remove = twl6040_remove, .suspend = twl6040_suspend, .resume = twl6040_resume, .read = twl6040_read_reg_cache, .write = twl6040_write, .set_bias_level = twl6040_set_bias_level, .reg_cache_size = ARRAY_SIZE(twl6040_reg), .reg_word_size = sizeof(u8), .reg_cache_default = twl6040_reg, }; static int __devinit twl6040_codec_probe(struct platform_device *pdev) { return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_twl6040, twl6040_dai, ARRAY_SIZE(twl6040_dai)); } static int __devexit twl6040_codec_remove(struct platform_device *pdev) { snd_soc_unregister_codec(&pdev->dev); return 0; } static struct platform_driver twl6040_codec_driver = { .driver = { .name = "twl6040-codec", .owner = THIS_MODULE, }, .probe = twl6040_codec_probe, .remove = __devexit_p(twl6040_codec_remove), }; static int __init twl6040_codec_init(void) { return platform_driver_register(&twl6040_codec_driver); } module_init(twl6040_codec_init); static void __exit twl6040_codec_exit(void) { platform_driver_unregister(&twl6040_codec_driver); } module_exit(twl6040_codec_exit); MODULE_DESCRIPTION("ASoC TWL6040 codec driver"); MODULE_AUTHOR("Misael Lopez Cruz"); MODULE_LICENSE("GPL");
gpl-2.0
piccolo-dev/android_kernel_bq_piccolo
drivers/soc/qcom/msm_rq_stats.c
394
10025
/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ /* * Qualcomm MSM Runqueue Stats and cpu utilization Interface for Userspace */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/hrtimer.h> #include <linux/cpu.h> #include <linux/kobject.h> #include <linux/sysfs.h> #include <linux/notifier.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/rq_stats.h> #include <linux/cpufreq.h> #include <linux/kernel_stat.h> #include <linux/tick.h> #include <asm/smp_plat.h> #include <linux/suspend.h> #define MAX_LONG_SIZE 24 #define DEFAULT_RQ_POLL_JIFFIES 1 #define DEFAULT_DEF_TIMER_JIFFIES 5 struct notifier_block freq_transition; struct notifier_block cpu_hotplug; struct cpu_load_data { cputime64_t prev_cpu_idle; cputime64_t prev_cpu_wall; unsigned int avg_load_maxfreq; unsigned int samples; unsigned int window_size; unsigned int cur_freq; unsigned int policy_max; cpumask_var_t related_cpus; struct mutex cpu_load_mutex; }; static DEFINE_PER_CPU(struct cpu_load_data, cpuload); static int update_average_load(unsigned int freq, unsigned int cpu) { struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu); cputime64_t cur_wall_time, cur_idle_time; unsigned int idle_time, wall_time; unsigned int cur_load, load_at_max_freq; cur_idle_time = get_cpu_idle_time(cpu, &cur_wall_time, 0); wall_time = (unsigned int) (cur_wall_time - pcpu->prev_cpu_wall); pcpu->prev_cpu_wall = cur_wall_time; idle_time = (unsigned int) 
(cur_idle_time - pcpu->prev_cpu_idle); pcpu->prev_cpu_idle = cur_idle_time; if (unlikely(wall_time <= 0 || wall_time < idle_time)) return 0; cur_load = 100 * (wall_time - idle_time) / wall_time; /* Calculate the scaled load across CPU */ load_at_max_freq = (cur_load * freq) / pcpu->policy_max; if (!pcpu->avg_load_maxfreq) { /* This is the first sample in this window*/ pcpu->avg_load_maxfreq = load_at_max_freq; pcpu->window_size = wall_time; } else { /* * The is already a sample available in this window. * Compute weighted average with prev entry, so that we get * the precise weighted load. */ pcpu->avg_load_maxfreq = ((pcpu->avg_load_maxfreq * pcpu->window_size) + (load_at_max_freq * wall_time)) / (wall_time + pcpu->window_size); pcpu->window_size += wall_time; } return 0; } static unsigned int report_load_at_max_freq(void) { int cpu; struct cpu_load_data *pcpu; unsigned int total_load = 0; for_each_online_cpu(cpu) { pcpu = &per_cpu(cpuload, cpu); mutex_lock(&pcpu->cpu_load_mutex); update_average_load(pcpu->cur_freq, cpu); total_load += pcpu->avg_load_maxfreq; pcpu->avg_load_maxfreq = 0; mutex_unlock(&pcpu->cpu_load_mutex); } return total_load; } static int cpufreq_transition_handler(struct notifier_block *nb, unsigned long val, void *data) { struct cpufreq_freqs *freqs = data; struct cpu_load_data *this_cpu = &per_cpu(cpuload, freqs->cpu); int j; switch (val) { case CPUFREQ_POSTCHANGE: for_each_cpu(j, this_cpu->related_cpus) { struct cpu_load_data *pcpu = &per_cpu(cpuload, j); mutex_lock(&pcpu->cpu_load_mutex); update_average_load(freqs->old, j); pcpu->cur_freq = freqs->new; mutex_unlock(&pcpu->cpu_load_mutex); } break; } return 0; } static void update_related_cpus(void) { unsigned cpu; for_each_cpu(cpu, cpu_online_mask) { struct cpu_load_data *this_cpu = &per_cpu(cpuload, cpu); struct cpufreq_policy cpu_policy; cpufreq_get_policy(&cpu_policy, cpu); cpumask_copy(this_cpu->related_cpus, cpu_policy.cpus); } } static int cpu_hotplug_handler(struct notifier_block *nb, 
unsigned long val, void *data) { unsigned int cpu = (unsigned long)data; struct cpu_load_data *this_cpu = &per_cpu(cpuload, cpu); switch (val) { case CPU_ONLINE: if (!this_cpu->cur_freq) this_cpu->cur_freq = cpufreq_quick_get(cpu); update_related_cpus(); case CPU_ONLINE_FROZEN: this_cpu->avg_load_maxfreq = 0; } return NOTIFY_OK; } static int system_suspend_handler(struct notifier_block *nb, unsigned long val, void *data) { switch (val) { case PM_POST_HIBERNATION: case PM_POST_SUSPEND: case PM_POST_RESTORE: rq_info.hotplug_disabled = 0; break; case PM_HIBERNATION_PREPARE: case PM_SUSPEND_PREPARE: rq_info.hotplug_disabled = 1; break; default: return NOTIFY_DONE; } return NOTIFY_OK; } static ssize_t hotplug_disable_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { unsigned int val = 0; val = rq_info.hotplug_disabled; return snprintf(buf, MAX_LONG_SIZE, "%d\n", val); } static struct kobj_attribute hotplug_disabled_attr = __ATTR_RO(hotplug_disable); static void def_work_fn(struct work_struct *work) { int64_t diff; diff = ktime_to_ns(ktime_get()) - rq_info.def_start_time; do_div(diff, 1000 * 1000); rq_info.def_interval = (unsigned int) diff; /* Notify polling threads on change of value */ sysfs_notify(rq_info.kobj, NULL, "def_timer_ms"); } static ssize_t run_queue_avg_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { unsigned int val = 0; unsigned long flags = 0; spin_lock_irqsave(&rq_lock, flags); /* rq avg currently available only on one core */ val = rq_info.rq_avg; rq_info.rq_avg = 0; spin_unlock_irqrestore(&rq_lock, flags); return snprintf(buf, PAGE_SIZE, "%d.%d\n", val/10, val%10); } static struct kobj_attribute run_queue_avg_attr = __ATTR_RO(run_queue_avg); static ssize_t show_run_queue_poll_ms(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { int ret = 0; unsigned long flags = 0; spin_lock_irqsave(&rq_lock, flags); ret = snprintf(buf, MAX_LONG_SIZE, "%u\n", jiffies_to_msecs(rq_info.rq_poll_jiffies)); 
spin_unlock_irqrestore(&rq_lock, flags); return ret; } static ssize_t store_run_queue_poll_ms(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned int val = 0; unsigned long flags = 0; static DEFINE_MUTEX(lock_poll_ms); mutex_lock(&lock_poll_ms); spin_lock_irqsave(&rq_lock, flags); sscanf(buf, "%u", &val); rq_info.rq_poll_jiffies = msecs_to_jiffies(val); spin_unlock_irqrestore(&rq_lock, flags); mutex_unlock(&lock_poll_ms); return count; } static struct kobj_attribute run_queue_poll_ms_attr = __ATTR(run_queue_poll_ms, S_IWUSR | S_IRUSR, show_run_queue_poll_ms, store_run_queue_poll_ms); static ssize_t show_def_timer_ms(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return snprintf(buf, MAX_LONG_SIZE, "%u\n", rq_info.def_interval); } static ssize_t store_def_timer_ms(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned int val = 0; sscanf(buf, "%u", &val); rq_info.def_timer_jiffies = msecs_to_jiffies(val); rq_info.def_start_time = ktime_to_ns(ktime_get()); return count; } static struct kobj_attribute def_timer_ms_attr = __ATTR(def_timer_ms, S_IWUSR | S_IRUSR, show_def_timer_ms, store_def_timer_ms); static ssize_t show_cpu_normalized_load(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return snprintf(buf, MAX_LONG_SIZE, "%u\n", report_load_at_max_freq()); } static struct kobj_attribute cpu_normalized_load_attr = __ATTR(cpu_normalized_load, S_IWUSR | S_IRUSR, show_cpu_normalized_load, NULL); static struct attribute *rq_attrs[] = { &cpu_normalized_load_attr.attr, &def_timer_ms_attr.attr, &run_queue_avg_attr.attr, &run_queue_poll_ms_attr.attr, &hotplug_disabled_attr.attr, NULL, }; static struct attribute_group rq_attr_group = { .attrs = rq_attrs, }; static int init_rq_attribs(void) { int err; rq_info.rq_avg = 0; rq_info.attr_group = &rq_attr_group; /* Create /sys/devices/system/cpu/cpu0/rq-stats/... 
*/ rq_info.kobj = kobject_create_and_add("rq-stats", &get_cpu_device(0)->kobj); if (!rq_info.kobj) return -ENOMEM; err = sysfs_create_group(rq_info.kobj, rq_info.attr_group); if (err) kobject_put(rq_info.kobj); else kobject_uevent(rq_info.kobj, KOBJ_ADD); return err; } static int __init msm_rq_stats_init(void) { int ret; int i; struct cpufreq_policy cpu_policy; #ifndef CONFIG_SMP /* Bail out if this is not an SMP Target */ rq_info.init = 0; return -ENOSYS; #endif rq_wq = create_singlethread_workqueue("rq_stats"); BUG_ON(!rq_wq); INIT_WORK(&rq_info.def_timer_work, def_work_fn); spin_lock_init(&rq_lock); rq_info.rq_poll_jiffies = DEFAULT_RQ_POLL_JIFFIES; rq_info.def_timer_jiffies = DEFAULT_DEF_TIMER_JIFFIES; rq_info.rq_poll_last_jiffy = 0; rq_info.def_timer_last_jiffy = 0; rq_info.hotplug_disabled = 0; ret = init_rq_attribs(); rq_info.init = 1; for_each_possible_cpu(i) { struct cpu_load_data *pcpu = &per_cpu(cpuload, i); mutex_init(&pcpu->cpu_load_mutex); cpufreq_get_policy(&cpu_policy, i); pcpu->policy_max = cpu_policy.cpuinfo.max_freq; if (cpu_online(i)) pcpu->cur_freq = cpufreq_quick_get(i); cpumask_copy(pcpu->related_cpus, cpu_policy.cpus); } freq_transition.notifier_call = cpufreq_transition_handler; cpu_hotplug.notifier_call = cpu_hotplug_handler; cpufreq_register_notifier(&freq_transition, CPUFREQ_TRANSITION_NOTIFIER); register_hotcpu_notifier(&cpu_hotplug); return ret; } late_initcall(msm_rq_stats_init); static int __init msm_rq_stats_early_init(void) { #ifndef CONFIG_SMP /* Bail out if this is not an SMP Target */ rq_info.init = 0; return -ENOSYS; #endif pm_notifier(system_suspend_handler, 0); return 0; } core_initcall(msm_rq_stats_early_init);
gpl-2.0
timduru/tf101-katkernel
drivers/net/tulip/media.c
1162
17038
/* drivers/net/tulip/media.c Copyright 2000,2001 The Linux Kernel Team Written/copyright 1994-2001 by Donald Becker. This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html} for more information on this driver. Please submit bugs to http://bugzilla.kernel.org/ . */ #include <linux/kernel.h> #include <linux/mii.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pci.h> #include "tulip.h" /* The maximum data clock rate is 2.5 Mhz. The minimum timing is usually met by back-to-back PCI I/O cycles, but we insert a delay to avoid "overclocking" issues or future 66Mhz PCI. */ #define mdio_delay() ioread32(mdio_addr) /* Read and write the MII registers using software-generated serial MDIO protocol. It is just different enough from the EEPROM protocol to not share code. The maxium data clock rate is 2.5 Mhz. */ #define MDIO_SHIFT_CLK 0x10000 #define MDIO_DATA_WRITE0 0x00000 #define MDIO_DATA_WRITE1 0x20000 #define MDIO_ENB 0x00000 /* Ignore the 0x02000 databook setting. */ #define MDIO_ENB_IN 0x40000 #define MDIO_DATA_READ 0x80000 static const unsigned char comet_miireg2offset[32] = { 0xB4, 0xB8, 0xBC, 0xC0, 0xC4, 0xC8, 0xCC, 0, 0,0,0,0, 0,0,0,0, 0,0xD0,0,0, 0,0,0,0, 0,0,0,0, 0, 0xD4, 0xD8, 0xDC, }; /* MII transceiver control section. Read and write the MII registers using software-generated serial MDIO protocol. See IEEE 802.3-2002.pdf (Section 2, Chapter "22.2.4 Management functions") or DP83840A data sheet for more details. 
*/ int tulip_mdio_read(struct net_device *dev, int phy_id, int location) { struct tulip_private *tp = netdev_priv(dev); int i; int read_cmd = (0xf6 << 10) | ((phy_id & 0x1f) << 5) | location; int retval = 0; void __iomem *ioaddr = tp->base_addr; void __iomem *mdio_addr = ioaddr + CSR9; unsigned long flags; if (location & ~0x1f) return 0xffff; if (tp->chip_id == COMET && phy_id == 30) { if (comet_miireg2offset[location]) return ioread32(ioaddr + comet_miireg2offset[location]); return 0xffff; } spin_lock_irqsave(&tp->mii_lock, flags); if (tp->chip_id == LC82C168) { iowrite32(0x60020000 + (phy_id<<23) + (location<<18), ioaddr + 0xA0); ioread32(ioaddr + 0xA0); ioread32(ioaddr + 0xA0); for (i = 1000; i >= 0; --i) { barrier(); if ( ! ((retval = ioread32(ioaddr + 0xA0)) & 0x80000000)) break; } spin_unlock_irqrestore(&tp->mii_lock, flags); return retval & 0xffff; } /* Establish sync by sending at least 32 logic ones. */ for (i = 32; i >= 0; i--) { iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr); mdio_delay(); iowrite32(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } /* Shift the read command bits out. */ for (i = 15; i >= 0; i--) { int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0; iowrite32(MDIO_ENB | dataval, mdio_addr); mdio_delay(); iowrite32(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } /* Read the two transition, 16 data, and wire-idle bits. */ for (i = 19; i > 0; i--) { iowrite32(MDIO_ENB_IN, mdio_addr); mdio_delay(); retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DATA_READ) ? 
1 : 0); iowrite32(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } spin_unlock_irqrestore(&tp->mii_lock, flags); return (retval>>1) & 0xffff; } void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val) { struct tulip_private *tp = netdev_priv(dev); int i; int cmd = (0x5002 << 16) | ((phy_id & 0x1f) << 23) | (location<<18) | (val & 0xffff); void __iomem *ioaddr = tp->base_addr; void __iomem *mdio_addr = ioaddr + CSR9; unsigned long flags; if (location & ~0x1f) return; if (tp->chip_id == COMET && phy_id == 30) { if (comet_miireg2offset[location]) iowrite32(val, ioaddr + comet_miireg2offset[location]); return; } spin_lock_irqsave(&tp->mii_lock, flags); if (tp->chip_id == LC82C168) { iowrite32(cmd, ioaddr + 0xA0); for (i = 1000; i >= 0; --i) { barrier(); if ( ! (ioread32(ioaddr + 0xA0) & 0x80000000)) break; } spin_unlock_irqrestore(&tp->mii_lock, flags); return; } /* Establish sync by sending 32 logic ones. */ for (i = 32; i >= 0; i--) { iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr); mdio_delay(); iowrite32(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } /* Shift the command bits out. */ for (i = 31; i >= 0; i--) { int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0; iowrite32(MDIO_ENB | dataval, mdio_addr); mdio_delay(); iowrite32(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } /* Clear out extra bits. */ for (i = 2; i > 0; i--) { iowrite32(MDIO_ENB_IN, mdio_addr); mdio_delay(); iowrite32(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } spin_unlock_irqrestore(&tp->mii_lock, flags); } /* Set up the transceiver control registers for the selected media type. 
*/ void tulip_select_media(struct net_device *dev, int startup) { struct tulip_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->base_addr; struct mediatable *mtable = tp->mtable; u32 new_csr6; int i; if (mtable) { struct medialeaf *mleaf = &mtable->mleaf[tp->cur_index]; unsigned char *p = mleaf->leafdata; switch (mleaf->type) { case 0: /* 21140 non-MII xcvr. */ if (tulip_debug > 1) printk(KERN_DEBUG "%s: Using a 21140 non-MII transceiver with control setting %02x\n", dev->name, p[1]); dev->if_port = p[0]; if (startup) iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12); iowrite32(p[1], ioaddr + CSR12); new_csr6 = 0x02000000 | ((p[2] & 0x71) << 18); break; case 2: case 4: { u16 setup[5]; u32 csr13val, csr14val, csr15dir, csr15val; for (i = 0; i < 5; i++) setup[i] = get_u16(&p[i*2 + 1]); dev->if_port = p[0] & MEDIA_MASK; if (tulip_media_cap[dev->if_port] & MediaAlwaysFD) tp->full_duplex = 1; if (startup && mtable->has_reset) { struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset]; unsigned char *rst = rleaf->leafdata; if (tulip_debug > 1) printk(KERN_DEBUG "%s: Resetting the transceiver\n", dev->name); for (i = 0; i < rst[0]; i++) iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15); } if (tulip_debug > 1) printk(KERN_DEBUG "%s: 21143 non-MII %s transceiver control %04x/%04x\n", dev->name, medianame[dev->if_port], setup[0], setup[1]); if (p[0] & 0x40) { /* SIA (CSR13-15) setup values are provided. 
*/ csr13val = setup[0]; csr14val = setup[1]; csr15dir = (setup[3]<<16) | setup[2]; csr15val = (setup[4]<<16) | setup[2]; iowrite32(0, ioaddr + CSR13); iowrite32(csr14val, ioaddr + CSR14); iowrite32(csr15dir, ioaddr + CSR15); /* Direction */ iowrite32(csr15val, ioaddr + CSR15); /* Data */ iowrite32(csr13val, ioaddr + CSR13); } else { csr13val = 1; csr14val = 0; csr15dir = (setup[0]<<16) | 0x0008; csr15val = (setup[1]<<16) | 0x0008; if (dev->if_port <= 4) csr14val = t21142_csr14[dev->if_port]; if (startup) { iowrite32(0, ioaddr + CSR13); iowrite32(csr14val, ioaddr + CSR14); } iowrite32(csr15dir, ioaddr + CSR15); /* Direction */ iowrite32(csr15val, ioaddr + CSR15); /* Data */ if (startup) iowrite32(csr13val, ioaddr + CSR13); } if (tulip_debug > 1) printk(KERN_DEBUG "%s: Setting CSR15 to %08x/%08x\n", dev->name, csr15dir, csr15val); if (mleaf->type == 4) new_csr6 = 0x82020000 | ((setup[2] & 0x71) << 18); else new_csr6 = 0x82420000; break; } case 1: case 3: { int phy_num = p[0]; int init_length = p[1]; u16 *misc_info, tmp_info; dev->if_port = 11; new_csr6 = 0x020E0000; if (mleaf->type == 3) { /* 21142 */ u16 *init_sequence = (u16*)(p+2); u16 *reset_sequence = &((u16*)(p+3))[init_length]; int reset_length = p[2 + init_length*2]; misc_info = reset_sequence + reset_length; if (startup) { int timeout = 10; /* max 1 ms */ for (i = 0; i < reset_length; i++) iowrite32(get_u16(&reset_sequence[i]) << 16, ioaddr + CSR15); /* flush posted writes */ ioread32(ioaddr + CSR15); /* Sect 3.10.3 in DP83840A.pdf (p39) */ udelay(500); /* Section 4.2 in DP83840A.pdf (p43) */ /* and IEEE 802.3 "22.2.4.1.1 Reset" */ while (timeout-- && (tulip_mdio_read (dev, phy_num, MII_BMCR) & BMCR_RESET)) udelay(100); } for (i = 0; i < init_length; i++) iowrite32(get_u16(&init_sequence[i]) << 16, ioaddr + CSR15); ioread32(ioaddr + CSR15); /* flush posted writes */ } else { u8 *init_sequence = p + 2; u8 *reset_sequence = p + 3 + init_length; int reset_length = p[2 + init_length]; misc_info = 
(u16*)(reset_sequence + reset_length); if (startup) { int timeout = 10; /* max 1 ms */ iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12); for (i = 0; i < reset_length; i++) iowrite32(reset_sequence[i], ioaddr + CSR12); /* flush posted writes */ ioread32(ioaddr + CSR12); /* Sect 3.10.3 in DP83840A.pdf (p39) */ udelay(500); /* Section 4.2 in DP83840A.pdf (p43) */ /* and IEEE 802.3 "22.2.4.1.1 Reset" */ while (timeout-- && (tulip_mdio_read (dev, phy_num, MII_BMCR) & BMCR_RESET)) udelay(100); } for (i = 0; i < init_length; i++) iowrite32(init_sequence[i], ioaddr + CSR12); ioread32(ioaddr + CSR12); /* flush posted writes */ } tmp_info = get_u16(&misc_info[1]); if (tmp_info) tp->advertising[phy_num] = tmp_info | 1; if (tmp_info && startup < 2) { if (tp->mii_advertise == 0) tp->mii_advertise = tp->advertising[phy_num]; if (tulip_debug > 1) printk(KERN_DEBUG "%s: Advertising %04x on MII %d\n", dev->name, tp->mii_advertise, tp->phys[phy_num]); tulip_mdio_write(dev, tp->phys[phy_num], 4, tp->mii_advertise); } break; } case 5: case 6: { u16 setup[5]; new_csr6 = 0; /* FIXME */ for (i = 0; i < 5; i++) setup[i] = get_u16(&p[i*2 + 1]); if (startup && mtable->has_reset) { struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset]; unsigned char *rst = rleaf->leafdata; if (tulip_debug > 1) printk(KERN_DEBUG "%s: Resetting the transceiver\n", dev->name); for (i = 0; i < rst[0]; i++) iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15); } break; } default: printk(KERN_DEBUG "%s: Invalid media table selection %d\n", dev->name, mleaf->type); new_csr6 = 0x020E0000; } if (tulip_debug > 1) printk(KERN_DEBUG "%s: Using media type %s, CSR12 is %02x\n", dev->name, medianame[dev->if_port], ioread32(ioaddr + CSR12) & 0xff); } else if (tp->chip_id == LC82C168) { if (startup && ! tp->medialock) dev->if_port = tp->mii_cnt ? 
11 : 0; if (tulip_debug > 1) printk(KERN_DEBUG "%s: PNIC PHY status is %3.3x, media %s\n", dev->name, ioread32(ioaddr + 0xB8), medianame[dev->if_port]); if (tp->mii_cnt) { new_csr6 = 0x810C0000; iowrite32(0x0001, ioaddr + CSR15); iowrite32(0x0201B07A, ioaddr + 0xB8); } else if (startup) { /* Start with 10mbps to do autonegotiation. */ iowrite32(0x32, ioaddr + CSR12); new_csr6 = 0x00420000; iowrite32(0x0001B078, ioaddr + 0xB8); iowrite32(0x0201B078, ioaddr + 0xB8); } else if (dev->if_port == 3 || dev->if_port == 5) { iowrite32(0x33, ioaddr + CSR12); new_csr6 = 0x01860000; /* Trigger autonegotiation. */ iowrite32(startup ? 0x0201F868 : 0x0001F868, ioaddr + 0xB8); } else { iowrite32(0x32, ioaddr + CSR12); new_csr6 = 0x00420000; iowrite32(0x1F078, ioaddr + 0xB8); } } else { /* Unknown chip type with no media table. */ if (tp->default_port == 0) dev->if_port = tp->mii_cnt ? 11 : 3; if (tulip_media_cap[dev->if_port] & MediaIsMII) { new_csr6 = 0x020E0000; } else if (tulip_media_cap[dev->if_port] & MediaIsFx) { new_csr6 = 0x02860000; } else new_csr6 = 0x03860000; if (tulip_debug > 1) printk(KERN_DEBUG "%s: No media description table, assuming %s transceiver, CSR12 %02x\n", dev->name, medianame[dev->if_port], ioread32(ioaddr + CSR12)); } tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0); mdelay(1); } /* Check the MII negotiated duplex and change the CSR6 setting if required. Return 0 if everything is OK. Return < 0 if the transceiver is missing or has no link beat. 
*/
/*
 * Re-check the negotiated link mode and reprogram CSR6 if it changed.
 *
 * Reads BMSR/LPA from the first attached PHY and derives the duplex and
 * Tx-threshold bits for CSR6.
 *
 * Returns:
 *   1  - CSR6 was rewritten and the Rx/Tx engines restarted
 *   0  - nothing changed
 *  -1  - no link beat on the MII interface
 *  -2  - PHY read back as all-ones (transceiver absent / not responding)
 */
int tulip_check_duplex(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	unsigned int bmsr, lpa, negotiated, new_csr6;

	bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR);
	lpa = tulip_mdio_read(dev, tp->phys[0], MII_LPA);
	if (tulip_debug > 1)
		dev_info(&dev->dev, "MII status %04x, Link partner report %04x\n", bmsr, lpa);
	if (bmsr == 0xffff)
		return -2;	/* all-ones: no transceiver on the bus */
	if ((bmsr & BMSR_LSTATUS) == 0) {
		/* BMSR latches link loss; read a second time to get the
		 * current (possibly recovered) link state. */
		int new_bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR);
		if ((new_bmsr & BMSR_LSTATUS) == 0) {
			if (tulip_debug > 1)
				dev_info(&dev->dev, "No link beat on the MII interface, status %04x\n", new_bmsr);
			return -1;
		}
	}
	/* Intersect our advertisement with the partner's to get the
	 * negotiated abilities. */
	negotiated = lpa & tp->advertising[0];
	tp->full_duplex = mii_duplex(tp->full_duplex_lock, negotiated);

	new_csr6 = tp->csr6;
	/* Select the Tx threshold bits appropriate to the negotiated speed. */
	if (negotiated & LPA_100)
		new_csr6 &= ~TxThreshold;
	else
		new_csr6 |= TxThreshold;
	if (tp->full_duplex)
		new_csr6 |= FullDuplex;
	else
		new_csr6 &= ~FullDuplex;

	if (new_csr6 != tp->csr6) {
		tp->csr6 = new_csr6;
		/* Apply the new mode by restarting the Rx/Tx engines. */
		tulip_restart_rxtx(tp);
		if (tulip_debug > 0)
			dev_info(&dev->dev, "Setting %s-duplex based on MII#%d link partner capability of %04x\n",
				 tp->full_duplex ? "full" : "half",
				 tp->phys[0], lpa);
		return 1;
	}

	return 0;
}

/*
 * Probe MDIO addresses 1..31 then 0 for attached MII transceivers,
 * record them in tp->phys[]/tp->advertising[], fix up the advertisement
 * register where needed, and enable or disable autonegotiation depending
 * on whether a media type was forced (tp->default_port).
 *
 * Called once at probe time; board_idx is only used for log messages.
 */
void __devinit tulip_find_mii (struct net_device *dev, int board_idx)
{
	struct tulip_private *tp = netdev_priv(dev);
	int phyn, phy_idx = 0;
	int mii_reg0;
	int mii_advert;
	unsigned int to_advert, new_bmcr, ane_switch;

	/* Find the connected MII xcvrs.
	 * Doing this in open() would allow detecting external xcvrs later,
	 * but takes much time.
	 *
	 * NOTE(review): the bound is sizeof(tp->phys) (bytes), so this is
	 * only an element count while tp->phys is a byte array — confirm.
	 */
	for (phyn = 1; phyn <= 32 && phy_idx < sizeof (tp->phys); phyn++) {
		int phy = phyn & 0x1f;	/* probe addresses 1..31, then 0 */
		int mii_status = tulip_mdio_read (dev, phy, MII_BMSR);
		if ((mii_status & 0x8301) == 0x8001 ||
		    ((mii_status & BMSR_100BASE4) == 0 &&
		     (mii_status & 0x7800) != 0)) {
			/* preserve Becker logic, gain indentation level */
		} else {
			continue;
		}

		mii_reg0 = tulip_mdio_read (dev, phy, MII_BMCR);
		mii_advert = tulip_mdio_read (dev, phy, MII_ADVERTISE);
		ane_switch = 0;

		/* if not advertising at all, gen an
		 * advertising value from the capability
		 * bits in BMSR
		 */
		if ((mii_advert & ADVERTISE_ALL) == 0) {
			unsigned int tmpadv = tulip_mdio_read (dev, phy, MII_BMSR);
			mii_advert = ((tmpadv >> 6) & 0x3e0) | 1;
		}

		/* Decide what to advertise: an explicit override wins, then a
		 * previously recorded value, then whatever the PHY reports. */
		if (tp->mii_advertise) {
			tp->advertising[phy_idx] =
			to_advert = tp->mii_advertise;
		} else if (tp->advertising[phy_idx]) {
			to_advert = tp->advertising[phy_idx];
		} else {
			tp->advertising[phy_idx] =
			tp->mii_advertise =
			to_advert = mii_advert;
		}

		tp->phys[phy_idx++] = phy;

		pr_info("tulip%d: MII transceiver #%d config %04x status %04x advertising %04x\n",
			board_idx, phy, mii_reg0, mii_status, mii_advert);

		/* Fixup for DLink with miswired PHY. */
		if (mii_advert != to_advert) {
			printk(KERN_DEBUG "tulip%d: Advertising %04x on PHY %d, previously advertising %04x\n",
			       board_idx, to_advert, phy, mii_advert);
			tulip_mdio_write (dev, phy, 4, to_advert);
		}

		/* Enable autonegotiation: some boards default to off. */
		if (tp->default_port == 0) {
			new_bmcr = mii_reg0 | BMCR_ANENABLE;
			if (new_bmcr != mii_reg0) {
				new_bmcr |= BMCR_ANRESTART;
				ane_switch = 1;
			}
		}
		/* ...or disable nway, if forcing media */
		else {
			new_bmcr = mii_reg0 & ~BMCR_ANENABLE;
			if (new_bmcr != mii_reg0)
				ane_switch = 1;
		}

		/* clear out bits we never want at this point */
		new_bmcr &= ~(BMCR_CTST | BMCR_FULLDPLX | BMCR_ISOLATE |
			      BMCR_PDOWN | BMCR_SPEED100 | BMCR_LOOPBACK |
			      BMCR_RESET);
		if (tp->full_duplex)
			new_bmcr |= BMCR_FULLDPLX;
		if (tulip_media_cap[tp->default_port] & MediaIs100)
			new_bmcr |= BMCR_SPEED100;

		if (new_bmcr != mii_reg0) {
			/* some phys need the ANE switch to
			 * happen before forced media settings
			 * will "take."  However, we write the
			 * same value twice in order not to
			 * confuse the sane phys.
			 */
			if (ane_switch) {
				tulip_mdio_write (dev, phy, MII_BMCR, new_bmcr);
				udelay (10);
			}
			tulip_mdio_write (dev, phy, MII_BMCR, new_bmcr);
		}
	}
	tp->mii_cnt = phy_idx;
	if (tp->mtable && tp->mtable->has_mii && phy_idx == 0) {
		pr_info("tulip%d: ***WARNING***: No MII transceiver found!\n",
			board_idx);
		/* Fall back to PHY address 1 so later MDIO accesses have a
		 * target even though probing found nothing. */
		tp->phys[0] = 1;
	}
}
gpl-2.0
XePeleato/android_ALE-L21_kernel
arch/mips/kernel/asm-offsets.c
1930
13458
/*
 * offset.c: Calculate pt_regs and task_struct offsets.
 *
 * Copyright (C) 1996 David S. Miller
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 *
 * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 *
 * Each function below emits OFFSET/DEFINE/COMMENT markers that kbuild
 * turns into asm-offsets.h constants for use from assembly code; none
 * of these functions is ever executed at run time.
 */
#include <linux/compat.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kbuild.h>
#include <linux/suspend.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

#include <linux/kvm_host.h>

/* Emit assembler-visible offsets of every struct pt_regs member. */
void output_ptreg_defines(void)
{
	COMMENT("MIPS pt_regs offsets.");
	OFFSET(PT_R0, pt_regs, regs[0]);
	OFFSET(PT_R1, pt_regs, regs[1]);
	OFFSET(PT_R2, pt_regs, regs[2]);
	OFFSET(PT_R3, pt_regs, regs[3]);
	OFFSET(PT_R4, pt_regs, regs[4]);
	OFFSET(PT_R5, pt_regs, regs[5]);
	OFFSET(PT_R6, pt_regs, regs[6]);
	OFFSET(PT_R7, pt_regs, regs[7]);
	OFFSET(PT_R8, pt_regs, regs[8]);
	OFFSET(PT_R9, pt_regs, regs[9]);
	OFFSET(PT_R10, pt_regs, regs[10]);
	OFFSET(PT_R11, pt_regs, regs[11]);
	OFFSET(PT_R12, pt_regs, regs[12]);
	OFFSET(PT_R13, pt_regs, regs[13]);
	OFFSET(PT_R14, pt_regs, regs[14]);
	OFFSET(PT_R15, pt_regs, regs[15]);
	OFFSET(PT_R16, pt_regs, regs[16]);
	OFFSET(PT_R17, pt_regs, regs[17]);
	OFFSET(PT_R18, pt_regs, regs[18]);
	OFFSET(PT_R19, pt_regs, regs[19]);
	OFFSET(PT_R20, pt_regs, regs[20]);
	OFFSET(PT_R21, pt_regs, regs[21]);
	OFFSET(PT_R22, pt_regs, regs[22]);
	OFFSET(PT_R23, pt_regs, regs[23]);
	OFFSET(PT_R24, pt_regs, regs[24]);
	OFFSET(PT_R25, pt_regs, regs[25]);
	OFFSET(PT_R26, pt_regs, regs[26]);
	OFFSET(PT_R27, pt_regs, regs[27]);
	OFFSET(PT_R28, pt_regs, regs[28]);
	OFFSET(PT_R29, pt_regs, regs[29]);
	OFFSET(PT_R30, pt_regs, regs[30]);
	OFFSET(PT_R31, pt_regs, regs[31]);
	OFFSET(PT_LO, pt_regs, lo);
	OFFSET(PT_HI, pt_regs, hi);
#ifdef CONFIG_CPU_HAS_SMARTMIPS
	OFFSET(PT_ACX, pt_regs, acx);
#endif
	OFFSET(PT_EPC, pt_regs, cp0_epc);
	OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr);
	OFFSET(PT_STATUS, pt_regs, cp0_status);
	OFFSET(PT_CAUSE, pt_regs, cp0_cause);
#ifdef CONFIG_MIPS_MT_SMTC
	OFFSET(PT_TCSTATUS, pt_regs, cp0_tcstatus);
#endif /* CONFIG_MIPS_MT_SMTC */
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	OFFSET(PT_MPL, pt_regs, mpl);
	OFFSET(PT_MTP, pt_regs, mtp);
#endif /* CONFIG_CPU_CAVIUM_OCTEON */
	DEFINE(PT_SIZE, sizeof(struct pt_regs));
	BLANK();
}

/* Emit the few task_struct offsets the assembly paths need. */
void output_task_defines(void)
{
	COMMENT("MIPS task_struct offsets.");
	OFFSET(TASK_STATE, task_struct, state);
	OFFSET(TASK_THREAD_INFO, task_struct, stack);
	OFFSET(TASK_FLAGS, task_struct, flags);
	OFFSET(TASK_MM, task_struct, mm);
	OFFSET(TASK_PID, task_struct, pid);
	DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
	BLANK();
}

/* Emit thread_info offsets plus the kernel-stack size/mask constants. */
void output_thread_info_defines(void)
{
	COMMENT("MIPS thread_info offsets.");
	OFFSET(TI_TASK, thread_info, task);
	OFFSET(TI_EXEC_DOMAIN, thread_info, exec_domain);
	OFFSET(TI_FLAGS, thread_info, flags);
	OFFSET(TI_TP_VALUE, thread_info, tp_value);
	OFFSET(TI_CPU, thread_info, cpu);
	OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
	OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
	OFFSET(TI_RESTART_BLOCK, thread_info, restart_block);
	OFFSET(TI_REGS, thread_info, regs);
	DEFINE(_THREAD_SIZE, THREAD_SIZE);
	DEFINE(_THREAD_MASK, THREAD_MASK);
	BLANK();
}

/* Emit thread_struct offsets (callee-saved regs and CP0 fault state)
 * used by the context-switch and exception code. */
void output_thread_defines(void)
{
	COMMENT("MIPS specific thread_struct offsets.");
	OFFSET(THREAD_REG16, task_struct, thread.reg16);
	OFFSET(THREAD_REG17, task_struct, thread.reg17);
	OFFSET(THREAD_REG18, task_struct, thread.reg18);
	OFFSET(THREAD_REG19, task_struct, thread.reg19);
	OFFSET(THREAD_REG20, task_struct, thread.reg20);
	OFFSET(THREAD_REG21, task_struct, thread.reg21);
	OFFSET(THREAD_REG22, task_struct, thread.reg22);
	OFFSET(THREAD_REG23, task_struct, thread.reg23);
	OFFSET(THREAD_REG29, task_struct, thread.reg29);
	OFFSET(THREAD_REG30, task_struct, thread.reg30);
	OFFSET(THREAD_REG31, task_struct, thread.reg31);
	OFFSET(THREAD_STATUS, task_struct, thread.cp0_status);
	OFFSET(THREAD_FPU, task_struct, thread.fpu);
	OFFSET(THREAD_BVADDR, task_struct, thread.cp0_badvaddr);
	OFFSET(THREAD_BUADDR, task_struct, thread.cp0_baduaddr);
	OFFSET(THREAD_ECODE, task_struct, thread.error_code);
	BLANK();
}

/* Emit offsets of every FPU register slot saved in thread_struct. */
void output_thread_fpu_defines(void)
{
	OFFSET(THREAD_FPR0, task_struct, thread.fpu.fpr[0]);
	OFFSET(THREAD_FPR1, task_struct, thread.fpu.fpr[1]);
	OFFSET(THREAD_FPR2, task_struct, thread.fpu.fpr[2]);
	OFFSET(THREAD_FPR3, task_struct, thread.fpu.fpr[3]);
	OFFSET(THREAD_FPR4, task_struct, thread.fpu.fpr[4]);
	OFFSET(THREAD_FPR5, task_struct, thread.fpu.fpr[5]);
	OFFSET(THREAD_FPR6, task_struct, thread.fpu.fpr[6]);
	OFFSET(THREAD_FPR7, task_struct, thread.fpu.fpr[7]);
	OFFSET(THREAD_FPR8, task_struct, thread.fpu.fpr[8]);
	OFFSET(THREAD_FPR9, task_struct, thread.fpu.fpr[9]);
	OFFSET(THREAD_FPR10, task_struct, thread.fpu.fpr[10]);
	OFFSET(THREAD_FPR11, task_struct, thread.fpu.fpr[11]);
	OFFSET(THREAD_FPR12, task_struct, thread.fpu.fpr[12]);
	OFFSET(THREAD_FPR13, task_struct, thread.fpu.fpr[13]);
	OFFSET(THREAD_FPR14, task_struct, thread.fpu.fpr[14]);
	OFFSET(THREAD_FPR15, task_struct, thread.fpu.fpr[15]);
	OFFSET(THREAD_FPR16, task_struct, thread.fpu.fpr[16]);
	OFFSET(THREAD_FPR17, task_struct, thread.fpu.fpr[17]);
	OFFSET(THREAD_FPR18, task_struct, thread.fpu.fpr[18]);
	OFFSET(THREAD_FPR19, task_struct, thread.fpu.fpr[19]);
	OFFSET(THREAD_FPR20, task_struct, thread.fpu.fpr[20]);
	OFFSET(THREAD_FPR21, task_struct, thread.fpu.fpr[21]);
	OFFSET(THREAD_FPR22, task_struct, thread.fpu.fpr[22]);
	OFFSET(THREAD_FPR23, task_struct, thread.fpu.fpr[23]);
	OFFSET(THREAD_FPR24, task_struct, thread.fpu.fpr[24]);
	OFFSET(THREAD_FPR25, task_struct, thread.fpu.fpr[25]);
	OFFSET(THREAD_FPR26, task_struct, thread.fpu.fpr[26]);
	OFFSET(THREAD_FPR27, task_struct, thread.fpu.fpr[27]);
	OFFSET(THREAD_FPR28, task_struct, thread.fpu.fpr[28]);
	OFFSET(THREAD_FPR29, task_struct, thread.fpu.fpr[29]);
	OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]);
	OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]);
	OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31);
	BLANK();
}

/* Emit mm_struct offsets and the page-table geometry constants used by
 * the TLB refill handlers. */
void output_mm_defines(void)
{
	COMMENT("Size of struct page");
	DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page));
	BLANK();
	COMMENT("Linux mm_struct offsets.");
	OFFSET(MM_USERS, mm_struct, mm_users);
	OFFSET(MM_PGD, mm_struct, pgd);
	OFFSET(MM_CONTEXT, mm_struct, context);
	BLANK();
	DEFINE(_PGD_T_SIZE, sizeof(pgd_t));
	DEFINE(_PMD_T_SIZE, sizeof(pmd_t));
	DEFINE(_PTE_T_SIZE, sizeof(pte_t));
	BLANK();
	DEFINE(_PGD_T_LOG2, PGD_T_LOG2);
#ifndef __PAGETABLE_PMD_FOLDED
	DEFINE(_PMD_T_LOG2, PMD_T_LOG2);
#endif
	DEFINE(_PTE_T_LOG2, PTE_T_LOG2);
	BLANK();
	DEFINE(_PGD_ORDER, PGD_ORDER);
#ifndef __PAGETABLE_PMD_FOLDED
	DEFINE(_PMD_ORDER, PMD_ORDER);
#endif
	DEFINE(_PTE_ORDER, PTE_ORDER);
	BLANK();
	DEFINE(_PMD_SHIFT, PMD_SHIFT);
	DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT);
	BLANK();
	DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD);
	DEFINE(_PTRS_PER_PMD, PTRS_PER_PMD);
	DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE);
	BLANK();
	DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
	DEFINE(_PAGE_SIZE, PAGE_SIZE);
	BLANK();
}

#ifdef CONFIG_32BIT
/* 32-bit sigcontext layout (includes the DSP hi/lo pairs and ACX). */
void output_sc_defines(void)
{
	COMMENT("Linux sigcontext offsets.");
	OFFSET(SC_REGS, sigcontext, sc_regs);
	OFFSET(SC_FPREGS, sigcontext, sc_fpregs);
	OFFSET(SC_ACX, sigcontext, sc_acx);
	OFFSET(SC_MDHI, sigcontext, sc_mdhi);
	OFFSET(SC_MDLO, sigcontext, sc_mdlo);
	OFFSET(SC_PC, sigcontext, sc_pc);
	OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
	OFFSET(SC_FPC_EIR, sigcontext, sc_fpc_eir);
	OFFSET(SC_HI1, sigcontext, sc_hi1);
	OFFSET(SC_LO1, sigcontext, sc_lo1);
	OFFSET(SC_HI2, sigcontext, sc_hi2);
	OFFSET(SC_LO2, sigcontext, sc_lo2);
	OFFSET(SC_HI3, sigcontext, sc_hi3);
	OFFSET(SC_LO3, sigcontext, sc_lo3);
	BLANK();
}
#endif

#ifdef CONFIG_64BIT
/* 64-bit sigcontext layout (no ACX / extra hi-lo pairs). */
void output_sc_defines(void)
{
	COMMENT("Linux sigcontext offsets.");
	OFFSET(SC_REGS, sigcontext, sc_regs);
	OFFSET(SC_FPREGS, sigcontext, sc_fpregs);
	OFFSET(SC_MDHI, sigcontext, sc_mdhi);
	OFFSET(SC_MDLO, sigcontext, sc_mdlo);
	OFFSET(SC_PC, sigcontext, sc_pc);
	OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
	BLANK();
}
#endif

#ifdef CONFIG_MIPS32_COMPAT
/* Compat (32-bit on 64-bit kernel) sigcontext FP fields. */
void output_sc32_defines(void)
{
	COMMENT("Linux 32-bit sigcontext offsets.");
	OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs);
	OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr);
	OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir);
	BLANK();
}
#endif

/* Emit the signal number constants for assembly code. */
void output_signal_defined(void)
{
	COMMENT("Linux signal numbers.");
	DEFINE(_SIGHUP, SIGHUP);
	DEFINE(_SIGINT, SIGINT);
	DEFINE(_SIGQUIT, SIGQUIT);
	DEFINE(_SIGILL, SIGILL);
	DEFINE(_SIGTRAP, SIGTRAP);
	DEFINE(_SIGIOT, SIGIOT);
	DEFINE(_SIGABRT, SIGABRT);
	DEFINE(_SIGEMT, SIGEMT);
	DEFINE(_SIGFPE, SIGFPE);
	DEFINE(_SIGKILL, SIGKILL);
	DEFINE(_SIGBUS, SIGBUS);
	DEFINE(_SIGSEGV, SIGSEGV);
	DEFINE(_SIGSYS, SIGSYS);
	DEFINE(_SIGPIPE, SIGPIPE);
	DEFINE(_SIGALRM, SIGALRM);
	DEFINE(_SIGTERM, SIGTERM);
	DEFINE(_SIGUSR1, SIGUSR1);
	DEFINE(_SIGUSR2, SIGUSR2);
	DEFINE(_SIGCHLD, SIGCHLD);
	DEFINE(_SIGPWR, SIGPWR);
	DEFINE(_SIGWINCH, SIGWINCH);
	DEFINE(_SIGURG, SIGURG);
	DEFINE(_SIGIO, SIGIO);
	DEFINE(_SIGSTOP, SIGSTOP);
	DEFINE(_SIGTSTP, SIGTSTP);
	DEFINE(_SIGCONT, SIGCONT);
	DEFINE(_SIGTTIN, SIGTTIN);
	DEFINE(_SIGTTOU, SIGTTOU);
	DEFINE(_SIGVTALRM, SIGVTALRM);
	DEFINE(_SIGPROF, SIGPROF);
	DEFINE(_SIGXCPU, SIGXCPU);
	DEFINE(_SIGXFSZ, SIGXFSZ);
	BLANK();
}

#ifdef CONFIG_CPU_CAVIUM_OCTEON
/* Octeon coprocessor-2 (crypto/DFA) save-area offsets, plus the CP2 and
 * CVMSEG save slots inside thread_struct. */
void output_octeon_cop2_state_defines(void)
{
	COMMENT("Octeon specific octeon_cop2_state offsets.");
	OFFSET(OCTEON_CP2_CRC_IV, octeon_cop2_state, cop2_crc_iv);
	OFFSET(OCTEON_CP2_CRC_LENGTH, octeon_cop2_state, cop2_crc_length);
	OFFSET(OCTEON_CP2_CRC_POLY, octeon_cop2_state, cop2_crc_poly);
	OFFSET(OCTEON_CP2_LLM_DAT, octeon_cop2_state, cop2_llm_dat);
	OFFSET(OCTEON_CP2_3DES_IV, octeon_cop2_state, cop2_3des_iv);
	OFFSET(OCTEON_CP2_3DES_KEY, octeon_cop2_state, cop2_3des_key);
	OFFSET(OCTEON_CP2_3DES_RESULT, octeon_cop2_state, cop2_3des_result);
	OFFSET(OCTEON_CP2_AES_INP0, octeon_cop2_state, cop2_aes_inp0);
	OFFSET(OCTEON_CP2_AES_IV, octeon_cop2_state, cop2_aes_iv);
	OFFSET(OCTEON_CP2_AES_KEY, octeon_cop2_state, cop2_aes_key);
	OFFSET(OCTEON_CP2_AES_KEYLEN, octeon_cop2_state, cop2_aes_keylen);
	OFFSET(OCTEON_CP2_AES_RESULT, octeon_cop2_state, cop2_aes_result);
	OFFSET(OCTEON_CP2_GFM_MULT, octeon_cop2_state, cop2_gfm_mult);
	OFFSET(OCTEON_CP2_GFM_POLY, octeon_cop2_state, cop2_gfm_poly);
	OFFSET(OCTEON_CP2_GFM_RESULT, octeon_cop2_state, cop2_gfm_result);
	OFFSET(OCTEON_CP2_HSH_DATW, octeon_cop2_state, cop2_hsh_datw);
	OFFSET(OCTEON_CP2_HSH_IVW, octeon_cop2_state, cop2_hsh_ivw);
	OFFSET(THREAD_CP2, task_struct, thread.cp2);
	OFFSET(THREAD_CVMSEG, task_struct, thread.cvmseg.cvmseg);
	BLANK();
}
#endif

#ifdef CONFIG_HIBERNATION
/* struct pbe offsets used by the hibernation restore assembly. */
void output_pbe_defines(void)
{
	COMMENT(" Linux struct pbe offsets. ");
	OFFSET(PBE_ADDRESS, pbe, address);
	OFFSET(PBE_ORIG_ADDRESS, pbe, orig_address);
	OFFSET(PBE_NEXT, pbe, next);
	DEFINE(PBE_SIZE, sizeof(struct pbe));
	BLANK();
}
#endif

/* KVM/MIPS offsets: vcpu host/guest state and the guest GPR array used
 * by the world-switch assembly. */
void output_kvm_defines(void)
{
	COMMENT(" KVM/MIPS Specfic offsets. ");
	DEFINE(VCPU_ARCH_SIZE, sizeof(struct kvm_vcpu_arch));
	OFFSET(VCPU_RUN, kvm_vcpu, run);
	OFFSET(VCPU_HOST_ARCH, kvm_vcpu, arch);

	OFFSET(VCPU_HOST_EBASE, kvm_vcpu_arch, host_ebase);
	OFFSET(VCPU_GUEST_EBASE, kvm_vcpu_arch, guest_ebase);

	OFFSET(VCPU_HOST_STACK, kvm_vcpu_arch, host_stack);
	OFFSET(VCPU_HOST_GP, kvm_vcpu_arch, host_gp);

	OFFSET(VCPU_HOST_CP0_BADVADDR, kvm_vcpu_arch, host_cp0_badvaddr);
	OFFSET(VCPU_HOST_CP0_CAUSE, kvm_vcpu_arch, host_cp0_cause);
	OFFSET(VCPU_HOST_EPC, kvm_vcpu_arch, host_cp0_epc);
	OFFSET(VCPU_HOST_ENTRYHI, kvm_vcpu_arch, host_cp0_entryhi);

	OFFSET(VCPU_GUEST_INST, kvm_vcpu_arch, guest_inst);

	OFFSET(VCPU_R0, kvm_vcpu_arch, gprs[0]);
	OFFSET(VCPU_R1, kvm_vcpu_arch, gprs[1]);
	OFFSET(VCPU_R2, kvm_vcpu_arch, gprs[2]);
	OFFSET(VCPU_R3, kvm_vcpu_arch, gprs[3]);
	OFFSET(VCPU_R4, kvm_vcpu_arch, gprs[4]);
	OFFSET(VCPU_R5, kvm_vcpu_arch, gprs[5]);
	OFFSET(VCPU_R6, kvm_vcpu_arch, gprs[6]);
	OFFSET(VCPU_R7, kvm_vcpu_arch, gprs[7]);
	OFFSET(VCPU_R8, kvm_vcpu_arch, gprs[8]);
	OFFSET(VCPU_R9, kvm_vcpu_arch, gprs[9]);
	OFFSET(VCPU_R10, kvm_vcpu_arch, gprs[10]);
	OFFSET(VCPU_R11, kvm_vcpu_arch, gprs[11]);
	OFFSET(VCPU_R12, kvm_vcpu_arch, gprs[12]);
	OFFSET(VCPU_R13, kvm_vcpu_arch, gprs[13]);
	OFFSET(VCPU_R14, kvm_vcpu_arch, gprs[14]);
	OFFSET(VCPU_R15, kvm_vcpu_arch, gprs[15]);
	OFFSET(VCPU_R16, kvm_vcpu_arch, gprs[16]);
	OFFSET(VCPU_R17, kvm_vcpu_arch, gprs[17]);
	OFFSET(VCPU_R18, kvm_vcpu_arch, gprs[18]);
	OFFSET(VCPU_R19, kvm_vcpu_arch, gprs[19]);
	OFFSET(VCPU_R20, kvm_vcpu_arch, gprs[20]);
	OFFSET(VCPU_R21, kvm_vcpu_arch, gprs[21]);
	OFFSET(VCPU_R22, kvm_vcpu_arch, gprs[22]);
	OFFSET(VCPU_R23, kvm_vcpu_arch, gprs[23]);
	OFFSET(VCPU_R24, kvm_vcpu_arch, gprs[24]);
	OFFSET(VCPU_R25, kvm_vcpu_arch, gprs[25]);
	OFFSET(VCPU_R26, kvm_vcpu_arch, gprs[26]);
	OFFSET(VCPU_R27, kvm_vcpu_arch, gprs[27]);
	OFFSET(VCPU_R28, kvm_vcpu_arch, gprs[28]);
	OFFSET(VCPU_R29, kvm_vcpu_arch, gprs[29]);
	OFFSET(VCPU_R30, kvm_vcpu_arch, gprs[30]);
	OFFSET(VCPU_R31, kvm_vcpu_arch, gprs[31]);
	OFFSET(VCPU_LO, kvm_vcpu_arch, lo);
	OFFSET(VCPU_HI, kvm_vcpu_arch, hi);
	OFFSET(VCPU_PC, kvm_vcpu_arch, pc);
	OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0);
	OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid);
	OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid);

	OFFSET(COP0_TLB_HI, mips_coproc, reg[MIPS_CP0_TLB_HI][0]);
	OFFSET(COP0_STATUS, mips_coproc, reg[MIPS_CP0_STATUS][0]);
	BLANK();
}
gpl-2.0
ardatdat/a500-kernel
arch/blackfin/kernel/reboot.c
1930
2695
/*
 * arch/blackfin/kernel/reboot.c - handle shutdown/reboot
 *
 * Copyright 2004-2007 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/interrupt.h>
#include <asm/bfin-global.h>
#include <asm/reboot.h>
#include <asm/system.h>
#include <asm/bfrom.h>

/* A system soft reset makes external memory unusable so force
 * this function into L1.  We use the compiler ssync here rather
 * than SSYNC() because it's safe (no interrupts and such) and
 * we save some L1.  We do not need to force sanity in the SYSCR
 * register as the BMODE selection bit is cleared by the soft
 * reset while the Core B bit (on dual core parts) is cleared by
 * the core reset.
 */
__attribute__ ((__l1_text__, __noreturn__))
static void bfin_reset(void)
{
	/* Wait for completion of "system" events such as cache line
	 * line fills so that we avoid infinite stalls later on as
	 * much as possible.  This code is in L1, so it won't trigger
	 * any such event after this point in time.
	 */
	__builtin_bfin_ssync();

	/* The bootrom checks to see how it was reset and will
	 * automatically perform a software reset for us when
	 * it starts executing after the core reset.
	 */
	if (ANOMALY_05000353 || ANOMALY_05000386) {
		/* Initiate System software reset. */
		bfin_write_SWRST(0x7);

		/* Due to the way reset is handled in the hardware, we need
		 * to delay for 10 SCLKS.  The only reliable way to do this is
		 * to calculate the CCLK/SCLK ratio and multiply 10.  For now,
		 * we'll assume worse case which is a 1:15 ratio.
		 */
		asm(
			"LSETUP (1f, 1f) LC0 = %0\n"
			"1: nop;"
			:
			: "a" (15 * 10)
			: "LC0", "LB0", "LT0"
		);

		/* Clear System software reset */
		bfin_write_SWRST(0);

		/* The BF526 ROM will crash during reset */
#if defined(__ADSPBF522__) || defined(__ADSPBF524__) || defined(__ADSPBF526__)
		/* Dummy read back of SWRST; presumably works around the
		 * BF52x ROM issue noted above — confirm against the
		 * anomaly sheet. */
		bfin_read_SWRST();
#endif

		/* Wait for the SWRST write to complete.  Cannot rely on SSYNC
		 * though as the System state is all reset now.
		 */
		asm(
			"LSETUP (1f, 1f) LC1 = %0\n"
			"1: nop;"
			:
			: "a" (15 * 1)
			: "LC1", "LB1", "LT1"
		);
	}

	/* Raise interrupt 1 until the core resets; never returns. */
	while (1)
		/* Issue core reset */
		asm("raise 1");
}

/* Board hook: default restart does nothing; boards may override. */
__attribute__((weak))
void native_machine_restart(char *cmd)
{
}

/*
 * Reboot the platform: run the (possibly board-specific) restart hook,
 * mask interrupts, then soft-reset.  If we are not running on CPU 0,
 * ask the other core(s) to execute bfin_reset instead.
 */
void machine_restart(char *cmd)
{
	native_machine_restart(cmd);
	local_irq_disable();
	if (smp_processor_id())
		smp_call_function((void *)bfin_reset, 0, 1);
	else
		bfin_reset();
}

/* Board hook: default halt idles forever with interrupts off. */
__attribute__((weak))
void native_machine_halt(void)
{
	idle_with_irq_disabled();
}

void machine_halt(void)
{
	native_machine_halt();
}

/* Board hook: default power-off idles forever (no PMIC control here). */
__attribute__((weak))
void native_machine_power_off(void)
{
	idle_with_irq_disabled();
}

void machine_power_off(void)
{
	native_machine_power_off();
}
gpl-2.0
gchild320/kernel_lge_g3
drivers/video/msm/mdp4_overlay_lcdc.c
2186
23873
/* Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/delay.h>
#include <mach/hardware.h>
#include <linux/io.h>

#include <asm/system.h>
#include <asm/mach-types.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>

#include <linux/fb.h>

#include "mdp.h"
#include "msm_fb.h"
#include "mdp4.h"

/* LCDC register block offset within the MDP register space. */
#ifdef CONFIG_FB_MSM_MDP40
#define LCDC_BASE	0xC0000
#else
#define LCDC_BASE	0xE0000
#endif

int first_pixel_start_x;
int first_pixel_start_y;

/* Non-zero once the LCDC timing generator has been started. */
static int lcdc_enabled;

#define MAX_CONTROLLER	1

/*
 * Per-controller vsync/commit state.  spin_lock protects the fields
 * shared with the MDP ISR; update_lock serializes thread-context
 * queue/commit bookkeeping.
 */
static struct vsycn_ctrl {
	struct device *dev;
	int inited;
	int update_ndx;		/* index of the vlist currently being filled */
	int ov_koff;		/* overlay kickoffs issued */
	int ov_done;		/* overlay kickoffs completed (by ISR) */
	atomic_t suspend;
	atomic_t vsync_resume;
	int wait_vsync_cnt;	/* waiters blocked on vsync_comp */
	int blt_change;
	int blt_free;
	int sysfs_created;
	struct mutex update_lock;
	struct completion ov_comp;
	struct completion dmap_comp;
	struct completion vsync_comp;
	spinlock_t spin_lock;
	struct msm_fb_data_type *mfd;
	struct mdp4_overlay_pipe *base_pipe;
	struct vsync_update vlist[2];	/* double-buffered pending updates */
	int vsync_irq_enabled;
	ktime_t vsync_time;
} vsync_ctrl_db[MAX_CONTROLLER];

/*******************************************************
to do:
1) move vsync_irq_enable/vsync_irq_disable to mdp.c to be shared
*******************************************************/
/* Clear + unmask the given MDP interrupt and take an irq reference. */
static void vsync_irq_enable(int intr, int term)
{
	unsigned long flag;

	spin_lock_irqsave(&mdp_spin_lock, flag);
	outp32(MDP_INTR_CLEAR, intr);
	mdp_intr_mask |= intr;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	mdp_enable_irq(term);
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
	pr_debug("%s: IRQ-en done, term=%x\n", __func__, term);
}

/* Mask the given MDP interrupt and drop the irq reference. */
static void vsync_irq_disable(int intr, int term)
{
	unsigned long flag;

	spin_lock_irqsave(&mdp_spin_lock, flag);
	outp32(MDP_INTR_CLEAR, intr);
	mdp_intr_mask &= ~intr;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	mdp_disable_irq_nosync(term);
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
	pr_debug("%s: IRQ-dis done, term=%x\n", __func__, term);
}

/* Power on the overlay block and start the LCDC timing generator,
 * exactly once. */
static void mdp4_overlay_lcdc_start(void)
{
	if (!lcdc_enabled) {
		/* enable DSI block */
		mdp4_iommu_attach();
		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		MDP_OUTP(MDP_BASE + LCDC_BASE, 1);
		lcdc_enabled = 1;
	}
}

/*
 * mdp4_lcdc_pipe_queue:
 * called from thread context
 *
 * Clone the caller's pipe state into the currently-filling vsync_update
 * slot; the clone is consumed later by mdp4_lcdc_pipe_commit().
 */
void mdp4_lcdc_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe)
{
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pp;
	int undx;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];

	if (atomic_read(&vctrl->suspend) > 0)
		return;

	mutex_lock(&vctrl->update_lock);
	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];

	pp = &vp->plist[pipe->pipe_ndx - 1];	/* ndx start form 1 */

	pr_debug("%s: vndx=%d pipe_ndx=%d pid=%d\n", __func__,
		undx, pipe->pipe_ndx, current->pid);

	*pp = *pipe;	/* clone it */
	vp->update_cnt++;
	mutex_unlock(&vctrl->update_lock);
	mdp4_stat.overlay_play[pipe->mixer_num]++;
}

static void mdp4_lcdc_blt_ov_update(struct mdp4_overlay_pipe *pipe);
static void mdp4_lcdc_wait4dmap(int cndx);
static void mdp4_lcdc_wait4ov(int cndx);

/*
 * Commit all queued pipe updates for this controller: flip the vlist
 * double buffer, stage the cloned pipes, then kick either the overlay
 * engine (BLT mode) or schedule the update at dmap.  Returns the number
 * of pipes committed (0 if nothing was queued or a frame was dropped).
 */
int mdp4_lcdc_pipe_commit(int cndx, int wait)
{
	int  i, undx;
	int mixer = 0;
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pipe;
	struct mdp4_overlay_pipe *real_pipe;
	unsigned long flags;
	int cnt = 0;

	vctrl = &vsync_ctrl_db[cndx];

	mutex_lock(&vctrl->update_lock);
	undx =  vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	pipe = vctrl->base_pipe;
	mixer = pipe->mixer_num;

	mdp_update_pm(vctrl->mfd, vctrl->vsync_time);

	if (vp->update_cnt == 0) {
		mutex_unlock(&vctrl->update_lock);
		return 0;
	}

	/* Flip to the other vlist slot so new queues don't race us. */
	vctrl->update_ndx++;
	vctrl->update_ndx &= 0x01;
	vp->update_cnt = 0;     /* reset */
	if (vctrl->blt_free) {
		vctrl->blt_free--;
		if (vctrl->blt_free == 0)
			mdp4_free_writeback_buf(vctrl->mfd, mixer);
	}
	mutex_unlock(&vctrl->update_lock);

	/* free previous committed iommu back to pool */
	mdp4_overlay_iommu_unmap_freelist(mixer);

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->ov_koff != vctrl->ov_done) {
		/* previous overlay kickoff has not completed: drop frame */
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		pr_err("%s: Error, frame dropped %d %d\n", __func__,
			vctrl->ov_koff, vctrl->ov_done);
		return 0;
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_overlay_mdp_perf_upd(vctrl->mfd, 1);

	if (vctrl->blt_change) {
		/* BLT mode switch pending: drain one dmap (and overlay)
		 * cycle before reprogramming. */
		pipe = vctrl->base_pipe;
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		INIT_COMPLETION(vctrl->dmap_comp);
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		mdp4_lcdc_wait4dmap(0);
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(0);
	}

	pipe = vp->plist;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			cnt++;
			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
			if (real_pipe && real_pipe->pipe_used) {
				/* pipe not unset */
				mdp4_overlay_vsync_commit(pipe);
			}
			/* free previous iommu to freelist
			 * which will be freed at next
			 * pipe_commit
			 */
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
			pipe->pipe_used = 0; /* clear */
		}
	}

	mdp4_mixer_stage_commit(mixer);

	/* start timing generator & mmu if they are not started yet */
	mdp4_overlay_lcdc_start();

	pipe = vctrl->base_pipe;
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		mdp4_lcdc_blt_ov_update(pipe);
		pipe->ov_cnt++;
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
		mb();	/* ensure register writes land before the kickoff */
		vctrl->ov_koff++;
		/* kickoff overlay engine */
		mdp4_stat.kickoff_ov0++;
		outpdw(MDP_BASE + 0x0004, 0);
	} else {
		/* schedule second phase update at dmap */
		INIT_COMPLETION(vctrl->dmap_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_stat.overlay_commit[pipe->mixer_num]++;

	if (wait) {
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(cndx);
		else
			mdp4_lcdc_wait4dmap(cndx);
	}

	return cnt;
}

/* Enable/disable delivery of the primary vsync interrupt. */
void mdp4_lcdc_vsync_ctrl(struct fb_info *info, int enable)
{
	struct vsycn_ctrl *vctrl;
	int cndx = 0;

	vctrl = &vsync_ctrl_db[cndx];

	if (vctrl->vsync_irq_enabled == enable)
		return;

	pr_debug("%s: vsync enable=%d\n", __func__, enable);

	vctrl->vsync_irq_enabled = enable;

	if (enable)
		vsync_irq_enable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
	else
		vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);

	if (vctrl->vsync_irq_enabled &&
		atomic_read(&vctrl->suspend) == 0)
		atomic_set(&vctrl->vsync_resume, 1);
}

/* Block until the next vsync; returns its timestamp (ns) via *vtime,
 * or -1 when the controller is suspended. */
void mdp4_lcdc_wait4vsync(int cndx, long long *vtime)
{
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	unsigned long flags;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	if (atomic_read(&vctrl->suspend) > 0) {
		*vtime = -1;
		return;
	}

	/* start timing generator & mmu if they are not started yet */
	mdp4_overlay_lcdc_start();

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->wait_vsync_cnt == 0)
		INIT_COMPLETION(vctrl->vsync_comp);
	vctrl->wait_vsync_cnt++;
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	wait_for_completion(&vctrl->vsync_comp);
	mdp4_stat.wait4vsync0++;

	*vtime = vctrl->vsync_time.tv64;
}

/* Block until the dmap-done ISR fires (no-op while suspended). */
static void mdp4_lcdc_wait4dmap(int cndx)
{
	struct vsycn_ctrl *vctrl;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];

	if (atomic_read(&vctrl->suspend) > 0)
		return;

	wait_for_completion(&vctrl->dmap_comp);
}

/* Block until the overlay0-done ISR fires (no-op while suspended). */
static void mdp4_lcdc_wait4ov(int cndx)
{
	struct vsycn_ctrl *vctrl;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];

	if (atomic_read(&vctrl->suspend) > 0)
		return;

	wait_for_completion(&vctrl->ov_comp);
}

/*
 * sysfs "show" handler: wait (interruptibly, up to 4 vsync periods) for
 * the next vsync and report its timestamp as "VSYNC=<ns>".  On timeout
 * the current time is reported instead.
 */
ssize_t mdp4_lcdc_show_event(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	int cndx;
	struct vsycn_ctrl *vctrl;
	ssize_t ret = 0;
	unsigned long flags;
	u64 vsync_tick;

	cndx = 0;
	vctrl = &vsync_ctrl_db[0];

	if (atomic_read(&vctrl->suspend) > 0 ||
		atomic_read(&vctrl->vsync_resume) == 0)
		return 0;

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->wait_vsync_cnt == 0)
		INIT_COMPLETION(vctrl->vsync_comp);
	vctrl->wait_vsync_cnt++;
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	ret = wait_for_completion_interruptible_timeout(&vctrl->vsync_comp,
		msecs_to_jiffies(VSYNC_PERIOD * 4));
	if (ret <= 0) {
		vctrl->wait_vsync_cnt = 0;
		vsync_tick = ktime_to_ns(ktime_get());
		ret = snprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_tick);
		/* NOTE(review): this writes one byte past the existing NUL
		 * (index strlen+1); looks like it was meant to be
		 * buf[strlen(buf)] — harmless within PAGE_SIZE but worth
		 * confirming. */
		buf[strlen(buf) + 1] = '\0';
		return ret;
	}

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	vsync_tick = ktime_to_ns(vctrl->vsync_time);
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	ret = snprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_tick);
	/* NOTE(review): same off-by-one as above. */
	buf[strlen(buf) + 1] = '\0';
	return ret;
}

/* One-time init of the per-controller locks and completions. */
void mdp4_lcdc_vsync_init(int cndx)
{
	struct vsycn_ctrl *vctrl;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
		return;
	}

	pr_info("%s: ndx=%d\n", __func__, cndx);

	vctrl = &vsync_ctrl_db[cndx];
	if (vctrl->inited)
		return;

	vctrl->inited = 1;
	vctrl->update_ndx = 0;
	mutex_init(&vctrl->update_lock);
	init_completion(&vctrl->vsync_comp);
	init_completion(&vctrl->dmap_comp);
	init_completion(&vctrl->ov_comp);
	atomic_set(&vctrl->suspend, 1);
	atomic_set(&vctrl->vsync_resume, 1);
	spin_lock_init(&vctrl->spin_lock);
}

/* Replace the controller's base-layer pipe. */
void mdp4_lcdc_base_swap(int cndx, struct mdp4_overlay_pipe *pipe)
{
	struct vsycn_ctrl *vctrl;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];
	vctrl->base_pipe = pipe;
}

/*
 * Panel-on path: allocate/configure the base RGB pipe from the
 * framebuffer parameters, then program the LCDC timing generator
 * registers (sync widths, porches, polarities, active area).
 */
int mdp4_lcdc_on(struct platform_device *pdev)
{
	int lcdc_width;
	int lcdc_height;
	int lcdc_bpp;
	int lcdc_border_clr;
	int lcdc_underflow_clr;
	int lcdc_hsync_skew;
	int hsync_period;
	int hsync_ctrl;
	int vsync_period;
	int display_hctl;
	int display_v_start;
	int display_v_end;
	int active_hctl;
	int active_h_start;
	int active_h_end;
	int active_v_start;
	int active_v_end;
	int ctrl_polarity;
	int h_back_porch;
	int h_front_porch;
	int v_back_porch;
	int v_front_porch;
	int hsync_pulse_width;
	int vsync_pulse_width;
	int hsync_polarity;
	int vsync_polarity;
	int data_en_polarity;
	int hsync_start_x;
	int hsync_end_x;
	uint8 *buf;
	unsigned int buf_offset;
	int bpp, ptype;
	struct fb_info *fbi;
	struct fb_var_screeninfo *var;
	struct msm_fb_data_type *mfd;
	struct mdp4_overlay_pipe *pipe;
	int ret = 0;
	int cndx = 0;
	struct vsycn_ctrl *vctrl;

	vctrl = &vsync_ctrl_db[cndx];

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);

	if (!mfd)
		return -ENODEV;

	if (mfd->key != MFD_KEY)
		return -EINVAL;

	vctrl->mfd = mfd;
	vctrl->dev = mfd->fbi->dev;

	/* mdp clock on */
	mdp_clk_ctrl(1);

	fbi = mfd->fbi;
	var = &fbi->var;

	bpp = fbi->var.bits_per_pixel / 8;
	buf = (uint8 *) fbi->fix.smem_start;
	buf_offset = calc_fb_offset(mfd, fbi, bpp);

	if (vctrl->base_pipe == NULL) {
		/* First-time on: allocate and configure the base pipe. */
		ptype = mdp4_overlay_format2type(mfd->fb_imgType);
		if (ptype < 0)
			printk(KERN_INFO "%s: format2type failed\n", __func__);
		pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER0);
		if (pipe == NULL)
			printk(KERN_INFO "%s: pipe_alloc failed\n", __func__);
		pipe->pipe_used++;
		pipe->mixer_stage = MDP4_MIXER_STAGE_BASE;
		pipe->mixer_num = MDP4_MIXER0;
		pipe->src_format = mfd->fb_imgType;
		mdp4_overlay_panel_mode(pipe->mixer_num, MDP4_PANEL_LCDC);
		ret = mdp4_overlay_format2pipe(pipe);
		if (ret < 0)
			printk(KERN_INFO "%s: format2pipe failed\n", __func__);

		mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
		pipe->ov_blt_addr = 0;
		pipe->dma_blt_addr = 0;

		vctrl->base_pipe = pipe; /* keep it */
	} else {
		pipe = vctrl->base_pipe;
	}

	/* Source/destination geometry tracks the framebuffer 1:1. */
	pipe->src_height = fbi->var.yres;
	pipe->src_width = fbi->var.xres;
	pipe->src_h = fbi->var.yres;
	pipe->src_w = fbi->var.xres;
	pipe->src_y = 0;
	pipe->src_x = 0;
	pipe->dst_h = fbi->var.yres;
	pipe->dst_w = fbi->var.xres;

	if (mfd->display_iova)
		pipe->srcp0_addr = mfd->display_iova + buf_offset;
	else
		pipe->srcp0_addr = (uint32)(buf + buf_offset);

	pipe->srcp0_ystride = fbi->fix.line_length;
	pipe->bpp = bpp;

	mdp4_overlay_mdp_pipe_req(pipe, mfd);
	mdp4_calc_blt_mdp_bw(mfd, pipe);

	atomic_set(&vctrl->suspend, 0);

	mdp4_overlay_dmap_xy(pipe);
	mdp4_overlay_dmap_cfg(mfd, 1);
	mdp4_overlay_rgb_setup(pipe);
	mdp4_overlayproc_cfg(pipe);

	mdp4_overlay_reg_flush(pipe, 1);
	mdp4_mixer_stage_up(pipe, 0);

	/*
	 * LCDC timing setting
	 */
	h_back_porch = var->left_margin;
	h_front_porch = var->right_margin;
	v_back_porch = var->upper_margin;
	v_front_porch = var->lower_margin;
	hsync_pulse_width = var->hsync_len;
	vsync_pulse_width = var->vsync_len;
	lcdc_border_clr = mfd->panel_info.lcdc.border_clr;
	lcdc_underflow_clr = mfd->panel_info.lcdc.underflow_clr;
	lcdc_hsync_skew = mfd->panel_info.lcdc.hsync_skew;

	lcdc_width = var->xres + mfd->panel_info.lcdc.xres_pad;
	lcdc_height = var->yres + mfd->panel_info.lcdc.yres_pad;
	lcdc_bpp = mfd->panel_info.bpp;

	hsync_period = hsync_pulse_width + h_back_porch + h_front_porch;
	/* Dual-channel LVDS drives two pixels per clock, so the hsync
	 * period only covers half the line width. */
	if ((mfd->panel_info.type == LVDS_PANEL) &&
		(mfd->panel_info.lvds.channel_mode == LVDS_DUAL_CHANNEL_MODE))
		hsync_period += lcdc_width / 2;
	else
		hsync_period += lcdc_width;
	hsync_ctrl = (hsync_period << 16) | hsync_pulse_width;
	hsync_start_x = hsync_pulse_width + h_back_porch;
	hsync_end_x = hsync_period - h_front_porch - 1;
	display_hctl = (hsync_end_x << 16) | hsync_start_x;

	/* Vertical timing is expressed in units of hsync periods. */
	vsync_period =
	    (vsync_pulse_width + v_back_porch + lcdc_height + v_front_porch) * hsync_period;
	display_v_start =
	    (vsync_pulse_width + v_back_porch) * hsync_period + lcdc_hsync_skew;
	display_v_end =
	    vsync_period - (v_front_porch * hsync_period) + lcdc_hsync_skew - 1;

	if (lcdc_width != var->xres) {
		active_h_start = hsync_start_x + first_pixel_start_x;
		active_h_end = active_h_start + var->xres - 1;
		active_hctl =
		    ACTIVE_START_X_EN | (active_h_end << 16) | active_h_start;
	} else {
		active_hctl = 0;
	}

	if (lcdc_height != var->yres) {
		active_v_start =
		    display_v_start + first_pixel_start_y * hsync_period;
		active_v_end = active_v_start + (var->yres) * hsync_period - 1;
		active_v_start |= ACTIVE_START_Y_EN;
	} else {
		active_v_start = 0;
		active_v_end = 0;
	}

#ifdef CONFIG_FB_MSM_MDP40
	if (mfd->panel_info.lcdc.is_sync_active_high) {
		hsync_polarity = 0;
		vsync_polarity = 0;
	} else {
		hsync_polarity = 1;
		vsync_polarity = 1;
	}
	lcdc_underflow_clr |= 0x80000000;	/* enable recovery */
#else
	hsync_polarity = 0;
	vsync_polarity = 0;
#endif
	data_en_polarity = 0;

	ctrl_polarity =
	    (data_en_polarity << 2) | (vsync_polarity << 1) | (hsync_polarity);

	/* Program the LCDC timing registers in one power-on window. */
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x4, hsync_ctrl);
	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x8, vsync_period);
	MDP_OUTP(MDP_BASE + LCDC_BASE + 0xc, vsync_pulse_width * hsync_period);
	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x10, display_hctl);
	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x14, display_v_start);
	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x18, display_v_end);
	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x28, lcdc_border_clr);
	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x2c, lcdc_underflow_clr);
	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x30, lcdc_hsync_skew);
	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x38, ctrl_polarity);
	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x1c, active_hctl);
	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x20, active_v_start);
	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x24, active_v_end);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	mdp_histogram_ctrl_all(TRUE);

	return ret;
}

/*
 * Panel-off path: mark suspended, drain any in-flight overlay kickoff,
 * stop the timing generator, tear down the base pipe (fully on "adb
 * stop", partially on system suspend), and drop the MDP clock.
 */
int mdp4_lcdc_off(struct platform_device *pdev)
{
	int ret = 0;
	int cndx = 0;
	struct msm_fb_data_type *mfd;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	struct vsync_update *vp;
	unsigned long flags;
	int undx, need_wait = 0;

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	atomic_set(&vctrl->suspend, 1);
	atomic_set(&vctrl->vsync_resume, 0);

	msleep(20);	/* >= 17 ms */

	complete_all(&vctrl->vsync_comp);

	if (pipe->ov_blt_addr) {
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		if (vctrl->ov_koff != vctrl->ov_done)
			need_wait = 1;
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		if (need_wait)
			mdp4_lcdc_wait4ov(0);
	}

	mdp_histogram_ctrl_all(FALSE);

	/* Stop the LCDC timing generator. */
	MDP_OUTP(MDP_BASE + LCDC_BASE, 0);

	lcdc_enabled = 0;

	if (vctrl->vsync_irq_enabled) {
		vctrl->vsync_irq_enabled = 0;
		vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
	}

	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	if (vp->update_cnt) {
		/*
		 * pipe's iommu will be freed at next overlay play
		 * and iommu_drop statistic will be increased by one
		 */
		vp->update_cnt = 0;     /* empty queue */
	}

	if (pipe) {
		/* sanity check, free pipes besides base layer */
		mdp4_overlay_unset_mixer(pipe->mixer_num);
		if (mfd->ref_cnt == 0) {
			/* adb stop */
			if (pipe->pipe_type == OVERLAY_TYPE_BF)
				mdp4_overlay_borderfill_stage_down(pipe);

			/* base pipe may change after borderfill_stage_down */
			pipe = vctrl->base_pipe;
			mdp4_mixer_stage_down(pipe, 1);
			mdp4_overlay_pipe_free(pipe);
			vctrl->base_pipe = NULL;
		} else {
			/* system suspending */
			mdp4_mixer_stage_down(vctrl->base_pipe, 1);
			mdp4_overlay_iommu_pipe_free(
				vctrl->base_pipe->pipe_ndx, 1);
		}
	}

	/* MDP clock disable */
	mdp_clk_ctrl(0);
	mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	return ret;
}

/* Point the overlay engine output at the BLT buffer half selected by
 * ov_cnt (ping-pong between the two halves). */
static void mdp4_lcdc_blt_ov_update(struct mdp4_overlay_pipe *pipe)
{
	uint32 off, addr;
	int bpp;
	char *overlay_base;

	if (pipe->ov_blt_addr == 0)
		return;

#ifdef BLT_RGB565
	bpp = 2; /* overlay output is RGB565 */
#else
	bpp = 3; /* overlay output is RGB888 */
#endif
	off = 0;
	if (pipe->ov_cnt & 0x01)
		off = pipe->src_height * pipe->src_width * bpp;
	addr = pipe->ov_blt_addr + off;

	/* overlay 0 */
	overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
	outpdw(overlay_base + 0x000c, addr);
	outpdw(overlay_base + 0x001c, addr);
}

/* Point dmap at the BLT buffer half selected by dmap_cnt. */
static void mdp4_lcdc_blt_dmap_update(struct mdp4_overlay_pipe *pipe)
{
	uint32 off, addr;
	int bpp;

	if (pipe->ov_blt_addr == 0)
		return;

#ifdef BLT_RGB565
	bpp = 2; /* overlay output is RGB565 */
#else
	bpp = 3; /* overlay output is RGB888 */
#endif
	off = 0;
	if (pipe->dmap_cnt & 0x01)
		off = pipe->src_height * pipe->src_width * bpp;
	addr = pipe->dma_blt_addr + off;

	/* dmap */
	MDP_OUTP(MDP_BASE + 0x90008, addr);
}

/*
 * mdp4_primary_vsync_lcdc: called from isr
 *
 * Record the vsync timestamp and release any wait4vsync/show_event
 * waiters.
 */
void mdp4_primary_vsync_lcdc(void)
{
	int cndx;
	struct vsycn_ctrl *vctrl;

	cndx = 0;
	vctrl = &vsync_ctrl_db[cndx];
	pr_debug("%s: cpu=%d\n", __func__, smp_processor_id());
	spin_lock(&vctrl->spin_lock);
	vctrl->vsync_time = ktime_get();

	if (vctrl->wait_vsync_cnt) {
		complete_all(&vctrl->vsync_comp);
		vctrl->wait_vsync_cnt = 0;
	}
	spin_unlock(&vctrl->spin_lock);
}

/*
 * mdp4_dma_p_done_lcdc: called from isr
 *
 * dmap finished: if a BLT mode change was pending, reprogram the
 * overlay/dmap paths and prefill one frame, then release dmap waiters.
 */
void mdp4_dmap_done_lcdc(int cndx)
{
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL)
		return;

	spin_lock(&vctrl->spin_lock);
	vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	if (vctrl->blt_change) {
		mdp4_overlayproc_cfg(pipe);
		mdp4_overlay_dmap_xy(pipe);
		if (pipe->ov_blt_addr) {
			mdp4_lcdc_blt_ov_update(pipe);
			pipe->ov_cnt++;
			/* Prefill one frame */
			vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
			/* kickoff overlay0 engine */
			mdp4_stat.kickoff_ov0++;
			vctrl->ov_koff++;	/* make up for prefill */
			outpdw(MDP_BASE + 0x0004, 0);
		}
		vctrl->blt_change = 0;
	}

	complete_all(&vctrl->dmap_comp);

	if (mdp_rev <= MDP_REV_41)
		mdp4_mixer_blend_cfg(MDP4_MIXER0);

	mdp4_overlay_dma_commit(cndx);
	spin_unlock(&vctrl->spin_lock);
}

/*
 * mdp4_overlay0_done_lcdc: called from isr
 */
void mdp4_overlay0_done_lcdc(int cndx)
{
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL)
return; spin_lock(&vctrl->spin_lock); vsync_irq_disable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM); vctrl->ov_done++; complete_all(&vctrl->ov_comp); if (pipe->ov_blt_addr == 0) { spin_unlock(&vctrl->spin_lock); return; } mdp4_lcdc_blt_dmap_update(pipe); pipe->dmap_cnt++; spin_unlock(&vctrl->spin_lock); } static void mdp4_lcdc_do_blt(struct msm_fb_data_type *mfd, int enable) { unsigned long flag; int cndx = 0; struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; vctrl = &vsync_ctrl_db[cndx]; pipe = vctrl->base_pipe; mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0); if (mfd->ov0_wb_buf->write_addr == 0) { pr_info("%s: no blt_base assigned\n", __func__); return; } spin_lock_irqsave(&vctrl->spin_lock, flag); if (enable && pipe->ov_blt_addr == 0) { pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr; pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr; pipe->ov_cnt = 0; pipe->dmap_cnt = 0; vctrl->ov_koff = 0; vctrl->ov_done = 0; vctrl->blt_free = 0; mdp4_stat.blt_lcdc++; vctrl->blt_change++; } else if (enable == 0 && pipe->ov_blt_addr) { pipe->ov_blt_addr = 0; pipe->dma_blt_addr = 0; vctrl->blt_free = 4; /* 4 commits to free wb buf */ vctrl->blt_change++; } pr_info("%s: enable=%d change=%d blt_addr=%x\n", __func__, vctrl->blt_change, enable, (int)pipe->ov_blt_addr); if (!vctrl->blt_change) { spin_unlock_irqrestore(&vctrl->spin_lock, flag); return; } spin_unlock_irqrestore(&vctrl->spin_lock, flag); } void mdp4_lcdc_overlay_blt(struct msm_fb_data_type *mfd, struct msmfb_overlay_blt *req) { mdp4_lcdc_do_blt(mfd, req->enable); } void mdp4_lcdc_overlay_blt_start(struct msm_fb_data_type *mfd) { mdp4_lcdc_do_blt(mfd, 1); } void mdp4_lcdc_overlay_blt_stop(struct msm_fb_data_type *mfd) { mdp4_lcdc_do_blt(mfd, 0); } void mdp4_lcdc_overlay(struct msm_fb_data_type *mfd) { struct fb_info *fbi = mfd->fbi; uint8 *buf; unsigned int buf_offset; int bpp; int cnt, cndx = 0; struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; mutex_lock(&mfd->dma->ov_mutex); vctrl = &vsync_ctrl_db[cndx]; pipe 
= vctrl->base_pipe; if (!pipe || !mfd->panel_power_on) { mutex_unlock(&mfd->dma->ov_mutex); return; } pr_debug("%s: cpu=%d pid=%d\n", __func__, smp_processor_id(), current->pid); if (pipe->pipe_type == OVERLAY_TYPE_RGB) { bpp = fbi->var.bits_per_pixel / 8; buf = (uint8 *) fbi->fix.smem_start; buf_offset = calc_fb_offset(mfd, fbi, bpp); if (mfd->display_iova) pipe->srcp0_addr = mfd->display_iova + buf_offset; else pipe->srcp0_addr = (uint32)(buf + buf_offset); mdp4_lcdc_pipe_queue(0, pipe); } mdp4_overlay_mdp_perf_upd(mfd, 1); cnt = mdp4_lcdc_pipe_commit(cndx, 0); if (cnt) { if (pipe->ov_blt_addr) mdp4_lcdc_wait4ov(cndx); else mdp4_lcdc_wait4dmap(cndx); } mdp4_overlay_mdp_perf_upd(mfd, 0); mutex_unlock(&mfd->dma->ov_mutex); }
gpl-2.0
londbell/ZTE_U988S_JellyBean-4.2.2-Kernel-3.4.35
drivers/net/ethernet/sfc/siena_sriov.c
3466
45705
/**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2010-2011 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation, incorporated herein by reference. */ #include <linux/pci.h> #include <linux/module.h> #include "net_driver.h" #include "efx.h" #include "nic.h" #include "io.h" #include "mcdi.h" #include "filter.h" #include "mcdi_pcol.h" #include "regs.h" #include "vfdi.h" /* Number of longs required to track all the VIs in a VF */ #define VI_MASK_LENGTH BITS_TO_LONGS(1 << EFX_VI_SCALE_MAX) /** * enum efx_vf_tx_filter_mode - TX MAC filtering behaviour * @VF_TX_FILTER_OFF: Disabled * @VF_TX_FILTER_AUTO: Enabled if MAC address assigned to VF and only * 2 TX queues allowed per VF. * @VF_TX_FILTER_ON: Enabled */ enum efx_vf_tx_filter_mode { VF_TX_FILTER_OFF, VF_TX_FILTER_AUTO, VF_TX_FILTER_ON, }; /** * struct efx_vf - Back-end resource and protocol state for a PCI VF * @efx: The Efx NIC owning this VF * @pci_rid: The PCI requester ID for this VF * @pci_name: The PCI name (formatted address) of this VF * @index: Index of VF within its port and PF. * @req: VFDI incoming request work item. Incoming USR_EV events are received * by the NAPI handler, but must be handled by executing MCDI requests * inside a work item. * @req_addr: VFDI incoming request DMA address (in VF's PCI address space). * @req_type: Expected next incoming (from VF) %VFDI_EV_TYPE member. * @req_seqno: Expected next incoming (from VF) %VFDI_EV_SEQ member. * @msg_seqno: Next %VFDI_EV_SEQ member to reply to VF. Protected by * @status_lock * @busy: VFDI request queued to be processed or being processed. Receiving * a VFDI request when @busy is set is an error condition. * @buf: Incoming VFDI requests are DMA from the VF into this buffer. 
* @buftbl_base: Buffer table entries for this VF start at this index. * @rx_filtering: Receive filtering has been requested by the VF driver. * @rx_filter_flags: The flags sent in the %VFDI_OP_INSERT_FILTER request. * @rx_filter_qid: VF relative qid for RX filter requested by VF. * @rx_filter_id: Receive MAC filter ID. Only one filter per VF is supported. * @tx_filter_mode: Transmit MAC filtering mode. * @tx_filter_id: Transmit MAC filter ID. * @addr: The MAC address and outer vlan tag of the VF. * @status_addr: VF DMA address of page for &struct vfdi_status updates. * @status_lock: Mutex protecting @msg_seqno, @status_addr, @addr, * @peer_page_addrs and @peer_page_count from simultaneous * updates by the VM and consumption by * efx_sriov_update_vf_addr() * @peer_page_addrs: Pointer to an array of guest pages for local addresses. * @peer_page_count: Number of entries in @peer_page_count. * @evq0_addrs: Array of guest pages backing evq0. * @evq0_count: Number of entries in @evq0_addrs. * @flush_waitq: wait queue used by %VFDI_OP_FINI_ALL_QUEUES handler * to wait for flush completions. * @txq_lock: Mutex for TX queue allocation. * @txq_mask: Mask of initialized transmit queues. * @txq_count: Number of initialized transmit queues. * @rxq_mask: Mask of initialized receive queues. * @rxq_count: Number of initialized receive queues. * @rxq_retry_mask: Mask or receive queues that need to be flushed again * due to flush failure. * @rxq_retry_count: Number of receive queues in @rxq_retry_mask. * @reset_work: Work item to schedule a VF reset. 
*/ struct efx_vf { struct efx_nic *efx; unsigned int pci_rid; char pci_name[13]; /* dddd:bb:dd.f */ unsigned int index; struct work_struct req; u64 req_addr; int req_type; unsigned req_seqno; unsigned msg_seqno; bool busy; struct efx_buffer buf; unsigned buftbl_base; bool rx_filtering; enum efx_filter_flags rx_filter_flags; unsigned rx_filter_qid; int rx_filter_id; enum efx_vf_tx_filter_mode tx_filter_mode; int tx_filter_id; struct vfdi_endpoint addr; u64 status_addr; struct mutex status_lock; u64 *peer_page_addrs; unsigned peer_page_count; u64 evq0_addrs[EFX_MAX_VF_EVQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE]; unsigned evq0_count; wait_queue_head_t flush_waitq; struct mutex txq_lock; unsigned long txq_mask[VI_MASK_LENGTH]; unsigned txq_count; unsigned long rxq_mask[VI_MASK_LENGTH]; unsigned rxq_count; unsigned long rxq_retry_mask[VI_MASK_LENGTH]; atomic_t rxq_retry_count; struct work_struct reset_work; }; struct efx_memcpy_req { unsigned int from_rid; void *from_buf; u64 from_addr; unsigned int to_rid; u64 to_addr; unsigned length; }; /** * struct efx_local_addr - A MAC address on the vswitch without a VF. * * Siena does not have a switch, so VFs can't transmit data to each * other. Instead the VFs must be made aware of the local addresses * on the vswitch, so that they can arrange for an alternative * software datapath to be used. * * @link: List head for insertion into efx->local_addr_list. * @addr: Ethernet address */ struct efx_local_addr { struct list_head link; u8 addr[ETH_ALEN]; }; /** * struct efx_endpoint_page - Page of vfdi_endpoint structures * * @link: List head for insertion into efx->local_page_list. * @ptr: Pointer to page. * @addr: DMA address of page. 
*/ struct efx_endpoint_page { struct list_head link; void *ptr; dma_addr_t addr; }; /* Buffer table entries are reserved txq0,rxq0,evq0,txq1,rxq1,evq1 */ #define EFX_BUFTBL_TXQ_BASE(_vf, _qid) \ ((_vf)->buftbl_base + EFX_VF_BUFTBL_PER_VI * (_qid)) #define EFX_BUFTBL_RXQ_BASE(_vf, _qid) \ (EFX_BUFTBL_TXQ_BASE(_vf, _qid) + \ (EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE)) #define EFX_BUFTBL_EVQ_BASE(_vf, _qid) \ (EFX_BUFTBL_TXQ_BASE(_vf, _qid) + \ (2 * EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE)) #define EFX_FIELD_MASK(_field) \ ((1 << _field ## _WIDTH) - 1) /* VFs can only use this many transmit channels */ static unsigned int vf_max_tx_channels = 2; module_param(vf_max_tx_channels, uint, 0444); MODULE_PARM_DESC(vf_max_tx_channels, "Limit the number of TX channels VFs can use"); static int max_vfs = -1; module_param(max_vfs, int, 0444); MODULE_PARM_DESC(max_vfs, "Reduce the number of VFs initialized by the driver"); /* Workqueue used by VFDI communication. We can't use the global * workqueue because it may be running the VF driver's probe() * routine, which will be blocked there waiting for a VFDI response. */ static struct workqueue_struct *vfdi_workqueue; static unsigned abs_index(struct efx_vf *vf, unsigned index) { return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index; } static int efx_sriov_cmd(struct efx_nic *efx, bool enable, unsigned *vi_scale_out, unsigned *vf_total_out) { u8 inbuf[MC_CMD_SRIOV_IN_LEN]; u8 outbuf[MC_CMD_SRIOV_OUT_LEN]; unsigned vi_scale, vf_total; size_t outlen; int rc; MCDI_SET_DWORD(inbuf, SRIOV_IN_ENABLE, enable ? 
1 : 0); MCDI_SET_DWORD(inbuf, SRIOV_IN_VI_BASE, EFX_VI_BASE); MCDI_SET_DWORD(inbuf, SRIOV_IN_VF_COUNT, efx->vf_count); rc = efx_mcdi_rpc(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN, outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen); if (rc) return rc; if (outlen < MC_CMD_SRIOV_OUT_LEN) return -EIO; vf_total = MCDI_DWORD(outbuf, SRIOV_OUT_VF_TOTAL); vi_scale = MCDI_DWORD(outbuf, SRIOV_OUT_VI_SCALE); if (vi_scale > EFX_VI_SCALE_MAX) return -EOPNOTSUPP; if (vi_scale_out) *vi_scale_out = vi_scale; if (vf_total_out) *vf_total_out = vf_total; return 0; } static void efx_sriov_usrev(struct efx_nic *efx, bool enabled) { efx_oword_t reg; EFX_POPULATE_OWORD_2(reg, FRF_CZ_USREV_DIS, enabled ? 0 : 1, FRF_CZ_DFLT_EVQ, efx->vfdi_channel->channel); efx_writeo(efx, &reg, FR_CZ_USR_EV_CFG); } static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req, unsigned int count) { u8 *inbuf, *record; unsigned int used; u32 from_rid, from_hi, from_lo; int rc; mb(); /* Finish writing source/reading dest before DMA starts */ used = MC_CMD_MEMCPY_IN_LEN(count); if (WARN_ON(used > MCDI_CTL_SDU_LEN_MAX)) return -ENOBUFS; /* Allocate room for the largest request */ inbuf = kzalloc(MCDI_CTL_SDU_LEN_MAX, GFP_KERNEL); if (inbuf == NULL) return -ENOMEM; record = inbuf; MCDI_SET_DWORD(record, MEMCPY_IN_RECORD, count); while (count-- > 0) { MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_RID, req->to_rid); MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO, (u32)req->to_addr); MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI, (u32)(req->to_addr >> 32)); if (req->from_buf == NULL) { from_rid = req->from_rid; from_lo = (u32)req->from_addr; from_hi = (u32)(req->from_addr >> 32); } else { if (WARN_ON(used + req->length > MCDI_CTL_SDU_LEN_MAX)) { rc = -ENOBUFS; goto out; } from_rid = MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE; from_lo = used; from_hi = 0; memcpy(inbuf + used, req->from_buf, req->length); used += req->length; } MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_RID, 
from_rid); MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO, from_lo); MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI, from_hi); MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_LENGTH, req->length); ++req; record += MC_CMD_MEMCPY_IN_RECORD_LEN; } rc = efx_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL); out: kfree(inbuf); mb(); /* Don't write source/read dest before DMA is complete */ return rc; } /* The TX filter is entirely controlled by this driver, and is modified * underneath the feet of the VF */ static void efx_sriov_reset_tx_filter(struct efx_vf *vf) { struct efx_nic *efx = vf->efx; struct efx_filter_spec filter; u16 vlan; int rc; if (vf->tx_filter_id != -1) { efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, vf->tx_filter_id); netif_dbg(efx, hw, efx->net_dev, "Removed vf %s tx filter %d\n", vf->pci_name, vf->tx_filter_id); vf->tx_filter_id = -1; } if (is_zero_ether_addr(vf->addr.mac_addr)) return; /* Turn on TX filtering automatically if not explicitly * enabled or disabled. */ if (vf->tx_filter_mode == VF_TX_FILTER_AUTO && vf_max_tx_channels <= 2) vf->tx_filter_mode = VF_TX_FILTER_ON; vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK; efx_filter_init_tx(&filter, abs_index(vf, 0)); rc = efx_filter_set_eth_local(&filter, vlan ? 
vlan : EFX_FILTER_VID_UNSPEC, vf->addr.mac_addr); BUG_ON(rc); rc = efx_filter_insert_filter(efx, &filter, true); if (rc < 0) { netif_warn(efx, hw, efx->net_dev, "Unable to migrate tx filter for vf %s\n", vf->pci_name); } else { netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s tx filter %d\n", vf->pci_name, rc); vf->tx_filter_id = rc; } } /* The RX filter is managed here on behalf of the VF driver */ static void efx_sriov_reset_rx_filter(struct efx_vf *vf) { struct efx_nic *efx = vf->efx; struct efx_filter_spec filter; u16 vlan; int rc; if (vf->rx_filter_id != -1) { efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, vf->rx_filter_id); netif_dbg(efx, hw, efx->net_dev, "Removed vf %s rx filter %d\n", vf->pci_name, vf->rx_filter_id); vf->rx_filter_id = -1; } if (!vf->rx_filtering || is_zero_ether_addr(vf->addr.mac_addr)) return; vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK; efx_filter_init_rx(&filter, EFX_FILTER_PRI_REQUIRED, vf->rx_filter_flags, abs_index(vf, vf->rx_filter_qid)); rc = efx_filter_set_eth_local(&filter, vlan ? vlan : EFX_FILTER_VID_UNSPEC, vf->addr.mac_addr); BUG_ON(rc); rc = efx_filter_insert_filter(efx, &filter, true); if (rc < 0) { netif_warn(efx, hw, efx->net_dev, "Unable to insert rx filter for vf %s\n", vf->pci_name); } else { netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s rx filter %d\n", vf->pci_name, rc); vf->rx_filter_id = rc; } } static void __efx_sriov_update_vf_addr(struct efx_vf *vf) { efx_sriov_reset_tx_filter(vf); efx_sriov_reset_rx_filter(vf); queue_work(vfdi_workqueue, &vf->efx->peer_work); } /* Push the peer list to this VF. 
The caller must hold status_lock to interlock * with VFDI requests, and they must be serialised against manipulation of * local_page_list, either by acquiring local_lock or by running from * efx_sriov_peer_work() */ static void __efx_sriov_push_vf_status(struct efx_vf *vf) { struct efx_nic *efx = vf->efx; struct vfdi_status *status = efx->vfdi_status.addr; struct efx_memcpy_req copy[4]; struct efx_endpoint_page *epp; unsigned int pos, count; unsigned data_offset; efx_qword_t event; WARN_ON(!mutex_is_locked(&vf->status_lock)); WARN_ON(!vf->status_addr); status->local = vf->addr; status->generation_end = ++status->generation_start; memset(copy, '\0', sizeof(copy)); /* Write generation_start */ copy[0].from_buf = &status->generation_start; copy[0].to_rid = vf->pci_rid; copy[0].to_addr = vf->status_addr + offsetof(struct vfdi_status, generation_start); copy[0].length = sizeof(status->generation_start); /* DMA the rest of the structure (excluding the generations). This * assumes that the non-generation portion of vfdi_status is in * one chunk starting at the version member. */ data_offset = offsetof(struct vfdi_status, version); copy[1].from_rid = efx->pci_dev->devfn; copy[1].from_addr = efx->vfdi_status.dma_addr + data_offset; copy[1].to_rid = vf->pci_rid; copy[1].to_addr = vf->status_addr + data_offset; copy[1].length = status->length - data_offset; /* Copy the peer pages */ pos = 2; count = 0; list_for_each_entry(epp, &efx->local_page_list, link) { if (count == vf->peer_page_count) { /* The VF driver will know they need to provide more * pages because peer_addr_count is too large. 
*/ break; } copy[pos].from_buf = NULL; copy[pos].from_rid = efx->pci_dev->devfn; copy[pos].from_addr = epp->addr; copy[pos].to_rid = vf->pci_rid; copy[pos].to_addr = vf->peer_page_addrs[count]; copy[pos].length = EFX_PAGE_SIZE; if (++pos == ARRAY_SIZE(copy)) { efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy)); pos = 0; } ++count; } /* Write generation_end */ copy[pos].from_buf = &status->generation_end; copy[pos].to_rid = vf->pci_rid; copy[pos].to_addr = vf->status_addr + offsetof(struct vfdi_status, generation_end); copy[pos].length = sizeof(status->generation_end); efx_sriov_memcpy(efx, copy, pos + 1); /* Notify the guest */ EFX_POPULATE_QWORD_3(event, FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV, VFDI_EV_SEQ, (vf->msg_seqno & 0xff), VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS); ++vf->msg_seqno; efx_generate_event(efx, EFX_VI_BASE + vf->index * efx_vf_size(efx), &event); } static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset, u64 *addr, unsigned count) { efx_qword_t buf; unsigned pos; for (pos = 0; pos < count; ++pos) { EFX_POPULATE_QWORD_3(buf, FRF_AZ_BUF_ADR_REGION, 0, FRF_AZ_BUF_ADR_FBUF, addr ? addr[pos] >> 12 : 0, FRF_AZ_BUF_OWNER_ID_FBUF, 0); efx_sram_writeq(efx, efx->membase + FR_BZ_BUF_FULL_TBL, &buf, offset + pos); } } static bool bad_vf_index(struct efx_nic *efx, unsigned index) { return index >= efx_vf_size(efx); } static bool bad_buf_count(unsigned buf_count, unsigned max_entry_count) { unsigned max_buf_count = max_entry_count * sizeof(efx_qword_t) / EFX_BUF_SIZE; return ((buf_count & (buf_count - 1)) || buf_count > max_buf_count); } /* Check that VI specified by per-port index belongs to a VF. * Optionally set VF index and VI index within the VF. 
*/ static bool map_vi_index(struct efx_nic *efx, unsigned abs_index, struct efx_vf **vf_out, unsigned *rel_index_out) { unsigned vf_i; if (abs_index < EFX_VI_BASE) return true; vf_i = (abs_index - EFX_VI_BASE) / efx_vf_size(efx); if (vf_i >= efx->vf_init_count) return true; if (vf_out) *vf_out = efx->vf + vf_i; if (rel_index_out) *rel_index_out = abs_index % efx_vf_size(efx); return false; } static int efx_vfdi_init_evq(struct efx_vf *vf) { struct efx_nic *efx = vf->efx; struct vfdi_req *req = vf->buf.addr; unsigned vf_evq = req->u.init_evq.index; unsigned buf_count = req->u.init_evq.buf_count; unsigned abs_evq = abs_index(vf, vf_evq); unsigned buftbl = EFX_BUFTBL_EVQ_BASE(vf, vf_evq); efx_oword_t reg; if (bad_vf_index(efx, vf_evq) || bad_buf_count(buf_count, EFX_MAX_VF_EVQ_SIZE)) { if (net_ratelimit()) netif_err(efx, hw, efx->net_dev, "ERROR: Invalid INIT_EVQ from %s: evq %d bufs %d\n", vf->pci_name, vf_evq, buf_count); return VFDI_RC_EINVAL; } efx_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count); EFX_POPULATE_OWORD_3(reg, FRF_CZ_TIMER_Q_EN, 1, FRF_CZ_HOST_NOTIFY_MODE, 0, FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, abs_evq); EFX_POPULATE_OWORD_3(reg, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, __ffs(buf_count), FRF_AZ_EVQ_BUF_BASE_ID, buftbl); efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq); if (vf_evq == 0) { memcpy(vf->evq0_addrs, req->u.init_evq.addr, buf_count * sizeof(u64)); vf->evq0_count = buf_count; } return VFDI_RC_SUCCESS; } static int efx_vfdi_init_rxq(struct efx_vf *vf) { struct efx_nic *efx = vf->efx; struct vfdi_req *req = vf->buf.addr; unsigned vf_rxq = req->u.init_rxq.index; unsigned vf_evq = req->u.init_rxq.evq; unsigned buf_count = req->u.init_rxq.buf_count; unsigned buftbl = EFX_BUFTBL_RXQ_BASE(vf, vf_rxq); unsigned label; efx_oword_t reg; if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) || bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) { if (net_ratelimit()) netif_err(efx, hw, 
efx->net_dev, "ERROR: Invalid INIT_RXQ from %s: rxq %d evq %d " "buf_count %d\n", vf->pci_name, vf_rxq, vf_evq, buf_count); return VFDI_RC_EINVAL; } if (__test_and_set_bit(req->u.init_rxq.index, vf->rxq_mask)) ++vf->rxq_count; efx_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count); label = req->u.init_rxq.label & EFX_FIELD_MASK(FRF_AZ_RX_DESCQ_LABEL); EFX_POPULATE_OWORD_6(reg, FRF_AZ_RX_DESCQ_BUF_BASE_ID, buftbl, FRF_AZ_RX_DESCQ_EVQ_ID, abs_index(vf, vf_evq), FRF_AZ_RX_DESCQ_LABEL, label, FRF_AZ_RX_DESCQ_SIZE, __ffs(buf_count), FRF_AZ_RX_DESCQ_JUMBO, !!(req->u.init_rxq.flags & VFDI_RXQ_FLAG_SCATTER_EN), FRF_AZ_RX_DESCQ_EN, 1); efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL, abs_index(vf, vf_rxq)); return VFDI_RC_SUCCESS; } static int efx_vfdi_init_txq(struct efx_vf *vf) { struct efx_nic *efx = vf->efx; struct vfdi_req *req = vf->buf.addr; unsigned vf_txq = req->u.init_txq.index; unsigned vf_evq = req->u.init_txq.evq; unsigned buf_count = req->u.init_txq.buf_count; unsigned buftbl = EFX_BUFTBL_TXQ_BASE(vf, vf_txq); unsigned label, eth_filt_en; efx_oword_t reg; if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_txq) || vf_txq >= vf_max_tx_channels || bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) { if (net_ratelimit()) netif_err(efx, hw, efx->net_dev, "ERROR: Invalid INIT_TXQ from %s: txq %d evq %d " "buf_count %d\n", vf->pci_name, vf_txq, vf_evq, buf_count); return VFDI_RC_EINVAL; } mutex_lock(&vf->txq_lock); if (__test_and_set_bit(req->u.init_txq.index, vf->txq_mask)) ++vf->txq_count; mutex_unlock(&vf->txq_lock); efx_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count); eth_filt_en = vf->tx_filter_mode == VF_TX_FILTER_ON; label = req->u.init_txq.label & EFX_FIELD_MASK(FRF_AZ_TX_DESCQ_LABEL); EFX_POPULATE_OWORD_8(reg, FRF_CZ_TX_DPT_Q_MASK_WIDTH, min(efx->vi_scale, 1U), FRF_CZ_TX_DPT_ETH_FILT_EN, eth_filt_en, FRF_AZ_TX_DESCQ_EN, 1, FRF_AZ_TX_DESCQ_BUF_BASE_ID, buftbl, FRF_AZ_TX_DESCQ_EVQ_ID, abs_index(vf, vf_evq), FRF_AZ_TX_DESCQ_LABEL, label, 
FRF_AZ_TX_DESCQ_SIZE, __ffs(buf_count), FRF_BZ_TX_NON_IP_DROP_DIS, 1); efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL, abs_index(vf, vf_txq)); return VFDI_RC_SUCCESS; } /* Returns true when efx_vfdi_fini_all_queues should wake */ static bool efx_vfdi_flush_wake(struct efx_vf *vf) { /* Ensure that all updates are visible to efx_vfdi_fini_all_queues() */ smp_mb(); return (!vf->txq_count && !vf->rxq_count) || atomic_read(&vf->rxq_retry_count); } static void efx_vfdi_flush_clear(struct efx_vf *vf) { memset(vf->txq_mask, 0, sizeof(vf->txq_mask)); vf->txq_count = 0; memset(vf->rxq_mask, 0, sizeof(vf->rxq_mask)); vf->rxq_count = 0; memset(vf->rxq_retry_mask, 0, sizeof(vf->rxq_retry_mask)); atomic_set(&vf->rxq_retry_count, 0); } static int efx_vfdi_fini_all_queues(struct efx_vf *vf) { struct efx_nic *efx = vf->efx; efx_oword_t reg; unsigned count = efx_vf_size(efx); unsigned vf_offset = EFX_VI_BASE + vf->index * efx_vf_size(efx); unsigned timeout = HZ; unsigned index, rxqs_count; __le32 *rxqs; int rc; rxqs = kmalloc(count * sizeof(*rxqs), GFP_KERNEL); if (rxqs == NULL) return VFDI_RC_ENOMEM; rtnl_lock(); if (efx->fc_disable++ == 0) efx_mcdi_set_mac(efx); rtnl_unlock(); /* Flush all the initialized queues */ rxqs_count = 0; for (index = 0; index < count; ++index) { if (test_bit(index, vf->txq_mask)) { EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, FRF_AZ_TX_FLUSH_DESCQ, vf_offset + index); efx_writeo(efx, &reg, FR_AZ_TX_FLUSH_DESCQ); } if (test_bit(index, vf->rxq_mask)) rxqs[rxqs_count++] = cpu_to_le32(vf_offset + index); } atomic_set(&vf->rxq_retry_count, 0); while (timeout && (vf->rxq_count || vf->txq_count)) { rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)rxqs, rxqs_count * sizeof(*rxqs), NULL, 0, NULL); WARN_ON(rc < 0); timeout = wait_event_timeout(vf->flush_waitq, efx_vfdi_flush_wake(vf), timeout); rxqs_count = 0; for (index = 0; index < count; ++index) { if (test_and_clear_bit(index, vf->rxq_retry_mask)) { atomic_dec(&vf->rxq_retry_count); 
rxqs[rxqs_count++] = cpu_to_le32(vf_offset + index); } } } rtnl_lock(); if (--efx->fc_disable == 0) efx_mcdi_set_mac(efx); rtnl_unlock(); /* Irrespective of success/failure, fini the queues */ EFX_ZERO_OWORD(reg); for (index = 0; index < count; ++index) { efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL, vf_offset + index); efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL, vf_offset + index); efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, vf_offset + index); efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, vf_offset + index); } efx_sriov_bufs(efx, vf->buftbl_base, NULL, EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx)); kfree(rxqs); efx_vfdi_flush_clear(vf); vf->evq0_count = 0; return timeout ? 0 : VFDI_RC_ETIMEDOUT; } static int efx_vfdi_insert_filter(struct efx_vf *vf) { struct efx_nic *efx = vf->efx; struct vfdi_req *req = vf->buf.addr; unsigned vf_rxq = req->u.mac_filter.rxq; unsigned flags; if (bad_vf_index(efx, vf_rxq) || vf->rx_filtering) { if (net_ratelimit()) netif_err(efx, hw, efx->net_dev, "ERROR: Invalid INSERT_FILTER from %s: rxq %d " "flags 0x%x\n", vf->pci_name, vf_rxq, req->u.mac_filter.flags); return VFDI_RC_EINVAL; } flags = 0; if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_RSS) flags |= EFX_FILTER_FLAG_RX_RSS; if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_SCATTER) flags |= EFX_FILTER_FLAG_RX_SCATTER; vf->rx_filter_flags = flags; vf->rx_filter_qid = vf_rxq; vf->rx_filtering = true; efx_sriov_reset_rx_filter(vf); queue_work(vfdi_workqueue, &efx->peer_work); return VFDI_RC_SUCCESS; } static int efx_vfdi_remove_all_filters(struct efx_vf *vf) { vf->rx_filtering = false; efx_sriov_reset_rx_filter(vf); queue_work(vfdi_workqueue, &vf->efx->peer_work); return VFDI_RC_SUCCESS; } static int efx_vfdi_set_status_page(struct efx_vf *vf) { struct efx_nic *efx = vf->efx; struct vfdi_req *req = vf->buf.addr; u64 page_count = req->u.set_status_page.peer_page_count; u64 max_page_count = (EFX_PAGE_SIZE - offsetof(struct vfdi_req, u.set_status_page.peer_page_addr[0])) 
/ sizeof(req->u.set_status_page.peer_page_addr[0]); if (!req->u.set_status_page.dma_addr || page_count > max_page_count) { if (net_ratelimit()) netif_err(efx, hw, efx->net_dev, "ERROR: Invalid SET_STATUS_PAGE from %s\n", vf->pci_name); return VFDI_RC_EINVAL; } mutex_lock(&efx->local_lock); mutex_lock(&vf->status_lock); vf->status_addr = req->u.set_status_page.dma_addr; kfree(vf->peer_page_addrs); vf->peer_page_addrs = NULL; vf->peer_page_count = 0; if (page_count) { vf->peer_page_addrs = kcalloc(page_count, sizeof(u64), GFP_KERNEL); if (vf->peer_page_addrs) { memcpy(vf->peer_page_addrs, req->u.set_status_page.peer_page_addr, page_count * sizeof(u64)); vf->peer_page_count = page_count; } } __efx_sriov_push_vf_status(vf); mutex_unlock(&vf->status_lock); mutex_unlock(&efx->local_lock); return VFDI_RC_SUCCESS; } static int efx_vfdi_clear_status_page(struct efx_vf *vf) { mutex_lock(&vf->status_lock); vf->status_addr = 0; mutex_unlock(&vf->status_lock); return VFDI_RC_SUCCESS; } typedef int (*efx_vfdi_op_t)(struct efx_vf *vf); static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = { [VFDI_OP_INIT_EVQ] = efx_vfdi_init_evq, [VFDI_OP_INIT_TXQ] = efx_vfdi_init_txq, [VFDI_OP_INIT_RXQ] = efx_vfdi_init_rxq, [VFDI_OP_FINI_ALL_QUEUES] = efx_vfdi_fini_all_queues, [VFDI_OP_INSERT_FILTER] = efx_vfdi_insert_filter, [VFDI_OP_REMOVE_ALL_FILTERS] = efx_vfdi_remove_all_filters, [VFDI_OP_SET_STATUS_PAGE] = efx_vfdi_set_status_page, [VFDI_OP_CLEAR_STATUS_PAGE] = efx_vfdi_clear_status_page, }; static void efx_sriov_vfdi(struct work_struct *work) { struct efx_vf *vf = container_of(work, struct efx_vf, req); struct efx_nic *efx = vf->efx; struct vfdi_req *req = vf->buf.addr; struct efx_memcpy_req copy[2]; int rc; /* Copy this page into the local address space */ memset(copy, '\0', sizeof(copy)); copy[0].from_rid = vf->pci_rid; copy[0].from_addr = vf->req_addr; copy[0].to_rid = efx->pci_dev->devfn; copy[0].to_addr = vf->buf.dma_addr; copy[0].length = EFX_PAGE_SIZE; rc = efx_sriov_memcpy(efx, 
copy, 1); if (rc) { /* If we can't get the request, we can't reply to the caller */ if (net_ratelimit()) netif_err(efx, hw, efx->net_dev, "ERROR: Unable to fetch VFDI request from %s rc %d\n", vf->pci_name, -rc); vf->busy = false; return; } if (req->op < VFDI_OP_LIMIT && vfdi_ops[req->op] != NULL) { rc = vfdi_ops[req->op](vf); if (rc == 0) { netif_dbg(efx, hw, efx->net_dev, "vfdi request %d from %s ok\n", req->op, vf->pci_name); } } else { netif_dbg(efx, hw, efx->net_dev, "ERROR: Unrecognised request %d from VF %s addr " "%llx\n", req->op, vf->pci_name, (unsigned long long)vf->req_addr); rc = VFDI_RC_EOPNOTSUPP; } /* Allow subsequent VF requests */ vf->busy = false; smp_wmb(); /* Respond to the request */ req->rc = rc; req->op = VFDI_OP_RESPONSE; memset(copy, '\0', sizeof(copy)); copy[0].from_buf = &req->rc; copy[0].to_rid = vf->pci_rid; copy[0].to_addr = vf->req_addr + offsetof(struct vfdi_req, rc); copy[0].length = sizeof(req->rc); copy[1].from_buf = &req->op; copy[1].to_rid = vf->pci_rid; copy[1].to_addr = vf->req_addr + offsetof(struct vfdi_req, op); copy[1].length = sizeof(req->op); (void) efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy)); } /* After a reset the event queues inside the guests no longer exist. Fill the * event ring in guest memory with VFDI reset events, then (re-initialise) the * event queue to raise an interrupt. The guest driver will then recover. 
*/ static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer) { struct efx_nic *efx = vf->efx; struct efx_memcpy_req copy_req[4]; efx_qword_t event; unsigned int pos, count, k, buftbl, abs_evq; efx_oword_t reg; efx_dword_t ptr; int rc; BUG_ON(buffer->len != EFX_PAGE_SIZE); if (!vf->evq0_count) return; BUG_ON(vf->evq0_count & (vf->evq0_count - 1)); mutex_lock(&vf->status_lock); EFX_POPULATE_QWORD_3(event, FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV, VFDI_EV_SEQ, vf->msg_seqno, VFDI_EV_TYPE, VFDI_EV_TYPE_RESET); vf->msg_seqno++; for (pos = 0; pos < EFX_PAGE_SIZE; pos += sizeof(event)) memcpy(buffer->addr + pos, &event, sizeof(event)); for (pos = 0; pos < vf->evq0_count; pos += count) { count = min_t(unsigned, vf->evq0_count - pos, ARRAY_SIZE(copy_req)); for (k = 0; k < count; k++) { copy_req[k].from_buf = NULL; copy_req[k].from_rid = efx->pci_dev->devfn; copy_req[k].from_addr = buffer->dma_addr; copy_req[k].to_rid = vf->pci_rid; copy_req[k].to_addr = vf->evq0_addrs[pos + k]; copy_req[k].length = EFX_PAGE_SIZE; } rc = efx_sriov_memcpy(efx, copy_req, count); if (rc) { if (net_ratelimit()) netif_err(efx, hw, efx->net_dev, "ERROR: Unable to notify %s of reset" ": %d\n", vf->pci_name, -rc); break; } } /* Reinitialise, arm and trigger evq0 */ abs_evq = abs_index(vf, 0); buftbl = EFX_BUFTBL_EVQ_BASE(vf, 0); efx_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count); EFX_POPULATE_OWORD_3(reg, FRF_CZ_TIMER_Q_EN, 1, FRF_CZ_HOST_NOTIFY_MODE, 0, FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, abs_evq); EFX_POPULATE_OWORD_3(reg, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, __ffs(vf->evq0_count), FRF_AZ_EVQ_BUF_BASE_ID, buftbl); efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq); EFX_POPULATE_DWORD_1(ptr, FRF_AZ_EVQ_RPTR, 0); efx_writed_table(efx, &ptr, FR_BZ_EVQ_RPTR, abs_evq); mutex_unlock(&vf->status_lock); } static void efx_sriov_reset_vf_work(struct work_struct *work) { struct efx_vf *vf = container_of(work, struct efx_vf, 
req); struct efx_nic *efx = vf->efx; struct efx_buffer buf; if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE)) { efx_sriov_reset_vf(vf, &buf); efx_nic_free_buffer(efx, &buf); } } static void efx_sriov_handle_no_channel(struct efx_nic *efx) { netif_err(efx, drv, efx->net_dev, "ERROR: IOV requires MSI-X and 1 additional interrupt" "vector. IOV disabled\n"); efx->vf_count = 0; } static int efx_sriov_probe_channel(struct efx_channel *channel) { channel->efx->vfdi_channel = channel; return 0; } static void efx_sriov_get_channel_name(struct efx_channel *channel, char *buf, size_t len) { snprintf(buf, len, "%s-iov", channel->efx->name); } static const struct efx_channel_type efx_sriov_channel_type = { .handle_no_channel = efx_sriov_handle_no_channel, .pre_probe = efx_sriov_probe_channel, .get_name = efx_sriov_get_channel_name, /* no copy operation; channel must not be reallocated */ .keep_eventq = true, }; void efx_sriov_probe(struct efx_nic *efx) { unsigned count; if (!max_vfs) return; if (efx_sriov_cmd(efx, false, &efx->vi_scale, &count)) return; if (count > 0 && count > max_vfs) count = max_vfs; /* efx_nic_dimension_resources() will reduce vf_count as appopriate */ efx->vf_count = count; efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_sriov_channel_type; } /* Copy the list of individual addresses into the vfdi_status.peers * array and auxillary pages, protected by %local_lock. Drop that lock * and then broadcast the address list to every VF. 
*/ static void efx_sriov_peer_work(struct work_struct *data) { struct efx_nic *efx = container_of(data, struct efx_nic, peer_work); struct vfdi_status *vfdi_status = efx->vfdi_status.addr; struct efx_vf *vf; struct efx_local_addr *local_addr; struct vfdi_endpoint *peer; struct efx_endpoint_page *epp; struct list_head pages; unsigned int peer_space; unsigned int peer_count; unsigned int pos; mutex_lock(&efx->local_lock); /* Move the existing peer pages off %local_page_list */ INIT_LIST_HEAD(&pages); list_splice_tail_init(&efx->local_page_list, &pages); /* Populate the VF addresses starting from entry 1 (entry 0 is * the PF address) */ peer = vfdi_status->peers + 1; peer_space = ARRAY_SIZE(vfdi_status->peers) - 1; peer_count = 1; for (pos = 0; pos < efx->vf_count; ++pos) { vf = efx->vf + pos; mutex_lock(&vf->status_lock); if (vf->rx_filtering && !is_zero_ether_addr(vf->addr.mac_addr)) { *peer++ = vf->addr; ++peer_count; --peer_space; BUG_ON(peer_space == 0); } mutex_unlock(&vf->status_lock); } /* Fill the remaining addresses */ list_for_each_entry(local_addr, &efx->local_addr_list, link) { memcpy(peer->mac_addr, local_addr->addr, ETH_ALEN); peer->tci = 0; ++peer; ++peer_count; if (--peer_space == 0) { if (list_empty(&pages)) { epp = kmalloc(sizeof(*epp), GFP_KERNEL); if (!epp) break; epp->ptr = dma_alloc_coherent( &efx->pci_dev->dev, EFX_PAGE_SIZE, &epp->addr, GFP_KERNEL); if (!epp->ptr) { kfree(epp); break; } } else { epp = list_first_entry( &pages, struct efx_endpoint_page, link); list_del(&epp->link); } list_add_tail(&epp->link, &efx->local_page_list); peer = (struct vfdi_endpoint *)epp->ptr; peer_space = EFX_PAGE_SIZE / sizeof(struct vfdi_endpoint); } } vfdi_status->peer_count = peer_count; mutex_unlock(&efx->local_lock); /* Free any now unused endpoint pages */ while (!list_empty(&pages)) { epp = list_first_entry( &pages, struct efx_endpoint_page, link); list_del(&epp->link); dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE, epp->ptr, epp->addr); kfree(epp); 
} /* Finally, push the pages */ for (pos = 0; pos < efx->vf_count; ++pos) { vf = efx->vf + pos; mutex_lock(&vf->status_lock); if (vf->status_addr) __efx_sriov_push_vf_status(vf); mutex_unlock(&vf->status_lock); } } static void efx_sriov_free_local(struct efx_nic *efx) { struct efx_local_addr *local_addr; struct efx_endpoint_page *epp; while (!list_empty(&efx->local_addr_list)) { local_addr = list_first_entry(&efx->local_addr_list, struct efx_local_addr, link); list_del(&local_addr->link); kfree(local_addr); } while (!list_empty(&efx->local_page_list)) { epp = list_first_entry(&efx->local_page_list, struct efx_endpoint_page, link); list_del(&epp->link); dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE, epp->ptr, epp->addr); kfree(epp); } } static int efx_sriov_vf_alloc(struct efx_nic *efx) { unsigned index; struct efx_vf *vf; efx->vf = kzalloc(sizeof(struct efx_vf) * efx->vf_count, GFP_KERNEL); if (!efx->vf) return -ENOMEM; for (index = 0; index < efx->vf_count; ++index) { vf = efx->vf + index; vf->efx = efx; vf->index = index; vf->rx_filter_id = -1; vf->tx_filter_mode = VF_TX_FILTER_AUTO; vf->tx_filter_id = -1; INIT_WORK(&vf->req, efx_sriov_vfdi); INIT_WORK(&vf->reset_work, efx_sriov_reset_vf_work); init_waitqueue_head(&vf->flush_waitq); mutex_init(&vf->status_lock); mutex_init(&vf->txq_lock); } return 0; } static void efx_sriov_vfs_fini(struct efx_nic *efx) { struct efx_vf *vf; unsigned int pos; for (pos = 0; pos < efx->vf_count; ++pos) { vf = efx->vf + pos; efx_nic_free_buffer(efx, &vf->buf); kfree(vf->peer_page_addrs); vf->peer_page_addrs = NULL; vf->peer_page_count = 0; vf->evq0_count = 0; } } static int efx_sriov_vfs_init(struct efx_nic *efx) { struct pci_dev *pci_dev = efx->pci_dev; unsigned index, devfn, sriov, buftbl_base; u16 offset, stride; struct efx_vf *vf; int rc; sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV); if (!sriov) return -ENOENT; pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_OFFSET, &offset); 
pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_STRIDE, &stride); buftbl_base = efx->vf_buftbl_base; devfn = pci_dev->devfn + offset; for (index = 0; index < efx->vf_count; ++index) { vf = efx->vf + index; /* Reserve buffer entries */ vf->buftbl_base = buftbl_base; buftbl_base += EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx); vf->pci_rid = devfn; snprintf(vf->pci_name, sizeof(vf->pci_name), "%04x:%02x:%02x.%d", pci_domain_nr(pci_dev->bus), pci_dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE); if (rc) goto fail; devfn += stride; } return 0; fail: efx_sriov_vfs_fini(efx); return rc; } int efx_sriov_init(struct efx_nic *efx) { struct net_device *net_dev = efx->net_dev; struct vfdi_status *vfdi_status; int rc; /* Ensure there's room for vf_channel */ BUILD_BUG_ON(EFX_MAX_CHANNELS + 1 >= EFX_VI_BASE); /* Ensure that VI_BASE is aligned on VI_SCALE */ BUILD_BUG_ON(EFX_VI_BASE & ((1 << EFX_VI_SCALE_MAX) - 1)); if (efx->vf_count == 0) return 0; rc = efx_sriov_cmd(efx, true, NULL, NULL); if (rc) goto fail_cmd; rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status)); if (rc) goto fail_status; vfdi_status = efx->vfdi_status.addr; memset(vfdi_status, 0, sizeof(*vfdi_status)); vfdi_status->version = 1; vfdi_status->length = sizeof(*vfdi_status); vfdi_status->max_tx_channels = vf_max_tx_channels; vfdi_status->vi_scale = efx->vi_scale; vfdi_status->rss_rxq_count = efx->rss_spread; vfdi_status->peer_count = 1 + efx->vf_count; vfdi_status->timer_quantum_ns = efx->timer_quantum_ns; rc = efx_sriov_vf_alloc(efx); if (rc) goto fail_alloc; mutex_init(&efx->local_lock); INIT_WORK(&efx->peer_work, efx_sriov_peer_work); INIT_LIST_HEAD(&efx->local_addr_list); INIT_LIST_HEAD(&efx->local_page_list); rc = efx_sriov_vfs_init(efx); if (rc) goto fail_vfs; rtnl_lock(); memcpy(vfdi_status->peers[0].mac_addr, net_dev->dev_addr, ETH_ALEN); efx->vf_init_count = efx->vf_count; rtnl_unlock(); efx_sriov_usrev(efx, true); /* At this 
point we must be ready to accept VFDI requests */ rc = pci_enable_sriov(efx->pci_dev, efx->vf_count); if (rc) goto fail_pci; netif_info(efx, probe, net_dev, "enabled SR-IOV for %d VFs, %d VI per VF\n", efx->vf_count, efx_vf_size(efx)); return 0; fail_pci: efx_sriov_usrev(efx, false); rtnl_lock(); efx->vf_init_count = 0; rtnl_unlock(); efx_sriov_vfs_fini(efx); fail_vfs: cancel_work_sync(&efx->peer_work); efx_sriov_free_local(efx); kfree(efx->vf); fail_alloc: efx_nic_free_buffer(efx, &efx->vfdi_status); fail_status: efx_sriov_cmd(efx, false, NULL, NULL); fail_cmd: return rc; } void efx_sriov_fini(struct efx_nic *efx) { struct efx_vf *vf; unsigned int pos; if (efx->vf_init_count == 0) return; /* Disable all interfaces to reconfiguration */ BUG_ON(efx->vfdi_channel->enabled); efx_sriov_usrev(efx, false); rtnl_lock(); efx->vf_init_count = 0; rtnl_unlock(); /* Flush all reconfiguration work */ for (pos = 0; pos < efx->vf_count; ++pos) { vf = efx->vf + pos; cancel_work_sync(&vf->req); cancel_work_sync(&vf->reset_work); } cancel_work_sync(&efx->peer_work); pci_disable_sriov(efx->pci_dev); /* Tear down back-end state */ efx_sriov_vfs_fini(efx); efx_sriov_free_local(efx); kfree(efx->vf); efx_nic_free_buffer(efx, &efx->vfdi_status); efx_sriov_cmd(efx, false, NULL, NULL); } void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event) { struct efx_nic *efx = channel->efx; struct efx_vf *vf; unsigned qid, seq, type, data; qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID); /* USR_EV_REG_VALUE is dword0, so access the VFDI_EV fields directly */ BUILD_BUG_ON(FSF_CZ_USER_EV_REG_VALUE_LBN != 0); seq = EFX_QWORD_FIELD(*event, VFDI_EV_SEQ); type = EFX_QWORD_FIELD(*event, VFDI_EV_TYPE); data = EFX_QWORD_FIELD(*event, VFDI_EV_DATA); netif_vdbg(efx, hw, efx->net_dev, "USR_EV event from qid %d seq 0x%x type %d data 0x%x\n", qid, seq, type, data); if (map_vi_index(efx, qid, &vf, NULL)) return; if (vf->busy) goto error; if (type == VFDI_EV_TYPE_REQ_WORD0) { /* Resynchronise */ 
vf->req_type = VFDI_EV_TYPE_REQ_WORD0; vf->req_seqno = seq + 1; vf->req_addr = 0; } else if (seq != (vf->req_seqno++ & 0xff) || type != vf->req_type) goto error; switch (vf->req_type) { case VFDI_EV_TYPE_REQ_WORD0: case VFDI_EV_TYPE_REQ_WORD1: case VFDI_EV_TYPE_REQ_WORD2: vf->req_addr |= (u64)data << (vf->req_type << 4); ++vf->req_type; return; case VFDI_EV_TYPE_REQ_WORD3: vf->req_addr |= (u64)data << 48; vf->req_type = VFDI_EV_TYPE_REQ_WORD0; vf->busy = true; queue_work(vfdi_workqueue, &vf->req); return; } error: if (net_ratelimit()) netif_err(efx, hw, efx->net_dev, "ERROR: Screaming VFDI request from %s\n", vf->pci_name); /* Reset the request and sequence number */ vf->req_type = VFDI_EV_TYPE_REQ_WORD0; vf->req_seqno = seq + 1; } void efx_sriov_flr(struct efx_nic *efx, unsigned vf_i) { struct efx_vf *vf; if (vf_i > efx->vf_init_count) return; vf = efx->vf + vf_i; netif_info(efx, hw, efx->net_dev, "FLR on VF %s\n", vf->pci_name); vf->status_addr = 0; efx_vfdi_remove_all_filters(vf); efx_vfdi_flush_clear(vf); vf->evq0_count = 0; } void efx_sriov_mac_address_changed(struct efx_nic *efx) { struct vfdi_status *vfdi_status = efx->vfdi_status.addr; if (!efx->vf_init_count) return; memcpy(vfdi_status->peers[0].mac_addr, efx->net_dev->dev_addr, ETH_ALEN); queue_work(vfdi_workqueue, &efx->peer_work); } void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) { struct efx_vf *vf; unsigned queue, qid; queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); if (map_vi_index(efx, queue, &vf, &qid)) return; /* Ignore flush completions triggered by an FLR */ if (!test_bit(qid, vf->txq_mask)) return; __clear_bit(qid, vf->txq_mask); --vf->txq_count; if (efx_vfdi_flush_wake(vf)) wake_up(&vf->flush_waitq); } void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) { struct efx_vf *vf; unsigned ev_failed, queue, qid; queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); ev_failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); if 
(map_vi_index(efx, queue, &vf, &qid)) return; if (!test_bit(qid, vf->rxq_mask)) return; if (ev_failed) { set_bit(qid, vf->rxq_retry_mask); atomic_inc(&vf->rxq_retry_count); } else { __clear_bit(qid, vf->rxq_mask); --vf->rxq_count; } if (efx_vfdi_flush_wake(vf)) wake_up(&vf->flush_waitq); } /* Called from napi. Schedule the reset work item */ void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq) { struct efx_vf *vf; unsigned int rel; if (map_vi_index(efx, dmaq, &vf, &rel)) return; if (net_ratelimit()) netif_err(efx, hw, efx->net_dev, "VF %d DMA Q %d reports descriptor fetch error.\n", vf->index, rel); queue_work(vfdi_workqueue, &vf->reset_work); } /* Reset all VFs */ void efx_sriov_reset(struct efx_nic *efx) { unsigned int vf_i; struct efx_buffer buf; struct efx_vf *vf; ASSERT_RTNL(); if (efx->vf_init_count == 0) return; efx_sriov_usrev(efx, true); (void)efx_sriov_cmd(efx, true, NULL, NULL); if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE)) return; for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) { vf = efx->vf + vf_i; efx_sriov_reset_vf(vf, &buf); } efx_nic_free_buffer(efx, &buf); } int efx_init_sriov(void) { /* A single threaded workqueue is sufficient. 
efx_sriov_vfdi() and * efx_sriov_peer_work() spend almost all their time sleeping for * MCDI to complete anyway */ vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi"); if (!vfdi_workqueue) return -ENOMEM; return 0; } void efx_fini_sriov(void) { destroy_workqueue(vfdi_workqueue); } int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_vf *vf; if (vf_i >= efx->vf_init_count) return -EINVAL; vf = efx->vf + vf_i; mutex_lock(&vf->status_lock); memcpy(vf->addr.mac_addr, mac, ETH_ALEN); __efx_sriov_update_vf_addr(vf); mutex_unlock(&vf->status_lock); return 0; } int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan, u8 qos) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_vf *vf; u16 tci; if (vf_i >= efx->vf_init_count) return -EINVAL; vf = efx->vf + vf_i; mutex_lock(&vf->status_lock); tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT); vf->addr.tci = htons(tci); __efx_sriov_update_vf_addr(vf); mutex_unlock(&vf->status_lock); return 0; } int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i, bool spoofchk) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_vf *vf; int rc; if (vf_i >= efx->vf_init_count) return -EINVAL; vf = efx->vf + vf_i; mutex_lock(&vf->txq_lock); if (vf->txq_count == 0) { vf->tx_filter_mode = spoofchk ? 
VF_TX_FILTER_ON : VF_TX_FILTER_OFF; rc = 0; } else { /* This cannot be changed while TX queues are running */ rc = -EBUSY; } mutex_unlock(&vf->txq_lock); return rc; } int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i, struct ifla_vf_info *ivi) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_vf *vf; u16 tci; if (vf_i >= efx->vf_init_count) return -EINVAL; vf = efx->vf + vf_i; ivi->vf = vf_i; memcpy(ivi->mac, vf->addr.mac_addr, ETH_ALEN); ivi->tx_rate = 0; tci = ntohs(vf->addr.tci); ivi->vlan = tci & VLAN_VID_MASK; ivi->qos = (tci >> VLAN_PRIO_SHIFT) & 0x7; ivi->spoofchk = vf->tx_filter_mode == VF_TX_FILTER_ON; return 0; }
gpl-2.0
LEPT-Development/Android_kernel_lge_C50
arch/arm/mach-omap2/clock2xxx.c
3722
1315
/* * clock2xxx.c - OMAP2xxx-specific clock integration code * * Copyright (C) 2005-2008 Texas Instruments, Inc. * Copyright (C) 2004-2010 Nokia Corporation * * Contacts: * Richard Woodruff <r-woodruff2@ti.com> * Paul Walmsley * * Based on earlier work by Tuukka Tikkanen, Tony Lindgren, * Gordon McNutt and RidgeRun, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #undef DEBUG #include <linux/kernel.h> #include <linux/errno.h> #include <linux/clk.h> #include <linux/io.h> #include "soc.h" #include "clock.h" #include "clock2xxx.h" #include "cm.h" #include "cm-regbits-24xx.h" struct clk_hw *dclk_hw; /* * Omap24xx specific clock functions */ /* * Switch the MPU rate if specified on cmdline. We cannot do this * early until cmdline is parsed. XXX This should be removed from the * clock code and handled by the OPP layer code in the near future. */ static int __init omap2xxx_clk_arch_init(void) { int ret; if (!cpu_is_omap24xx()) return 0; ret = omap2_clk_switch_mpurate_at_boot("virt_prcm_set"); if (!ret) omap2_clk_print_new_rates("sys_ck", "dpll_ck", "mpu_ck"); return ret; } omap_arch_initcall(omap2xxx_clk_arch_init);
gpl-2.0
chevanlol360/Android_Kernel_LGE_Fx1
arch/ia64/kernel/smpboot.c
3978
22145
/* * SMP boot-related support * * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> * Copyright (C) 2001, 2004-2005 Intel Corp * Rohit Seth <rohit.seth@intel.com> * Suresh Siddha <suresh.b.siddha@intel.com> * Gordon Jin <gordon.jin@intel.com> * Ashok Raj <ashok.raj@intel.com> * * 01/05/16 Rohit Seth <rohit.seth@intel.com> Moved SMP booting functions from smp.c to here. * 01/04/27 David Mosberger <davidm@hpl.hp.com> Added ITC synching code. * 02/07/31 David Mosberger <davidm@hpl.hp.com> Switch over to hotplug-CPU boot-sequence. * smp_boot_cpus()/smp_commence() is replaced by * smp_prepare_cpus()/__cpu_up()/smp_cpus_done(). * 04/06/21 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support * 04/12/26 Jin Gordon <gordon.jin@intel.com> * 04/12/26 Rohit Seth <rohit.seth@intel.com> * Add multi-threading and multi-core detection * 05/01/30 Suresh Siddha <suresh.b.siddha@intel.com> * Setup cpu_sibling_map and cpu_core_map */ #include <linux/module.h> #include <linux/acpi.h> #include <linux/bootmem.h> #include <linux/cpu.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/kernel_stat.h> #include <linux/mm.h> #include <linux/notifier.h> #include <linux/smp.h> #include <linux/spinlock.h> #include <linux/efi.h> #include <linux/percpu.h> #include <linux/bitops.h> #include <linux/atomic.h> #include <asm/cache.h> #include <asm/current.h> #include <asm/delay.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/machvec.h> #include <asm/mca.h> #include <asm/page.h> #include <asm/paravirt.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/ptrace.h> #include <asm/sal.h> #include <asm/tlbflush.h> #include <asm/unistd.h> #include <asm/sn/arch.h> #define SMP_DEBUG 0 #if SMP_DEBUG #define Dprintk(x...) printk(x) #else #define Dprintk(x...) 
#endif #ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_PERMIT_BSP_REMOVE #define bsp_remove_ok 1 #else #define bsp_remove_ok 0 #endif /* * Store all idle threads, this can be reused instead of creating * a new thread. Also avoids complicated thread destroy functionality * for idle threads. */ struct task_struct *idle_thread_array[NR_CPUS]; /* * Global array allocated for NR_CPUS at boot time */ struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS]; /* * start_ap in head.S uses this to store current booting cpu * info. */ struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0]; #define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)]); #define get_idle_for_cpu(x) (idle_thread_array[(x)]) #define set_idle_for_cpu(x,p) (idle_thread_array[(x)] = (p)) #else #define get_idle_for_cpu(x) (NULL) #define set_idle_for_cpu(x,p) #define set_brendez_area(x) #endif /* * ITC synchronization related stuff: */ #define MASTER (0) #define SLAVE (SMP_CACHE_BYTES/8) #define NUM_ROUNDS 64 /* magic value */ #define NUM_ITERS 5 /* likewise */ static DEFINE_SPINLOCK(itc_sync_lock); static volatile unsigned long go[SLAVE + 1]; #define DEBUG_ITC_SYNC 0 extern void start_ap (void); extern unsigned long ia64_iobase; struct task_struct *task_for_booting_cpu; /* * State for each CPU */ DEFINE_PER_CPU(int, cpu_state); cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned; EXPORT_SYMBOL(cpu_core_map); DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map); EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); int smp_num_siblings = 1; /* which logical CPU number maps to which CPU (physical APIC ID) */ volatile int ia64_cpu_to_sapicid[NR_CPUS]; EXPORT_SYMBOL(ia64_cpu_to_sapicid); static volatile cpumask_t cpu_callin_map; struct smp_boot_data smp_boot_data __initdata; unsigned long ap_wakeup_vector = -1; /* External Int use to wakeup APs */ char __initdata no_int_routing; unsigned char smp_int_redirect; /* are INT and IPI redirectable by the chipset? 
*/ #ifdef CONFIG_FORCE_CPEI_RETARGET #define CPEI_OVERRIDE_DEFAULT (1) #else #define CPEI_OVERRIDE_DEFAULT (0) #endif unsigned int force_cpei_retarget = CPEI_OVERRIDE_DEFAULT; static int __init cmdl_force_cpei(char *str) { int value=0; get_option (&str, &value); force_cpei_retarget = value; return 1; } __setup("force_cpei=", cmdl_force_cpei); static int __init nointroute (char *str) { no_int_routing = 1; printk ("no_int_routing on\n"); return 1; } __setup("nointroute", nointroute); static void fix_b0_for_bsp(void) { #ifdef CONFIG_HOTPLUG_CPU int cpuid; static int fix_bsp_b0 = 1; cpuid = smp_processor_id(); /* * Cache the b0 value on the first AP that comes up */ if (!(fix_bsp_b0 && cpuid)) return; sal_boot_rendez_state[0].br[0] = sal_boot_rendez_state[cpuid].br[0]; printk ("Fixed BSP b0 value from CPU %d\n", cpuid); fix_bsp_b0 = 0; #endif } void sync_master (void *arg) { unsigned long flags, i; go[MASTER] = 0; local_irq_save(flags); { for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) { while (!go[MASTER]) cpu_relax(); go[MASTER] = 0; go[SLAVE] = ia64_get_itc(); } } local_irq_restore(flags); } /* * Return the number of cycles by which our itc differs from the itc on the master * (time-keeper) CPU. A positive number indicates our itc is ahead of the master, * negative that it is behind. 
*/ static inline long get_delta (long *rt, long *master) { unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0; unsigned long tcenter, t0, t1, tm; long i; for (i = 0; i < NUM_ITERS; ++i) { t0 = ia64_get_itc(); go[MASTER] = 1; while (!(tm = go[SLAVE])) cpu_relax(); go[SLAVE] = 0; t1 = ia64_get_itc(); if (t1 - t0 < best_t1 - best_t0) best_t0 = t0, best_t1 = t1, best_tm = tm; } *rt = best_t1 - best_t0; *master = best_tm - best_t0; /* average best_t0 and best_t1 without overflow: */ tcenter = (best_t0/2 + best_t1/2); if (best_t0 % 2 + best_t1 % 2 == 2) ++tcenter; return tcenter - best_tm; } /* * Synchronize ar.itc of the current (slave) CPU with the ar.itc of the MASTER CPU * (normally the time-keeper CPU). We use a closed loop to eliminate the possibility of * unaccounted-for errors (such as getting a machine check in the middle of a calibration * step). The basic idea is for the slave to ask the master what itc value it has and to * read its own itc before and after the master responds. Each iteration gives us three * timestamps: * * slave master * * t0 ---\ * ---\ * ---> * tm * /--- * /--- * t1 <--- * * * The goal is to adjust the slave's ar.itc such that tm falls exactly half-way between t0 * and t1. If we achieve this, the clocks are synchronized provided the interconnect * between the slave and the master is symmetric. Even if the interconnect were * asymmetric, we would still know that the synchronization error is smaller than the * roundtrip latency (t0 - t1). * * When the interconnect is quiet and symmetric, this lets us synchronize the itc to * within one or two cycles. However, we can only *guarantee* that the synchronization is * accurate to within a round-trip time, which is typically in the range of several * hundred cycles (e.g., ~500 cycles). In practice, this means that the itc's are usually * almost perfectly synchronized, but we shouldn't assume that the accuracy is much better * than half a micro second or so. 
*/ void ia64_sync_itc (unsigned int master) { long i, delta, adj, adjust_latency = 0, done = 0; unsigned long flags, rt, master_time_stamp, bound; #if DEBUG_ITC_SYNC struct { long rt; /* roundtrip time */ long master; /* master's timestamp */ long diff; /* difference between midpoint and master's timestamp */ long lat; /* estimate of itc adjustment latency */ } t[NUM_ROUNDS]; #endif /* * Make sure local timer ticks are disabled while we sync. If * they were enabled, we'd have to worry about nasty issues * like setting the ITC ahead of (or a long time before) the * next scheduled tick. */ BUG_ON((ia64_get_itv() & (1 << 16)) == 0); go[MASTER] = 1; if (smp_call_function_single(master, sync_master, NULL, 0) < 0) { printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master); return; } while (go[MASTER]) cpu_relax(); /* wait for master to be ready */ spin_lock_irqsave(&itc_sync_lock, flags); { for (i = 0; i < NUM_ROUNDS; ++i) { delta = get_delta(&rt, &master_time_stamp); if (delta == 0) { done = 1; /* let's lock on to this... */ bound = rt; } if (!done) { if (i > 0) { adjust_latency += -delta; adj = -delta + adjust_latency/4; } else adj = -delta; ia64_set_itc(ia64_get_itc() + adj); } #if DEBUG_ITC_SYNC t[i].rt = rt; t[i].master = master_time_stamp; t[i].diff = delta; t[i].lat = adjust_latency/4; #endif } } spin_unlock_irqrestore(&itc_sync_lock, flags); #if DEBUG_ITC_SYNC for (i = 0; i < NUM_ROUNDS; ++i) printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n", t[i].rt, t[i].master, t[i].diff, t[i].lat); #endif printk(KERN_INFO "CPU %d: synchronized ITC with CPU %u (last diff %ld cycles, " "maxerr %lu cycles)\n", smp_processor_id(), master, delta, rt); } /* * Ideally sets up per-cpu profiling hooks. Doesn't do much now... 
*/ static inline void __devinit smp_setup_percpu_timer (void) { } static void __cpuinit smp_callin (void) { int cpuid, phys_id, itc_master; struct cpuinfo_ia64 *last_cpuinfo, *this_cpuinfo; extern void ia64_init_itm(void); extern volatile int time_keeper_id; #ifdef CONFIG_PERFMON extern void pfm_init_percpu(void); #endif cpuid = smp_processor_id(); phys_id = hard_smp_processor_id(); itc_master = time_keeper_id; if (cpu_online(cpuid)) { printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n", phys_id, cpuid); BUG(); } fix_b0_for_bsp(); /* * numa_node_id() works after this. */ set_numa_node(cpu_to_node_map[cpuid]); set_numa_mem(local_memory_node(cpu_to_node_map[cpuid])); ipi_call_lock_irq(); spin_lock(&vector_lock); /* Setup the per cpu irq handling data structures */ __setup_vector_irq(cpuid); notify_cpu_starting(cpuid); set_cpu_online(cpuid, true); per_cpu(cpu_state, cpuid) = CPU_ONLINE; spin_unlock(&vector_lock); ipi_call_unlock_irq(); smp_setup_percpu_timer(); ia64_mca_cmc_vector_setup(); /* Setup vector on AP */ #ifdef CONFIG_PERFMON pfm_init_percpu(); #endif local_irq_enable(); if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) { /* * Synchronize the ITC with the BP. Need to do this after irqs are * enabled because ia64_sync_itc() calls smp_call_function_single(), which * calls spin_unlock_bh(), which calls spin_unlock_bh(), which calls * local_bh_enable(), which bugs out if irqs are not enabled... */ Dprintk("Going to syncup ITC with ITC Master.\n"); ia64_sync_itc(itc_master); } /* * Get our bogomips. */ ia64_init_itm(); /* * Delay calibration can be skipped if new processor is identical to the * previous processor. 
*/ last_cpuinfo = cpu_data(cpuid - 1); this_cpuinfo = local_cpu_data; if (last_cpuinfo->itc_freq != this_cpuinfo->itc_freq || last_cpuinfo->proc_freq != this_cpuinfo->proc_freq || last_cpuinfo->features != this_cpuinfo->features || last_cpuinfo->revision != this_cpuinfo->revision || last_cpuinfo->family != this_cpuinfo->family || last_cpuinfo->archrev != this_cpuinfo->archrev || last_cpuinfo->model != this_cpuinfo->model) calibrate_delay(); local_cpu_data->loops_per_jiffy = loops_per_jiffy; /* * Allow the master to continue. */ cpu_set(cpuid, cpu_callin_map); Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid); } /* * Activate a secondary processor. head.S calls this. */ int __cpuinit start_secondary (void *unused) { /* Early console may use I/O ports */ ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase)); #ifndef CONFIG_PRINTK_TIME Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id()); #endif efi_map_pal_code(); cpu_init(); preempt_disable(); smp_callin(); cpu_idle(); return 0; } struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs) { return NULL; } struct create_idle { struct work_struct work; struct task_struct *idle; struct completion done; int cpu; }; void __cpuinit do_fork_idle(struct work_struct *work) { struct create_idle *c_idle = container_of(work, struct create_idle, work); c_idle->idle = fork_idle(c_idle->cpu); complete(&c_idle->done); } static int __cpuinit do_boot_cpu (int sapicid, int cpu) { int timeout; struct create_idle c_idle = { .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle), .cpu = cpu, .done = COMPLETION_INITIALIZER(c_idle.done), }; /* * We can't use kernel_thread since we must avoid to * reschedule the child. 
*/ c_idle.idle = get_idle_for_cpu(cpu); if (c_idle.idle) { init_idle(c_idle.idle, cpu); goto do_rest; } schedule_work(&c_idle.work); wait_for_completion(&c_idle.done); if (IS_ERR(c_idle.idle)) panic("failed fork for CPU %d", cpu); set_idle_for_cpu(cpu, c_idle.idle); do_rest: task_for_booting_cpu = c_idle.idle; Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid); set_brendez_area(cpu); platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0); /* * Wait 10s total for the AP to start */ Dprintk("Waiting on callin_map ..."); for (timeout = 0; timeout < 100000; timeout++) { if (cpu_isset(cpu, cpu_callin_map)) break; /* It has booted */ udelay(100); } Dprintk("\n"); if (!cpu_isset(cpu, cpu_callin_map)) { printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid); ia64_cpu_to_sapicid[cpu] = -1; set_cpu_online(cpu, false); /* was set in smp_callin() */ return -EINVAL; } return 0; } static int __init decay (char *str) { int ticks; get_option (&str, &ticks); return 1; } __setup("decay=", decay); /* * Initialize the logical CPU number to SAPICID mapping */ void __init smp_build_cpu_map (void) { int sapicid, cpu, i; int boot_cpu_id = hard_smp_processor_id(); for (cpu = 0; cpu < NR_CPUS; cpu++) { ia64_cpu_to_sapicid[cpu] = -1; } ia64_cpu_to_sapicid[0] = boot_cpu_id; init_cpu_present(cpumask_of(0)); set_cpu_possible(0, true); for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) { sapicid = smp_boot_data.cpu_phys_id[i]; if (sapicid == boot_cpu_id) continue; set_cpu_present(cpu, true); set_cpu_possible(cpu, true); ia64_cpu_to_sapicid[cpu] = sapicid; cpu++; } } /* * Cycle through the APs sending Wakeup IPIs to boot each. 
*/ void __init smp_prepare_cpus (unsigned int max_cpus) { int boot_cpu_id = hard_smp_processor_id(); /* * Initialize the per-CPU profiling counter/multiplier */ smp_setup_percpu_timer(); cpu_set(0, cpu_callin_map); local_cpu_data->loops_per_jiffy = loops_per_jiffy; ia64_cpu_to_sapicid[0] = boot_cpu_id; printk(KERN_INFO "Boot processor id 0x%x/0x%x\n", 0, boot_cpu_id); current_thread_info()->cpu = 0; /* * If SMP should be disabled, then really disable it! */ if (!max_cpus) { printk(KERN_INFO "SMP mode deactivated.\n"); init_cpu_online(cpumask_of(0)); init_cpu_present(cpumask_of(0)); init_cpu_possible(cpumask_of(0)); return; } } void __devinit smp_prepare_boot_cpu(void) { set_cpu_online(smp_processor_id(), true); cpu_set(smp_processor_id(), cpu_callin_map); set_numa_node(cpu_to_node_map[smp_processor_id()]); per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; paravirt_post_smp_prepare_boot_cpu(); } #ifdef CONFIG_HOTPLUG_CPU static inline void clear_cpu_sibling_map(int cpu) { int i; for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu)) cpu_clear(cpu, per_cpu(cpu_sibling_map, i)); for_each_cpu_mask(i, cpu_core_map[cpu]) cpu_clear(cpu, cpu_core_map[i]); per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE; } static void remove_siblinginfo(int cpu) { int last = 0; if (cpu_data(cpu)->threads_per_core == 1 && cpu_data(cpu)->cores_per_socket == 1) { cpu_clear(cpu, cpu_core_map[cpu]); cpu_clear(cpu, per_cpu(cpu_sibling_map, cpu)); return; } last = (cpus_weight(cpu_core_map[cpu]) == 1 ? 1 : 0); /* remove it from all sibling map's */ clear_cpu_sibling_map(cpu); } extern void fixup_irqs(void); int migrate_platform_irqs(unsigned int cpu) { int new_cpei_cpu; struct irq_data *data = NULL; const struct cpumask *mask; int retval = 0; /* * dont permit CPEI target to removed. 
*/ if (cpe_vector > 0 && is_cpu_cpei_target(cpu)) { printk ("CPU (%d) is CPEI Target\n", cpu); if (can_cpei_retarget()) { /* * Now re-target the CPEI to a different processor */ new_cpei_cpu = cpumask_any(cpu_online_mask); mask = cpumask_of(new_cpei_cpu); set_cpei_target_cpu(new_cpei_cpu); data = irq_get_irq_data(ia64_cpe_irq); /* * Switch for now, immediately, we need to do fake intr * as other interrupts, but need to study CPEI behaviour with * polling before making changes. */ if (data && data->chip) { data->chip->irq_disable(data); data->chip->irq_set_affinity(data, mask, false); data->chip->irq_enable(data); printk ("Re-targeting CPEI to cpu %d\n", new_cpei_cpu); } } if (!data) { printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu); retval = -EBUSY; } } return retval; } /* must be called with cpucontrol mutex held */ int __cpu_disable(void) { int cpu = smp_processor_id(); /* * dont permit boot processor for now */ if (cpu == 0 && !bsp_remove_ok) { printk ("Your platform does not support removal of BSP\n"); return (-EBUSY); } if (ia64_platform_is("sn2")) { if (!sn_cpu_disable_allowed(cpu)) return -EBUSY; } set_cpu_online(cpu, false); if (migrate_platform_irqs(cpu)) { set_cpu_online(cpu, true); return -EBUSY; } remove_siblinginfo(cpu); fixup_irqs(); local_flush_tlb_all(); cpu_clear(cpu, cpu_callin_map); return 0; } void __cpu_die(unsigned int cpu) { unsigned int i; for (i = 0; i < 100; i++) { /* They ack this in play_dead by setting CPU_DEAD */ if (per_cpu(cpu_state, cpu) == CPU_DEAD) { printk ("CPU %d is now offline\n", cpu); return; } msleep(100); } printk(KERN_ERR "CPU %u didn't die...\n", cpu); } #endif /* CONFIG_HOTPLUG_CPU */ void smp_cpus_done (unsigned int dummy) { int cpu; unsigned long bogosum = 0; /* * Allow the user to impress friends. 
*/ for_each_online_cpu(cpu) { bogosum += cpu_data(cpu)->loops_per_jiffy; } printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100); } static inline void __devinit set_cpu_sibling_map(int cpu) { int i; for_each_online_cpu(i) { if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) { cpu_set(i, cpu_core_map[cpu]); cpu_set(cpu, cpu_core_map[i]); if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) { cpu_set(i, per_cpu(cpu_sibling_map, cpu)); cpu_set(cpu, per_cpu(cpu_sibling_map, i)); } } } } int __cpuinit __cpu_up (unsigned int cpu) { int ret; int sapicid; sapicid = ia64_cpu_to_sapicid[cpu]; if (sapicid == -1) return -EINVAL; /* * Already booted cpu? not valid anymore since we dont * do idle loop tightspin anymore. */ if (cpu_isset(cpu, cpu_callin_map)) return -EINVAL; per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; /* Processor goes to start_secondary(), sets online flag */ ret = do_boot_cpu(sapicid, cpu); if (ret < 0) return ret; if (cpu_data(cpu)->threads_per_core == 1 && cpu_data(cpu)->cores_per_socket == 1) { cpu_set(cpu, per_cpu(cpu_sibling_map, cpu)); cpu_set(cpu, cpu_core_map[cpu]); return 0; } set_cpu_sibling_map(cpu); return 0; } /* * Assume that CPUs have been discovered by some platform-dependent interface. For * SoftSDV/Lion, that would be ACPI. * * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP(). */ void __init init_smp_config(void) { struct fptr { unsigned long fp; unsigned long gp; } *ap_startup; long sal_ret; /* Tell SAL where to drop the APs. */ ap_startup = (struct fptr *) start_ap; sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ, ia64_tpa(ap_startup->fp), ia64_tpa(ap_startup->gp), 0, 0, 0, 0); if (sal_ret < 0) printk(KERN_ERR "SMP: Can't set SAL AP Boot Rendezvous: %s\n", ia64_sal_strerror(sal_ret)); } /* * identify_siblings(cpu) gets called from identify_cpu. 
This populates the * information related to logical execution units in per_cpu_data structure. */ void __devinit identify_siblings(struct cpuinfo_ia64 *c) { long status; u16 pltid; pal_logical_to_physical_t info; status = ia64_pal_logical_to_phys(-1, &info); if (status != PAL_STATUS_SUCCESS) { if (status != PAL_STATUS_UNIMPLEMENTED) { printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n", status); return; } info.overview_ppid = 0; info.overview_cpp = 1; info.overview_tpc = 1; } status = ia64_sal_physical_id_info(&pltid); if (status != PAL_STATUS_SUCCESS) { if (status != PAL_STATUS_UNIMPLEMENTED) printk(KERN_ERR "ia64_sal_pltid failed with %ld\n", status); return; } c->socket_id = (pltid << 8) | info.overview_ppid; if (info.overview_cpp == 1 && info.overview_tpc == 1) return; c->cores_per_socket = info.overview_cpp; c->threads_per_core = info.overview_tpc; c->num_log = info.overview_num_log; c->core_id = info.log1_cid; c->thread_id = info.log1_tid; } /* * returns non zero, if multi-threading is enabled * on at least one physical package. Due to hotplug cpu * and (maxcpus=), all threads may not necessarily be enabled * even though the processor supports multi-threading. */ int is_multithreading_enabled(void) { int i, j; for_each_present_cpu(i) { for_each_present_cpu(j) { if (j == i) continue; if ((cpu_data(j)->socket_id == cpu_data(i)->socket_id)) { if (cpu_data(j)->core_id == cpu_data(i)->core_id) return 1; } } } return 0; } EXPORT_SYMBOL_GPL(is_multithreading_enabled);
gpl-2.0
nemomobile/kernel-adaptation-n950-n9
arch/arm/mach-clps711x/cdb89712.c
5002
1802
/* * linux/arch/arm/mach-clps711x/cdb89712.c * * Copyright (C) 2000-2001 Deep Blue Solutions Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/io.h> #include <mach/hardware.h> #include <asm/pgtable.h> #include <asm/page.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include "common.h" /* * Map the CS89712 Ethernet port. That should be moved to the * ethernet driver, perhaps. */ static struct map_desc cdb89712_io_desc[] __initdata = { { .virtual = ETHER_BASE, .pfn =__phys_to_pfn(ETHER_START), .length = ETHER_SIZE, .type = MT_DEVICE } }; static void __init cdb89712_map_io(void) { clps711x_map_io(); iotable_init(cdb89712_io_desc, ARRAY_SIZE(cdb89712_io_desc)); } MACHINE_START(CDB89712, "Cirrus-CDB89712") /* Maintainer: Ray Lehtiniemi */ .atag_offset = 0x100, .map_io = cdb89712_map_io, .init_irq = clps711x_init_irq, .timer = &clps711x_timer, .restart = clps711x_restart, MACHINE_END
gpl-2.0
CyanogenMod/android_kernel_htc_m7
arch/sh/kernel/cpu/init.c
6538
8619
/* * arch/sh/kernel/cpu/init.c * * CPU init code * * Copyright (C) 2002 - 2009 Paul Mundt * Copyright (C) 2003 Richard Curnow * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/log2.h> #include <asm/mmu_context.h> #include <asm/processor.h> #include <asm/uaccess.h> #include <asm/page.h> #include <asm/cacheflush.h> #include <asm/cache.h> #include <asm/elf.h> #include <asm/io.h> #include <asm/smp.h> #include <asm/sh_bios.h> #include <asm/setup.h> #ifdef CONFIG_SH_FPU #define cpu_has_fpu 1 #else #define cpu_has_fpu 0 #endif #ifdef CONFIG_SH_DSP #define cpu_has_dsp 1 #else #define cpu_has_dsp 0 #endif /* * Generic wrapper for command line arguments to disable on-chip * peripherals (nofpu, nodsp, and so forth). */ #define onchip_setup(x) \ static int x##_disabled __cpuinitdata = !cpu_has_##x; \ \ static int __cpuinit x##_setup(char *opts) \ { \ x##_disabled = 1; \ return 1; \ } \ __setup("no" __stringify(x), x##_setup); onchip_setup(fpu); onchip_setup(dsp); #ifdef CONFIG_SPECULATIVE_EXECUTION #define CPUOPM 0xff2f0000 #define CPUOPM_RABD (1 << 5) static void __cpuinit speculative_execution_init(void) { /* Clear RABD */ __raw_writel(__raw_readl(CPUOPM) & ~CPUOPM_RABD, CPUOPM); /* Flush the update */ (void)__raw_readl(CPUOPM); ctrl_barrier(); } #else #define speculative_execution_init() do { } while (0) #endif #ifdef CONFIG_CPU_SH4A #define EXPMASK 0xff2f0004 #define EXPMASK_RTEDS (1 << 0) #define EXPMASK_BRDSSLP (1 << 1) #define EXPMASK_MMCAW (1 << 4) static void __cpuinit expmask_init(void) { unsigned long expmask = __raw_readl(EXPMASK); /* * Future proofing. * * Disable support for slottable sleep instruction, non-nop * instructions in the rte delay slot, and associative writes to * the memory-mapped cache array. 
*/ expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP | EXPMASK_MMCAW); __raw_writel(expmask, EXPMASK); ctrl_barrier(); } #else #define expmask_init() do { } while (0) #endif /* 2nd-level cache init */ void __attribute__ ((weak)) l2_cache_init(void) { } /* * Generic first-level cache init */ #ifdef CONFIG_SUPERH32 static void cache_init(void) { unsigned long ccr, flags; jump_to_uncached(); ccr = __raw_readl(CCR); /* * At this point we don't know whether the cache is enabled or not - a * bootloader may have enabled it. There are at least 2 things that * could be dirty in the cache at this point: * 1. kernel command line set up by boot loader * 2. spilled registers from the prolog of this function * => before re-initialising the cache, we must do a purge of the whole * cache out to memory for safety. As long as nothing is spilled * during the loop to lines that have already been done, this is safe. * - RPC */ if (ccr & CCR_CACHE_ENABLE) { unsigned long ways, waysize, addrstart; waysize = current_cpu_data.dcache.sets; #ifdef CCR_CACHE_ORA /* * If the OC is already in RAM mode, we only have * half of the entries to flush.. */ if (ccr & CCR_CACHE_ORA) waysize >>= 1; #endif waysize <<= current_cpu_data.dcache.entry_shift; #ifdef CCR_CACHE_EMODE /* If EMODE is not set, we only have 1 way to flush. */ if (!(ccr & CCR_CACHE_EMODE)) ways = 1; else #endif ways = current_cpu_data.dcache.ways; addrstart = CACHE_OC_ADDRESS_ARRAY; do { unsigned long addr; for (addr = addrstart; addr < addrstart + waysize; addr += current_cpu_data.dcache.linesz) __raw_writel(0, addr); addrstart += current_cpu_data.dcache.way_incr; } while (--ways); } /* * Default CCR values .. enable the caches * and invalidate them immediately.. 
*/ flags = CCR_CACHE_ENABLE | CCR_CACHE_INVALIDATE; #ifdef CCR_CACHE_EMODE /* Force EMODE if possible */ if (current_cpu_data.dcache.ways > 1) flags |= CCR_CACHE_EMODE; else flags &= ~CCR_CACHE_EMODE; #endif #if defined(CONFIG_CACHE_WRITETHROUGH) /* Write-through */ flags |= CCR_CACHE_WT; #elif defined(CONFIG_CACHE_WRITEBACK) /* Write-back */ flags |= CCR_CACHE_CB; #else /* Off */ flags &= ~CCR_CACHE_ENABLE; #endif l2_cache_init(); __raw_writel(flags, CCR); back_to_cached(); } #else #define cache_init() do { } while (0) #endif #define CSHAPE(totalsize, linesize, assoc) \ ((totalsize & ~0xff) | (linesize << 4) | assoc) #define CACHE_DESC_SHAPE(desc) \ CSHAPE((desc).way_size * (desc).ways, ilog2((desc).linesz), (desc).ways) static void detect_cache_shape(void) { l1d_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.dcache); if (current_cpu_data.dcache.flags & SH_CACHE_COMBINED) l1i_cache_shape = l1d_cache_shape; else l1i_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.icache); if (current_cpu_data.flags & CPU_HAS_L2_CACHE) l2_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.scache); else l2_cache_shape = -1; /* No S-cache */ } static void __cpuinit fpu_init(void) { /* Disable the FPU */ if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) { printk("FPU Disabled\n"); current_cpu_data.flags &= ~CPU_HAS_FPU; } disable_fpu(); clear_used_math(); } #ifdef CONFIG_SH_DSP static void __cpuinit release_dsp(void) { unsigned long sr; /* Clear SR.DSP bit */ __asm__ __volatile__ ( "stc\tsr, %0\n\t" "and\t%1, %0\n\t" "ldc\t%0, sr\n\t" : "=&r" (sr) : "r" (~SR_DSP) ); } static void __cpuinit dsp_init(void) { unsigned long sr; /* * Set the SR.DSP bit, wait for one instruction, and then read * back the SR value. 
*/ __asm__ __volatile__ ( "stc\tsr, %0\n\t" "or\t%1, %0\n\t" "ldc\t%0, sr\n\t" "nop\n\t" "stc\tsr, %0\n\t" : "=&r" (sr) : "r" (SR_DSP) ); /* If the DSP bit is still set, this CPU has a DSP */ if (sr & SR_DSP) current_cpu_data.flags |= CPU_HAS_DSP; /* Disable the DSP */ if (dsp_disabled && (current_cpu_data.flags & CPU_HAS_DSP)) { printk("DSP Disabled\n"); current_cpu_data.flags &= ~CPU_HAS_DSP; } /* Now that we've determined the DSP status, clear the DSP bit. */ release_dsp(); } #else static inline void __cpuinit dsp_init(void) { } #endif /* CONFIG_SH_DSP */ /** * cpu_init * * This is our initial entry point for each CPU, and is invoked on the * boot CPU prior to calling start_kernel(). For SMP, a combination of * this and start_secondary() will bring up each processor to a ready * state prior to hand forking the idle loop. * * We do all of the basic processor init here, including setting up * the caches, FPU, DSP, etc. By the time start_kernel() is hit (and * subsequently platform_setup()) things like determining the CPU * subtype and initial configuration will all be done. * * Each processor family is still responsible for doing its own probing * and cache configuration in cpu_probe(). 
*/ asmlinkage void __cpuinit cpu_init(void) { current_thread_info()->cpu = hard_smp_processor_id(); /* First, probe the CPU */ cpu_probe(); if (current_cpu_data.type == CPU_SH_NONE) panic("Unknown CPU"); /* First setup the rest of the I-cache info */ current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr - current_cpu_data.icache.linesz; current_cpu_data.icache.way_size = current_cpu_data.icache.sets * current_cpu_data.icache.linesz; /* And the D-cache too */ current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr - current_cpu_data.dcache.linesz; current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets * current_cpu_data.dcache.linesz; /* Init the cache */ cache_init(); if (raw_smp_processor_id() == 0) { shm_align_mask = max_t(unsigned long, current_cpu_data.dcache.way_size - 1, PAGE_SIZE - 1); /* Boot CPU sets the cache shape */ detect_cache_shape(); } fpu_init(); dsp_init(); /* * Initialize the per-CPU ASID cache very early, since the * TLB flushing routines depend on this being setup. */ current_cpu_data.asid_cache = NO_CONTEXT; current_cpu_data.phys_bits = __in_29bit_mode() ? 29 : 32; speculative_execution_init(); expmask_init(); /* Do the rest of the boot processor setup */ if (raw_smp_processor_id() == 0) { /* Save off the BIOS VBR, if there is one */ sh_bios_vbr_init(); /* * Setup VBR for boot CPU. Secondary CPUs do this through * start_secondary(). */ per_cpu_trap_init(); /* * Boot processor to setup the FP and extended state * context info. */ init_thread_xstate(); } }
gpl-2.0
friedrich420/N3-AEL-Kernel-NF1-v5-
sound/pci/echoaudio/indigo.c
8074
2898
/* * ALSA driver for Echoaudio soundcards. * Copyright (C) 2003-2004 Giuliano Pochini <pochini@shiny.it> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #define INDIGO_FAMILY #define ECHOCARD_INDIGO #define ECHOCARD_NAME "Indigo" #define ECHOCARD_HAS_SUPER_INTERLEAVE #define ECHOCARD_HAS_VMIXER #define ECHOCARD_HAS_STEREO_BIG_ENDIAN32 /* Pipe indexes */ #define PX_ANALOG_OUT 0 /* 8 */ #define PX_DIGITAL_OUT 8 /* 0 */ #define PX_ANALOG_IN 8 /* 0 */ #define PX_DIGITAL_IN 8 /* 0 */ #define PX_NUM 8 /* Bus indexes */ #define BX_ANALOG_OUT 0 /* 2 */ #define BX_DIGITAL_OUT 2 /* 0 */ #define BX_ANALOG_IN 2 /* 0 */ #define BX_DIGITAL_IN 2 /* 0 */ #define BX_NUM 2 #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/module.h> #include <linux/firmware.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/info.h> #include <sound/control.h> #include <sound/tlv.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/asoundef.h> #include <sound/initval.h> #include <asm/io.h> #include <linux/atomic.h> #include "echoaudio.h" MODULE_FIRMWARE("ea/loader_dsp.fw"); MODULE_FIRMWARE("ea/indigo_dsp.fw"); #define FW_361_LOADER 0 #define FW_INDIGO_DSP 1 static const struct firmware card_fw[] = { {0, "loader_dsp.fw"}, {0, "indigo_dsp.fw"} }; static DEFINE_PCI_DEVICE_TABLE(snd_echo_ids) = { {0x1057, 
0x3410, 0xECC0, 0x0090, 0, 0, 0}, /* Indigo */ {0,} }; static struct snd_pcm_hardware pcm_hardware_skel = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_SYNC_START, .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE, .rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000, .rate_min = 32000, .rate_max = 96000, .channels_min = 1, .channels_max = 8, .buffer_bytes_max = 262144, .period_bytes_min = 32, .period_bytes_max = 131072, .periods_min = 2, .periods_max = 220, }; #include "indigo_dsp.c" #include "echoaudio_dsp.c" #include "echoaudio.c"
gpl-2.0
kelvinbui31/android_mediatek_muse72
sound/pci/echoaudio/indigo.c
8074
2898
/* * ALSA driver for Echoaudio soundcards. * Copyright (C) 2003-2004 Giuliano Pochini <pochini@shiny.it> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #define INDIGO_FAMILY #define ECHOCARD_INDIGO #define ECHOCARD_NAME "Indigo" #define ECHOCARD_HAS_SUPER_INTERLEAVE #define ECHOCARD_HAS_VMIXER #define ECHOCARD_HAS_STEREO_BIG_ENDIAN32 /* Pipe indexes */ #define PX_ANALOG_OUT 0 /* 8 */ #define PX_DIGITAL_OUT 8 /* 0 */ #define PX_ANALOG_IN 8 /* 0 */ #define PX_DIGITAL_IN 8 /* 0 */ #define PX_NUM 8 /* Bus indexes */ #define BX_ANALOG_OUT 0 /* 2 */ #define BX_DIGITAL_OUT 2 /* 0 */ #define BX_ANALOG_IN 2 /* 0 */ #define BX_DIGITAL_IN 2 /* 0 */ #define BX_NUM 2 #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/module.h> #include <linux/firmware.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/info.h> #include <sound/control.h> #include <sound/tlv.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/asoundef.h> #include <sound/initval.h> #include <asm/io.h> #include <linux/atomic.h> #include "echoaudio.h" MODULE_FIRMWARE("ea/loader_dsp.fw"); MODULE_FIRMWARE("ea/indigo_dsp.fw"); #define FW_361_LOADER 0 #define FW_INDIGO_DSP 1 static const struct firmware card_fw[] = { {0, "loader_dsp.fw"}, {0, "indigo_dsp.fw"} }; static DEFINE_PCI_DEVICE_TABLE(snd_echo_ids) = { {0x1057, 
0x3410, 0xECC0, 0x0090, 0, 0, 0}, /* Indigo */ {0,} }; static struct snd_pcm_hardware pcm_hardware_skel = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_SYNC_START, .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE, .rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000, .rate_min = 32000, .rate_max = 96000, .channels_min = 1, .channels_max = 8, .buffer_bytes_max = 262144, .period_bytes_min = 32, .period_bytes_max = 131072, .periods_min = 2, .periods_max = 220, }; #include "indigo_dsp.c" #include "echoaudio_dsp.c" #include "echoaudio.c"
gpl-2.0
GustavoRD78/78Kernel-ZL-new-construction-283
sound/pci/echoaudio/indigoiox.c
8074
3012
/* * ALSA driver for Echoaudio soundcards. * Copyright (C) 2009 Giuliano Pochini <pochini@shiny.it> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #define INDIGO_FAMILY #define ECHOCARD_INDIGO_IOX #define ECHOCARD_NAME "Indigo IOx" #define ECHOCARD_HAS_MONITOR #define ECHOCARD_HAS_SUPER_INTERLEAVE #define ECHOCARD_HAS_VMIXER #define ECHOCARD_HAS_STEREO_BIG_ENDIAN32 /* Pipe indexes */ #define PX_ANALOG_OUT 0 /* 8 */ #define PX_DIGITAL_OUT 8 /* 0 */ #define PX_ANALOG_IN 8 /* 2 */ #define PX_DIGITAL_IN 10 /* 0 */ #define PX_NUM 10 /* Bus indexes */ #define BX_ANALOG_OUT 0 /* 2 */ #define BX_DIGITAL_OUT 2 /* 0 */ #define BX_ANALOG_IN 2 /* 2 */ #define BX_DIGITAL_IN 4 /* 0 */ #define BX_NUM 4 #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/module.h> #include <linux/firmware.h> #include <linux/io.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/info.h> #include <sound/control.h> #include <sound/tlv.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/asoundef.h> #include <sound/initval.h> #include <linux/atomic.h> #include "echoaudio.h" MODULE_FIRMWARE("ea/loader_dsp.fw"); MODULE_FIRMWARE("ea/indigo_iox_dsp.fw"); #define FW_361_LOADER 0 #define FW_INDIGO_IOX_DSP 1 static const struct firmware card_fw[] = { {0, "loader_dsp.fw"}, {0, "indigo_iox_dsp.fw"} }; static 
DEFINE_PCI_DEVICE_TABLE(snd_echo_ids) = { {0x1057, 0x3410, 0xECC0, 0x00D0, 0, 0, 0}, /* Indigo IOx */ {0,} }; static struct snd_pcm_hardware pcm_hardware_skel = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_SYNC_START, .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE, .rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000, .rate_min = 32000, .rate_max = 96000, .channels_min = 1, .channels_max = 8, .buffer_bytes_max = 262144, .period_bytes_min = 32, .period_bytes_max = 131072, .periods_min = 2, .periods_max = 220, }; #include "indigoiox_dsp.c" #include "indigo_express_dsp.c" #include "echoaudio_dsp.c" #include "echoaudio.c"
gpl-2.0
Howpathetic/Villec2-Kernel
drivers/mfd/ucb1400_core.c
8330
3601
/* * Core functions for: * Philips UCB1400 multifunction chip * * Based on ucb1400_ts.c: * Author: Nicolas Pitre * Created: September 25, 2006 * Copyright: MontaVista Software, Inc. * * Spliting done by: Marek Vasut <marek.vasut@gmail.com> * If something doesn't work and it worked before spliting, e-mail me, * dont bother Nicolas please ;-) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This code is heavily based on ucb1x00-*.c copyrighted by Russell King * covering the UCB1100, UCB1200 and UCB1300.. Support for the UCB1400 has * been made separate from ucb1x00-core/ucb1x00-ts on Russell's request. */ #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/ucb1400.h> unsigned int ucb1400_adc_read(struct snd_ac97 *ac97, u16 adc_channel, int adcsync) { unsigned int val; if (adcsync) adc_channel |= UCB_ADC_SYNC_ENA; ucb1400_reg_write(ac97, UCB_ADC_CR, UCB_ADC_ENA | adc_channel); ucb1400_reg_write(ac97, UCB_ADC_CR, UCB_ADC_ENA | adc_channel | UCB_ADC_START); while (!((val = ucb1400_reg_read(ac97, UCB_ADC_DATA)) & UCB_ADC_DAT_VALID)) schedule_timeout_uninterruptible(1); return val & UCB_ADC_DAT_MASK; } EXPORT_SYMBOL_GPL(ucb1400_adc_read); static int ucb1400_core_probe(struct device *dev) { int err; struct ucb1400 *ucb; struct ucb1400_ts ucb_ts; struct ucb1400_gpio ucb_gpio; struct snd_ac97 *ac97; struct ucb1400_pdata *pdata = dev->platform_data; memset(&ucb_ts, 0, sizeof(ucb_ts)); memset(&ucb_gpio, 0, sizeof(ucb_gpio)); ucb = kzalloc(sizeof(struct ucb1400), GFP_KERNEL); if (!ucb) { err = -ENOMEM; goto err; } dev_set_drvdata(dev, ucb); ac97 = to_ac97_t(dev); ucb_ts.id = ucb1400_reg_read(ac97, UCB_ID); if (ucb_ts.id != UCB_ID_1400) { err = -ENODEV; goto err0; } /* GPIO */ ucb_gpio.ac97 = ac97; ucb->ucb1400_gpio = platform_device_alloc("ucb1400_gpio", -1); if (!ucb->ucb1400_gpio) { err = -ENOMEM; 
goto err0; } err = platform_device_add_data(ucb->ucb1400_gpio, &ucb_gpio, sizeof(ucb_gpio)); if (err) goto err1; err = platform_device_add(ucb->ucb1400_gpio); if (err) goto err1; /* TOUCHSCREEN */ ucb_ts.ac97 = ac97; if (pdata != NULL && pdata->irq >= 0) ucb_ts.irq = pdata->irq; else ucb_ts.irq = -1; ucb->ucb1400_ts = platform_device_alloc("ucb1400_ts", -1); if (!ucb->ucb1400_ts) { err = -ENOMEM; goto err2; } err = platform_device_add_data(ucb->ucb1400_ts, &ucb_ts, sizeof(ucb_ts)); if (err) goto err3; err = platform_device_add(ucb->ucb1400_ts); if (err) goto err3; return 0; err3: platform_device_put(ucb->ucb1400_ts); err2: platform_device_del(ucb->ucb1400_gpio); err1: platform_device_put(ucb->ucb1400_gpio); err0: kfree(ucb); err: return err; } static int ucb1400_core_remove(struct device *dev) { struct ucb1400 *ucb = dev_get_drvdata(dev); platform_device_unregister(ucb->ucb1400_ts); platform_device_unregister(ucb->ucb1400_gpio); kfree(ucb); return 0; } static struct device_driver ucb1400_core_driver = { .name = "ucb1400_core", .bus = &ac97_bus_type, .probe = ucb1400_core_probe, .remove = ucb1400_core_remove, }; static int __init ucb1400_core_init(void) { return driver_register(&ucb1400_core_driver); } static void __exit ucb1400_core_exit(void) { driver_unregister(&ucb1400_core_driver); } module_init(ucb1400_core_init); module_exit(ucb1400_core_exit); MODULE_DESCRIPTION("Philips UCB1400 driver"); MODULE_LICENSE("GPL");
gpl-2.0
yajnab/android_kernel_samsung_baffin
drivers/scsi/stex.c
8330
44285
/* * SuperTrak EX Series Storage Controller driver for Linux * * Copyright (C) 2005-2009 Promise Technology Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Written By: * Ed Lin <promise_linux@promise.com> * */ #include <linux/init.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/pci.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/types.h> #include <linux/module.h> #include <linux/spinlock.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/byteorder.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_eh.h> #define DRV_NAME "stex" #define ST_DRIVER_VERSION "4.6.0000.4" #define ST_VER_MAJOR 4 #define ST_VER_MINOR 6 #define ST_OEM 0 #define ST_BUILD_VER 4 enum { /* MU register offset */ IMR0 = 0x10, /* MU_INBOUND_MESSAGE_REG0 */ IMR1 = 0x14, /* MU_INBOUND_MESSAGE_REG1 */ OMR0 = 0x18, /* MU_OUTBOUND_MESSAGE_REG0 */ OMR1 = 0x1c, /* MU_OUTBOUND_MESSAGE_REG1 */ IDBL = 0x20, /* MU_INBOUND_DOORBELL */ IIS = 0x24, /* MU_INBOUND_INTERRUPT_STATUS */ IIM = 0x28, /* MU_INBOUND_INTERRUPT_MASK */ ODBL = 0x2c, /* MU_OUTBOUND_DOORBELL */ OIS = 0x30, /* MU_OUTBOUND_INTERRUPT_STATUS */ OIM = 0x3c, /* MU_OUTBOUND_INTERRUPT_MASK */ YIOA_STATUS = 0x00, YH2I_INT = 0x20, YINT_EN = 0x34, YI2H_INT = 0x9c, YI2H_INT_C = 0xa0, YH2I_REQ = 0xc0, YH2I_REQ_HI = 0xc4, /* MU register value */ MU_INBOUND_DOORBELL_HANDSHAKE = (1 << 0), MU_INBOUND_DOORBELL_REQHEADCHANGED = (1 << 1), MU_INBOUND_DOORBELL_STATUSTAILCHANGED = (1 << 2), MU_INBOUND_DOORBELL_HMUSTOPPED = (1 << 3), MU_INBOUND_DOORBELL_RESET = (1 << 4), MU_OUTBOUND_DOORBELL_HANDSHAKE 
= (1 << 0), MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED = (1 << 1), MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED = (1 << 2), MU_OUTBOUND_DOORBELL_BUSCHANGE = (1 << 3), MU_OUTBOUND_DOORBELL_HASEVENT = (1 << 4), MU_OUTBOUND_DOORBELL_REQUEST_RESET = (1 << 27), /* MU status code */ MU_STATE_STARTING = 1, MU_STATE_STARTED = 2, MU_STATE_RESETTING = 3, MU_STATE_FAILED = 4, MU_MAX_DELAY = 120, MU_HANDSHAKE_SIGNATURE = 0x55aaaa55, MU_HANDSHAKE_SIGNATURE_HALF = 0x5a5a0000, MU_HARD_RESET_WAIT = 30000, HMU_PARTNER_TYPE = 2, /* firmware returned values */ SRB_STATUS_SUCCESS = 0x01, SRB_STATUS_ERROR = 0x04, SRB_STATUS_BUSY = 0x05, SRB_STATUS_INVALID_REQUEST = 0x06, SRB_STATUS_SELECTION_TIMEOUT = 0x0A, SRB_SEE_SENSE = 0x80, /* task attribute */ TASK_ATTRIBUTE_SIMPLE = 0x0, TASK_ATTRIBUTE_HEADOFQUEUE = 0x1, TASK_ATTRIBUTE_ORDERED = 0x2, TASK_ATTRIBUTE_ACA = 0x4, SS_STS_NORMAL = 0x80000000, SS_STS_DONE = 0x40000000, SS_STS_HANDSHAKE = 0x20000000, SS_HEAD_HANDSHAKE = 0x80, SS_H2I_INT_RESET = 0x100, SS_I2H_REQUEST_RESET = 0x2000, SS_MU_OPERATIONAL = 0x80000000, STEX_CDB_LENGTH = 16, STATUS_VAR_LEN = 128, /* sg flags */ SG_CF_EOT = 0x80, /* end of table */ SG_CF_64B = 0x40, /* 64 bit item */ SG_CF_HOST = 0x20, /* sg in host memory */ MSG_DATA_DIR_ND = 0, MSG_DATA_DIR_IN = 1, MSG_DATA_DIR_OUT = 2, st_shasta = 0, st_vsc = 1, st_yosemite = 2, st_seq = 3, st_yel = 4, PASSTHRU_REQ_TYPE = 0x00000001, PASSTHRU_REQ_NO_WAKEUP = 0x00000100, ST_INTERNAL_TIMEOUT = 180, ST_TO_CMD = 0, ST_FROM_CMD = 1, /* vendor specific commands of Promise */ MGT_CMD = 0xd8, SINBAND_MGT_CMD = 0xd9, ARRAY_CMD = 0xe0, CONTROLLER_CMD = 0xe1, DEBUGGING_CMD = 0xe2, PASSTHRU_CMD = 0xe3, PASSTHRU_GET_ADAPTER = 0x05, PASSTHRU_GET_DRVVER = 0x10, CTLR_CONFIG_CMD = 0x03, CTLR_SHUTDOWN = 0x0d, CTLR_POWER_STATE_CHANGE = 0x0e, CTLR_POWER_SAVING = 0x01, PASSTHRU_SIGNATURE = 0x4e415041, MGT_CMD_SIGNATURE = 0xba, INQUIRY_EVPD = 0x01, ST_ADDITIONAL_MEM = 0x200000, ST_ADDITIONAL_MEM_MIN = 0x80000, }; struct st_sgitem { u8 ctrl; /* SG_CF_xxx 
*/ u8 reserved[3]; __le32 count; __le64 addr; }; struct st_ss_sgitem { __le32 addr; __le32 addr_hi; __le32 count; }; struct st_sgtable { __le16 sg_count; __le16 max_sg_count; __le32 sz_in_byte; }; struct st_msg_header { __le64 handle; u8 flag; u8 channel; __le16 timeout; u32 reserved; }; struct handshake_frame { __le64 rb_phy; /* request payload queue physical address */ __le16 req_sz; /* size of each request payload */ __le16 req_cnt; /* count of reqs the buffer can hold */ __le16 status_sz; /* size of each status payload */ __le16 status_cnt; /* count of status the buffer can hold */ __le64 hosttime; /* seconds from Jan 1, 1970 (GMT) */ u8 partner_type; /* who sends this frame */ u8 reserved0[7]; __le32 partner_ver_major; __le32 partner_ver_minor; __le32 partner_ver_oem; __le32 partner_ver_build; __le32 extra_offset; /* NEW */ __le32 extra_size; /* NEW */ __le32 scratch_size; u32 reserved1; }; struct req_msg { __le16 tag; u8 lun; u8 target; u8 task_attr; u8 task_manage; u8 data_dir; u8 payload_sz; /* payload size in 4-byte, not used */ u8 cdb[STEX_CDB_LENGTH]; u32 variable[0]; }; struct status_msg { __le16 tag; u8 lun; u8 target; u8 srb_status; u8 scsi_status; u8 reserved; u8 payload_sz; /* payload size in 4-byte */ u8 variable[STATUS_VAR_LEN]; }; struct ver_info { u32 major; u32 minor; u32 oem; u32 build; u32 reserved[2]; }; struct st_frame { u32 base[6]; u32 rom_addr; struct ver_info drv_ver; struct ver_info bios_ver; u32 bus; u32 slot; u32 irq_level; u32 irq_vec; u32 id; u32 subid; u32 dimm_size; u8 dimm_type; u8 reserved[3]; u32 channel; u32 reserved1; }; struct st_drvver { u32 major; u32 minor; u32 oem; u32 build; u32 signature[2]; u8 console_id; u8 host_no; u8 reserved0[2]; u32 reserved[3]; }; struct st_ccb { struct req_msg *req; struct scsi_cmnd *cmd; void *sense_buffer; unsigned int sense_bufflen; int sg_count; u32 req_type; u8 srb_status; u8 scsi_status; u8 reserved[2]; }; struct st_hba { void __iomem *mmio_base; /* iomapped PCI memory space */ void 
*dma_mem; dma_addr_t dma_handle; size_t dma_size; struct Scsi_Host *host; struct pci_dev *pdev; struct req_msg * (*alloc_rq) (struct st_hba *); int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *); void (*send) (struct st_hba *, struct req_msg *, u16); u32 req_head; u32 req_tail; u32 status_head; u32 status_tail; struct status_msg *status_buffer; void *copy_buffer; /* temp buffer for driver-handled commands */ struct st_ccb *ccb; struct st_ccb *wait_ccb; __le32 *scratch; char work_q_name[20]; struct workqueue_struct *work_q; struct work_struct reset_work; wait_queue_head_t reset_waitq; unsigned int mu_status; unsigned int cardtype; int msi_enabled; int out_req_cnt; u32 extra_offset; u16 rq_count; u16 rq_size; u16 sts_count; }; struct st_card_info { struct req_msg * (*alloc_rq) (struct st_hba *); int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *); void (*send) (struct st_hba *, struct req_msg *, u16); unsigned int max_id; unsigned int max_lun; unsigned int max_channel; u16 rq_count; u16 rq_size; u16 sts_count; }; static int msi; module_param(msi, int, 0); MODULE_PARM_DESC(msi, "Enable Message Signaled Interrupts(0=off, 1=on)"); static const char console_inq_page[] = { 0x03,0x00,0x03,0x03,0xFA,0x00,0x00,0x30, 0x50,0x72,0x6F,0x6D,0x69,0x73,0x65,0x20, /* "Promise " */ 0x52,0x41,0x49,0x44,0x20,0x43,0x6F,0x6E, /* "RAID Con" */ 0x73,0x6F,0x6C,0x65,0x20,0x20,0x20,0x20, /* "sole " */ 0x31,0x2E,0x30,0x30,0x20,0x20,0x20,0x20, /* "1.00 " */ 0x53,0x58,0x2F,0x52,0x53,0x41,0x46,0x2D, /* "SX/RSAF-" */ 0x54,0x45,0x31,0x2E,0x30,0x30,0x20,0x20, /* "TE1.00 " */ 0x0C,0x20,0x20,0x20,0x20,0x20,0x20,0x20 }; MODULE_AUTHOR("Ed Lin"); MODULE_DESCRIPTION("Promise Technology SuperTrak EX Controllers"); MODULE_LICENSE("GPL"); MODULE_VERSION(ST_DRIVER_VERSION); static void stex_gettime(__le64 *time) { struct timeval tv; do_gettimeofday(&tv); *time = cpu_to_le64(tv.tv_sec); } static struct status_msg *stex_get_status(struct st_hba *hba) { struct status_msg *status = 
hba->status_buffer + hba->status_tail; ++hba->status_tail; hba->status_tail %= hba->sts_count+1; return status; } static void stex_invalid_field(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; /* "Invalid field in cdb" */ scsi_build_sense_buffer(0, cmd->sense_buffer, ILLEGAL_REQUEST, 0x24, 0x0); done(cmd); } static struct req_msg *stex_alloc_req(struct st_hba *hba) { struct req_msg *req = hba->dma_mem + hba->req_head * hba->rq_size; ++hba->req_head; hba->req_head %= hba->rq_count+1; return req; } static struct req_msg *stex_ss_alloc_req(struct st_hba *hba) { return (struct req_msg *)(hba->dma_mem + hba->req_head * hba->rq_size + sizeof(struct st_msg_header)); } static int stex_map_sg(struct st_hba *hba, struct req_msg *req, struct st_ccb *ccb) { struct scsi_cmnd *cmd; struct scatterlist *sg; struct st_sgtable *dst; struct st_sgitem *table; int i, nseg; cmd = ccb->cmd; nseg = scsi_dma_map(cmd); BUG_ON(nseg < 0); if (nseg) { dst = (struct st_sgtable *)req->variable; ccb->sg_count = nseg; dst->sg_count = cpu_to_le16((u16)nseg); dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize); dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd)); table = (struct st_sgitem *)(dst + 1); scsi_for_each_sg(cmd, sg, nseg, i) { table[i].count = cpu_to_le32((u32)sg_dma_len(sg)); table[i].addr = cpu_to_le64(sg_dma_address(sg)); table[i].ctrl = SG_CF_64B | SG_CF_HOST; } table[--i].ctrl |= SG_CF_EOT; } return nseg; } static int stex_ss_map_sg(struct st_hba *hba, struct req_msg *req, struct st_ccb *ccb) { struct scsi_cmnd *cmd; struct scatterlist *sg; struct st_sgtable *dst; struct st_ss_sgitem *table; int i, nseg; cmd = ccb->cmd; nseg = scsi_dma_map(cmd); BUG_ON(nseg < 0); if (nseg) { dst = (struct st_sgtable *)req->variable; ccb->sg_count = nseg; dst->sg_count = cpu_to_le16((u16)nseg); dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize); dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd)); table = (struct 
st_ss_sgitem *)(dst + 1); scsi_for_each_sg(cmd, sg, nseg, i) { table[i].count = cpu_to_le32((u32)sg_dma_len(sg)); table[i].addr = cpu_to_le32(sg_dma_address(sg) & 0xffffffff); table[i].addr_hi = cpu_to_le32((sg_dma_address(sg) >> 16) >> 16); } } return nseg; } static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb) { struct st_frame *p; size_t count = sizeof(struct st_frame); p = hba->copy_buffer; scsi_sg_copy_to_buffer(ccb->cmd, p, count); memset(p->base, 0, sizeof(u32)*6); *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0); p->rom_addr = 0; p->drv_ver.major = ST_VER_MAJOR; p->drv_ver.minor = ST_VER_MINOR; p->drv_ver.oem = ST_OEM; p->drv_ver.build = ST_BUILD_VER; p->bus = hba->pdev->bus->number; p->slot = hba->pdev->devfn; p->irq_level = 0; p->irq_vec = hba->pdev->irq; p->id = hba->pdev->vendor << 16 | hba->pdev->device; p->subid = hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device; scsi_sg_copy_from_buffer(ccb->cmd, p, count); } static void stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag) { req->tag = cpu_to_le16(tag); hba->ccb[tag].req = req; hba->out_req_cnt++; writel(hba->req_head, hba->mmio_base + IMR0); writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL); readl(hba->mmio_base + IDBL); /* flush */ } static void stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag) { struct scsi_cmnd *cmd; struct st_msg_header *msg_h; dma_addr_t addr; req->tag = cpu_to_le16(tag); hba->ccb[tag].req = req; hba->out_req_cnt++; cmd = hba->ccb[tag].cmd; msg_h = (struct st_msg_header *)req - 1; if (likely(cmd)) { msg_h->channel = (u8)cmd->device->channel; msg_h->timeout = cpu_to_le16(cmd->request->timeout/HZ); } addr = hba->dma_handle + hba->req_head * hba->rq_size; addr += (hba->ccb[tag].sg_count+4)/11; msg_h->handle = cpu_to_le64(addr); ++hba->req_head; hba->req_head %= hba->rq_count+1; writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI); readl(hba->mmio_base + YH2I_REQ_HI); /* flush */ 
writel(addr, hba->mmio_base + YH2I_REQ); readl(hba->mmio_base + YH2I_REQ); /* flush */ } static int stex_slave_alloc(struct scsi_device *sdev) { /* Cheat: usually extracted from Inquiry data */ sdev->tagged_supported = 1; scsi_activate_tcq(sdev, sdev->host->can_queue); return 0; } static int stex_slave_config(struct scsi_device *sdev) { sdev->use_10_for_rw = 1; sdev->use_10_for_ms = 1; blk_queue_rq_timeout(sdev->request_queue, 60 * HZ); sdev->tagged_supported = 1; return 0; } static void stex_slave_destroy(struct scsi_device *sdev) { scsi_deactivate_tcq(sdev, 1); } static int stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { struct st_hba *hba; struct Scsi_Host *host; unsigned int id, lun; struct req_msg *req; u16 tag; host = cmd->device->host; id = cmd->device->id; lun = cmd->device->lun; hba = (struct st_hba *) &host->hostdata[0]; if (unlikely(hba->mu_status == MU_STATE_RESETTING)) return SCSI_MLQUEUE_HOST_BUSY; switch (cmd->cmnd[0]) { case MODE_SENSE_10: { static char ms10_caching_page[12] = { 0, 0x12, 0, 0, 0, 0, 0, 0, 0x8, 0xa, 0x4, 0 }; unsigned char page; page = cmd->cmnd[2] & 0x3f; if (page == 0x8 || page == 0x3f) { scsi_sg_copy_from_buffer(cmd, ms10_caching_page, sizeof(ms10_caching_page)); cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; done(cmd); } else stex_invalid_field(cmd, done); return 0; } case REPORT_LUNS: /* * The shasta firmware does not report actual luns in the * target, so fail the command to force sequential lun scan. * Also, the console device does not support this command. 
*/ if (hba->cardtype == st_shasta || id == host->max_id - 1) { stex_invalid_field(cmd, done); return 0; } break; case TEST_UNIT_READY: if (id == host->max_id - 1) { cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; done(cmd); return 0; } break; case INQUIRY: if (lun >= host->max_lun) { cmd->result = DID_NO_CONNECT << 16; done(cmd); return 0; } if (id != host->max_id - 1) break; if (!lun && !cmd->device->channel && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) { scsi_sg_copy_from_buffer(cmd, (void *)console_inq_page, sizeof(console_inq_page)); cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; done(cmd); } else stex_invalid_field(cmd, done); return 0; case PASSTHRU_CMD: if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) { struct st_drvver ver; size_t cp_len = sizeof(ver); ver.major = ST_VER_MAJOR; ver.minor = ST_VER_MINOR; ver.oem = ST_OEM; ver.build = ST_BUILD_VER; ver.signature[0] = PASSTHRU_SIGNATURE; ver.console_id = host->max_id - 1; ver.host_no = hba->host->host_no; cp_len = scsi_sg_copy_from_buffer(cmd, &ver, cp_len); cmd->result = sizeof(ver) == cp_len ? 
DID_OK << 16 | COMMAND_COMPLETE << 8 :
			DID_ERROR << 16 | COMMAND_COMPLETE << 8;
			done(cmd);
			return 0;
		}
	default:
		break;
	}

	cmd->scsi_done = done;

	/* The block-layer tag indexes the per-host ccb table. */
	tag = cmd->request->tag;

	if (unlikely(tag >= host->can_queue))
		return SCSI_MLQUEUE_HOST_BUSY;

	/* Build the firmware request message (cardtype-specific). */
	req = hba->alloc_rq(hba);

	req->lun = lun;
	req->target = id;

	/* cdb */
	memcpy(req->cdb, cmd->cmnd, STEX_CDB_LENGTH);

	if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		req->data_dir = MSG_DATA_DIR_IN;
	else if (cmd->sc_data_direction == DMA_TO_DEVICE)
		req->data_dir = MSG_DATA_DIR_OUT;
	else
		req->data_dir = MSG_DATA_DIR_ND;

	hba->ccb[tag].cmd = cmd;
	hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE;
	hba->ccb[tag].sense_buffer = cmd->sense_buffer;

	if (!hba->map_sg(hba, req, &hba->ccb[tag])) {
		/* No data phase: clear the sg table header in the request. */
		hba->ccb[tag].sg_count = 0;
		memset(&req->variable[0], 0, 8);
	}

	hba->send(hba, req, tag);
	return 0;
}

static DEF_SCSI_QCMD(stex_queuecommand)

/*
 * Translate the firmware's srb_status/scsi_status pair of a completed
 * ccb into a SCSI midlayer result code and complete the command.
 */
static void stex_scsi_done(struct st_ccb *ccb)
{
	struct scsi_cmnd *cmd = ccb->cmd;
	int result;

	if (ccb->srb_status == SRB_STATUS_SUCCESS || ccb->srb_status == 0) {
		/* Request reached the device: pass the SCSI status up. */
		result = ccb->scsi_status;
		switch (ccb->scsi_status) {
		case SAM_STAT_GOOD:
			result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
			break;
		case SAM_STAT_CHECK_CONDITION:
			result |= DRIVER_SENSE << 24;
			break;
		case SAM_STAT_BUSY:
			result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
			break;
		default:
			result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8;
			break;
		}
	}
	else if (ccb->srb_status & SRB_SEE_SENSE)
		/* Firmware supplied sense data (already copied into the
		   command's sense buffer by stex_copy_data). */
		result = DRIVER_SENSE << 24 | SAM_STAT_CHECK_CONDITION;
	else switch (ccb->srb_status) {
		case SRB_STATUS_SELECTION_TIMEOUT:
			result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
			break;
		case SRB_STATUS_BUSY:
			result = DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
			break;
		case SRB_STATUS_INVALID_REQUEST:
		case SRB_STATUS_ERROR:
		default:
			result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
			break;
	}

	cmd->result = result;
	cmd->scsi_done(cmd);
}

/*
 * Copy the variable part of a status payload: sense data when the
 * firmware reported other than GOOD, response data otherwise.
 */
static void stex_copy_data(struct st_ccb *ccb,
	struct status_msg *resp, unsigned int variable)
{
	if (resp->scsi_status != SAM_STAT_GOOD) {
if (ccb->sense_buffer != NULL) memcpy(ccb->sense_buffer, resp->variable, min(variable, ccb->sense_bufflen)); return; } if (ccb->cmd == NULL) return; scsi_sg_copy_from_buffer(ccb->cmd, resp->variable, variable); } static void stex_check_cmd(struct st_hba *hba, struct st_ccb *ccb, struct status_msg *resp) { if (ccb->cmd->cmnd[0] == MGT_CMD && resp->scsi_status != SAM_STAT_CHECK_CONDITION) scsi_set_resid(ccb->cmd, scsi_bufflen(ccb->cmd) - le32_to_cpu(*(__le32 *)&resp->variable[0])); } static void stex_mu_intr(struct st_hba *hba, u32 doorbell) { void __iomem *base = hba->mmio_base; struct status_msg *resp; struct st_ccb *ccb; unsigned int size; u16 tag; if (unlikely(!(doorbell & MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED))) return; /* status payloads */ hba->status_head = readl(base + OMR1); if (unlikely(hba->status_head > hba->sts_count)) { printk(KERN_WARNING DRV_NAME "(%s): invalid status head\n", pci_name(hba->pdev)); return; } /* * it's not a valid status payload if: * 1. there are no pending requests(e.g. during init stage) * 2. 
there are some pending requests, but the controller is in * reset status, and its type is not st_yosemite * firmware of st_yosemite in reset status will return pending requests * to driver, so we allow it to pass */ if (unlikely(hba->out_req_cnt <= 0 || (hba->mu_status == MU_STATE_RESETTING && hba->cardtype != st_yosemite))) { hba->status_tail = hba->status_head; goto update_status; } while (hba->status_tail != hba->status_head) { resp = stex_get_status(hba); tag = le16_to_cpu(resp->tag); if (unlikely(tag >= hba->host->can_queue)) { printk(KERN_WARNING DRV_NAME "(%s): invalid tag\n", pci_name(hba->pdev)); continue; } hba->out_req_cnt--; ccb = &hba->ccb[tag]; if (unlikely(hba->wait_ccb == ccb)) hba->wait_ccb = NULL; if (unlikely(ccb->req == NULL)) { printk(KERN_WARNING DRV_NAME "(%s): lagging req\n", pci_name(hba->pdev)); continue; } size = resp->payload_sz * sizeof(u32); /* payload size */ if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN || size > sizeof(*resp))) { printk(KERN_WARNING DRV_NAME "(%s): bad status size\n", pci_name(hba->pdev)); } else { size -= sizeof(*resp) - STATUS_VAR_LEN; /* copy size */ if (size) stex_copy_data(ccb, resp, size); } ccb->req = NULL; ccb->srb_status = resp->srb_status; ccb->scsi_status = resp->scsi_status; if (likely(ccb->cmd != NULL)) { if (hba->cardtype == st_yosemite) stex_check_cmd(hba, ccb, resp); if (unlikely(ccb->cmd->cmnd[0] == PASSTHRU_CMD && ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER)) stex_controller_info(hba, ccb); scsi_dma_unmap(ccb->cmd); stex_scsi_done(ccb); } else ccb->req_type = 0; } update_status: writel(hba->status_head, base + IMR1); readl(base + IMR1); /* flush */ } static irqreturn_t stex_intr(int irq, void *__hba) { struct st_hba *hba = __hba; void __iomem *base = hba->mmio_base; u32 data; unsigned long flags; spin_lock_irqsave(hba->host->host_lock, flags); data = readl(base + ODBL); if (data && data != 0xffffffff) { /* clear the interrupt */ writel(data, base + ODBL); readl(base + ODBL); /* flush */ 
stex_mu_intr(hba, data); spin_unlock_irqrestore(hba->host->host_lock, flags); if (unlikely(data & MU_OUTBOUND_DOORBELL_REQUEST_RESET && hba->cardtype == st_shasta)) queue_work(hba->work_q, &hba->reset_work); return IRQ_HANDLED; } spin_unlock_irqrestore(hba->host->host_lock, flags); return IRQ_NONE; } static void stex_ss_mu_intr(struct st_hba *hba) { struct status_msg *resp; struct st_ccb *ccb; __le32 *scratch; unsigned int size; int count = 0; u32 value; u16 tag; if (unlikely(hba->out_req_cnt <= 0 || hba->mu_status == MU_STATE_RESETTING)) return; while (count < hba->sts_count) { scratch = hba->scratch + hba->status_tail; value = le32_to_cpu(*scratch); if (unlikely(!(value & SS_STS_NORMAL))) return; resp = hba->status_buffer + hba->status_tail; *scratch = 0; ++count; ++hba->status_tail; hba->status_tail %= hba->sts_count+1; tag = (u16)value; if (unlikely(tag >= hba->host->can_queue)) { printk(KERN_WARNING DRV_NAME "(%s): invalid tag\n", pci_name(hba->pdev)); continue; } hba->out_req_cnt--; ccb = &hba->ccb[tag]; if (unlikely(hba->wait_ccb == ccb)) hba->wait_ccb = NULL; if (unlikely(ccb->req == NULL)) { printk(KERN_WARNING DRV_NAME "(%s): lagging req\n", pci_name(hba->pdev)); continue; } ccb->req = NULL; if (likely(value & SS_STS_DONE)) { /* normal case */ ccb->srb_status = SRB_STATUS_SUCCESS; ccb->scsi_status = SAM_STAT_GOOD; } else { ccb->srb_status = resp->srb_status; ccb->scsi_status = resp->scsi_status; size = resp->payload_sz * sizeof(u32); if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN || size > sizeof(*resp))) { printk(KERN_WARNING DRV_NAME "(%s): bad status size\n", pci_name(hba->pdev)); } else { size -= sizeof(*resp) - STATUS_VAR_LEN; if (size) stex_copy_data(ccb, resp, size); } if (likely(ccb->cmd != NULL)) stex_check_cmd(hba, ccb, resp); } if (likely(ccb->cmd != NULL)) { scsi_dma_unmap(ccb->cmd); stex_scsi_done(ccb); } else ccb->req_type = 0; } } static irqreturn_t stex_ss_intr(int irq, void *__hba) { struct st_hba *hba = __hba; void __iomem *base = 
hba->mmio_base; u32 data; unsigned long flags; spin_lock_irqsave(hba->host->host_lock, flags); data = readl(base + YI2H_INT); if (data && data != 0xffffffff) { /* clear the interrupt */ writel(data, base + YI2H_INT_C); stex_ss_mu_intr(hba); spin_unlock_irqrestore(hba->host->host_lock, flags); if (unlikely(data & SS_I2H_REQUEST_RESET)) queue_work(hba->work_q, &hba->reset_work); return IRQ_HANDLED; } spin_unlock_irqrestore(hba->host->host_lock, flags); return IRQ_NONE; } static int stex_common_handshake(struct st_hba *hba) { void __iomem *base = hba->mmio_base; struct handshake_frame *h; dma_addr_t status_phys; u32 data; unsigned long before; if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) { writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL); readl(base + IDBL); before = jiffies; while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) { if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { printk(KERN_ERR DRV_NAME "(%s): no handshake signature\n", pci_name(hba->pdev)); return -1; } rmb(); msleep(1); } } udelay(10); data = readl(base + OMR1); if ((data & 0xffff0000) == MU_HANDSHAKE_SIGNATURE_HALF) { data &= 0x0000ffff; if (hba->host->can_queue > data) { hba->host->can_queue = data; hba->host->cmd_per_lun = data; } } h = (struct handshake_frame *)hba->status_buffer; h->rb_phy = cpu_to_le64(hba->dma_handle); h->req_sz = cpu_to_le16(hba->rq_size); h->req_cnt = cpu_to_le16(hba->rq_count+1); h->status_sz = cpu_to_le16(sizeof(struct status_msg)); h->status_cnt = cpu_to_le16(hba->sts_count+1); stex_gettime(&h->hosttime); h->partner_type = HMU_PARTNER_TYPE; if (hba->extra_offset) { h->extra_offset = cpu_to_le32(hba->extra_offset); h->extra_size = cpu_to_le32(hba->dma_size - hba->extra_offset); } else h->extra_offset = h->extra_size = 0; status_phys = hba->dma_handle + (hba->rq_count+1) * hba->rq_size; writel(status_phys, base + IMR0); readl(base + IMR0); writel((status_phys >> 16) >> 16, base + IMR1); readl(base + IMR1); writel((status_phys >> 16) >> 16, base + OMR0); /* old 
fw compatible */ readl(base + OMR0); writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL); readl(base + IDBL); /* flush */ udelay(10); before = jiffies; while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) { if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { printk(KERN_ERR DRV_NAME "(%s): no signature after handshake frame\n", pci_name(hba->pdev)); return -1; } rmb(); msleep(1); } writel(0, base + IMR0); readl(base + IMR0); writel(0, base + OMR0); readl(base + OMR0); writel(0, base + IMR1); readl(base + IMR1); writel(0, base + OMR1); readl(base + OMR1); /* flush */ return 0; } static int stex_ss_handshake(struct st_hba *hba) { void __iomem *base = hba->mmio_base; struct st_msg_header *msg_h; struct handshake_frame *h; __le32 *scratch; u32 data, scratch_size; unsigned long before; int ret = 0; before = jiffies; while ((readl(base + YIOA_STATUS) & SS_MU_OPERATIONAL) == 0) { if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { printk(KERN_ERR DRV_NAME "(%s): firmware not operational\n", pci_name(hba->pdev)); return -1; } msleep(1); } msg_h = (struct st_msg_header *)hba->dma_mem; msg_h->handle = cpu_to_le64(hba->dma_handle); msg_h->flag = SS_HEAD_HANDSHAKE; h = (struct handshake_frame *)(msg_h + 1); h->rb_phy = cpu_to_le64(hba->dma_handle); h->req_sz = cpu_to_le16(hba->rq_size); h->req_cnt = cpu_to_le16(hba->rq_count+1); h->status_sz = cpu_to_le16(sizeof(struct status_msg)); h->status_cnt = cpu_to_le16(hba->sts_count+1); stex_gettime(&h->hosttime); h->partner_type = HMU_PARTNER_TYPE; h->extra_offset = h->extra_size = 0; scratch_size = (hba->sts_count+1)*sizeof(u32); h->scratch_size = cpu_to_le32(scratch_size); data = readl(base + YINT_EN); data &= ~4; writel(data, base + YINT_EN); writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI); readl(base + YH2I_REQ_HI); writel(hba->dma_handle, base + YH2I_REQ); readl(base + YH2I_REQ); /* flush */ scratch = hba->scratch; before = jiffies; while (!(le32_to_cpu(*scratch) & SS_STS_HANDSHAKE)) { if (time_after(jiffies, 
before + MU_MAX_DELAY * HZ)) { printk(KERN_ERR DRV_NAME "(%s): no signature after handshake frame\n", pci_name(hba->pdev)); ret = -1; break; } rmb(); msleep(1); } memset(scratch, 0, scratch_size); msg_h->flag = 0; return ret; } static int stex_handshake(struct st_hba *hba) { int err; unsigned long flags; unsigned int mu_status; err = (hba->cardtype == st_yel) ? stex_ss_handshake(hba) : stex_common_handshake(hba); spin_lock_irqsave(hba->host->host_lock, flags); mu_status = hba->mu_status; if (err == 0) { hba->req_head = 0; hba->req_tail = 0; hba->status_head = 0; hba->status_tail = 0; hba->out_req_cnt = 0; hba->mu_status = MU_STATE_STARTED; } else hba->mu_status = MU_STATE_FAILED; if (mu_status == MU_STATE_RESETTING) wake_up_all(&hba->reset_waitq); spin_unlock_irqrestore(hba->host->host_lock, flags); return err; } static int stex_abort(struct scsi_cmnd *cmd) { struct Scsi_Host *host = cmd->device->host; struct st_hba *hba = (struct st_hba *)host->hostdata; u16 tag = cmd->request->tag; void __iomem *base; u32 data; int result = SUCCESS; unsigned long flags; printk(KERN_INFO DRV_NAME "(%s): aborting command\n", pci_name(hba->pdev)); scsi_print_command(cmd); base = hba->mmio_base; spin_lock_irqsave(host->host_lock, flags); if (tag < host->can_queue && hba->ccb[tag].req && hba->ccb[tag].cmd == cmd) hba->wait_ccb = &hba->ccb[tag]; else goto out; if (hba->cardtype == st_yel) { data = readl(base + YI2H_INT); if (data == 0 || data == 0xffffffff) goto fail_out; writel(data, base + YI2H_INT_C); stex_ss_mu_intr(hba); } else { data = readl(base + ODBL); if (data == 0 || data == 0xffffffff) goto fail_out; writel(data, base + ODBL); readl(base + ODBL); /* flush */ stex_mu_intr(hba, data); } if (hba->wait_ccb == NULL) { printk(KERN_WARNING DRV_NAME "(%s): lost interrupt\n", pci_name(hba->pdev)); goto out; } fail_out: scsi_dma_unmap(cmd); hba->wait_ccb->req = NULL; /* nullify the req's future return */ hba->wait_ccb = NULL; result = FAILED; out: 
spin_unlock_irqrestore(host->host_lock, flags);
	return result;
}

/*
 * Hard-reset an st_shasta controller by pulsing the secondary-bus
 * reset bit of the upstream PCI bridge, then waiting until the adapter
 * answers PCI cycles again.  The PCI config header is saved/restored
 * around the reset because the bus reset clears it.
 */
static void stex_hard_reset(struct st_hba *hba)
{
	struct pci_bus *bus;
	int i;
	u16 pci_cmd;
	u8 pci_bctl;

	/* Save the 64-byte PCI config header (16 dwords). */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(hba->pdev, i * 4,
			&hba->pdev->saved_config_space[i]);

	/* Reset secondary bus. Our controller(MU/ATU) is the only device on
	   secondary bus. Consult Intel 80331/3 developer's manual for detail */
	bus = hba->pdev->bus;
	pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &pci_bctl);
	pci_bctl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);

	/*
	 * 1 ms may be enough for 8-port controllers. But 16-port controllers
	 * require more time to finish bus reset. Use 100 ms here for safety
	 */
	msleep(100);
	pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);

	/* Poll until the device responds to config reads (0xffff means no
	   response) and has bus mastering enabled again. */
	for (i = 0; i < MU_HARD_RESET_WAIT; i++) {
		pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd != 0xffff && (pci_cmd & PCI_COMMAND_MASTER))
			break;
		msleep(1);
	}

	ssleep(5);
	/* Restore the saved PCI config header. */
	for (i = 0; i < 16; i++)
		pci_write_config_dword(hba->pdev, i * 4,
			hba->pdev->saved_config_space[i]);
}

/*
 * Soft reset for st_yosemite: ring the reset doorbell and wait for the
 * firmware to return all outstanding requests.  Returns 0 on success,
 * -1 on timeout; updates mu_status and wakes any reset waiters.
 */
static int stex_yos_reset(struct st_hba *hba)
{
	void __iomem *base;
	unsigned long flags, before;
	int ret = 0;

	base = hba->mmio_base;
	writel(MU_INBOUND_DOORBELL_RESET, base + IDBL);
	readl(base + IDBL); /* flush */
	before = jiffies;
	/* out_req_cnt is decremented by the interrupt handler as the
	   firmware completes the pending requests. */
	while (hba->out_req_cnt > 0) {
		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): reset timeout\n", pci_name(hba->pdev));
			ret = -1;
			break;
		}
		msleep(1);
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ret == -1)
		hba->mu_status = MU_STATE_FAILED;
	else
		hba->mu_status = MU_STATE_STARTED;
	wake_up_all(&hba->reset_waitq);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}

/* Reset an st_yel controller through its host-to-IOP reset interrupt. */
static void stex_ss_reset(struct st_hba *hba)
{
	writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT);
	readl(hba->mmio_base + YH2I_INT);
	ssleep(5);
}

static int stex_do_reset(struct
st_hba *hba) { struct st_ccb *ccb; unsigned long flags; unsigned int mu_status = MU_STATE_RESETTING; u16 tag; spin_lock_irqsave(hba->host->host_lock, flags); if (hba->mu_status == MU_STATE_STARTING) { spin_unlock_irqrestore(hba->host->host_lock, flags); printk(KERN_INFO DRV_NAME "(%s): request reset during init\n", pci_name(hba->pdev)); return 0; } while (hba->mu_status == MU_STATE_RESETTING) { spin_unlock_irqrestore(hba->host->host_lock, flags); wait_event_timeout(hba->reset_waitq, hba->mu_status != MU_STATE_RESETTING, MU_MAX_DELAY * HZ); spin_lock_irqsave(hba->host->host_lock, flags); mu_status = hba->mu_status; } if (mu_status != MU_STATE_RESETTING) { spin_unlock_irqrestore(hba->host->host_lock, flags); return (mu_status == MU_STATE_STARTED) ? 0 : -1; } hba->mu_status = MU_STATE_RESETTING; spin_unlock_irqrestore(hba->host->host_lock, flags); if (hba->cardtype == st_yosemite) return stex_yos_reset(hba); if (hba->cardtype == st_shasta) stex_hard_reset(hba); else if (hba->cardtype == st_yel) stex_ss_reset(hba); spin_lock_irqsave(hba->host->host_lock, flags); for (tag = 0; tag < hba->host->can_queue; tag++) { ccb = &hba->ccb[tag]; if (ccb->req == NULL) continue; ccb->req = NULL; if (ccb->cmd) { scsi_dma_unmap(ccb->cmd); ccb->cmd->result = DID_RESET << 16; ccb->cmd->scsi_done(ccb->cmd); ccb->cmd = NULL; } } spin_unlock_irqrestore(hba->host->host_lock, flags); if (stex_handshake(hba) == 0) return 0; printk(KERN_WARNING DRV_NAME "(%s): resetting: handshake failed\n", pci_name(hba->pdev)); return -1; } static int stex_reset(struct scsi_cmnd *cmd) { struct st_hba *hba; hba = (struct st_hba *) &cmd->device->host->hostdata[0]; printk(KERN_INFO DRV_NAME "(%s): resetting host\n", pci_name(hba->pdev)); scsi_print_command(cmd); return stex_do_reset(hba) ? 
FAILED : SUCCESS; } static void stex_reset_work(struct work_struct *work) { struct st_hba *hba = container_of(work, struct st_hba, reset_work); stex_do_reset(hba); } static int stex_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) { int heads = 255, sectors = 63; if (capacity < 0x200000) { heads = 64; sectors = 32; } sector_div(capacity, heads * sectors); geom[0] = heads; geom[1] = sectors; geom[2] = capacity; return 0; } static struct scsi_host_template driver_template = { .module = THIS_MODULE, .name = DRV_NAME, .proc_name = DRV_NAME, .bios_param = stex_biosparam, .queuecommand = stex_queuecommand, .slave_alloc = stex_slave_alloc, .slave_configure = stex_slave_config, .slave_destroy = stex_slave_destroy, .eh_abort_handler = stex_abort, .eh_host_reset_handler = stex_reset, .this_id = -1, }; static struct pci_device_id stex_pci_tbl[] = { /* st_shasta */ { 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, /* SuperTrak EX8350/8300/16350/16300 */ { 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, /* SuperTrak EX12350 */ { 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, /* SuperTrak EX4350 */ { 0x105a, 0xe350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, /* SuperTrak EX24350 */ /* st_vsc */ { 0x105a, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc }, /* st_yosemite */ { 0x105a, 0x8650, 0x105a, PCI_ANY_ID, 0, 0, st_yosemite }, /* st_seq */ { 0x105a, 0x3360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_seq }, /* st_yel */ { 0x105a, 0x8650, 0x1033, PCI_ANY_ID, 0, 0, st_yel }, { 0x105a, 0x8760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_yel }, { } /* terminate list */ }; static struct st_card_info stex_card_info[] = { /* st_shasta */ { .max_id = 17, .max_lun = 8, .max_channel = 0, .rq_count = 32, .rq_size = 1048, .sts_count = 32, .alloc_rq = stex_alloc_req, .map_sg = stex_map_sg, .send = stex_send_cmd, }, /* st_vsc */ { .max_id = 129, .max_lun = 1, .max_channel = 0, .rq_count = 32, .rq_size = 1048, .sts_count = 32, .alloc_rq 
= stex_alloc_req, .map_sg = stex_map_sg, .send = stex_send_cmd, }, /* st_yosemite */ { .max_id = 2, .max_lun = 256, .max_channel = 0, .rq_count = 256, .rq_size = 1048, .sts_count = 256, .alloc_rq = stex_alloc_req, .map_sg = stex_map_sg, .send = stex_send_cmd, }, /* st_seq */ { .max_id = 129, .max_lun = 1, .max_channel = 0, .rq_count = 32, .rq_size = 1048, .sts_count = 32, .alloc_rq = stex_alloc_req, .map_sg = stex_map_sg, .send = stex_send_cmd, }, /* st_yel */ { .max_id = 129, .max_lun = 256, .max_channel = 3, .rq_count = 801, .rq_size = 512, .sts_count = 801, .alloc_rq = stex_ss_alloc_req, .map_sg = stex_ss_map_sg, .send = stex_ss_send_cmd, }, }; static int stex_set_dma_mask(struct pci_dev * pdev) { int ret; if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) return 0; ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (!ret) ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); return ret; } static int stex_request_irq(struct st_hba *hba) { struct pci_dev *pdev = hba->pdev; int status; if (msi) { status = pci_enable_msi(pdev); if (status != 0) printk(KERN_ERR DRV_NAME "(%s): error %d setting up MSI\n", pci_name(pdev), status); else hba->msi_enabled = 1; } else hba->msi_enabled = 0; status = request_irq(pdev->irq, hba->cardtype == st_yel ? 
stex_ss_intr : stex_intr, IRQF_SHARED, DRV_NAME, hba); if (status != 0) { if (hba->msi_enabled) pci_disable_msi(pdev); } return status; } static void stex_free_irq(struct st_hba *hba) { struct pci_dev *pdev = hba->pdev; free_irq(pdev->irq, hba); if (hba->msi_enabled) pci_disable_msi(pdev); } static int __devinit stex_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct st_hba *hba; struct Scsi_Host *host; const struct st_card_info *ci = NULL; u32 sts_offset, cp_offset, scratch_offset; int err; err = pci_enable_device(pdev); if (err) return err; pci_set_master(pdev); host = scsi_host_alloc(&driver_template, sizeof(struct st_hba)); if (!host) { printk(KERN_ERR DRV_NAME "(%s): scsi_host_alloc failed\n", pci_name(pdev)); err = -ENOMEM; goto out_disable; } hba = (struct st_hba *)host->hostdata; memset(hba, 0, sizeof(struct st_hba)); err = pci_request_regions(pdev, DRV_NAME); if (err < 0) { printk(KERN_ERR DRV_NAME "(%s): request regions failed\n", pci_name(pdev)); goto out_scsi_host_put; } hba->mmio_base = pci_ioremap_bar(pdev, 0); if ( !hba->mmio_base) { printk(KERN_ERR DRV_NAME "(%s): memory map failed\n", pci_name(pdev)); err = -ENOMEM; goto out_release_regions; } err = stex_set_dma_mask(pdev); if (err) { printk(KERN_ERR DRV_NAME "(%s): set dma mask failed\n", pci_name(pdev)); goto out_iounmap; } hba->cardtype = (unsigned int) id->driver_data; ci = &stex_card_info[hba->cardtype]; sts_offset = scratch_offset = (ci->rq_count+1) * ci->rq_size; if (hba->cardtype == st_yel) sts_offset += (ci->sts_count+1) * sizeof(u32); cp_offset = sts_offset + (ci->sts_count+1) * sizeof(struct status_msg); hba->dma_size = cp_offset + sizeof(struct st_frame); if (hba->cardtype == st_seq || (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) { hba->extra_offset = hba->dma_size; hba->dma_size += ST_ADDITIONAL_MEM; } hba->dma_mem = dma_alloc_coherent(&pdev->dev, hba->dma_size, &hba->dma_handle, GFP_KERNEL); if (!hba->dma_mem) { /* Retry minimum coherent mapping for 
st_seq and st_vsc */ if (hba->cardtype == st_seq || (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) { printk(KERN_WARNING DRV_NAME "(%s): allocating min buffer for controller\n", pci_name(pdev)); hba->dma_size = hba->extra_offset + ST_ADDITIONAL_MEM_MIN; hba->dma_mem = dma_alloc_coherent(&pdev->dev, hba->dma_size, &hba->dma_handle, GFP_KERNEL); } if (!hba->dma_mem) { err = -ENOMEM; printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n", pci_name(pdev)); goto out_iounmap; } } hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL); if (!hba->ccb) { err = -ENOMEM; printk(KERN_ERR DRV_NAME "(%s): ccb alloc failed\n", pci_name(pdev)); goto out_pci_free; } if (hba->cardtype == st_yel) hba->scratch = (__le32 *)(hba->dma_mem + scratch_offset); hba->status_buffer = (struct status_msg *)(hba->dma_mem + sts_offset); hba->copy_buffer = hba->dma_mem + cp_offset; hba->rq_count = ci->rq_count; hba->rq_size = ci->rq_size; hba->sts_count = ci->sts_count; hba->alloc_rq = ci->alloc_rq; hba->map_sg = ci->map_sg; hba->send = ci->send; hba->mu_status = MU_STATE_STARTING; if (hba->cardtype == st_yel) host->sg_tablesize = 38; else host->sg_tablesize = 32; host->can_queue = ci->rq_count; host->cmd_per_lun = ci->rq_count; host->max_id = ci->max_id; host->max_lun = ci->max_lun; host->max_channel = ci->max_channel; host->unique_id = host->host_no; host->max_cmd_len = STEX_CDB_LENGTH; hba->host = host; hba->pdev = pdev; init_waitqueue_head(&hba->reset_waitq); snprintf(hba->work_q_name, sizeof(hba->work_q_name), "stex_wq_%d", host->host_no); hba->work_q = create_singlethread_workqueue(hba->work_q_name); if (!hba->work_q) { printk(KERN_ERR DRV_NAME "(%s): create workqueue failed\n", pci_name(pdev)); err = -ENOMEM; goto out_ccb_free; } INIT_WORK(&hba->reset_work, stex_reset_work); err = stex_request_irq(hba); if (err) { printk(KERN_ERR DRV_NAME "(%s): request irq failed\n", pci_name(pdev)); goto out_free_wq; } err = stex_handshake(hba); if (err) goto out_free_irq; err = 
scsi_init_shared_tag_map(host, host->can_queue); if (err) { printk(KERN_ERR DRV_NAME "(%s): init shared queue failed\n", pci_name(pdev)); goto out_free_irq; } pci_set_drvdata(pdev, hba); err = scsi_add_host(host, &pdev->dev); if (err) { printk(KERN_ERR DRV_NAME "(%s): scsi_add_host failed\n", pci_name(pdev)); goto out_free_irq; } scsi_scan_host(host); return 0; out_free_irq: stex_free_irq(hba); out_free_wq: destroy_workqueue(hba->work_q); out_ccb_free: kfree(hba->ccb); out_pci_free: dma_free_coherent(&pdev->dev, hba->dma_size, hba->dma_mem, hba->dma_handle); out_iounmap: iounmap(hba->mmio_base); out_release_regions: pci_release_regions(pdev); out_scsi_host_put: scsi_host_put(host); out_disable: pci_disable_device(pdev); return err; } static void stex_hba_stop(struct st_hba *hba) { struct req_msg *req; struct st_msg_header *msg_h; unsigned long flags; unsigned long before; u16 tag = 0; spin_lock_irqsave(hba->host->host_lock, flags); req = hba->alloc_rq(hba); if (hba->cardtype == st_yel) { msg_h = (struct st_msg_header *)req - 1; memset(msg_h, 0, hba->rq_size); } else memset(req, 0, hba->rq_size); if (hba->cardtype == st_yosemite || hba->cardtype == st_yel) { req->cdb[0] = MGT_CMD; req->cdb[1] = MGT_CMD_SIGNATURE; req->cdb[2] = CTLR_CONFIG_CMD; req->cdb[3] = CTLR_SHUTDOWN; } else { req->cdb[0] = CONTROLLER_CMD; req->cdb[1] = CTLR_POWER_STATE_CHANGE; req->cdb[2] = CTLR_POWER_SAVING; } hba->ccb[tag].cmd = NULL; hba->ccb[tag].sg_count = 0; hba->ccb[tag].sense_bufflen = 0; hba->ccb[tag].sense_buffer = NULL; hba->ccb[tag].req_type = PASSTHRU_REQ_TYPE; hba->send(hba, req, tag); spin_unlock_irqrestore(hba->host->host_lock, flags); before = jiffies; while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) { if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) { hba->ccb[tag].req_type = 0; return; } msleep(1); } } static void stex_hba_free(struct st_hba *hba) { stex_free_irq(hba); destroy_workqueue(hba->work_q); iounmap(hba->mmio_base); pci_release_regions(hba->pdev); 
kfree(hba->ccb); dma_free_coherent(&hba->pdev->dev, hba->dma_size, hba->dma_mem, hba->dma_handle); } static void stex_remove(struct pci_dev *pdev) { struct st_hba *hba = pci_get_drvdata(pdev); scsi_remove_host(hba->host); pci_set_drvdata(pdev, NULL); stex_hba_stop(hba); stex_hba_free(hba); scsi_host_put(hba->host); pci_disable_device(pdev); } static void stex_shutdown(struct pci_dev *pdev) { struct st_hba *hba = pci_get_drvdata(pdev); stex_hba_stop(hba); } MODULE_DEVICE_TABLE(pci, stex_pci_tbl); static struct pci_driver stex_pci_driver = { .name = DRV_NAME, .id_table = stex_pci_tbl, .probe = stex_probe, .remove = __devexit_p(stex_remove), .shutdown = stex_shutdown, }; static int __init stex_init(void) { printk(KERN_INFO DRV_NAME ": Promise SuperTrak EX Driver version: %s\n", ST_DRIVER_VERSION); return pci_register_driver(&stex_pci_driver); } static void __exit stex_exit(void) { pci_unregister_driver(&stex_pci_driver); } module_init(stex_init); module_exit(stex_exit);
gpl-2.0
mpokwsths/Z3_kernel
drivers/pcmcia/sa1111_neponset.c
9354
3133
/* * linux/drivers/pcmcia/sa1100_neponset.c * * Neponset PCMCIA specific routines */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/init.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <mach/neponset.h> #include <asm/hardware/sa1111.h> #include "sa1111_generic.h" /* * Neponset uses the Maxim MAX1600, with the following connections: * * MAX1600 Neponset * * A0VCC SA-1111 GPIO A<1> * A1VCC SA-1111 GPIO A<0> * A0VPP CPLD NCR A0VPP * A1VPP CPLD NCR A1VPP * B0VCC SA-1111 GPIO A<2> * B1VCC SA-1111 GPIO A<3> * B0VPP ground (slot B is CF) * B1VPP ground (slot B is CF) * * VX VCC (5V) * VY VCC3_3 (3.3V) * 12INA 12V * 12INB ground (slot B is CF) * * The MAX1600 CODE pin is tied to ground, placing the device in * "Standard Intel code" mode. Refer to the Maxim data sheet for * the corresponding truth table. */ static int neponset_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) { struct sa1111_pcmcia_socket *s = to_skt(skt); unsigned int ncr_mask, ncr_set, pa_dwr_mask, pa_dwr_set; int ret; switch (skt->nr) { case 0: pa_dwr_mask = GPIO_A0 | GPIO_A1; ncr_mask = NCR_A0VPP | NCR_A1VPP; if (state->Vpp == 0) ncr_set = 0; else if (state->Vpp == 120) ncr_set = NCR_A1VPP; else if (state->Vpp == state->Vcc) ncr_set = NCR_A0VPP; else { printk(KERN_ERR "%s(): unrecognized VPP %u\n", __func__, state->Vpp); return -1; } break; case 1: pa_dwr_mask = GPIO_A2 | GPIO_A3; ncr_mask = 0; ncr_set = 0; if (state->Vpp != state->Vcc && state->Vpp != 0) { printk(KERN_ERR "%s(): CF slot cannot support VPP %u\n", __func__, state->Vpp); return -1; } break; default: return -1; } /* * pa_dwr_set is the mask for selecting Vcc on both sockets. * pa_dwr_mask selects which bits (and therefore socket) we change. 
*/ switch (state->Vcc) { default: case 0: pa_dwr_set = 0; break; case 33: pa_dwr_set = GPIO_A1|GPIO_A2; break; case 50: pa_dwr_set = GPIO_A0|GPIO_A3; break; } ret = sa1111_pcmcia_configure_socket(skt, state); if (ret == 0) { neponset_ncr_frob(ncr_mask, ncr_set); sa1111_set_io(s->dev, pa_dwr_mask, pa_dwr_set); } return ret; } static struct pcmcia_low_level neponset_pcmcia_ops = { .owner = THIS_MODULE, .configure_socket = neponset_pcmcia_configure_socket, .first = 0, .nr = 2, }; int pcmcia_neponset_init(struct sa1111_dev *sadev) { int ret = -ENODEV; if (machine_is_assabet()) { /* * Set GPIO_A<3:0> to be outputs for the MAX1600, * and switch to standby mode. */ sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0); sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0); sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0); sa11xx_drv_pcmcia_ops(&neponset_pcmcia_ops); ret = sa1111_pcmcia_add(sadev, &neponset_pcmcia_ops, sa11xx_drv_pcmcia_add_one); } return ret; }
gpl-2.0
1nv4d3r5/linux-1
drivers/i2c/busses/i2c-powermac.c
139
12800
/* i2c Support for Apple SMU Controller Copyright (c) 2005 Benjamin Herrenschmidt, IBM Corp. <benh@kernel.crashing.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/device.h> #include <linux/platform_device.h> #include <asm/prom.h> #include <asm/pmac_low_i2c.h> MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>"); MODULE_DESCRIPTION("I2C driver for Apple PowerMac"); MODULE_LICENSE("GPL"); /* * SMBUS-type transfer entrypoint */ static s32 i2c_powermac_smbus_xfer( struct i2c_adapter* adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data* data) { struct pmac_i2c_bus *bus = i2c_get_adapdata(adap); int rc = 0; int read = (read_write == I2C_SMBUS_READ); int addrdir = (addr << 1) | read; int mode, subsize, len; u32 subaddr; u8 *buf; u8 local[2]; if (size == I2C_SMBUS_QUICK || size == I2C_SMBUS_BYTE) { mode = pmac_i2c_mode_std; subsize = 0; subaddr = 0; } else { mode = read ? 
pmac_i2c_mode_combined : pmac_i2c_mode_stdsub; subsize = 1; subaddr = command; } switch (size) { case I2C_SMBUS_QUICK: buf = NULL; len = 0; break; case I2C_SMBUS_BYTE: case I2C_SMBUS_BYTE_DATA: buf = &data->byte; len = 1; break; case I2C_SMBUS_WORD_DATA: if (!read) { local[0] = data->word & 0xff; local[1] = (data->word >> 8) & 0xff; } buf = local; len = 2; break; /* Note that these are broken vs. the expected smbus API where * on reads, the length is actually returned from the function, * but I think the current API makes no sense and I don't want * any driver that I haven't verified for correctness to go * anywhere near a pmac i2c bus anyway ... * * I'm also not completely sure what kind of phases to do between * the actual command and the data (what I am _supposed_ to do that * is). For now, I assume writes are a single stream and reads have * a repeat start/addr phase (but not stop in between) */ case I2C_SMBUS_BLOCK_DATA: buf = data->block; len = data->block[0] + 1; break; case I2C_SMBUS_I2C_BLOCK_DATA: buf = &data->block[1]; len = data->block[0]; break; default: return -EINVAL; } rc = pmac_i2c_open(bus, 0); if (rc) { dev_err(&adap->dev, "Failed to open I2C, err %d\n", rc); return rc; } rc = pmac_i2c_setmode(bus, mode); if (rc) { dev_err(&adap->dev, "Failed to set I2C mode %d, err %d\n", mode, rc); goto bail; } rc = pmac_i2c_xfer(bus, addrdir, subsize, subaddr, buf, len); if (rc) { if (rc == -ENXIO) dev_dbg(&adap->dev, "I2C transfer at 0x%02x failed, size %d, " "err %d\n", addrdir >> 1, size, rc); else dev_err(&adap->dev, "I2C transfer at 0x%02x failed, size %d, " "err %d\n", addrdir >> 1, size, rc); goto bail; } if (size == I2C_SMBUS_WORD_DATA && read) { data->word = ((u16)local[1]) << 8; data->word |= local[0]; } bail: pmac_i2c_close(bus); return rc; } /* * Generic i2c master transfer entrypoint. This driver only support single * messages (for "lame i2c" transfers). 
Anything else should use the smbus * entry point */ static int i2c_powermac_master_xfer( struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct pmac_i2c_bus *bus = i2c_get_adapdata(adap); int rc = 0; int read; int addrdir; if (num != 1) { dev_err(&adap->dev, "Multi-message I2C transactions not supported\n"); return -EOPNOTSUPP; } if (msgs->flags & I2C_M_TEN) return -EINVAL; read = (msgs->flags & I2C_M_RD) != 0; addrdir = (msgs->addr << 1) | read; rc = pmac_i2c_open(bus, 0); if (rc) { dev_err(&adap->dev, "Failed to open I2C, err %d\n", rc); return rc; } rc = pmac_i2c_setmode(bus, pmac_i2c_mode_std); if (rc) { dev_err(&adap->dev, "Failed to set I2C mode %d, err %d\n", pmac_i2c_mode_std, rc); goto bail; } rc = pmac_i2c_xfer(bus, addrdir, 0, 0, msgs->buf, msgs->len); if (rc < 0) { if (rc == -ENXIO) dev_dbg(&adap->dev, "I2C %s 0x%02x failed, err %d\n", addrdir & 1 ? "read from" : "write to", addrdir >> 1, rc); else dev_err(&adap->dev, "I2C %s 0x%02x failed, err %d\n", addrdir & 1 ? "read from" : "write to", addrdir >> 1, rc); } bail: pmac_i2c_close(bus); return rc < 0 ? rc : 1; } static u32 i2c_powermac_func(struct i2c_adapter * adapter) { return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_I2C; } /* For now, we only handle smbus */ static const struct i2c_algorithm i2c_powermac_algorithm = { .smbus_xfer = i2c_powermac_smbus_xfer, .master_xfer = i2c_powermac_master_xfer, .functionality = i2c_powermac_func, }; static int __devexit i2c_powermac_remove(struct platform_device *dev) { struct i2c_adapter *adapter = platform_get_drvdata(dev); int rc; rc = i2c_del_adapter(adapter); /* We aren't that prepared to deal with this... 
*/ if (rc) printk(KERN_WARNING "i2c-powermac.c: Failed to remove bus %s !\n", adapter->name); platform_set_drvdata(dev, NULL); memset(adapter, 0, sizeof(*adapter)); return 0; } static u32 __devinit i2c_powermac_get_addr(struct i2c_adapter *adap, struct pmac_i2c_bus *bus, struct device_node *node) { const __be32 *prop; int len; /* First check for valid "reg" */ prop = of_get_property(node, "reg", &len); if (prop && (len >= sizeof(int))) return (be32_to_cpup(prop) & 0xff) >> 1; /* Then check old-style "i2c-address" */ prop = of_get_property(node, "i2c-address", &len); if (prop && (len >= sizeof(int))) return (be32_to_cpup(prop) & 0xff) >> 1; /* Now handle some devices with missing "reg" properties */ if (!strcmp(node->name, "cereal")) return 0x60; else if (!strcmp(node->name, "deq")) return 0x34; dev_warn(&adap->dev, "No i2c address for %s\n", node->full_name); return 0xffffffff; } static void __devinit i2c_powermac_create_one(struct i2c_adapter *adap, const char *type, u32 addr) { struct i2c_board_info info = {}; struct i2c_client *newdev; strncpy(info.type, type, sizeof(info.type)); info.addr = addr; newdev = i2c_new_device(adap, &info); if (!newdev) dev_err(&adap->dev, "i2c-powermac: Failure to register missing %s\n", type); } static void __devinit i2c_powermac_add_missing(struct i2c_adapter *adap, struct pmac_i2c_bus *bus, bool found_onyx) { struct device_node *busnode = pmac_i2c_get_bus_node(bus); int rc; /* Check for the onyx audio codec */ #define ONYX_REG_CONTROL 67 if (of_device_is_compatible(busnode, "k2-i2c") && !found_onyx) { union i2c_smbus_data data; rc = i2c_smbus_xfer(adap, 0x46, 0, I2C_SMBUS_READ, ONYX_REG_CONTROL, I2C_SMBUS_BYTE_DATA, &data); if (rc >= 0) i2c_powermac_create_one(adap, "MAC,pcm3052", 0x46); rc = i2c_smbus_xfer(adap, 0x47, 0, I2C_SMBUS_READ, ONYX_REG_CONTROL, I2C_SMBUS_BYTE_DATA, &data); if (rc >= 0) i2c_powermac_create_one(adap, "MAC,pcm3052", 0x47); } } static bool __devinit i2c_powermac_get_type(struct i2c_adapter *adap, struct 
device_node *node, u32 addr, char *type, int type_size) { char tmp[16]; /* Note: we to _NOT_ want the standard * i2c drivers to match with any of our powermac stuff * unless they have been specifically modified to handle * it on a case by case basis. For example, for thermal * control, things like lm75 etc... shall match with their * corresponding windfarm drivers, _NOT_ the generic ones, * so we force a prefix of AAPL, onto the modalias to * make that happen */ /* First try proper modalias */ if (of_modalias_node(node, tmp, sizeof(tmp)) >= 0) { snprintf(type, type_size, "MAC,%s", tmp); return true; } /* Now look for known workarounds */ if (!strcmp(node->name, "deq")) { /* Apple uses address 0x34 for TAS3001 and 0x35 for TAS3004 */ if (addr == 0x34) { snprintf(type, type_size, "MAC,tas3001"); return true; } else if (addr == 0x35) { snprintf(type, type_size, "MAC,tas3004"); return true; } } dev_err(&adap->dev, "i2c-powermac: modalias failure" " on %s\n", node->full_name); return false; } static void __devinit i2c_powermac_register_devices(struct i2c_adapter *adap, struct pmac_i2c_bus *bus) { struct i2c_client *newdev; struct device_node *node; bool found_onyx = 0; /* * In some cases we end up with the via-pmu node itself, in this * case we skip this function completely as the device-tree will * not contain anything useful. */ if (!strcmp(adap->dev.of_node->name, "via-pmu")) return; for_each_child_of_node(adap->dev.of_node, node) { struct i2c_board_info info = {}; u32 addr; /* Get address & channel */ addr = i2c_powermac_get_addr(adap, bus, node); if (addr == 0xffffffff) continue; /* Multibus setup, check channel */ if (!pmac_i2c_match_adapter(node, adap)) continue; dev_dbg(&adap->dev, "i2c-powermac: register %s\n", node->full_name); /* * Keep track of some device existence to handle * workarounds later. 
*/ if (of_device_is_compatible(node, "pcm3052")) found_onyx = true; /* Make up a modalias */ if (!i2c_powermac_get_type(adap, node, addr, info.type, sizeof(info.type))) { continue; } /* Fill out the rest of the info structure */ info.addr = addr; info.irq = irq_of_parse_and_map(node, 0); info.of_node = of_node_get(node); newdev = i2c_new_device(adap, &info); if (!newdev) { dev_err(&adap->dev, "i2c-powermac: Failure to register" " %s\n", node->full_name); of_node_put(node); /* We do not dispose of the interrupt mapping on * purpose. It's not necessary (interrupt cannot be * re-used) and somebody else might have grabbed it * via direct DT lookup so let's not bother */ continue; } } /* Additional workarounds */ i2c_powermac_add_missing(adap, bus, found_onyx); } static int __devinit i2c_powermac_probe(struct platform_device *dev) { struct pmac_i2c_bus *bus = dev->dev.platform_data; struct device_node *parent = NULL; struct i2c_adapter *adapter; const char *basename; int rc; if (bus == NULL) return -EINVAL; adapter = pmac_i2c_get_adapter(bus); /* Ok, now we need to make up a name for the interface that will * match what we used to do in the past, that is basically the * controller's parent device node for keywest. 
PMU didn't have a * naming convention and SMU has a different one */ switch(pmac_i2c_get_type(bus)) { case pmac_i2c_bus_keywest: parent = of_get_parent(pmac_i2c_get_controller(bus)); if (parent == NULL) return -EINVAL; basename = parent->name; break; case pmac_i2c_bus_pmu: basename = "pmu"; break; case pmac_i2c_bus_smu: /* This is not what we used to do but I'm fixing drivers at * the same time as this change */ basename = "smu"; break; default: return -EINVAL; } snprintf(adapter->name, sizeof(adapter->name), "%s %d", basename, pmac_i2c_get_channel(bus)); of_node_put(parent); platform_set_drvdata(dev, adapter); adapter->algo = &i2c_powermac_algorithm; i2c_set_adapdata(adapter, bus); adapter->dev.parent = &dev->dev; adapter->dev.of_node = dev->dev.of_node; rc = i2c_add_adapter(adapter); if (rc) { printk(KERN_ERR "i2c-powermac: Adapter %s registration " "failed\n", adapter->name); memset(adapter, 0, sizeof(*adapter)); } printk(KERN_INFO "PowerMac i2c bus %s registered\n", adapter->name); /* Cannot use of_i2c_register_devices() due to Apple device-tree * funkyness */ i2c_powermac_register_devices(adapter, bus); return rc; } static struct platform_driver i2c_powermac_driver = { .probe = i2c_powermac_probe, .remove = __devexit_p(i2c_powermac_remove), .driver = { .name = "i2c-powermac", .bus = &platform_bus_type, }, }; module_platform_driver(i2c_powermac_driver); MODULE_ALIAS("platform:i2c-powermac");
gpl-2.0
mjmccall/Kernel
drivers/md/raid0.c
139
14503
/* raid0.c : Multiple Devices driver for Linux Copyright (C) 1994-96 Marc ZYNGIER <zyngier@ufr-info-p7.ibp.fr> or <maz@gloups.fdn.fr> Copyright (C) 1999, 2000 Ingo Molnar, Red Hat RAID-0 management functions. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. You should have received a copy of the GNU General Public License (for example /usr/src/linux/COPYING); if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/raid/raid0.h> static void raid0_unplug(struct request_queue *q) { mddev_t *mddev = q->queuedata; raid0_conf_t *conf = mddev_to_conf(mddev); mdk_rdev_t **devlist = conf->strip_zone[0].dev; int i; for (i=0; i<mddev->raid_disks; i++) { struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev); blk_unplug(r_queue); } } static int raid0_congested(void *data, int bits) { mddev_t *mddev = data; raid0_conf_t *conf = mddev_to_conf(mddev); mdk_rdev_t **devlist = conf->strip_zone[0].dev; int i, ret = 0; for (i = 0; i < mddev->raid_disks && !ret ; i++) { struct request_queue *q = bdev_get_queue(devlist[i]->bdev); ret |= bdi_congested(&q->backing_dev_info, bits); } return ret; } static int create_strip_zones (mddev_t *mddev) { int i, c, j; sector_t current_start, curr_zone_start; sector_t min_spacing; raid0_conf_t *conf = mddev_to_conf(mddev); mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev; struct strip_zone *zone; int cnt; char b[BDEVNAME_SIZE]; /* * The number of 'same size groups' */ conf->nr_strip_zones = 0; list_for_each_entry(rdev1, &mddev->disks, same_set) { printk(KERN_INFO "raid0: looking at %s\n", bdevname(rdev1->bdev,b)); c = 0; list_for_each_entry(rdev2, &mddev->disks, same_set) { printk(KERN_INFO "raid0: comparing %s(%llu)", bdevname(rdev1->bdev,b), (unsigned long long)rdev1->size); printk(KERN_INFO " with %s(%llu)\n", 
bdevname(rdev2->bdev,b), (unsigned long long)rdev2->size); if (rdev2 == rdev1) { printk(KERN_INFO "raid0: END\n"); break; } if (rdev2->size == rdev1->size) { /* * Not unique, don't count it as a new * group */ printk(KERN_INFO "raid0: EQUAL\n"); c = 1; break; } printk(KERN_INFO "raid0: NOT EQUAL\n"); } if (!c) { printk(KERN_INFO "raid0: ==> UNIQUE\n"); conf->nr_strip_zones++; printk(KERN_INFO "raid0: %d zones\n", conf->nr_strip_zones); } } printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones); conf->strip_zone = kzalloc(sizeof(struct strip_zone)* conf->nr_strip_zones, GFP_KERNEL); if (!conf->strip_zone) return 1; conf->devlist = kzalloc(sizeof(mdk_rdev_t*)* conf->nr_strip_zones*mddev->raid_disks, GFP_KERNEL); if (!conf->devlist) return 1; /* The first zone must contain all devices, so here we check that * there is a proper alignment of slots to devices and find them all */ zone = &conf->strip_zone[0]; cnt = 0; smallest = NULL; zone->dev = conf->devlist; list_for_each_entry(rdev1, &mddev->disks, same_set) { int j = rdev1->raid_disk; if (j < 0 || j >= mddev->raid_disks) { printk(KERN_ERR "raid0: bad disk number %d - " "aborting!\n", j); goto abort; } if (zone->dev[j]) { printk(KERN_ERR "raid0: multiple devices for %d - " "aborting!\n", j); goto abort; } zone->dev[j] = rdev1; blk_queue_stack_limits(mddev->queue, rdev1->bdev->bd_disk->queue); /* as we don't honour merge_bvec_fn, we must never risk * violating it, so limit ->max_sector to one PAGE, as * a one page request is never in violation. 
*/ if (rdev1->bdev->bd_disk->queue->merge_bvec_fn && mddev->queue->max_sectors > (PAGE_SIZE>>9)) blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); if (!smallest || (rdev1->size <smallest->size)) smallest = rdev1; cnt++; } if (cnt != mddev->raid_disks) { printk(KERN_ERR "raid0: too few disks (%d of %d) - " "aborting!\n", cnt, mddev->raid_disks); goto abort; } zone->nb_dev = cnt; zone->sectors = smallest->size * cnt * 2; zone->zone_start = 0; current_start = smallest->size * 2; curr_zone_start = zone->sectors; /* now do the other zones */ for (i = 1; i < conf->nr_strip_zones; i++) { zone = conf->strip_zone + i; zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks; printk(KERN_INFO "raid0: zone %d\n", i); zone->dev_start = current_start; smallest = NULL; c = 0; for (j=0; j<cnt; j++) { char b[BDEVNAME_SIZE]; rdev = conf->strip_zone[0].dev[j]; printk(KERN_INFO "raid0: checking %s ...", bdevname(rdev->bdev, b)); if (rdev->size > current_start / 2) { printk(KERN_INFO " contained as device %d\n", c); zone->dev[c] = rdev; c++; if (!smallest || (rdev->size <smallest->size)) { smallest = rdev; printk(KERN_INFO " (%llu) is smallest!.\n", (unsigned long long)rdev->size); } } else printk(KERN_INFO " nope.\n"); } zone->nb_dev = c; zone->sectors = (smallest->size * 2 - current_start) * c; printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n", zone->nb_dev, (unsigned long long)zone->sectors); zone->zone_start = curr_zone_start; curr_zone_start += zone->sectors; current_start = smallest->size * 2; printk(KERN_INFO "raid0: current zone start: %llu\n", (unsigned long long)current_start); } /* Now find appropriate hash spacing. * We want a number which causes most hash entries to cover * at most two strips, but the hash table must be at most * 1 PAGE. We choose the smallest strip, or contiguous collection * of strips, that has big enough size. We never consider the last * strip though as it's size has no bearing on the efficacy of the hash * table. 
*/ conf->spacing = curr_zone_start; min_spacing = curr_zone_start; sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*)); for (i=0; i < conf->nr_strip_zones-1; i++) { sector_t s = 0; for (j = i; j < conf->nr_strip_zones - 1 && s < min_spacing; j++) s += conf->strip_zone[j].sectors; if (s >= min_spacing && s < conf->spacing) conf->spacing = s; } mddev->queue->unplug_fn = raid0_unplug; mddev->queue->backing_dev_info.congested_fn = raid0_congested; mddev->queue->backing_dev_info.congested_data = mddev; printk(KERN_INFO "raid0: done.\n"); return 0; abort: return 1; } /** * raid0_mergeable_bvec -- tell bio layer if a two requests can be merged * @q: request queue * @bvm: properties of new bio * @biovec: the request that could be merged to it. * * Return amount of bytes we can accept at this offset */ static int raid0_mergeable_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *biovec) { mddev_t *mddev = q->queuedata; sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); int max; unsigned int chunk_sectors = mddev->chunk_size >> 9; unsigned int bio_sectors = bvm->bi_size >> 9; max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; if (max < 0) max = 0; /* bio_add cannot handle a negative return */ if (max <= biovec->bv_len && bio_sectors == 0) return biovec->bv_len; else return max; } static int raid0_run (mddev_t *mddev) { unsigned cur=0, i=0, nb_zone; s64 sectors; raid0_conf_t *conf; mdk_rdev_t *rdev; if (mddev->chunk_size == 0) { printk(KERN_ERR "md/raid0: non-zero chunk size required.\n"); return -EINVAL; } printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n", mdname(mddev), mddev->chunk_size >> 9, (mddev->chunk_size>>1)-1); blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9); blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size>>1) - 1); mddev->queue->queue_lock = &mddev->queue->__queue_lock; conf = kmalloc(sizeof (raid0_conf_t), GFP_KERNEL); if (!conf) goto out; 
mddev->private = (void *)conf; conf->strip_zone = NULL; conf->devlist = NULL; if (create_strip_zones (mddev)) goto out_free_conf; /* calculate array device size */ mddev->array_sectors = 0; list_for_each_entry(rdev, &mddev->disks, same_set) mddev->array_sectors += rdev->size * 2; printk(KERN_INFO "raid0 : md_size is %llu sectors.\n", (unsigned long long)mddev->array_sectors); printk(KERN_INFO "raid0 : conf->spacing is %llu sectors.\n", (unsigned long long)conf->spacing); { sector_t s = mddev->array_sectors; sector_t space = conf->spacing; int round; conf->sector_shift = 0; if (sizeof(sector_t) > sizeof(u32)) { /*shift down space and s so that sector_div will work */ while (space > (sector_t) (~(u32)0)) { s >>= 1; space >>= 1; s += 1; /* force round-up */ conf->sector_shift++; } } round = sector_div(s, (u32)space) ? 1 : 0; nb_zone = s + round; } printk(KERN_INFO "raid0 : nb_zone is %d.\n", nb_zone); printk(KERN_INFO "raid0 : Allocating %zu bytes for hash.\n", nb_zone*sizeof(struct strip_zone*)); conf->hash_table = kmalloc (sizeof (struct strip_zone *)*nb_zone, GFP_KERNEL); if (!conf->hash_table) goto out_free_conf; sectors = conf->strip_zone[cur].sectors; conf->hash_table[0] = conf->strip_zone + cur; for (i=1; i< nb_zone; i++) { while (sectors <= conf->spacing) { cur++; sectors += conf->strip_zone[cur].sectors; } sectors -= conf->spacing; conf->hash_table[i] = conf->strip_zone + cur; } if (conf->sector_shift) { conf->spacing >>= conf->sector_shift; /* round spacing up so when we divide by it, we * err on the side of too-low, which is safest */ conf->spacing++; } /* calculate the max read-ahead size. * For read-ahead of large files to be effective, we need to * readahead at least twice a whole stripe. i.e. number of devices * multiplied by chunk size times 2. * If an individual device has an ra_pages greater than the * chunk size, then we will not drive that device as hard as it * wants. 
We consider this a configuration error: a larger * chunksize should be used in that case. */ { int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE; if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) mddev->queue->backing_dev_info.ra_pages = 2* stripe; } blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec); return 0; out_free_conf: kfree(conf->strip_zone); kfree(conf->devlist); kfree(conf); mddev->private = NULL; out: return -ENOMEM; } static int raid0_stop (mddev_t *mddev) { raid0_conf_t *conf = mddev_to_conf(mddev); blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ kfree(conf->hash_table); conf->hash_table = NULL; kfree(conf->strip_zone); conf->strip_zone = NULL; kfree(conf); mddev->private = NULL; return 0; } static int raid0_make_request (struct request_queue *q, struct bio *bio) { mddev_t *mddev = q->queuedata; unsigned int sect_in_chunk, chunksect_bits, chunk_sects; raid0_conf_t *conf = mddev_to_conf(mddev); struct strip_zone *zone; mdk_rdev_t *tmp_dev; sector_t chunk; sector_t sector, rsect; const int rw = bio_data_dir(bio); int cpu; if (unlikely(bio_barrier(bio))) { bio_endio(bio, -EOPNOTSUPP); return 0; } cpu = part_stat_lock(); part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], bio_sectors(bio)); part_stat_unlock(); chunk_sects = mddev->chunk_size >> 9; chunksect_bits = ffz(~chunk_sects); sector = bio->bi_sector; if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) { struct bio_pair *bp; /* Sanity check -- queue functions should prevent this happening */ if (bio->bi_vcnt != 1 || bio->bi_idx != 0) goto bad_map; /* This is a one page bio that upper layers * refuse to split for us, so we need to split it. 
*/ bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1))); if (raid0_make_request(q, &bp->bio1)) generic_make_request(&bp->bio1); if (raid0_make_request(q, &bp->bio2)) generic_make_request(&bp->bio2); bio_pair_release(bp); return 0; } { sector_t x = sector >> conf->sector_shift; sector_div(x, (u32)conf->spacing); zone = conf->hash_table[x]; } while (sector >= zone->zone_start + zone->sectors) zone++; sect_in_chunk = bio->bi_sector & (chunk_sects - 1); { sector_t x = (sector - zone->zone_start) >> chunksect_bits; sector_div(x, zone->nb_dev); chunk = x; x = sector >> chunksect_bits; tmp_dev = zone->dev[sector_div(x, zone->nb_dev)]; } rsect = (chunk << chunksect_bits) + zone->dev_start + sect_in_chunk; bio->bi_bdev = tmp_dev->bdev; bio->bi_sector = rsect + tmp_dev->data_offset; /* * Let the main block layer submit the IO and resolve recursion: */ return 1; bad_map: printk("raid0_make_request bug: can't convert block across chunks" " or bigger than %dk %llu %d\n", chunk_sects / 2, (unsigned long long)bio->bi_sector, bio->bi_size >> 10); bio_io_error(bio); return 0; } static void raid0_status (struct seq_file *seq, mddev_t *mddev) { #undef MD_DEBUG #ifdef MD_DEBUG int j, k, h; char b[BDEVNAME_SIZE]; raid0_conf_t *conf = mddev_to_conf(mddev); h = 0; for (j = 0; j < conf->nr_strip_zones; j++) { seq_printf(seq, " z%d", j); if (conf->hash_table[h] == conf->strip_zone+j) seq_printf(seq, "(h%d)", h++); seq_printf(seq, "=["); for (k = 0; k < conf->strip_zone[j].nb_dev; k++) seq_printf(seq, "%s/", bdevname( conf->strip_zone[j].dev[k]->bdev,b)); seq_printf(seq, "] zs=%d ds=%d s=%d\n", conf->strip_zone[j].zone_start, conf->strip_zone[j].dev_start, conf->strip_zone[j].sectors); } #endif seq_printf(seq, " %dk chunks", mddev->chunk_size/1024); return; } static struct mdk_personality raid0_personality= { .name = "raid0", .level = 0, .owner = THIS_MODULE, .make_request = raid0_make_request, .run = raid0_run, .stop = raid0_stop, .status = raid0_status, }; static int 
__init raid0_init (void) { return register_md_personality (&raid0_personality); } static void raid0_exit (void) { unregister_md_personality (&raid0_personality); } module_init(raid0_init); module_exit(raid0_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("md-personality-2"); /* RAID0 */ MODULE_ALIAS("md-raid0"); MODULE_ALIAS("md-level-0");
gpl-2.0
AnesHadzi/linux-socfpga
drivers/media/dvb-frontends/rtl2830.c
395
21949
/* * Realtek RTL2830 DVB-T demodulator driver * * Copyright (C) 2011 Antti Palosaari <crope@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include "rtl2830_priv.h" /* Our regmap is bypassing I2C adapter lock, thus we do it! */ static int rtl2830_bulk_write(struct i2c_client *client, unsigned int reg, const void *val, size_t val_count) { struct rtl2830_dev *dev = i2c_get_clientdata(client); int ret; i2c_lock_adapter(client->adapter); ret = regmap_bulk_write(dev->regmap, reg, val, val_count); i2c_unlock_adapter(client->adapter); return ret; } static int rtl2830_update_bits(struct i2c_client *client, unsigned int reg, unsigned int mask, unsigned int val) { struct rtl2830_dev *dev = i2c_get_clientdata(client); int ret; i2c_lock_adapter(client->adapter); ret = regmap_update_bits(dev->regmap, reg, mask, val); i2c_unlock_adapter(client->adapter); return ret; } static int rtl2830_bulk_read(struct i2c_client *client, unsigned int reg, void *val, size_t val_count) { struct rtl2830_dev *dev = i2c_get_clientdata(client); int ret; i2c_lock_adapter(client->adapter); ret = regmap_bulk_read(dev->regmap, reg, val, val_count); i2c_unlock_adapter(client->adapter); return ret; } static int rtl2830_init(struct dvb_frontend *fe) { struct i2c_client *client = fe->demodulator_priv; struct rtl2830_dev *dev = i2c_get_clientdata(client); struct dtv_frontend_properties *c = &dev->fe.dtv_property_cache; int ret, i; struct rtl2830_reg_val_mask tab[] = { {0x00d, 0x01, 0x03}, {0x00d, 0x10, 0x10}, {0x104, 0x00, 0x1e}, {0x105, 0x80, 0x80}, {0x110, 
0x02, 0x03}, {0x110, 0x08, 0x0c}, {0x17b, 0x00, 0x40}, {0x17d, 0x05, 0x0f}, {0x17d, 0x50, 0xf0}, {0x18c, 0x08, 0x0f}, {0x18d, 0x00, 0xc0}, {0x188, 0x05, 0x0f}, {0x189, 0x00, 0xfc}, {0x2d5, 0x02, 0x02}, {0x2f1, 0x02, 0x06}, {0x2f1, 0x20, 0xf8}, {0x16d, 0x00, 0x01}, {0x1a6, 0x00, 0x80}, {0x106, dev->pdata->vtop, 0x3f}, {0x107, dev->pdata->krf, 0x3f}, {0x112, 0x28, 0xff}, {0x103, dev->pdata->agc_targ_val, 0xff}, {0x00a, 0x02, 0x07}, {0x140, 0x0c, 0x3c}, {0x140, 0x40, 0xc0}, {0x15b, 0x05, 0x07}, {0x15b, 0x28, 0x38}, {0x15c, 0x05, 0x07}, {0x15c, 0x28, 0x38}, {0x115, dev->pdata->spec_inv, 0x01}, {0x16f, 0x01, 0x07}, {0x170, 0x18, 0x38}, {0x172, 0x0f, 0x0f}, {0x173, 0x08, 0x38}, {0x175, 0x01, 0x07}, {0x176, 0x00, 0xc0}, }; for (i = 0; i < ARRAY_SIZE(tab); i++) { ret = rtl2830_update_bits(client, tab[i].reg, tab[i].mask, tab[i].val); if (ret) goto err; } ret = rtl2830_bulk_write(client, 0x18f, "\x28\x00", 2); if (ret) goto err; ret = rtl2830_bulk_write(client, 0x195, "\x04\x06\x0a\x12\x0a\x12\x1e\x28", 8); if (ret) goto err; /* TODO: spec init */ /* soft reset */ ret = rtl2830_update_bits(client, 0x101, 0x04, 0x04); if (ret) goto err; ret = rtl2830_update_bits(client, 0x101, 0x04, 0x00); if (ret) goto err; /* init stats here in order signal app which stats are supported */ c->strength.len = 1; c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c->cnr.len = 1; c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c->post_bit_error.len = 1; c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c->post_bit_count.len = 1; c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; /* start statistics polling */ schedule_delayed_work(&dev->stat_work, msecs_to_jiffies(2000)); dev->sleeping = false; return ret; err: dev_dbg(&client->dev, "failed=%d\n", ret); return ret; } static int rtl2830_sleep(struct dvb_frontend *fe) { struct i2c_client *client = fe->demodulator_priv; struct rtl2830_dev *dev = i2c_get_clientdata(client); dev->sleeping = true; /* stop statistics polling */ 
cancel_delayed_work_sync(&dev->stat_work); dev->fe_status = 0; return 0; } static int rtl2830_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *s) { s->min_delay_ms = 500; s->step_size = fe->ops.info.frequency_stepsize * 2; s->max_drift = (fe->ops.info.frequency_stepsize * 2) + 1; return 0; } static int rtl2830_set_frontend(struct dvb_frontend *fe) { struct i2c_client *client = fe->demodulator_priv; struct rtl2830_dev *dev = i2c_get_clientdata(client); struct dtv_frontend_properties *c = &fe->dtv_property_cache; int ret, i; u64 num; u8 buf[3], u8tmp; u32 if_ctl, if_frequency; static const u8 bw_params1[3][34] = { { 0x1f, 0xf0, 0x1f, 0xf0, 0x1f, 0xfa, 0x00, 0x17, 0x00, 0x41, 0x00, 0x64, 0x00, 0x67, 0x00, 0x38, 0x1f, 0xde, 0x1f, 0x7a, 0x1f, 0x47, 0x1f, 0x7c, 0x00, 0x30, 0x01, 0x4b, 0x02, 0x82, 0x03, 0x73, 0x03, 0xcf, /* 6 MHz */ }, { 0x1f, 0xfa, 0x1f, 0xda, 0x1f, 0xc1, 0x1f, 0xb3, 0x1f, 0xca, 0x00, 0x07, 0x00, 0x4d, 0x00, 0x6d, 0x00, 0x40, 0x1f, 0xca, 0x1f, 0x4d, 0x1f, 0x2a, 0x1f, 0xb2, 0x00, 0xec, 0x02, 0x7e, 0x03, 0xd0, 0x04, 0x53, /* 7 MHz */ }, { 0x00, 0x10, 0x00, 0x0e, 0x1f, 0xf7, 0x1f, 0xc9, 0x1f, 0xa0, 0x1f, 0xa6, 0x1f, 0xec, 0x00, 0x4e, 0x00, 0x7d, 0x00, 0x3a, 0x1f, 0x98, 0x1f, 0x10, 0x1f, 0x40, 0x00, 0x75, 0x02, 0x5f, 0x04, 0x24, 0x04, 0xdb, /* 8 MHz */ }, }; static const u8 bw_params2[3][6] = { {0xc3, 0x0c, 0x44, 0x33, 0x33, 0x30}, /* 6 MHz */ {0xb8, 0xe3, 0x93, 0x99, 0x99, 0x98}, /* 7 MHz */ {0xae, 0xba, 0xf3, 0x26, 0x66, 0x64}, /* 8 MHz */ }; dev_dbg(&client->dev, "frequency=%u bandwidth_hz=%u inversion=%u\n", c->frequency, c->bandwidth_hz, c->inversion); /* program tuner */ if (fe->ops.tuner_ops.set_params) fe->ops.tuner_ops.set_params(fe); switch (c->bandwidth_hz) { case 6000000: i = 0; break; case 7000000: i = 1; break; case 8000000: i = 2; break; default: dev_err(&client->dev, "invalid bandwidth_hz %u\n", c->bandwidth_hz); return -EINVAL; } ret = rtl2830_update_bits(client, 0x008, 0x06, i << 1); if (ret) goto err; /* 
program if frequency */ if (fe->ops.tuner_ops.get_if_frequency) ret = fe->ops.tuner_ops.get_if_frequency(fe, &if_frequency); else ret = -EINVAL; if (ret) goto err; num = if_frequency % dev->pdata->clk; num *= 0x400000; num = div_u64(num, dev->pdata->clk); num = -num; if_ctl = num & 0x3fffff; dev_dbg(&client->dev, "if_frequency=%d if_ctl=%08x\n", if_frequency, if_ctl); buf[0] = (if_ctl >> 16) & 0x3f; buf[1] = (if_ctl >> 8) & 0xff; buf[2] = (if_ctl >> 0) & 0xff; ret = rtl2830_bulk_read(client, 0x119, &u8tmp, 1); if (ret) goto err; buf[0] |= u8tmp & 0xc0; /* [7:6] */ ret = rtl2830_bulk_write(client, 0x119, buf, 3); if (ret) goto err; /* 1/2 split I2C write */ ret = rtl2830_bulk_write(client, 0x11c, &bw_params1[i][0], 17); if (ret) goto err; /* 2/2 split I2C write */ ret = rtl2830_bulk_write(client, 0x12d, &bw_params1[i][17], 17); if (ret) goto err; ret = rtl2830_bulk_write(client, 0x19d, bw_params2[i], 6); if (ret) goto err; return ret; err: dev_dbg(&client->dev, "failed=%d\n", ret); return ret; } static int rtl2830_get_frontend(struct dvb_frontend *fe) { struct i2c_client *client = fe->demodulator_priv; struct rtl2830_dev *dev = i2c_get_clientdata(client); struct dtv_frontend_properties *c = &fe->dtv_property_cache; int ret; u8 buf[3]; if (dev->sleeping) return 0; ret = rtl2830_bulk_read(client, 0x33c, buf, 2); if (ret) goto err; ret = rtl2830_bulk_read(client, 0x351, &buf[2], 1); if (ret) goto err; dev_dbg(&client->dev, "TPS=%*ph\n", 3, buf); switch ((buf[0] >> 2) & 3) { case 0: c->modulation = QPSK; break; case 1: c->modulation = QAM_16; break; case 2: c->modulation = QAM_64; break; } switch ((buf[2] >> 2) & 1) { case 0: c->transmission_mode = TRANSMISSION_MODE_2K; break; case 1: c->transmission_mode = TRANSMISSION_MODE_8K; } switch ((buf[2] >> 0) & 3) { case 0: c->guard_interval = GUARD_INTERVAL_1_32; break; case 1: c->guard_interval = GUARD_INTERVAL_1_16; break; case 2: c->guard_interval = GUARD_INTERVAL_1_8; break; case 3: c->guard_interval = GUARD_INTERVAL_1_4; 
break; } switch ((buf[0] >> 4) & 7) { case 0: c->hierarchy = HIERARCHY_NONE; break; case 1: c->hierarchy = HIERARCHY_1; break; case 2: c->hierarchy = HIERARCHY_2; break; case 3: c->hierarchy = HIERARCHY_4; break; } switch ((buf[1] >> 3) & 7) { case 0: c->code_rate_HP = FEC_1_2; break; case 1: c->code_rate_HP = FEC_2_3; break; case 2: c->code_rate_HP = FEC_3_4; break; case 3: c->code_rate_HP = FEC_5_6; break; case 4: c->code_rate_HP = FEC_7_8; break; } switch ((buf[1] >> 0) & 7) { case 0: c->code_rate_LP = FEC_1_2; break; case 1: c->code_rate_LP = FEC_2_3; break; case 2: c->code_rate_LP = FEC_3_4; break; case 3: c->code_rate_LP = FEC_5_6; break; case 4: c->code_rate_LP = FEC_7_8; break; } return 0; err: dev_dbg(&client->dev, "failed=%d\n", ret); return ret; } static int rtl2830_read_status(struct dvb_frontend *fe, enum fe_status *status) { struct i2c_client *client = fe->demodulator_priv; struct rtl2830_dev *dev = i2c_get_clientdata(client); int ret; u8 u8tmp; *status = 0; if (dev->sleeping) return 0; ret = rtl2830_bulk_read(client, 0x351, &u8tmp, 1); if (ret) goto err; u8tmp = (u8tmp >> 3) & 0x0f; /* [6:3] */ if (u8tmp == 11) { *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK; } else if (u8tmp == 10) { *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI; } dev->fe_status = *status; return ret; err: dev_dbg(&client->dev, "failed=%d\n", ret); return ret; } static int rtl2830_read_snr(struct dvb_frontend *fe, u16 *snr) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; if (c->cnr.stat[0].scale == FE_SCALE_DECIBEL) *snr = div_s64(c->cnr.stat[0].svalue, 100); else *snr = 0; return 0; } static int rtl2830_read_ber(struct dvb_frontend *fe, u32 *ber) { struct i2c_client *client = fe->demodulator_priv; struct rtl2830_dev *dev = i2c_get_clientdata(client); *ber = (dev->post_bit_error - dev->post_bit_error_prev); dev->post_bit_error_prev = dev->post_bit_error; return 0; } static int rtl2830_read_ucblocks(struct 
dvb_frontend *fe, u32 *ucblocks) { *ucblocks = 0; return 0; } static int rtl2830_read_signal_strength(struct dvb_frontend *fe, u16 *strength) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; if (c->strength.stat[0].scale == FE_SCALE_RELATIVE) *strength = c->strength.stat[0].uvalue; else *strength = 0; return 0; } static struct dvb_frontend_ops rtl2830_ops = { .delsys = {SYS_DVBT}, .info = { .name = "Realtek RTL2830 (DVB-T)", .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_HIERARCHY_AUTO | FE_CAN_RECOVER | FE_CAN_MUTE_TS }, .init = rtl2830_init, .sleep = rtl2830_sleep, .get_tune_settings = rtl2830_get_tune_settings, .set_frontend = rtl2830_set_frontend, .get_frontend = rtl2830_get_frontend, .read_status = rtl2830_read_status, .read_snr = rtl2830_read_snr, .read_ber = rtl2830_read_ber, .read_ucblocks = rtl2830_read_ucblocks, .read_signal_strength = rtl2830_read_signal_strength, }; static void rtl2830_stat_work(struct work_struct *work) { struct rtl2830_dev *dev = container_of(work, struct rtl2830_dev, stat_work.work); struct i2c_client *client = dev->client; struct dtv_frontend_properties *c = &dev->fe.dtv_property_cache; int ret, tmp; u8 u8tmp, buf[2]; u16 u16tmp; dev_dbg(&client->dev, "\n"); /* signal strength */ if (dev->fe_status & FE_HAS_SIGNAL) { struct {signed int x:14; } s; /* read IF AGC */ ret = rtl2830_bulk_read(client, 0x359, buf, 2); if (ret) goto err; u16tmp = buf[0] << 8 | buf[1] << 0; u16tmp &= 0x3fff; /* [13:0] */ tmp = s.x = u16tmp; /* 14-bit bin to 2 complement */ u16tmp = clamp_val(-4 * tmp + 32767, 0x0000, 0xffff); dev_dbg(&client->dev, "IF AGC=%d\n", tmp); c->strength.stat[0].scale = FE_SCALE_RELATIVE; c->strength.stat[0].uvalue = u16tmp; } else { c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; } /* CNR */ if (dev->fe_status & 
FE_HAS_VITERBI) { unsigned hierarchy, constellation; #define CONSTELLATION_NUM 3 #define HIERARCHY_NUM 4 static const u32 constant[CONSTELLATION_NUM][HIERARCHY_NUM] = { {70705899, 70705899, 70705899, 70705899}, {82433173, 82433173, 87483115, 94445660}, {92888734, 92888734, 95487525, 99770748}, }; ret = rtl2830_bulk_read(client, 0x33c, &u8tmp, 1); if (ret) goto err; constellation = (u8tmp >> 2) & 0x03; /* [3:2] */ if (constellation > CONSTELLATION_NUM - 1) goto err_schedule_delayed_work; hierarchy = (u8tmp >> 4) & 0x07; /* [6:4] */ if (hierarchy > HIERARCHY_NUM - 1) goto err_schedule_delayed_work; ret = rtl2830_bulk_read(client, 0x40c, buf, 2); if (ret) goto err; u16tmp = buf[0] << 8 | buf[1] << 0; if (u16tmp) tmp = (constant[constellation][hierarchy] - intlog10(u16tmp)) / ((1 << 24) / 10000); else tmp = 0; dev_dbg(&client->dev, "CNR raw=%u\n", u16tmp); c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c->cnr.stat[0].svalue = tmp; } else { c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; } /* BER */ if (dev->fe_status & FE_HAS_LOCK) { ret = rtl2830_bulk_read(client, 0x34e, buf, 2); if (ret) goto err; u16tmp = buf[0] << 8 | buf[1] << 0; dev->post_bit_error += u16tmp; dev->post_bit_count += 1000000; dev_dbg(&client->dev, "BER errors=%u total=1000000\n", u16tmp); c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c->post_bit_error.stat[0].uvalue = dev->post_bit_error; c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; c->post_bit_count.stat[0].uvalue = dev->post_bit_count; } else { c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; } err_schedule_delayed_work: schedule_delayed_work(&dev->stat_work, msecs_to_jiffies(2000)); return; err: dev_dbg(&client->dev, "failed=%d\n", ret); } static int rtl2830_pid_filter_ctrl(struct dvb_frontend *fe, int onoff) { struct i2c_client *client = fe->demodulator_priv; int ret; u8 u8tmp; dev_dbg(&client->dev, "onoff=%d\n", onoff); /* enable / disable PID filter */ if (onoff) u8tmp = 
0x80; else u8tmp = 0x00; ret = rtl2830_update_bits(client, 0x061, 0x80, u8tmp); if (ret) goto err; return 0; err: dev_dbg(&client->dev, "failed=%d\n", ret); return ret; } static int rtl2830_pid_filter(struct dvb_frontend *fe, u8 index, u16 pid, int onoff) { struct i2c_client *client = fe->demodulator_priv; struct rtl2830_dev *dev = i2c_get_clientdata(client); int ret; u8 buf[4]; dev_dbg(&client->dev, "index=%d pid=%04x onoff=%d\n", index, pid, onoff); /* skip invalid PIDs (0x2000) */ if (pid > 0x1fff || index > 32) return 0; if (onoff) set_bit(index, &dev->filters); else clear_bit(index, &dev->filters); /* enable / disable PIDs */ buf[0] = (dev->filters >> 0) & 0xff; buf[1] = (dev->filters >> 8) & 0xff; buf[2] = (dev->filters >> 16) & 0xff; buf[3] = (dev->filters >> 24) & 0xff; ret = rtl2830_bulk_write(client, 0x062, buf, 4); if (ret) goto err; /* add PID */ buf[0] = (pid >> 8) & 0xff; buf[1] = (pid >> 0) & 0xff; ret = rtl2830_bulk_write(client, 0x066 + 2 * index, buf, 2); if (ret) goto err; return 0; err: dev_dbg(&client->dev, "failed=%d\n", ret); return ret; } /* * I2C gate/mux/repeater logic * We must use unlocked __i2c_transfer() here (through regmap) because of I2C * adapter lock is already taken by tuner driver. * Gate is closed automatically after single I2C transfer. 
*/ static int rtl2830_select(struct i2c_adapter *adap, void *mux_priv, u32 chan_id) { struct i2c_client *client = mux_priv; struct rtl2830_dev *dev = i2c_get_clientdata(client); int ret; dev_dbg(&client->dev, "\n"); /* open I2C repeater for 1 transfer, closes automatically */ /* XXX: regmap_update_bits() does not lock I2C adapter */ ret = regmap_update_bits(dev->regmap, 0x101, 0x08, 0x08); if (ret) goto err; return 0; err: dev_dbg(&client->dev, "failed=%d\n", ret); return ret; } static struct dvb_frontend *rtl2830_get_dvb_frontend(struct i2c_client *client) { struct rtl2830_dev *dev = i2c_get_clientdata(client); dev_dbg(&client->dev, "\n"); return &dev->fe; } static struct i2c_adapter *rtl2830_get_i2c_adapter(struct i2c_client *client) { struct rtl2830_dev *dev = i2c_get_clientdata(client); dev_dbg(&client->dev, "\n"); return dev->adapter; } /* * We implement own I2C access routines for regmap in order to get manual access * to I2C adapter lock, which is needed for I2C mux adapter. */ static int rtl2830_regmap_read(void *context, const void *reg_buf, size_t reg_size, void *val_buf, size_t val_size) { struct i2c_client *client = context; int ret; struct i2c_msg msg[2] = { { .addr = client->addr, .flags = 0, .len = reg_size, .buf = (u8 *)reg_buf, }, { .addr = client->addr, .flags = I2C_M_RD, .len = val_size, .buf = val_buf, } }; ret = __i2c_transfer(client->adapter, msg, 2); if (ret != 2) { dev_warn(&client->dev, "i2c reg read failed %d\n", ret); if (ret >= 0) ret = -EREMOTEIO; return ret; } return 0; } static int rtl2830_regmap_write(void *context, const void *data, size_t count) { struct i2c_client *client = context; int ret; struct i2c_msg msg[1] = { { .addr = client->addr, .flags = 0, .len = count, .buf = (u8 *)data, } }; ret = __i2c_transfer(client->adapter, msg, 1); if (ret != 1) { dev_warn(&client->dev, "i2c reg write failed %d\n", ret); if (ret >= 0) ret = -EREMOTEIO; return ret; } return 0; } static int rtl2830_regmap_gather_write(void *context, const void 
*reg, size_t reg_len, const void *val, size_t val_len) { struct i2c_client *client = context; int ret; u8 buf[256]; struct i2c_msg msg[1] = { { .addr = client->addr, .flags = 0, .len = 1 + val_len, .buf = buf, } }; buf[0] = *(u8 const *)reg; memcpy(&buf[1], val, val_len); ret = __i2c_transfer(client->adapter, msg, 1); if (ret != 1) { dev_warn(&client->dev, "i2c reg write failed %d\n", ret); if (ret >= 0) ret = -EREMOTEIO; return ret; } return 0; } static int rtl2830_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct rtl2830_platform_data *pdata = client->dev.platform_data; struct rtl2830_dev *dev; int ret; u8 u8tmp; static const struct regmap_bus regmap_bus = { .read = rtl2830_regmap_read, .write = rtl2830_regmap_write, .gather_write = rtl2830_regmap_gather_write, .val_format_endian_default = REGMAP_ENDIAN_NATIVE, }; static const struct regmap_range_cfg regmap_range_cfg[] = { { .selector_reg = 0x00, .selector_mask = 0xff, .selector_shift = 0, .window_start = 0, .window_len = 0x100, .range_min = 0 * 0x100, .range_max = 5 * 0x100, }, }; static const struct regmap_config regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = 5 * 0x100, .ranges = regmap_range_cfg, .num_ranges = ARRAY_SIZE(regmap_range_cfg), }; dev_dbg(&client->dev, "\n"); if (pdata == NULL) { ret = -EINVAL; goto err; } /* allocate memory for the internal state */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev == NULL) { ret = -ENOMEM; goto err; } /* setup the state */ i2c_set_clientdata(client, dev); dev->client = client; dev->pdata = client->dev.platform_data; dev->sleeping = true; INIT_DELAYED_WORK(&dev->stat_work, rtl2830_stat_work); dev->regmap = regmap_init(&client->dev, &regmap_bus, client, &regmap_config); if (IS_ERR(dev->regmap)) { ret = PTR_ERR(dev->regmap); goto err_kfree; } /* check if the demod is there */ ret = rtl2830_bulk_read(client, 0x000, &u8tmp, 1); if (ret) goto err_regmap_exit; /* create muxed i2c adapter for tuner */ dev->adapter = 
i2c_add_mux_adapter(client->adapter, &client->dev, client, 0, 0, 0, rtl2830_select, NULL); if (dev->adapter == NULL) { ret = -ENODEV; goto err_regmap_exit; } /* create dvb frontend */ memcpy(&dev->fe.ops, &rtl2830_ops, sizeof(dev->fe.ops)); dev->fe.demodulator_priv = client; /* setup callbacks */ pdata->get_dvb_frontend = rtl2830_get_dvb_frontend; pdata->get_i2c_adapter = rtl2830_get_i2c_adapter; pdata->pid_filter = rtl2830_pid_filter; pdata->pid_filter_ctrl = rtl2830_pid_filter_ctrl; dev_info(&client->dev, "Realtek RTL2830 successfully attached\n"); return 0; err_regmap_exit: regmap_exit(dev->regmap); err_kfree: kfree(dev); err: dev_dbg(&client->dev, "failed=%d\n", ret); return ret; } static int rtl2830_remove(struct i2c_client *client) { struct rtl2830_dev *dev = i2c_get_clientdata(client); dev_dbg(&client->dev, "\n"); i2c_del_mux_adapter(dev->adapter); regmap_exit(dev->regmap); kfree(dev); return 0; } static const struct i2c_device_id rtl2830_id_table[] = { {"rtl2830", 0}, {} }; MODULE_DEVICE_TABLE(i2c, rtl2830_id_table); static struct i2c_driver rtl2830_driver = { .driver = { .name = "rtl2830", }, .probe = rtl2830_probe, .remove = rtl2830_remove, .id_table = rtl2830_id_table, }; module_i2c_driver(rtl2830_driver); MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>"); MODULE_DESCRIPTION("Realtek RTL2830 DVB-T demodulator driver"); MODULE_LICENSE("GPL");
gpl-2.0
deryfebriantara/cimahi-city
node_modules/node-sass/src/sass_types/factory.cpp
395
2182
#include <nan.h> #include "factory.h" #include "value.h" #include "number.h" #include "string.h" #include "color.h" #include "boolean.h" #include "list.h" #include "map.h" #include "null.h" #include "error.h" namespace SassTypes { SassTypes::Value* Factory::create(Sass_Value* v) { switch (sass_value_get_tag(v)) { case SASS_NUMBER: return new Number(v); case SASS_STRING: return new String(v); case SASS_COLOR: return new Color(v); case SASS_BOOLEAN: return &Boolean::get_singleton(sass_boolean_get_value(v)); case SASS_LIST: return new List(v); case SASS_MAP: return new Map(v); case SASS_NULL: return &Null::get_singleton(); case SASS_ERROR: return new Error(v); default: const char *msg = "Unknown type encountered."; Nan::ThrowTypeError(Nan::New<v8::String>(msg).ToLocalChecked()); return new Error(sass_make_error(msg)); } } NAN_MODULE_INIT(Factory::initExports) { Nan::HandleScope scope; v8::Local<v8::Object> types = Nan::New<v8::Object>(); Nan::Set(types, Nan::New("Number").ToLocalChecked(), Number::get_constructor()); Nan::Set(types, Nan::New("String").ToLocalChecked(), String::get_constructor()); Nan::Set(types, Nan::New("Color").ToLocalChecked(), Color::get_constructor()); Nan::Set(types, Nan::New("Boolean").ToLocalChecked(), Boolean::get_constructor()); Nan::Set(types, Nan::New("List").ToLocalChecked(), List::get_constructor()); Nan::Set(types, Nan::New("Map").ToLocalChecked(), Map::get_constructor()); Nan::Set(types, Nan::New("Null").ToLocalChecked(), Null::get_constructor()); Nan::Set(types, Nan::New("Error").ToLocalChecked(), Error::get_constructor()); Nan::Set(target, Nan::New<v8::String>("types").ToLocalChecked(), types); } Value* Factory::unwrap(v8::Local<v8::Value> obj) { // Todo: non-SassValue objects could easily fall under that condition, need to be more specific. if (!obj->IsObject() || obj.As<v8::Object>()->InternalFieldCount() != 1) { return NULL; } return static_cast<Value*>(Nan::GetInternalFieldPointer(obj.As<v8::Object>(), 0)); } }
gpl-2.0
rogro82/picasso-kernel
drivers/net/netxen/netxen_nic_hw.c
651
51659
/* * Copyright (C) 2003 - 2009 NetXen, Inc. * Copyright (C) 2009 - QLogic Corporation. * All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, * MA 02111-1307, USA. * * The full GNU General Public License is included in this distribution * in the file called "COPYING". * */ #include <linux/slab.h> #include "netxen_nic.h" #include "netxen_nic_hw.h" #include <net/ip.h> #define MASK(n) ((1ULL<<(n))-1) #define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff)) #define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff)) #define MS_WIN(addr) (addr & 0x0ffc0000) #define GET_MEM_OFFS_2M(addr) (addr & MASK(18)) #define CRB_BLK(off) ((off >> 20) & 0x3f) #define CRB_SUBBLK(off) ((off >> 16) & 0xf) #define CRB_WINDOW_2M (0x130060) #define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000)) #define CRB_INDIRECT_2M (0x1e0000UL) static void netxen_nic_io_write_128M(struct netxen_adapter *adapter, void __iomem *addr, u32 data); static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter, void __iomem *addr); #ifndef readq static inline u64 readq(void __iomem *addr) { return readl(addr) | (((u64) readl(addr + 4)) << 32LL); } #endif #ifndef writeq static inline void writeq(u64 val, void __iomem *addr) { writel(((u32) (val)), (addr)); writel(((u32) (val >> 32)), (addr + 4)); } #endif #define 
PCI_OFFSET_FIRST_RANGE(adapter, off) \ ((adapter)->ahw.pci_base0 + (off)) #define PCI_OFFSET_SECOND_RANGE(adapter, off) \ ((adapter)->ahw.pci_base1 + (off) - SECOND_PAGE_GROUP_START) #define PCI_OFFSET_THIRD_RANGE(adapter, off) \ ((adapter)->ahw.pci_base2 + (off) - THIRD_PAGE_GROUP_START) static void __iomem *pci_base_offset(struct netxen_adapter *adapter, unsigned long off) { if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END)) return PCI_OFFSET_FIRST_RANGE(adapter, off); if (ADDR_IN_RANGE(off, SECOND_PAGE_GROUP_START, SECOND_PAGE_GROUP_END)) return PCI_OFFSET_SECOND_RANGE(adapter, off); if (ADDR_IN_RANGE(off, THIRD_PAGE_GROUP_START, THIRD_PAGE_GROUP_END)) return PCI_OFFSET_THIRD_RANGE(adapter, off); return NULL; } static crb_128M_2M_block_map_t crb_128M_2M_map[64] __cacheline_aligned_in_smp = { {{{0, 0, 0, 0} } }, /* 0: PCI */ {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */ {1, 0x0110000, 0x0120000, 0x130000}, {1, 0x0120000, 0x0122000, 0x124000}, {1, 0x0130000, 0x0132000, 0x126000}, {1, 0x0140000, 0x0142000, 0x128000}, {1, 0x0150000, 0x0152000, 0x12a000}, {1, 0x0160000, 0x0170000, 0x110000}, {1, 0x0170000, 0x0172000, 0x12e000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x01e0000, 0x01e0800, 0x122000}, {0, 0x0000000, 0x0000000, 0x000000} } }, {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */ {{{0, 0, 0, 0} } }, /* 3: */ {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */ {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */ {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */ {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */ {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */ {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 
0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x08f0000, 0x08f2000, 0x172000} } }, {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/ {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x09f0000, 0x09f2000, 0x176000} } }, {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/ {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x0af0000, 0x0af2000, 0x17a000} } }, {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/ {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 
0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x0bf0000, 0x0bf2000, 0x17e000} } }, {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */ {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */ {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */ {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */ {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */ {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */ {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */ {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */ {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */ {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */ {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */ {{{0, 0, 0, 0} } }, /* 23: */ {{{0, 0, 0, 0} } }, /* 24: */ {{{0, 0, 0, 0} } }, /* 25: */ {{{0, 0, 0, 0} } }, /* 26: */ {{{0, 0, 0, 0} } }, /* 27: */ {{{0, 0, 0, 0} } }, /* 28: */ {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */ {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */ {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */ {{{0} } }, /* 32: PCI */ {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */ {1, 0x2110000, 0x2120000, 0x130000}, {1, 0x2120000, 0x2122000, 0x124000}, {1, 0x2130000, 0x2132000, 0x126000}, {1, 0x2140000, 0x2142000, 0x128000}, {1, 0x2150000, 0x2152000, 0x12a000}, {1, 0x2160000, 0x2170000, 0x110000}, {1, 0x2170000, 0x2172000, 0x12e000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000} } }, {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */ {{{0} } }, /* 35: */ {{{0} } }, /* 36: */ {{{0} } }, /* 37: */ {{{0} } }, /* 38: */ {{{0} } 
}, /* 39: */
/* (continuation of the crb_128M_2M_map table started above: per-CRB-block
 * {valid, start_128M, end_128M, start_2M} sub-block address translations) */
{{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
{{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
{{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
{{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
{{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
{{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
{{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
{{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
{{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
{{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
{{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
{{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
{{{0} } },	/* 52: */
{{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
{{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
{{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
{{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
{{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
{{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
{{{0} } },	/* 59: I2C0 */
{{{0} } },	/* 60: I2C1 */
{{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
{{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
{{{1, 0x3f00000, 0x3f01000, 0x168000} } }	/* 63: P2NR0 */
};

/*
 * top 12 bits of crb internal address (hub, agent)
 *
 * Indexed by CRB block number (0-63, same index space as the table above);
 * a zero entry means no hub/agent routing exists for that block.
 */
static unsigned crb_hub_agt[64] = {
	0,
	NETXEN_HW_CRB_HUB_AGT_ADR_PS,
	NETXEN_HW_CRB_HUB_AGT_ADR_MN,
	NETXEN_HW_CRB_HUB_AGT_ADR_MS,
	0,
	NETXEN_HW_CRB_HUB_AGT_ADR_SRE,
	NETXEN_HW_CRB_HUB_AGT_ADR_NIU,
	NETXEN_HW_CRB_HUB_AGT_ADR_QMN,
	NETXEN_HW_CRB_HUB_AGT_ADR_SQN0,
	NETXEN_HW_CRB_HUB_AGT_ADR_SQN1,
	NETXEN_HW_CRB_HUB_AGT_ADR_SQN2,
	NETXEN_HW_CRB_HUB_AGT_ADR_SQN3,
	NETXEN_HW_CRB_HUB_AGT_ADR_I2Q,
	NETXEN_HW_CRB_HUB_AGT_ADR_TIMR,
	NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB,
	NETXEN_HW_CRB_HUB_AGT_ADR_PGN4,
	NETXEN_HW_CRB_HUB_AGT_ADR_XDMA,
	NETXEN_HW_CRB_HUB_AGT_ADR_PGN0,
	NETXEN_HW_CRB_HUB_AGT_ADR_PGN1,
	NETXEN_HW_CRB_HUB_AGT_ADR_PGN2,
	NETXEN_HW_CRB_HUB_AGT_ADR_PGN3,
	NETXEN_HW_CRB_HUB_AGT_ADR_PGND,
	NETXEN_HW_CRB_HUB_AGT_ADR_PGNI,
	NETXEN_HW_CRB_HUB_AGT_ADR_PGS0,
	NETXEN_HW_CRB_HUB_AGT_ADR_PGS1,
	NETXEN_HW_CRB_HUB_AGT_ADR_PGS2,
	NETXEN_HW_CRB_HUB_AGT_ADR_PGS3,
	0,
	NETXEN_HW_CRB_HUB_AGT_ADR_PGSI,
	NETXEN_HW_CRB_HUB_AGT_ADR_SN,
	0,
	NETXEN_HW_CRB_HUB_AGT_ADR_EG,
	0,
	NETXEN_HW_CRB_HUB_AGT_ADR_PS,
	NETXEN_HW_CRB_HUB_AGT_ADR_CAM,
	0,
	0,
	0,
	0,
	0,
	NETXEN_HW_CRB_HUB_AGT_ADR_TIMR,
	0,
	NETXEN_HW_CRB_HUB_AGT_ADR_RPMX1,
	NETXEN_HW_CRB_HUB_AGT_ADR_RPMX2,
	NETXEN_HW_CRB_HUB_AGT_ADR_RPMX3,
	NETXEN_HW_CRB_HUB_AGT_ADR_RPMX4,
	NETXEN_HW_CRB_HUB_AGT_ADR_RPMX5,
	NETXEN_HW_CRB_HUB_AGT_ADR_RPMX6,
	NETXEN_HW_CRB_HUB_AGT_ADR_RPMX7,
	NETXEN_HW_CRB_HUB_AGT_ADR_XDMA,
	NETXEN_HW_CRB_HUB_AGT_ADR_I2Q,
	NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB,
	0,
	NETXEN_HW_CRB_HUB_AGT_ADR_RPMX0,
	NETXEN_HW_CRB_HUB_AGT_ADR_RPMX8,
	NETXEN_HW_CRB_HUB_AGT_ADR_RPMX9,
	NETXEN_HW_CRB_HUB_AGT_ADR_OCM0,
	0,
	NETXEN_HW_CRB_HUB_AGT_ADR_SMB,
	NETXEN_HW_CRB_HUB_AGT_ADR_I2C0,
	NETXEN_HW_CRB_HUB_AGT_ADR_I2C1,
	0,
	NETXEN_HW_CRB_HUB_AGT_ADR_PGNC,
	0,
};

/* PCI Windowing for DDR regions.
*/ #define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */ #define NETXEN_PCIE_SEM_TIMEOUT 10000 static int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu); int netxen_pcie_sem_lock(struct netxen_adapter *adapter, int sem, u32 id_reg) { int done = 0, timeout = 0; while (!done) { done = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_LOCK(sem))); if (done == 1) break; if (++timeout >= NETXEN_PCIE_SEM_TIMEOUT) return -EIO; msleep(1); } if (id_reg) NXWR32(adapter, id_reg, adapter->portnum); return 0; } void netxen_pcie_sem_unlock(struct netxen_adapter *adapter, int sem) { NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem))); } static int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port) { if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_1+(0x10000*port), 0x1447); NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0+(0x10000*port), 0x5); } return 0; } /* Disable an XG interface */ static int netxen_niu_disable_xg_port(struct netxen_adapter *adapter) { __u32 mac_cfg; u32 port = adapter->physical_port; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) return 0; if (port > NETXEN_NIU_MAX_XG_PORTS) return -EINVAL; mac_cfg = 0; if (NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg)) return -EIO; return 0; } #define NETXEN_UNICAST_ADDR(port, index) \ (NETXEN_UNICAST_ADDR_BASE+(port*32)+(index*8)) #define NETXEN_MCAST_ADDR(port, index) \ (NETXEN_MULTICAST_ADDR_BASE+(port*0x80)+(index*8)) #define MAC_HI(addr) \ ((addr[2] << 16) | (addr[1] << 8) | (addr[0])) #define MAC_LO(addr) \ ((addr[5] << 16) | (addr[4] << 8) | (addr[3])) static int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode) { u32 mac_cfg; u32 cnt = 0; __u32 reg = 0x0200; u32 port = adapter->physical_port; u16 board_type = adapter->ahw.board_type; if (port > NETXEN_NIU_MAX_XG_PORTS) return -EINVAL; mac_cfg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port)); mac_cfg &= ~0x4; NXWR32(adapter, 
NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg);

	/* these IMEZ/HMEZ boards select the frame counter per-port */
	if ((board_type == NETXEN_BRDTYPE_P2_SB31_10G_IMEZ) ||
			(board_type == NETXEN_BRDTYPE_P2_SB31_10G_HMEZ))
		reg = (0x20 << port);

	NXWR32(adapter, NETXEN_NIU_FRAME_COUNT_SELECT, reg);

	mdelay(10);

	/* wait (bounded to ~200ms) for in-flight frames to drain */
	while (NXRD32(adapter, NETXEN_NIU_FRAME_COUNT) && ++cnt < 20)
		mdelay(10);

	if (cnt < 20) {
		/* bit 13 = promiscuous, bit 12 = accept-all-multicast */
		reg = NXRD32(adapter,
			NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port));

		if (mode == NETXEN_NIU_PROMISC_MODE)
			reg = (reg | 0x2000UL);
		else
			reg = (reg & ~0x2000UL);

		if (mode == NETXEN_NIU_ALLMULTI_MODE)
			reg = (reg | 0x1000UL);
		else
			reg = (reg & ~0x1000UL);

		NXWR32(adapter,
			NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg);
	}
	/* if the drain timed out we skip the mode change but still re-enable */

	mac_cfg |= 0x4;
	NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg);

	return 0;
}

/*
 * Program the station (MAC) address registers of a P2 XG port.
 * @addr is the 6-byte MAC in network order; it is packed into the
 * low/high halves expected by the STATION_ADDR register pair.
 * Returns 0 on success, -EINVAL for a bad port, -EIO on register write
 * failure.
 */
static int netxen_p2_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr)
{
	u32 mac_hi, mac_lo;
	u32 reg_hi, reg_lo;

	u8 phy = adapter->physical_port;

	if (phy >= NETXEN_NIU_MAX_XG_PORTS)
		return -EINVAL;

	mac_lo = ((u32)addr[0] << 16) | ((u32)addr[1] << 24);
	mac_hi = addr[2] | ((u32)addr[3] << 8) |
		((u32)addr[4] << 16) | ((u32)addr[5] << 24);

	reg_lo = NETXEN_NIU_XGE_STATION_ADDR_0_1 + (0x10000 * phy);
	reg_hi = NETXEN_NIU_XGE_STATION_ADDR_0_HI + (0x10000 * phy);

	/* write twice to flush */
	if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi))
		return -EIO;
	if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi))
		return -EIO;

	return 0;
}

/*
 * Turn on the P2 hardware multicast filter for this port and seed it with
 * the broadcast address (slot 0) and the station address (slot 1).
 * Idempotent: returns immediately if already enabled.
 */
static int netxen_nic_enable_mcast_filter(struct netxen_adapter *adapter)
{
	u32	val = 0;
	u16 port = adapter->physical_port;
	u8 *addr = adapter->mac_addr;

	if (adapter->mc_enabled)
		return 0;

	/* bit (28+port) of MAC_ADDR_CNTL enables filtering for this port */
	val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG);
	val |= (1UL << (28+port));
	NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);

	/* add broadcast addr to filter */
	val = 0xffffff;
	NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val);
	NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val);

	/* add station addr to filter */
	val = MAC_HI(addr);
	NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), val);
	val = MAC_LO(addr);
	NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, val);

	adapter->mc_enabled = 1;
	return 0;
}

/*
 * Inverse of netxen_nic_enable_mcast_filter(): clear the port's filter
 * enable bit, restore the station address into slot 0 and wipe slot 1.
 */
static int netxen_nic_disable_mcast_filter(struct netxen_adapter *adapter)
{
	u32	val = 0;
	u16 port = adapter->physical_port;
	u8 *addr = adapter->mac_addr;

	if (!adapter->mc_enabled)
		return 0;

	val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG);
	val &= ~(1UL << (28+port));
	NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);

	val = MAC_HI(addr);
	NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val);
	val = MAC_LO(addr);
	NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val);

	NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), 0);
	NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, 0);

	adapter->mc_enabled = 0;
	return 0;
}

/* Write one multicast address into hardware filter slot @index. */
static int netxen_nic_set_mcast_addr(struct netxen_adapter *adapter,
		int index, u8 *addr)
{
	u32 hi = 0, lo = 0;
	u16 port = adapter->physical_port;

	lo = MAC_LO(addr);
	hi = MAC_HI(addr);

	NXWR32(adapter, NETXEN_MCAST_ADDR(port, index), hi);
	NXWR32(adapter, NETXEN_MCAST_ADDR(port, index)+4, lo);

	return 0;
}

/*
 * ndo_set_rx_mode handler for P2 hardware: map the netdev flags and
 * multicast list onto promisc/allmulti mode plus (when small enough)
 * the per-port hardware multicast filter slots.
 */
static void netxen_p2_nic_set_multi(struct net_device *netdev)
{
	struct netxen_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	u8 null_addr[6];
	int i;

	memset(null_addr, 0, 6);

	if (netdev->flags & IFF_PROMISC) {

		adapter->set_promisc(adapter,
				NETXEN_NIU_PROMISC_MODE);

		/* Full promiscuous mode */
		netxen_nic_disable_mcast_filter(adapter);

		return;
	}

	if (netdev_mc_empty(netdev)) {
		adapter->set_promisc(adapter,
				NETXEN_NIU_NON_PROMISC_MODE);
		netxen_nic_disable_mcast_filter(adapter);
		return;
	}

	adapter->set_promisc(adapter, NETXEN_NIU_ALLMULTI_MODE);
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > adapter->max_mc_count) {
		/* too many entries for the hardware filter: rely on allmulti */
		netxen_nic_disable_mcast_filter(adapter);
		return;
	}

	netxen_nic_enable_mcast_filter(adapter);

	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		netxen_nic_set_mcast_addr(adapter, i++, ha->addr);

	/* Clear out remaining addresses */
	while (i < adapter->max_mc_count)
		netxen_nic_set_mcast_addr(adapter, i++, null_addr);
}

static int
netxen_send_cmd_descs(struct netxen_adapter *adapter,
		struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
{
	u32 i, producer, consumer;
	struct netxen_cmd_buffer *pbuf;
	struct cmd_desc_type0 *cmd_desc;
	struct nx_host_tx_ring *tx_ring;

	i = 0;

	if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
		return -EIO;

	tx_ring = adapter->tx_ring;
	__netif_tx_lock_bh(tx_ring->txq);

	producer = tx_ring->producer;
	consumer = tx_ring->sw_consumer;

	if (nr_desc >= netxen_tx_avail(tx_ring)) {
		netif_tx_stop_queue(tx_ring->txq);
		/* order the stop against the avail re-check below */
		smp_mb();
		if (netxen_tx_avail(tx_ring) > nr_desc) {
			if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
				netif_tx_wake_queue(tx_ring->txq);
		} else {
			__netif_tx_unlock_bh(tx_ring->txq);
			return -EBUSY;
		}
	}

	/* copy the caller's descriptors into the ring at the producer index */
	do {
		cmd_desc = &cmd_desc_arr[i];

		pbuf = &tx_ring->cmd_buf_arr[producer];
		pbuf->skb = NULL;
		pbuf->frag_count = 0;

		memcpy(&tx_ring->desc_head[producer],
			&cmd_desc_arr[i], sizeof(struct cmd_desc_type0));

		producer = get_next_index(producer, tx_ring->num_desc);
		i++;

	} while (i != nr_desc);

	tx_ring->producer = producer;

	netxen_nic_update_cmd_producer(adapter, tx_ring);

	__netif_tx_unlock_bh(tx_ring->txq);

	return 0;
}

/*
 * Send a MAC add/delete firmware request (@op) for @addr over the tx ring.
 */
static int
nx_p3_sre_macaddr_change(struct netxen_adapter *adapter, u8 *addr, unsigned op)
{
	nx_nic_req_t req;
	nx_mac_req_t *mac_req;
	u64 word;

	memset(&req, 0, sizeof(nx_nic_req_t));
	req.qhdr = cpu_to_le64(NX_NIC_REQUEST << 23);

	word = NX_MAC_EVENT | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	mac_req = (nx_mac_req_t *)&req.words[0];
	mac_req->op = op;
	memcpy(mac_req->mac_addr, addr, 6);

	return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
}

/*
 * Ensure @addr is present in adapter->mac_list.  If it is already on
 * @del_list (addresses queued for deletion by the caller), just move it
 * back to the live list; otherwise allocate an entry and tell firmware
 * to add the filter.  Returns 0 or a negative errno.
 */
static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
		const u8 *addr, struct list_head *del_list)
{
	struct list_head *head;
	nx_mac_list_t *cur;

	/* look up if already exists */
	list_for_each(head, del_list) {
		cur = list_entry(head, nx_mac_list_t, list);

		if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
			list_move_tail(head, &adapter->mac_list);
			return 0;
		}
	}

	cur = kzalloc(sizeof(nx_mac_list_t), GFP_ATOMIC);
	if (cur == NULL) {
		printk(KERN_ERR "%s: failed to add mac address filter\n",
				adapter->netdev->name);
		return -ENOMEM;
	}
	memcpy(cur->mac_addr, addr, ETH_ALEN);
	list_add_tail(&cur->list, &adapter->mac_list);
	return nx_p3_sre_macaddr_change(adapter,
				cur->mac_addr, NETXEN_MAC_ADD);
}

/*
 * ndo_set_rx_mode handler for P3 hardware: rebuild the firmware MAC
 * filter list (station + broadcast + multicast list) and set the
 * vport miss mode (drop / accept-multi / accept-all).  Entries left on
 * del_list at the end are stale and get deleted from firmware.
 */
static void netxen_p3_nic_set_multi(struct net_device *netdev)
{
	struct netxen_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	static const u8 bcast_addr[ETH_ALEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};
	u32 mode = VPORT_MISS_MODE_DROP;
	LIST_HEAD(del_list);
	struct list_head *head;
	nx_mac_list_t *cur;

	if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
		return;

	/* provisionally mark everything for deletion; add_mac resurrects */
	list_splice_tail_init(&adapter->mac_list, &del_list);

	nx_p3_nic_add_mac(adapter, adapter->mac_addr, &del_list);
	nx_p3_nic_add_mac(adapter, bcast_addr, &del_list);

	if (netdev->flags & IFF_PROMISC) {
		mode = VPORT_MISS_MODE_ACCEPT_ALL;
		goto send_fw_cmd;
	}

	if ((netdev->flags & IFF_ALLMULTI) ||
			(netdev_mc_count(netdev) > adapter->max_mc_count)) {
		mode = VPORT_MISS_MODE_ACCEPT_MULTI;
		goto send_fw_cmd;
	}

	if (!netdev_mc_empty(netdev)) {
		netdev_for_each_mc_addr(ha, netdev)
			nx_p3_nic_add_mac(adapter, ha->addr, &del_list);
	}

send_fw_cmd:
	adapter->set_promisc(adapter, mode);
	head = &del_list;
	while (!list_empty(head)) {
		cur = list_entry(head->next, nx_mac_list_t, list);

		nx_p3_sre_macaddr_change(adapter,
				cur->mac_addr, NETXEN_MAC_DEL);
		list_del(&cur->list);
		kfree(cur);
	}
}

/* Send the P3 vport miss-mode (promisc) setting to firmware. */
static int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
{
	nx_nic_req_t req;
	u64 word;

	memset(&req, 0, sizeof(nx_nic_req_t));

	req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);

	word = NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE |
			((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	req.words[0] = cpu_to_le64(mode);

	return netxen_send_cmd_descs(adapter,
				(struct cmd_desc_type0 *)&req, 1);
}

/*
 * Free every entry on adapter->mac_list, telling firmware to drop each
 * filter first.
 */
void netxen_p3_free_mac_list(struct netxen_adapter *adapter)
{
	nx_mac_list_t *cur;
	struct list_head *head
= &adapter->mac_list;

	while (!list_empty(head)) {
		cur = list_entry(head->next, nx_mac_list_t, list);
		nx_p3_sre_macaddr_change(adapter,
				cur->mac_addr, NETXEN_MAC_DEL);
		list_del(&cur->list);
		kfree(cur);
	}
}

/* P3 MAC change: the firmware filter list is rebuilt via set_multi. */
static int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr)
{
	/* assuming caller has already copied new addr to netdev */
	netxen_p3_nic_set_multi(adapter->netdev);
	return 0;
}

#define	NETXEN_CONFIG_INTR_COALESCE	3

/*
 * Send the interrupt coalescing parameter set by ethtool to the card.
 */
int netxen_config_intr_coalesce(struct netxen_adapter *adapter)
{
	nx_nic_req_t req;
	u64 word[6];
	int rv, i;

	memset(&req, 0, sizeof(nx_nic_req_t));
	memset(word, 0, sizeof(word));

	req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);

	word[0] = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word[0]);

	/* payload is a raw copy of the adapter's coalescing struct */
	memcpy(&word[0], &adapter->coal, sizeof(adapter->coal));
	for (i = 0; i < 6; i++)
		req.words[i] = cpu_to_le64(word[i]);

	rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0) {
		printk(KERN_ERR "ERROR. Could not send "
			"interrupt coalescing parameters\n");
	}

	return rv;
}

/* Ask firmware to enable/disable hardware LRO; no-op if FW not attached. */
int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable)
{
	nx_nic_req_t req;
	u64 word;
	int rv = 0;

	if (!test_bit(__NX_FW_ATTACHED, &adapter->state))
		return 0;

	memset(&req, 0, sizeof(nx_nic_req_t));

	req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);

	word = NX_NIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	req.words[0] = cpu_to_le64(enable);

	rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0) {
		printk(KERN_ERR "ERROR. Could not send "
			"configure hw lro request\n");
	}

	return rv;
}

/*
 * Toggle firmware bridged mode.  Returns early if the requested state
 * already matches the NETXEN_NIC_BRIDGE_ENABLED flag; the flag is flipped
 * even if the firmware request failed (matches existing behavior).
 */
int netxen_config_bridged_mode(struct netxen_adapter *adapter, int enable)
{
	nx_nic_req_t req;
	u64 word;
	int rv = 0;

	if (!!(adapter->flags & NETXEN_NIC_BRIDGE_ENABLED) == enable)
		return rv;

	memset(&req, 0, sizeof(nx_nic_req_t));

	req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);

	word = NX_NIC_H2C_OPCODE_CONFIG_BRIDGING |
		((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	req.words[0] = cpu_to_le64(enable);

	rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0) {
		printk(KERN_ERR "ERROR. Could not send "
			"configure bridge mode request\n");
	}

	adapter->flags ^= NETXEN_NIC_BRIDGE_ENABLED;

	return rv;
}


#define RSS_HASHTYPE_IP_TCP	0x3

/*
 * Configure receive-side scaling: TCP/IP hashing with a fixed Toeplitz-style
 * key and a 0x7 indirection table mask (bit layout documented inline below).
 */
int netxen_config_rss(struct netxen_adapter *adapter, int enable)
{
	nx_nic_req_t req;
	u64 word;
	int i, rv;

	static const u64 key[] = {
		0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
		0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
		0x255b0ec26d5a56daULL
	};


	memset(&req, 0, sizeof(nx_nic_req_t));
	req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);

	word = NX_NIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	/*
	 * RSS request:
	 * bits 3-0: hash_method
	 *      5-4: hash_type_ipv4
	 *	7-6: hash_type_ipv6
	 *	  8: enable
	 *        9: use indirection table
	 *    47-10: reserved
	 *    63-48: indirection table mask
	 */
	word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
		((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
		((u64)(enable & 0x1) << 8) |
		((0x7ULL) << 48);
	req.words[0] = cpu_to_le64(word);
	for (i = 0; i < ARRAY_SIZE(key); i++)
		req.words[i+1] = cpu_to_le64(key[i]);


	rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0) {
		printk(KERN_ERR "%s: could not configure RSS\n",
				adapter->netdev->name);
	}

	return rv;
}

/*
 * Notify firmware that an IP address was added (@cmd == NX_IP_UP) or
 * removed from the interface.  (continues on the next source line)
 */
int netxen_config_ipaddr(struct netxen_adapter *adapter, u32 ip, int cmd)
{
	nx_nic_req_t req;
	u64 word;
	int rv;

	memset(&req, 0, sizeof(nx_nic_req_t));
	req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);

	word =
NX_NIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); req.words[0] = cpu_to_le64(cmd); req.words[1] = cpu_to_le64(ip); rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) { printk(KERN_ERR "%s: could not notify %s IP 0x%x reuqest\n", adapter->netdev->name, (cmd == NX_IP_UP) ? "Add" : "Remove", ip); } return rv; } int netxen_linkevent_request(struct netxen_adapter *adapter, int enable) { nx_nic_req_t req; u64 word; int rv; memset(&req, 0, sizeof(nx_nic_req_t)); req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); word = NX_NIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); req.words[0] = cpu_to_le64(enable | (enable << 8)); rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) { printk(KERN_ERR "%s: could not configure link notification\n", adapter->netdev->name); } return rv; } int netxen_send_lro_cleanup(struct netxen_adapter *adapter) { nx_nic_req_t req; u64 word; int rv; if (!test_bit(__NX_FW_ATTACHED, &adapter->state)) return 0; memset(&req, 0, sizeof(nx_nic_req_t)); req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); word = NX_NIC_H2C_OPCODE_LRO_REQUEST | ((u64)adapter->portnum << 16) | ((u64)NX_NIC_LRO_REQUEST_CLEANUP << 56) ; req.req_hdr = cpu_to_le64(word); rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) { printk(KERN_ERR "%s: could not cleanup lro flows\n", adapter->netdev->name); } return rv; } /* * netxen_nic_change_mtu - Change the Maximum Transfer Unit * @returns 0 on success, negative on failure */ #define MTU_FUDGE_FACTOR 100 int netxen_nic_change_mtu(struct net_device *netdev, int mtu) { struct netxen_adapter *adapter = netdev_priv(netdev); int max_mtu; int rc = 0; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) max_mtu = P3_MAX_MTU; else max_mtu = P2_MAX_MTU; if (mtu > max_mtu) { printk(KERN_ERR "%s: mtu > %d bytes unsupported\n", netdev->name, max_mtu); return -EINVAL; } if 
(adapter->set_mtu) rc = adapter->set_mtu(adapter, mtu); if (!rc) netdev->mtu = mtu; return rc; } static int netxen_get_flash_block(struct netxen_adapter *adapter, int base, int size, __le32 * buf) { int i, v, addr; __le32 *ptr32; addr = base; ptr32 = buf; for (i = 0; i < size / sizeof(u32); i++) { if (netxen_rom_fast_read(adapter, addr, &v) == -1) return -1; *ptr32 = cpu_to_le32(v); ptr32++; addr += sizeof(u32); } if ((char *)buf + size > (char *)ptr32) { __le32 local; if (netxen_rom_fast_read(adapter, addr, &v) == -1) return -1; local = cpu_to_le32(v); memcpy(ptr32, &local, (char *)buf + size - (char *)ptr32); } return 0; } int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac) { __le32 *pmac = (__le32 *) mac; u32 offset; offset = NX_FW_MAC_ADDR_OFFSET + (adapter->portnum * sizeof(u64)); if (netxen_get_flash_block(adapter, offset, sizeof(u64), pmac) == -1) return -1; if (*mac == cpu_to_le64(~0ULL)) { offset = NX_OLD_MAC_ADDR_OFFSET + (adapter->portnum * sizeof(u64)); if (netxen_get_flash_block(adapter, offset, sizeof(u64), pmac) == -1) return -1; if (*mac == cpu_to_le64(~0ULL)) return -1; } return 0; } int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac) { uint32_t crbaddr, mac_hi, mac_lo; int pci_func = adapter->ahw.pci_func; crbaddr = CRB_MAC_BLOCK_START + (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1)); mac_lo = NXRD32(adapter, crbaddr); mac_hi = NXRD32(adapter, crbaddr+4); if (pci_func & 1) *mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16)); else *mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32)); return 0; } /* * Changes the CRB window to the specified window. 
 */
static void netxen_nic_pci_set_crbwindow_128M(struct netxen_adapter *adapter,
		u32 window)
{
	void __iomem *offset;
	int count = 10;
	u8 func = adapter->ahw.pci_func;

	if (adapter->ahw.crb_win == window)
		return;

	offset = PCI_OFFSET_SECOND_RANGE(adapter,
			NETXEN_PCIX_PH_REG(PCIE_CRB_WINDOW_REG(func)));

	writel(window, offset);
	/* poll (up to 10 us) until the window register read back matches */
	do {
		if (window == readl(offset))
			break;

		if (printk_ratelimit())
			dev_warn(&adapter->pdev->dev,
				"failed to set CRB window to %d\n",
				(window == NETXEN_WINDOW_ONE));
		udelay(1);

	} while (--count > 0);

	if (count > 0)
		adapter->ahw.crb_win = window;
}

/*
 * Returns < 0 if off is not valid,
 *	 1 if window access is needed. 'off' is set to offset from
 *	   CRB space in 128M pci map
 *	 0 if no window access is needed. 'off' is set to 2M addr
 * In: 'off' is offset from base in 128M pci map
 */
static int
netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter,
		ulong off, void __iomem **addr)
{
	crb_128M_2M_sub_block_map_t *m;


	if ((off >= NETXEN_CRB_MAX) || (off < NETXEN_PCI_CRBSPACE))
		return -EINVAL;

	off -= NETXEN_PCI_CRBSPACE;

	/*
	 * Try direct map
	 */
	m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];

	if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
		*addr = adapter->ahw.pci_base0 + m->start_2M +
			(off - m->start_128M);
		return 0;
	}

	/*
	 * Not in direct map, use crb window
	 */
	*addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M +
		(off & MASK(16));
	return 1;
}

/*
 * In: 'off' is offset from CRB space in 128M pci map
 * Out: 'off' is 2M pci map addr
 * side effect: lock crb window
 */
static void
netxen_nic_pci_set_crbwindow_2M(struct netxen_adapter *adapter, ulong off)
{
	u32 window;
	void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M;

	off -= NETXEN_PCI_CRBSPACE;

	window = CRB_HI(off);

	writel(window, addr);
	/* read back to verify the window actually latched */
	if (readl(addr) != window) {
		if (printk_ratelimit())
			dev_warn(&adapter->pdev->dev,
				"failed to set CRB window to %d off 0x%lx\n",
				window, off);
	}
}

/*
 * Map a 128M-window offset to a usable __iomem address.  Uses the BAR
 * mapping when the offset is covered; otherwise ioremaps a single page
 * and returns it via *mem_ptr so the caller can iounmap() afterwards.
 */
static void __iomem *
netxen_nic_map_indirect_address_128M(struct netxen_adapter *adapter,
		ulong win_off, void __iomem **mem_ptr)
{
	ulong off = win_off;
	void __iomem *addr;
	resource_size_t mem_base;

	if (ADDR_IN_WINDOW1(win_off))
		off = NETXEN_CRB_NORMAL(win_off);

	addr = pci_base_offset(adapter, off);
	if (addr)
		return addr;

	if (adapter->ahw.pci_len0 == 0)
		off -= NETXEN_PCI_CRBSPACE;

	mem_base = pci_resource_start(adapter->pdev, 0);
	*mem_ptr = ioremap(mem_base + (off & PAGE_MASK), PAGE_SIZE);
	if (*mem_ptr)
		addr = *mem_ptr + (off & (PAGE_SIZE - 1));

	return addr;
}

/*
 * CRB register write for the 128M (P2) PCI map.  Window-1 offsets go
 * through the io accessor; window-0 offsets require switching the CRB
 * window under crb_lock.  Returns 0 or -EIO if no mapping exists.
 */
static int
netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter, ulong off, u32 data)
{
	unsigned long flags;
	void __iomem *addr, *mem_ptr = NULL;

	addr = netxen_nic_map_indirect_address_128M(adapter, off, &mem_ptr);
	if (!addr)
		return -EIO;

	if (ADDR_IN_WINDOW1(off)) {	/* Window 1 */
		netxen_nic_io_write_128M(adapter, addr, data);
	} else {	/* Window 0 */
		write_lock_irqsave(&adapter->ahw.crb_lock, flags);
		netxen_nic_pci_set_crbwindow_128M(adapter, 0);
		writel(data, addr);
		netxen_nic_pci_set_crbwindow_128M(adapter,
				NETXEN_WINDOW_ONE);
		write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
	}

	if (mem_ptr)
		iounmap(mem_ptr);

	return 0;
}

/*
 * CRB register read for the 128M (P2) PCI map; mirrors the write path.
 * NOTE(review): returns -EIO through a u32 on mapping failure, which is
 * indistinguishable from register value 0xffffffea — long-standing behavior.
 */
static u32
netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter, ulong off)
{
	unsigned long flags;
	void __iomem *addr, *mem_ptr = NULL;
	u32 data;

	addr = netxen_nic_map_indirect_address_128M(adapter, off, &mem_ptr);
	if (!addr)
		return -EIO;

	if (ADDR_IN_WINDOW1(off)) {	/* Window 1 */
		data = netxen_nic_io_read_128M(adapter, addr);
	} else {	/* Window 0 */
		write_lock_irqsave(&adapter->ahw.crb_lock, flags);
		netxen_nic_pci_set_crbwindow_128M(adapter, 0);
		data = readl(addr);
		netxen_nic_pci_set_crbwindow_128M(adapter,
				NETXEN_WINDOW_ONE);
		write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
	}

	if (mem_ptr)
		iounmap(mem_ptr);

	return data;
}

/*
 * CRB register write for the 2M (P3) PCI map: direct-mapped offsets are
 * written straight through; others need the CRB window under both the
 * host crb_lock and the inter-function crb_win hardware lock.
 */
static int
netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter, ulong off, u32 data)
{
	unsigned long flags;
	int rv;
	void __iomem *addr = NULL;

	rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr);

	if (rv == 0) {
		writel(data, addr);
return 0;
	}

	if (rv > 0) {
		/* indirect access */
		write_lock_irqsave(&adapter->ahw.crb_lock, flags);
		crb_win_lock(adapter);
		netxen_nic_pci_set_crbwindow_2M(adapter, off);
		writel(data, addr);
		crb_win_unlock(adapter);
		write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
		return 0;
	}

	dev_err(&adapter->pdev->dev,
			"%s: invalid offset: 0x%016lx\n", __func__, off);
	dump_stack();
	return -EIO;
}

/*
 * CRB register read for the 2M (P3) PCI map; mirrors the write path above.
 * Returns the register value, or (u32)-1 for an invalid offset.
 */
static u32
netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter, ulong off)
{
	unsigned long flags;
	int rv;
	u32 data;
	void __iomem *addr = NULL;

	rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr);

	if (rv == 0)
		return readl(addr);

	if (rv > 0) {
		/* indirect access */
		write_lock_irqsave(&adapter->ahw.crb_lock, flags);
		crb_win_lock(adapter);
		netxen_nic_pci_set_crbwindow_2M(adapter, off);
		data = readl(addr);
		crb_win_unlock(adapter);
		write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
		return data;
	}

	dev_err(&adapter->pdev->dev,
			"%s: invalid offset: 0x%016lx\n", __func__, off);
	dump_stack();
	return -1;
}

/* window 1 registers only */
static void netxen_nic_io_write_128M(struct netxen_adapter *adapter,
		void __iomem *addr, u32 data)
{
	/* read_lock: window-1 access may run concurrently with itself,
	 * but must exclude window switches (which take the write lock) */
	read_lock(&adapter->ahw.crb_lock);
	writel(data, addr);
	read_unlock(&adapter->ahw.crb_lock);
}

static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter,
		void __iomem *addr)
{
	u32 val;

	read_lock(&adapter->ahw.crb_lock);
	val = readl(addr);
	read_unlock(&adapter->ahw.crb_lock);

	return val;
}

/* 2M map io accessors: direct-mapped, no locking needed */
static void netxen_nic_io_write_2M(struct netxen_adapter *adapter,
		void __iomem *addr, u32 data)
{
	writel(data, addr);
}

static u32 netxen_nic_io_read_2M(struct netxen_adapter *adapter,
		void __iomem *addr)
{
	return readl(addr);
}

/*
 * Translate a CRB offset into a __iomem address for the current chip
 * revision (second BAR range / normalized window on P2, 2M direct map
 * lookup on P3).
 */
void __iomem *
netxen_get_ioaddr(struct netxen_adapter *adapter, u32 offset)
{
	void __iomem *addr = NULL;

	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		if ((offset < NETXEN_CRB_PCIX_HOST2) &&
				(offset > NETXEN_CRB_PCIX_HOST))
			addr = PCI_OFFSET_SECOND_RANGE(adapter, offset);
		else
			addr = NETXEN_CRB_NORMALIZE(adapter, offset);
	} else {
		WARN_ON(netxen_nic_pci_get_crb_addr_2M(adapter,
					offset, &addr));
	}

	return addr;
}

/*
 * 128M map: translate an OCM0/OCM1 target address into a PCI offset.
 * Returns 0 with *start set, or -EIO if @addr is outside both ranges.
 */
static int
netxen_nic_pci_set_window_128M(struct netxen_adapter *adapter,
		u64 addr, u32 *start)
{
	if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) {
		*start = (addr - NETXEN_ADDR_OCM0 + NETXEN_PCI_OCM0);
		return 0;
	} else if (ADDR_IN_RANGE(addr,
				NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
		*start = (addr - NETXEN_ADDR_OCM1 + NETXEN_PCI_OCM1);
		return 0;
	}
	return -EIO;
}

/*
 * 2M map: program the OCM window register for @addr and return the
 * corresponding PCI offset in *start.
 */
static int
netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter,
		u64 addr, u32 *start)
{
	u32 window;

	window = OCM_WIN(addr);
	writel(window, adapter->ahw.ocm_win_crb);
	/* read back to flush */
	readl(adapter->ahw.ocm_win_crb);
	adapter->ahw.ocm_win = window;
	*start = NETXEN_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
	return 0;
}

/*
 * 64-bit read (@op == 0) or write of card memory at @off through the
 * adapter's pci_set_window translation, under mem_lock.  On P2 the
 * region may need a temporary single-page ioremap.
 */
static int
netxen_nic_pci_mem_access_direct(struct netxen_adapter *adapter, u64 off,
		u64 *data, int op)
{
	void __iomem *addr, *mem_ptr = NULL;
	resource_size_t mem_base;
	int ret;
	u32 start;

	spin_lock(&adapter->ahw.mem_lock);

	ret = adapter->pci_set_window(adapter, off, &start);
	if (ret != 0)
		goto unlock;

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		addr = adapter->ahw.pci_base0 + start;
	} else {
		addr = pci_base_offset(adapter, start);
		if (addr)
			goto noremap;

		mem_base = pci_resource_start(adapter->pdev, 0) +
					(start & PAGE_MASK);
		mem_ptr = ioremap(mem_base, PAGE_SIZE);
		if (mem_ptr == NULL) {
			ret = -EIO;
			goto unlock;
		}
		addr = mem_ptr + (start & (PAGE_SIZE-1));
	}
noremap:
	if (op == 0)	/* read */
		*data = readq(addr);
	else		/* write */
		writeq(*data, addr);

unlock:
	spin_unlock(&adapter->ahw.mem_lock);

	if (mem_ptr)
		iounmap(mem_ptr);
	return ret;
}

/* 64-bit read from the CAM/QM region via its fixed 2M-map base. */
void
netxen_pci_camqm_read_2M(struct netxen_adapter *adapter, u64 off, u64 *data)
{
	void __iomem *addr = adapter->ahw.pci_base0 +
		NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM);

	spin_lock(&adapter->ahw.mem_lock);
	*data = readq(addr);
	spin_unlock(&adapter->ahw.mem_lock);
}

/* 64-bit write to the CAM/QM region; counterpart of the read above. */
void
netxen_pci_camqm_write_2M(struct netxen_adapter *adapter, u64
off, u64 data)
{
	void __iomem *addr = adapter->ahw.pci_base0 +
		NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM);

	spin_lock(&adapter->ahw.mem_lock);
	writeq(data, addr);
	spin_unlock(&adapter->ahw.mem_lock);
}

#define MAX_CTL_CHECK	1000

/*
 * 64-bit write to card memory on the 128M (P2) map using the SIU/MIU
 * "test agent": program the target address and data registers, kick the
 * agent, then poll TEST_AGT_CTRL until TA_CTL_BUSY clears (bounded by
 * MAX_CTL_CHECK iterations).  OCM ranges fall back to direct access.
 */
static int
netxen_nic_pci_mem_write_128M(struct netxen_adapter *adapter,
		u64 off, u64 data)
{
	int j, ret;
	u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo;
	void __iomem *mem_crb;

	/* Only 64-bit aligned access */
	if (off & 7)
		return -EIO;

	/* P2 has different SIU and MIU test agent base addr */
	if (ADDR_IN_RANGE(off,
			NETXEN_ADDR_QDR_NET, NETXEN_ADDR_QDR_NET_MAX_P2)) {
		mem_crb = pci_base_offset(adapter,
				NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE);
		addr_hi = SIU_TEST_AGT_ADDR_HI;
		data_lo = SIU_TEST_AGT_WRDATA_LO;
		data_hi = SIU_TEST_AGT_WRDATA_HI;
		off_lo = off & SIU_TEST_AGT_ADDR_MASK;
		off_hi = SIU_TEST_AGT_UPPER_ADDR(off);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
		mem_crb = pci_base_offset(adapter,
				NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
		addr_hi = MIU_TEST_AGT_ADDR_HI;
		data_lo = MIU_TEST_AGT_WRDATA_LO;
		data_hi = MIU_TEST_AGT_WRDATA_HI;
		off_lo = off & MIU_TEST_AGT_ADDR_MASK;
		off_hi = 0;
		goto correct;
	}

	if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) ||
		ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
		if (adapter->ahw.pci_len0 != 0) {
			return netxen_nic_pci_mem_access_direct(adapter,
					off, &data, 1);
		}
	}

	return -EIO;

correct:
	spin_lock(&adapter->ahw.mem_lock);
	netxen_nic_pci_set_crbwindow_128M(adapter, 0);

	writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO));
	writel(off_hi, (mem_crb + addr_hi));
	writel(data & 0xffffffff, (mem_crb + data_lo));
	writel((data >> 32) & 0xffffffff, (mem_crb + data_hi));
	/* enable first, then start (two-step handshake) */
	writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
	writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
			(mem_crb + TEST_AGT_CTRL));

	for (j = 0; j < MAX_CTL_CHECK; j++) {
		temp = readl((mem_crb + TEST_AGT_CTRL));
		if ((temp & TA_CTL_BUSY) == 0)
			break;
	}

	if (j >= MAX_CTL_CHECK) {
		if (printk_ratelimit())
			dev_err(&adapter->pdev->dev,
					"failed to write through agent\n");
		ret = -EIO;
	} else
		ret = 0;

	netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE);
	spin_unlock(&adapter->ahw.mem_lock);
	return ret;
}

/*
 * 64-bit read from card memory on the 128M (P2) map; same test-agent
 * protocol as the write path, reading the RDDATA register pair at the end.
 */
static int
netxen_nic_pci_mem_read_128M(struct netxen_adapter *adapter,
		u64 off, u64 *data)
{
	int j, ret;
	u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo;
	u64 val;
	void __iomem *mem_crb;

	/* Only 64-bit aligned access */
	if (off & 7)
		return -EIO;

	/* P2 has different SIU and MIU test agent base addr */
	if (ADDR_IN_RANGE(off,
			NETXEN_ADDR_QDR_NET, NETXEN_ADDR_QDR_NET_MAX_P2)) {
		mem_crb = pci_base_offset(adapter,
				NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE);
		addr_hi = SIU_TEST_AGT_ADDR_HI;
		data_lo = SIU_TEST_AGT_RDDATA_LO;
		data_hi = SIU_TEST_AGT_RDDATA_HI;
		off_lo = off & SIU_TEST_AGT_ADDR_MASK;
		off_hi = SIU_TEST_AGT_UPPER_ADDR(off);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
		mem_crb = pci_base_offset(adapter,
				NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
		addr_hi = MIU_TEST_AGT_ADDR_HI;
		data_lo = MIU_TEST_AGT_RDDATA_LO;
		data_hi = MIU_TEST_AGT_RDDATA_HI;
		off_lo = off & MIU_TEST_AGT_ADDR_MASK;
		off_hi = 0;
		goto correct;
	}

	if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) ||
		ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
		if (adapter->ahw.pci_len0 != 0) {
			return netxen_nic_pci_mem_access_direct(adapter,
					off, data, 0);
		}
	}

	return -EIO;

correct:
	spin_lock(&adapter->ahw.mem_lock);
	netxen_nic_pci_set_crbwindow_128M(adapter, 0);

	writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO));
	writel(off_hi, (mem_crb + addr_hi));
	writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
	writel((TA_CTL_START|TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));

	for (j = 0; j < MAX_CTL_CHECK; j++) {
		temp = readl(mem_crb + TEST_AGT_CTRL);
		if ((temp & TA_CTL_BUSY) == 0)
			break;
	}

	if (j >= MAX_CTL_CHECK) {
		if (printk_ratelimit())
			dev_err(&adapter->pdev->dev,
					"failed to read through agent\n");
		ret = -EIO;
	} else {
		temp = readl(mem_crb + data_hi);
		val = ((u64)temp << 32);
		val |= readl(mem_crb + data_lo);
		*data = val;
		ret = 0;
	}

	netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE);
	spin_unlock(&adapter->ahw.mem_lock);
	return ret;
}

/*
 * 64-bit write to card memory on the 2M (P3) map.  Same test-agent
 * handshake, but MIU and SIU share one base and no CRB window switch is
 * needed.  NOTE(review): the write silently clamps to an 8-byte-aligned
 * address via off8 — presumably callers only pass aligned offsets (the
 * off & 7 guard above enforces that).
 */
static int
netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter,
		u64 off, u64 data)
{
	int j, ret;
	u32 temp, off8;
	void __iomem *mem_crb;

	/* Only 64-bit aligned access */
	if (off & 7)
		return -EIO;

	/* P3 onward, test agent base for MIU and SIU is same */
	if (ADDR_IN_RANGE(off,
			NETXEN_ADDR_QDR_NET, NETXEN_ADDR_QDR_NET_MAX_P3)) {
		mem_crb = netxen_get_ioaddr(adapter,
				NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
		mem_crb = netxen_get_ioaddr(adapter,
				NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX))
		return netxen_nic_pci_mem_access_direct(adapter, off,
				&data, 1);

	return -EIO;

correct:
	off8 = off & 0xfffffff8;

	spin_lock(&adapter->ahw.mem_lock);

	writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
	writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));

	writel(data & 0xffffffff,
			mem_crb + MIU_TEST_AGT_WRDATA_LO);
	writel((data >> 32) & 0xffffffff,
			mem_crb + MIU_TEST_AGT_WRDATA_HI);

	writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
	writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
			(mem_crb + TEST_AGT_CTRL));

	for (j = 0; j < MAX_CTL_CHECK; j++) {
		temp = readl(mem_crb + TEST_AGT_CTRL);
		if ((temp & TA_CTL_BUSY) == 0)
			break;
	}

	if (j >= MAX_CTL_CHECK) {
		if (printk_ratelimit())
			dev_err(&adapter->pdev->dev,
					"failed to write through agent\n");
		ret = -EIO;
	} else
		ret = 0;

	spin_unlock(&adapter->ahw.mem_lock);

	return ret;
}

/*
 * 64-bit read from card memory on the 2M (P3) map; counterpart of the
 * write above.  (continues on the next source line)
 */
static int
netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter,
		u64 off, u64 *data)
{
	int j, ret;
	u32 temp, off8;
	u64 val;
	void __iomem *mem_crb;

	/* Only 64-bit aligned access */
	if (off & 7)
		return -EIO;

	/* P3 onward, test agent base for MIU and SIU is same */
	if
(ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, NETXEN_ADDR_QDR_NET_MAX_P3)) { mem_crb = netxen_get_ioaddr(adapter, NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE); goto correct; } if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { mem_crb = netxen_get_ioaddr(adapter, NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); goto correct; } if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) { return netxen_nic_pci_mem_access_direct(adapter, off, data, 0); } return -EIO; correct: off8 = off & 0xfffffff8; spin_lock(&adapter->ahw.mem_lock); writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL)); for (j = 0; j < MAX_CTL_CHECK; j++) { temp = readl(mem_crb + TEST_AGT_CTRL); if ((temp & TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { if (printk_ratelimit()) dev_err(&adapter->pdev->dev, "failed to read through agent\n"); ret = -EIO; } else { val = (u64)(readl(mem_crb + MIU_TEST_AGT_RDDATA_HI)) << 32; val |= readl(mem_crb + MIU_TEST_AGT_RDDATA_LO); *data = val; ret = 0; } spin_unlock(&adapter->ahw.mem_lock); return ret; } void netxen_setup_hwops(struct netxen_adapter *adapter) { adapter->init_port = netxen_niu_xg_init_port; adapter->stop_port = netxen_niu_disable_xg_port; if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { adapter->crb_read = netxen_nic_hw_read_wx_128M, adapter->crb_write = netxen_nic_hw_write_wx_128M, adapter->pci_set_window = netxen_nic_pci_set_window_128M, adapter->pci_mem_read = netxen_nic_pci_mem_read_128M, adapter->pci_mem_write = netxen_nic_pci_mem_write_128M, adapter->io_read = netxen_nic_io_read_128M, adapter->io_write = netxen_nic_io_write_128M, adapter->macaddr_set = netxen_p2_nic_set_mac_addr; adapter->set_multi = netxen_p2_nic_set_multi; adapter->set_mtu = netxen_nic_set_mtu_xgb; adapter->set_promisc = netxen_p2_nic_set_promisc; } else { adapter->crb_read = netxen_nic_hw_read_wx_2M, 
adapter->crb_write = netxen_nic_hw_write_wx_2M, adapter->pci_set_window = netxen_nic_pci_set_window_2M, adapter->pci_mem_read = netxen_nic_pci_mem_read_2M, adapter->pci_mem_write = netxen_nic_pci_mem_write_2M, adapter->io_read = netxen_nic_io_read_2M, adapter->io_write = netxen_nic_io_write_2M, adapter->set_mtu = nx_fw_cmd_set_mtu; adapter->set_promisc = netxen_p3_nic_set_promisc; adapter->macaddr_set = netxen_p3_nic_set_mac_addr; adapter->set_multi = netxen_p3_nic_set_multi; adapter->phy_read = nx_fw_cmd_query_phy; adapter->phy_write = nx_fw_cmd_set_phy; } } int netxen_nic_get_board_info(struct netxen_adapter *adapter) { int offset, board_type, magic; struct pci_dev *pdev = adapter->pdev; offset = NX_FW_MAGIC_OFFSET; if (netxen_rom_fast_read(adapter, offset, &magic)) return -EIO; if (magic != NETXEN_BDINFO_MAGIC) { dev_err(&pdev->dev, "invalid board config, magic=%08x\n", magic); return -EIO; } offset = NX_BRDTYPE_OFFSET; if (netxen_rom_fast_read(adapter, offset, &board_type)) return -EIO; if (board_type == NETXEN_BRDTYPE_P3_4_GB_MM) { u32 gpio = NXRD32(adapter, NETXEN_ROMUSB_GLB_PAD_GPIO_I); if ((gpio & 0x8000) == 0) board_type = NETXEN_BRDTYPE_P3_10G_TP; } adapter->ahw.board_type = board_type; switch (board_type) { case NETXEN_BRDTYPE_P2_SB35_4G: adapter->ahw.port_type = NETXEN_NIC_GBE; break; case NETXEN_BRDTYPE_P2_SB31_10G: case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: case NETXEN_BRDTYPE_P2_SB31_10G_CX4: case NETXEN_BRDTYPE_P3_HMEZ: case NETXEN_BRDTYPE_P3_XG_LOM: case NETXEN_BRDTYPE_P3_10G_CX4: case NETXEN_BRDTYPE_P3_10G_CX4_LP: case NETXEN_BRDTYPE_P3_IMEZ: case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: case NETXEN_BRDTYPE_P3_10G_SFP_CT: case NETXEN_BRDTYPE_P3_10G_SFP_QT: case NETXEN_BRDTYPE_P3_10G_XFP: case NETXEN_BRDTYPE_P3_10000_BASE_T: adapter->ahw.port_type = NETXEN_NIC_XGBE; break; case NETXEN_BRDTYPE_P1_BD: case NETXEN_BRDTYPE_P1_SB: case NETXEN_BRDTYPE_P1_SMAX: case NETXEN_BRDTYPE_P1_SOCK: case NETXEN_BRDTYPE_P3_REF_QG: case 
NETXEN_BRDTYPE_P3_4_GB: case NETXEN_BRDTYPE_P3_4_GB_MM: adapter->ahw.port_type = NETXEN_NIC_GBE; break; case NETXEN_BRDTYPE_P3_10G_TP: adapter->ahw.port_type = (adapter->portnum < 2) ? NETXEN_NIC_XGBE : NETXEN_NIC_GBE; break; default: dev_err(&pdev->dev, "unknown board type %x\n", board_type); adapter->ahw.port_type = NETXEN_NIC_XGBE; break; } return 0; } /* NIU access sections */ static int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu) { new_mtu += MTU_FUDGE_FACTOR; if (adapter->physical_port == 0) NXWR32(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, new_mtu); else NXWR32(adapter, NETXEN_NIU_XG1_MAX_FRAME_SIZE, new_mtu); return 0; } void netxen_nic_set_link_parameters(struct netxen_adapter *adapter) { __u32 status; __u32 autoneg; __u32 port_mode; if (!netif_carrier_ok(adapter->netdev)) { adapter->link_speed = 0; adapter->link_duplex = -1; adapter->link_autoneg = AUTONEG_ENABLE; return; } if (adapter->ahw.port_type == NETXEN_NIC_GBE) { port_mode = NXRD32(adapter, NETXEN_PORT_MODE_ADDR); if (port_mode == NETXEN_PORT_MODE_802_3_AP) { adapter->link_speed = SPEED_1000; adapter->link_duplex = DUPLEX_FULL; adapter->link_autoneg = AUTONEG_DISABLE; return; } if (adapter->phy_read && adapter->phy_read(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, &status) == 0) { if (netxen_get_phy_link(status)) { switch (netxen_get_phy_speed(status)) { case 0: adapter->link_speed = SPEED_10; break; case 1: adapter->link_speed = SPEED_100; break; case 2: adapter->link_speed = SPEED_1000; break; default: adapter->link_speed = 0; break; } switch (netxen_get_phy_duplex(status)) { case 0: adapter->link_duplex = DUPLEX_HALF; break; case 1: adapter->link_duplex = DUPLEX_FULL; break; default: adapter->link_duplex = -1; break; } if (adapter->phy_read && adapter->phy_read(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG, &autoneg) != 0) adapter->link_autoneg = autoneg; } else goto link_down; } else { link_down: adapter->link_speed = 0; adapter->link_duplex = -1; } } } int 
netxen_nic_wol_supported(struct netxen_adapter *adapter) { u32 wol_cfg; if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) return 0; wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV); if (wol_cfg & (1UL << adapter->portnum)) { wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG); if (wol_cfg & (1 << adapter->portnum)) return 1; } return 0; }
gpl-2.0
Lloir/android_kernel_htc_enrc2b-bladev2
drivers/net/wireless/rtlwifi/rtl8192de/fw.c
651
25891
/****************************************************************************** * * Copyright(c) 2009-2010 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. * * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ #include "../wifi.h" #include "../pci.h" #include "../base.h" #include "reg.h" #include "def.h" #include "fw.h" #include "sw.h" static bool _rtl92d_is_fw_downloaded(struct rtl_priv *rtlpriv) { return (rtl_read_dword(rtlpriv, REG_MCUFWDL) & MCUFWDL_RDY) ? true : false; } static void _rtl92d_enable_fw_download(struct ieee80211_hw *hw, bool enable) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 tmp; if (enable) { tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp | 0x04); tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL); rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01); tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2); rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7); } else { tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL); rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe); /* Reserved for fw extension. 
* 0x81[7] is used for mac0 status , * so don't write this reg here * rtl_write_byte(rtlpriv, REG_MCUFWDL + 1, 0x00);*/ } } static void _rtl92d_fw_block_write(struct ieee80211_hw *hw, const u8 *buffer, u32 size) { struct rtl_priv *rtlpriv = rtl_priv(hw); u32 blocksize = sizeof(u32); u8 *bufferptr = (u8 *) buffer; u32 *pu4BytePtr = (u32 *) buffer; u32 i, offset, blockCount, remainSize; blockCount = size / blocksize; remainSize = size % blocksize; for (i = 0; i < blockCount; i++) { offset = i * blocksize; rtl_write_dword(rtlpriv, (FW_8192D_START_ADDRESS + offset), *(pu4BytePtr + i)); } if (remainSize) { offset = blockCount * blocksize; bufferptr += offset; for (i = 0; i < remainSize; i++) { rtl_write_byte(rtlpriv, (FW_8192D_START_ADDRESS + offset + i), *(bufferptr + i)); } } } static void _rtl92d_fw_page_write(struct ieee80211_hw *hw, u32 page, const u8 *buffer, u32 size) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 value8; u8 u8page = (u8) (page & 0x07); value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page; rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8); _rtl92d_fw_block_write(hw, buffer, size); } static void _rtl92d_fill_dummy(u8 *pfwbuf, u32 *pfwlen) { u32 fwlen = *pfwlen; u8 remain = (u8) (fwlen % 4); remain = (remain == 0) ? 
0 : (4 - remain); while (remain > 0) { pfwbuf[fwlen] = 0; fwlen++; remain--; } *pfwlen = fwlen; } static void _rtl92d_write_fw(struct ieee80211_hw *hw, enum version_8192d version, u8 *buffer, u32 size) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); u8 *bufferPtr = (u8 *) buffer; u32 pagenums, remainSize; u32 page, offset; RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, ("FW size is %d bytes,\n", size)); if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) _rtl92d_fill_dummy(bufferPtr, &size); pagenums = size / FW_8192D_PAGE_SIZE; remainSize = size % FW_8192D_PAGE_SIZE; if (pagenums > 8) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Page numbers should not greater then 8\n")); } for (page = 0; page < pagenums; page++) { offset = page * FW_8192D_PAGE_SIZE; _rtl92d_fw_page_write(hw, page, (bufferPtr + offset), FW_8192D_PAGE_SIZE); } if (remainSize) { offset = pagenums * FW_8192D_PAGE_SIZE; page = pagenums; _rtl92d_fw_page_write(hw, page, (bufferPtr + offset), remainSize); } } static int _rtl92d_fw_free_to_go(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); u32 counter = 0; u32 value32; do { value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); } while ((counter++ < FW_8192D_POLLING_TIMEOUT_COUNT) && (!(value32 & FWDL_ChkSum_rpt))); if (counter >= FW_8192D_POLLING_TIMEOUT_COUNT) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("chksum report faill ! REG_MCUFWDL:0x%08x .\n", value32)); return -EIO; } RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, ("Checksum report OK ! 
REG_MCUFWDL:0x%08x .\n", value32)); value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); value32 |= MCUFWDL_RDY; rtl_write_dword(rtlpriv, REG_MCUFWDL, value32); return 0; } void rtl92d_firmware_selfreset(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 u1b_tmp; u8 delay = 100; /* Set (REG_HMETFR + 3) to 0x20 is reset 8051 */ rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20); u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); while (u1b_tmp & BIT(2)) { delay--; if (delay == 0) break; udelay(50); u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); } RT_ASSERT((delay > 0), ("8051 reset failed!\n")); RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, ("=====> 8051 reset success (%d) .\n", delay)); } static int _rtl92d_fw_init(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); u32 counter; RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, ("FW already have download\n")); /* polling for FW ready */ counter = 0; do { if (rtlhal->interfaceindex == 0) { if (rtl_read_byte(rtlpriv, FW_MAC0_READY) & MAC0_READY) { RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, ("Polling FW ready success!! " "REG_MCUFWDL: 0x%x .\n", rtl_read_byte(rtlpriv, FW_MAC0_READY))); return 0; } udelay(5); } else { if (rtl_read_byte(rtlpriv, FW_MAC1_READY) & MAC1_READY) { RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, ("Polling FW ready success!! " "REG_MCUFWDL: 0x%x .\n", rtl_read_byte(rtlpriv, FW_MAC1_READY))); return 0; } udelay(5); } } while (counter++ < POLLING_READY_TIMEOUT_COUNT); if (rtlhal->interfaceindex == 0) { RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, ("Polling FW ready fail!! MAC0 FW init not ready: " "0x%x .\n", rtl_read_byte(rtlpriv, FW_MAC0_READY))); } else { RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, ("Polling FW ready fail!! MAC1 FW init not ready: " "0x%x .\n", rtl_read_byte(rtlpriv, FW_MAC1_READY))); } RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, ("Polling FW ready fail!! 
REG_MCUFWDL:0x%08ul .\n", rtl_read_dword(rtlpriv, REG_MCUFWDL))); return -1; } int rtl92d_download_fw(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); u8 *pfwheader; u8 *pfwdata; u32 fwsize; int err; enum version_8192d version = rtlhal->version; u8 value; u32 count; bool fw_downloaded = false, fwdl_in_process = false; unsigned long flags; if (!rtlhal->pfirmware) return 1; fwsize = rtlhal->fwsize; pfwheader = (u8 *) rtlhal->pfirmware; pfwdata = (u8 *) rtlhal->pfirmware; rtlhal->fw_version = (u16) GET_FIRMWARE_HDR_VERSION(pfwheader); rtlhal->fw_subversion = (u16) GET_FIRMWARE_HDR_SUB_VER(pfwheader); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, (" FirmwareVersion(%d)," "FirmwareSubVersion(%d), Signature(%#x)\n", rtlhal->fw_version, rtlhal->fw_subversion, GET_FIRMWARE_HDR_SIGNATURE(pfwheader))); if (IS_FW_HEADER_EXIST(pfwheader)) { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Shift 32 bytes for FW header!!\n")); pfwdata = pfwdata + 32; fwsize = fwsize - 32; } spin_lock_irqsave(&globalmutex_for_fwdownload, flags); fw_downloaded = _rtl92d_is_fw_downloaded(rtlpriv); if ((rtl_read_byte(rtlpriv, 0x1f) & BIT(5)) == BIT(5)) fwdl_in_process = true; else fwdl_in_process = false; if (fw_downloaded) { spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags); goto exit; } else if (fwdl_in_process) { spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags); for (count = 0; count < 5000; count++) { udelay(500); spin_lock_irqsave(&globalmutex_for_fwdownload, flags); fw_downloaded = _rtl92d_is_fw_downloaded(rtlpriv); if ((rtl_read_byte(rtlpriv, 0x1f) & BIT(5)) == BIT(5)) fwdl_in_process = true; else fwdl_in_process = false; spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags); if (fw_downloaded) goto exit; else if (!fwdl_in_process) break; else RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, ("Wait for another mac " "download fw\n")); } spin_lock_irqsave(&globalmutex_for_fwdownload, flags); value = rtl_read_byte(rtlpriv, 0x1f); 
value |= BIT(5); rtl_write_byte(rtlpriv, 0x1f, value); spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags); } else { value = rtl_read_byte(rtlpriv, 0x1f); value |= BIT(5); rtl_write_byte(rtlpriv, 0x1f, value); spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags); } /* If 8051 is running in RAM code, driver should * inform Fw to reset by itself, or it will cause * download Fw fail.*/ /* 8051 RAM code */ if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) { rtl92d_firmware_selfreset(hw); rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00); } _rtl92d_enable_fw_download(hw, true); _rtl92d_write_fw(hw, version, pfwdata, fwsize); _rtl92d_enable_fw_download(hw, false); spin_lock_irqsave(&globalmutex_for_fwdownload, flags); err = _rtl92d_fw_free_to_go(hw); /* download fw over,clear 0x1f[5] */ value = rtl_read_byte(rtlpriv, 0x1f); value &= (~BIT(5)); rtl_write_byte(rtlpriv, 0x1f, value); spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("fw is not ready to run!\n")); goto exit; } else { RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, ("fw is ready to run!\n")); } exit: err = _rtl92d_fw_init(hw); return err; } static bool _rtl92d_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 val_hmetfr; bool result = false; val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR); if (((val_hmetfr >> boxnum) & BIT(0)) == 0) result = true; return result; } static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id, u32 cmd_len, u8 *cmdbuffer) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); u8 boxnum; u16 box_reg = 0, box_extreg = 0; u8 u1b_tmp; bool isfw_read = false; u8 buf_index = 0; bool bwrite_sucess = false; u8 wait_h2c_limmit = 100; u8 wait_writeh2c_limmit = 100; u8 boxcontent[4], boxextcontent[2]; u32 h2c_waitcounter = 0; unsigned long flag; u8 idx; if (ppsc->rfpwr_state == 
ERFOFF || ppsc->inactive_pwrstate == ERFOFF) { RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("Return as RF is off!!!\n")); return; } RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("come in\n")); while (true) { spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); if (rtlhal->h2c_setinprogress) { RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("H2C set in progress! Wait to set.." "element_id(%d).\n", element_id)); while (rtlhal->h2c_setinprogress) { spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); h2c_waitcounter++; RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("Wait 100 us (%d times)...\n", h2c_waitcounter)); udelay(100); if (h2c_waitcounter > 1000) return; spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); } spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); } else { rtlhal->h2c_setinprogress = true; spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); break; } } while (!bwrite_sucess) { wait_writeh2c_limmit--; if (wait_writeh2c_limmit == 0) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Write H2C fail because no trigger " "for FW INT!\n")); break; } boxnum = rtlhal->last_hmeboxnum; switch (boxnum) { case 0: box_reg = REG_HMEBOX_0; box_extreg = REG_HMEBOX_EXT_0; break; case 1: box_reg = REG_HMEBOX_1; box_extreg = REG_HMEBOX_EXT_1; break; case 2: box_reg = REG_HMEBOX_2; box_extreg = REG_HMEBOX_EXT_2; break; case 3: box_reg = REG_HMEBOX_3; box_extreg = REG_HMEBOX_EXT_3; break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("switch case not process\n")); break; } isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum); while (!isfw_read) { wait_h2c_limmit--; if (wait_h2c_limmit == 0) { RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("Wating too long for FW read " "clear HMEBox(%d)!\n", boxnum)); break; } udelay(10); isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum); u1b_tmp = rtl_read_byte(rtlpriv, 0x1BF); RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("Wating for FW read clear HMEBox(%d)!!! 
" "0x1BF = %2x\n", boxnum, u1b_tmp)); } if (!isfw_read) { RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("Write H2C register BOX[%d] fail!!!!! " "Fw do not read.\n", boxnum)); break; } memset(boxcontent, 0, sizeof(boxcontent)); memset(boxextcontent, 0, sizeof(boxextcontent)); boxcontent[0] = element_id; RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("Write element_id box_reg(%4x) = %2x\n", box_reg, element_id)); switch (cmd_len) { case 1: boxcontent[0] &= ~(BIT(7)); memcpy(boxcontent + 1, cmdbuffer + buf_index, 1); for (idx = 0; idx < 4; idx++) rtl_write_byte(rtlpriv, box_reg + idx, boxcontent[idx]); break; case 2: boxcontent[0] &= ~(BIT(7)); memcpy(boxcontent + 1, cmdbuffer + buf_index, 2); for (idx = 0; idx < 4; idx++) rtl_write_byte(rtlpriv, box_reg + idx, boxcontent[idx]); break; case 3: boxcontent[0] &= ~(BIT(7)); memcpy(boxcontent + 1, cmdbuffer + buf_index, 3); for (idx = 0; idx < 4; idx++) rtl_write_byte(rtlpriv, box_reg + idx, boxcontent[idx]); break; case 4: boxcontent[0] |= (BIT(7)); memcpy(boxextcontent, cmdbuffer + buf_index, 2); memcpy(boxcontent + 1, cmdbuffer + buf_index + 2, 2); for (idx = 0; idx < 2; idx++) rtl_write_byte(rtlpriv, box_extreg + idx, boxextcontent[idx]); for (idx = 0; idx < 4; idx++) rtl_write_byte(rtlpriv, box_reg + idx, boxcontent[idx]); break; case 5: boxcontent[0] |= (BIT(7)); memcpy(boxextcontent, cmdbuffer + buf_index, 2); memcpy(boxcontent + 1, cmdbuffer + buf_index + 2, 3); for (idx = 0; idx < 2; idx++) rtl_write_byte(rtlpriv, box_extreg + idx, boxextcontent[idx]); for (idx = 0; idx < 4; idx++) rtl_write_byte(rtlpriv, box_reg + idx, boxcontent[idx]); break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("switch case not process\n")); break; } bwrite_sucess = true; rtlhal->last_hmeboxnum = boxnum + 1; if (rtlhal->last_hmeboxnum == 4) rtlhal->last_hmeboxnum = 0; RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("pHalData->last_hmeboxnum = %d\n", rtlhal->last_hmeboxnum)); } spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); rtlhal->h2c_setinprogress = 
false; spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("go out\n")); } void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, u32 cmd_len, u8 *cmdbuffer) { struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); u32 tmp_cmdbuf[2]; if (rtlhal->fw_ready == false) { RT_ASSERT(false, ("return H2C cmd because of Fw " "download fail!!!\n")); return; } memset(tmp_cmdbuf, 0, 8); memcpy(tmp_cmdbuf, cmdbuffer, cmd_len); _rtl92d_fill_h2c_command(hw, element_id, cmd_len, (u8 *)&tmp_cmdbuf); return; } void rtl92d_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 u1_h2c_set_pwrmode[3] = { 0 }; struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("FW LPS mode = %d\n", mode)); SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode); SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, 1); SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode, ppsc->reg_max_lps_awakeintvl); RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, "rtl92d_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode\n", u1_h2c_set_pwrmode, 3); rtl92d_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode); } static bool _rtl92d_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl8192_tx_ring *ring; struct rtl_tx_desc *pdesc; u8 idx = 0; unsigned long flags; struct sk_buff *pskb; ring = &rtlpci->tx_ring[BEACON_QUEUE]; pskb = __skb_dequeue(&ring->queue); if (pskb) kfree_skb(pskb); spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); pdesc = &ring->desc[idx]; /* discard output from call below */ rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN); rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb); __skb_queue_tail(&ring->queue, skb); spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE); return true; } 
#define BEACON_PG 0 /*->1 */ #define PSPOLL_PG 2 #define NULL_PG 3 #define PROBERSP_PG 4 /*->5 */ #define TOTAL_RESERVED_PKT_LEN 768 static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = { /* page 0 beacon */ 0x80, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42, 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x50, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69, 0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C, 0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96, 0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A, 0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C, 0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02, 0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* page 1 beacon */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* page 2 ps-poll */ 0xA4, 0x10, 0x01, 0xC0, 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* page 3 null */ 0x48, 0x01, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42, 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x72, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* page 4 probe_resp */ 0x50, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42, 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00, 0x9E, 0x46, 0x15, 0x32, 0x27, 0xF2, 0x2D, 0x00, 0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69, 0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C, 0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96, 0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A, 0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C, 0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3D, 
0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02, 0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* page 5 probe_resp */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct sk_buff *skb = NULL; u32 totalpacketlen; bool rtstatus; u8 u1RsvdPageLoc[3] = { 0 }; bool dlok = false; u8 *beacon; u8 *p_pspoll; u8 *nullfunc; u8 *p_probersp; /*--------------------------------------------------------- (1) beacon ---------------------------------------------------------*/ beacon = &reserved_page_packet[BEACON_PG * 128]; SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr); SET_80211_HDR_ADDRESS3(beacon, mac->bssid); /*------------------------------------------------------- (2) ps-poll --------------------------------------------------------*/ p_pspoll = &reserved_page_packet[PSPOLL_PG * 128]; SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000)); SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid); SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr); SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG); /*-------------------------------------------------------- (3) null data 
---------------------------------------------------------*/ nullfunc = &reserved_page_packet[NULL_PG * 128]; SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid); SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr); SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid); SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG); /*--------------------------------------------------------- (4) probe response ----------------------------------------------------------*/ p_probersp = &reserved_page_packet[PROBERSP_PG * 128]; SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid); SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr); SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid); SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1RsvdPageLoc, PROBERSP_PG); totalpacketlen = TOTAL_RESERVED_PKT_LEN; RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "rtl92d_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n", &reserved_page_packet[0], totalpacketlen); RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, "rtl92d_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n", u1RsvdPageLoc, 3); skb = dev_alloc_skb(totalpacketlen); memcpy((u8 *) skb_put(skb, totalpacketlen), &reserved_page_packet, totalpacketlen); rtstatus = _rtl92d_cmd_send_packet(hw, skb); if (rtstatus) dlok = true; if (dlok) { RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("Set RSVD page location to Fw.\n")); RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, "H2C_RSVDPAGE:\n", u1RsvdPageLoc, 3); rtl92d_fill_h2c_cmd(hw, H2C_RSVDPAGE, sizeof(u1RsvdPageLoc), u1RsvdPageLoc); } else RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("Set RSVD page location to Fw FAIL!!!!!!.\n")); } void rtl92d_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus) { u8 u1_joinbssrpt_parm[1] = {0}; SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(u1_joinbssrpt_parm, mstatus); rtl92d_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm); }
gpl-2.0
mathur/rohan.kernel.op3
drivers/input/misc/mma8450.c
907
5981
/* * Driver for Freescale's 3-Axis Accelerometer MMA8450 * * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/input-polldev.h> #include <linux/of_device.h> #define MMA8450_DRV_NAME "mma8450" #define MODE_CHANGE_DELAY_MS 100 #define POLL_INTERVAL 100 #define POLL_INTERVAL_MAX 500 /* register definitions */ #define MMA8450_STATUS 0x00 #define MMA8450_STATUS_ZXYDR 0x08 #define MMA8450_OUT_X8 0x01 #define MMA8450_OUT_Y8 0x02 #define MMA8450_OUT_Z8 0x03 #define MMA8450_OUT_X_LSB 0x05 #define MMA8450_OUT_X_MSB 0x06 #define MMA8450_OUT_Y_LSB 0x07 #define MMA8450_OUT_Y_MSB 0x08 #define MMA8450_OUT_Z_LSB 0x09 #define MMA8450_OUT_Z_MSB 0x0a #define MMA8450_XYZ_DATA_CFG 0x16 #define MMA8450_CTRL_REG1 0x38 #define MMA8450_CTRL_REG2 0x39 /* mma8450 status */ struct mma8450 { struct i2c_client *client; struct input_polled_dev *idev; }; static int mma8450_read(struct mma8450 *m, unsigned off) { struct i2c_client *c = m->client; int ret; ret = i2c_smbus_read_byte_data(c, off); if (ret < 0) dev_err(&c->dev, "failed to read register 0x%02x, error %d\n", off, ret); return ret; } static int mma8450_write(struct mma8450 *m, unsigned off, u8 v) { struct 
i2c_client *c = m->client; int error; error = i2c_smbus_write_byte_data(c, off, v); if (error < 0) { dev_err(&c->dev, "failed to write to register 0x%02x, error %d\n", off, error); return error; } return 0; } static int mma8450_read_block(struct mma8450 *m, unsigned off, u8 *buf, size_t size) { struct i2c_client *c = m->client; int err; err = i2c_smbus_read_i2c_block_data(c, off, size, buf); if (err < 0) { dev_err(&c->dev, "failed to read block data at 0x%02x, error %d\n", MMA8450_OUT_X_LSB, err); return err; } return 0; } static void mma8450_poll(struct input_polled_dev *dev) { struct mma8450 *m = dev->private; int x, y, z; int ret; u8 buf[6]; ret = mma8450_read(m, MMA8450_STATUS); if (ret < 0) return; if (!(ret & MMA8450_STATUS_ZXYDR)) return; ret = mma8450_read_block(m, MMA8450_OUT_X_LSB, buf, sizeof(buf)); if (ret < 0) return; x = ((int)(s8)buf[1] << 4) | (buf[0] & 0xf); y = ((int)(s8)buf[3] << 4) | (buf[2] & 0xf); z = ((int)(s8)buf[5] << 4) | (buf[4] & 0xf); input_report_abs(dev->input, ABS_X, x); input_report_abs(dev->input, ABS_Y, y); input_report_abs(dev->input, ABS_Z, z); input_sync(dev->input); } /* Initialize the MMA8450 chip */ static void mma8450_open(struct input_polled_dev *dev) { struct mma8450 *m = dev->private; int err; /* enable all events from X/Y/Z, no FIFO */ err = mma8450_write(m, MMA8450_XYZ_DATA_CFG, 0x07); if (err) return; /* * Sleep mode poll rate - 50Hz * System output data rate - 400Hz * Full scale selection - Active, +/- 2G */ err = mma8450_write(m, MMA8450_CTRL_REG1, 0x01); if (err < 0) return; msleep(MODE_CHANGE_DELAY_MS); } static void mma8450_close(struct input_polled_dev *dev) { struct mma8450 *m = dev->private; mma8450_write(m, MMA8450_CTRL_REG1, 0x00); mma8450_write(m, MMA8450_CTRL_REG2, 0x01); } /* * I2C init/probing/exit functions */ static int mma8450_probe(struct i2c_client *c, const struct i2c_device_id *id) { struct input_polled_dev *idev; struct mma8450 *m; int err; m = kzalloc(sizeof(struct mma8450), GFP_KERNEL); idev = 
input_allocate_polled_device(); if (!m || !idev) { err = -ENOMEM; goto err_free_mem; } m->client = c; m->idev = idev; idev->private = m; idev->input->name = MMA8450_DRV_NAME; idev->input->id.bustype = BUS_I2C; idev->poll = mma8450_poll; idev->poll_interval = POLL_INTERVAL; idev->poll_interval_max = POLL_INTERVAL_MAX; idev->open = mma8450_open; idev->close = mma8450_close; __set_bit(EV_ABS, idev->input->evbit); input_set_abs_params(idev->input, ABS_X, -2048, 2047, 32, 32); input_set_abs_params(idev->input, ABS_Y, -2048, 2047, 32, 32); input_set_abs_params(idev->input, ABS_Z, -2048, 2047, 32, 32); err = input_register_polled_device(idev); if (err) { dev_err(&c->dev, "failed to register polled input device\n"); goto err_free_mem; } i2c_set_clientdata(c, m); return 0; err_free_mem: input_free_polled_device(idev); kfree(m); return err; } static int mma8450_remove(struct i2c_client *c) { struct mma8450 *m = i2c_get_clientdata(c); struct input_polled_dev *idev = m->idev; input_unregister_polled_device(idev); input_free_polled_device(idev); kfree(m); return 0; } static const struct i2c_device_id mma8450_id[] = { { MMA8450_DRV_NAME, 0 }, { }, }; MODULE_DEVICE_TABLE(i2c, mma8450_id); static const struct of_device_id mma8450_dt_ids[] = { { .compatible = "fsl,mma8450", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, mma8450_dt_ids); static struct i2c_driver mma8450_driver = { .driver = { .name = MMA8450_DRV_NAME, .owner = THIS_MODULE, .of_match_table = mma8450_dt_ids, }, .probe = mma8450_probe, .remove = mma8450_remove, .id_table = mma8450_id, }; module_i2c_driver(mma8450_driver); MODULE_AUTHOR("Freescale Semiconductor, Inc."); MODULE_DESCRIPTION("MMA8450 3-Axis Accelerometer Driver"); MODULE_LICENSE("GPL");
gpl-2.0
ZeroInfinityXDA/HelixKernel_Nougat
drivers/bluetooth/bpa10x.c
1675
10542
/* * * Digianswer Bluetooth USB driver * * Copyright (C) 2004-2007 Marcel Holtmann <marcel@holtmann.org> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/usb.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #define VERSION "0.10" static const struct usb_device_id bpa10x_table[] = { /* Tektronix BPA 100/105 (Digianswer) */ { USB_DEVICE(0x08fd, 0x0002) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, bpa10x_table); struct bpa10x_data { struct hci_dev *hdev; struct usb_device *udev; struct usb_anchor tx_anchor; struct usb_anchor rx_anchor; struct sk_buff *rx_skb[2]; }; #define HCI_VENDOR_HDR_SIZE 5 struct hci_vendor_hdr { __u8 type; __le16 snum; __le16 dlen; } __packed; static int bpa10x_recv(struct hci_dev *hdev, int queue, void *buf, int count) { struct bpa10x_data *data = hci_get_drvdata(hdev); BT_DBG("%s queue %d buffer %p count %d", hdev->name, queue, buf, count); if (queue < 0 || queue > 1) return -EILSEQ; hdev->stat.byte_rx += count; while (count) { struct sk_buff *skb = data->rx_skb[queue]; struct { __u8 type; int expect; } *scb; int type, len = 0; if (!skb) { /* 
Start of the frame */ type = *((__u8 *) buf); count--; buf++; switch (type) { case HCI_EVENT_PKT: if (count >= HCI_EVENT_HDR_SIZE) { struct hci_event_hdr *h = buf; len = HCI_EVENT_HDR_SIZE + h->plen; } else return -EILSEQ; break; case HCI_ACLDATA_PKT: if (count >= HCI_ACL_HDR_SIZE) { struct hci_acl_hdr *h = buf; len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen); } else return -EILSEQ; break; case HCI_SCODATA_PKT: if (count >= HCI_SCO_HDR_SIZE) { struct hci_sco_hdr *h = buf; len = HCI_SCO_HDR_SIZE + h->dlen; } else return -EILSEQ; break; case HCI_VENDOR_PKT: if (count >= HCI_VENDOR_HDR_SIZE) { struct hci_vendor_hdr *h = buf; len = HCI_VENDOR_HDR_SIZE + __le16_to_cpu(h->dlen); } else return -EILSEQ; break; } skb = bt_skb_alloc(len, GFP_ATOMIC); if (!skb) { BT_ERR("%s no memory for packet", hdev->name); return -ENOMEM; } data->rx_skb[queue] = skb; scb = (void *) skb->cb; scb->type = type; scb->expect = len; } else { /* Continuation */ scb = (void *) skb->cb; len = scb->expect; } len = min(len, count); memcpy(skb_put(skb, len), buf, len); scb->expect -= len; if (scb->expect == 0) { /* Complete frame */ data->rx_skb[queue] = NULL; bt_cb(skb)->pkt_type = scb->type; hci_recv_frame(hdev, skb); } count -= len; buf += len; } return 0; } static void bpa10x_tx_complete(struct urb *urb) { struct sk_buff *skb = urb->context; struct hci_dev *hdev = (struct hci_dev *) skb->dev; BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, urb->actual_length); if (!test_bit(HCI_RUNNING, &hdev->flags)) goto done; if (!urb->status) hdev->stat.byte_tx += urb->transfer_buffer_length; else hdev->stat.err_tx++; done: kfree(urb->setup_packet); kfree_skb(skb); } static void bpa10x_rx_complete(struct urb *urb) { struct hci_dev *hdev = urb->context; struct bpa10x_data *data = hci_get_drvdata(hdev); int err; BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, urb->actual_length); if (!test_bit(HCI_RUNNING, &hdev->flags)) return; if (urb->status == 0) { if 
(bpa10x_recv(hdev, usb_pipebulk(urb->pipe), urb->transfer_buffer, urb->actual_length) < 0) { BT_ERR("%s corrupted event packet", hdev->name); hdev->stat.err_rx++; } } usb_anchor_urb(urb, &data->rx_anchor); err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { BT_ERR("%s urb %p failed to resubmit (%d)", hdev->name, urb, -err); usb_unanchor_urb(urb); } } static inline int bpa10x_submit_intr_urb(struct hci_dev *hdev) { struct bpa10x_data *data = hci_get_drvdata(hdev); struct urb *urb; unsigned char *buf; unsigned int pipe; int err, size = 16; BT_DBG("%s", hdev->name); urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) return -ENOMEM; buf = kmalloc(size, GFP_KERNEL); if (!buf) { usb_free_urb(urb); return -ENOMEM; } pipe = usb_rcvintpipe(data->udev, 0x81); usb_fill_int_urb(urb, data->udev, pipe, buf, size, bpa10x_rx_complete, hdev, 1); urb->transfer_flags |= URB_FREE_BUFFER; usb_anchor_urb(urb, &data->rx_anchor); err = usb_submit_urb(urb, GFP_KERNEL); if (err < 0) { BT_ERR("%s urb %p submission failed (%d)", hdev->name, urb, -err); usb_unanchor_urb(urb); } usb_free_urb(urb); return err; } static inline int bpa10x_submit_bulk_urb(struct hci_dev *hdev) { struct bpa10x_data *data = hci_get_drvdata(hdev); struct urb *urb; unsigned char *buf; unsigned int pipe; int err, size = 64; BT_DBG("%s", hdev->name); urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) return -ENOMEM; buf = kmalloc(size, GFP_KERNEL); if (!buf) { usb_free_urb(urb); return -ENOMEM; } pipe = usb_rcvbulkpipe(data->udev, 0x82); usb_fill_bulk_urb(urb, data->udev, pipe, buf, size, bpa10x_rx_complete, hdev); urb->transfer_flags |= URB_FREE_BUFFER; usb_anchor_urb(urb, &data->rx_anchor); err = usb_submit_urb(urb, GFP_KERNEL); if (err < 0) { BT_ERR("%s urb %p submission failed (%d)", hdev->name, urb, -err); usb_unanchor_urb(urb); } usb_free_urb(urb); return err; } static int bpa10x_open(struct hci_dev *hdev) { struct bpa10x_data *data = hci_get_drvdata(hdev); int err; BT_DBG("%s", hdev->name); if 
(test_and_set_bit(HCI_RUNNING, &hdev->flags)) return 0; err = bpa10x_submit_intr_urb(hdev); if (err < 0) goto error; err = bpa10x_submit_bulk_urb(hdev); if (err < 0) goto error; return 0; error: usb_kill_anchored_urbs(&data->rx_anchor); clear_bit(HCI_RUNNING, &hdev->flags); return err; } static int bpa10x_close(struct hci_dev *hdev) { struct bpa10x_data *data = hci_get_drvdata(hdev); BT_DBG("%s", hdev->name); if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) return 0; usb_kill_anchored_urbs(&data->rx_anchor); return 0; } static int bpa10x_flush(struct hci_dev *hdev) { struct bpa10x_data *data = hci_get_drvdata(hdev); BT_DBG("%s", hdev->name); usb_kill_anchored_urbs(&data->tx_anchor); return 0; } static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { struct bpa10x_data *data = hci_get_drvdata(hdev); struct usb_ctrlrequest *dr; struct urb *urb; unsigned int pipe; int err; BT_DBG("%s", hdev->name); if (!test_bit(HCI_RUNNING, &hdev->flags)) return -EBUSY; skb->dev = (void *) hdev; urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) return -ENOMEM; /* Prepend skb with frame type */ *skb_push(skb, 1) = bt_cb(skb)->pkt_type; switch (bt_cb(skb)->pkt_type) { case HCI_COMMAND_PKT: dr = kmalloc(sizeof(*dr), GFP_ATOMIC); if (!dr) { usb_free_urb(urb); return -ENOMEM; } dr->bRequestType = USB_TYPE_VENDOR; dr->bRequest = 0; dr->wIndex = 0; dr->wValue = 0; dr->wLength = __cpu_to_le16(skb->len); pipe = usb_sndctrlpipe(data->udev, 0x00); usb_fill_control_urb(urb, data->udev, pipe, (void *) dr, skb->data, skb->len, bpa10x_tx_complete, skb); hdev->stat.cmd_tx++; break; case HCI_ACLDATA_PKT: pipe = usb_sndbulkpipe(data->udev, 0x02); usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, skb->len, bpa10x_tx_complete, skb); hdev->stat.acl_tx++; break; case HCI_SCODATA_PKT: pipe = usb_sndbulkpipe(data->udev, 0x02); usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, skb->len, bpa10x_tx_complete, skb); hdev->stat.sco_tx++; break; default: usb_free_urb(urb); return -EILSEQ; } 
usb_anchor_urb(urb, &data->tx_anchor); err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { BT_ERR("%s urb %p submission failed", hdev->name, urb); kfree(urb->setup_packet); usb_unanchor_urb(urb); } usb_free_urb(urb); return 0; } static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct bpa10x_data *data; struct hci_dev *hdev; int err; BT_DBG("intf %p id %p", intf, id); if (intf->cur_altsetting->desc.bInterfaceNumber != 0) return -ENODEV; data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->udev = interface_to_usbdev(intf); init_usb_anchor(&data->tx_anchor); init_usb_anchor(&data->rx_anchor); hdev = hci_alloc_dev(); if (!hdev) return -ENOMEM; hdev->bus = HCI_USB; hci_set_drvdata(hdev, data); data->hdev = hdev; SET_HCIDEV_DEV(hdev, &intf->dev); hdev->open = bpa10x_open; hdev->close = bpa10x_close; hdev->flush = bpa10x_flush; hdev->send = bpa10x_send_frame; set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); err = hci_register_dev(hdev); if (err < 0) { hci_free_dev(hdev); return err; } usb_set_intfdata(intf, data); return 0; } static void bpa10x_disconnect(struct usb_interface *intf) { struct bpa10x_data *data = usb_get_intfdata(intf); BT_DBG("intf %p", intf); if (!data) return; usb_set_intfdata(intf, NULL); hci_unregister_dev(data->hdev); hci_free_dev(data->hdev); kfree_skb(data->rx_skb[0]); kfree_skb(data->rx_skb[1]); } static struct usb_driver bpa10x_driver = { .name = "bpa10x", .probe = bpa10x_probe, .disconnect = bpa10x_disconnect, .id_table = bpa10x_table, .disable_hub_initiated_lpm = 1, }; module_usb_driver(bpa10x_driver); MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); MODULE_DESCRIPTION("Digianswer Bluetooth USB driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL");
gpl-2.0
Snuzzo/funky_msm8960
arch/blackfin/mach-bf561/boards/ezkit.c
2187
13228
/*
 * Copyright 2004-2009 Analog Devices Inc.
 *               2005 National ICT Australia (NICTA)
 *                    Aidan Williams <aidan@nicta.com.au>
 *
 * Licensed under the GPL-2 or later.
 */

/*
 * Board support file for the ADI BF561-EZKIT: static platform-device
 * tables (memory/IRQ/DMA resources) for the peripherals wired on this
 * board, registered at arch_initcall time.
 */

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/spi/spi.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>
#include <asm/dpmc.h>

/*
 * Name the Board for the /proc/cpuinfo
 */
const char bfin_board_name[] = "ADI BF561-EZKIT";

#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
#include <linux/usb/isp1760.h>
static struct resource bfin_isp1760_resources[] = {
	[0] = {
		/*
		 * NOTE(review): .start (0x2C0F0000) and the base used for
		 * .end (0x203C0000) look inconsistent for a single 1MB
		 * window -- verify against the board schematic.
		 */
		.start  = 0x2C0F0000,
		.end    = 0x203C0000 + 0xfffff,
		.flags  = IORESOURCE_MEM,
	},
	[1] = {
		.start  = IRQ_PF10,
		.end    = IRQ_PF10,
		.flags  = IORESOURCE_IRQ,
	},
};

static struct isp1760_platform_data isp1760_priv = {
	.is_isp1761 = 0,
	.bus_width_16 = 1,
	.port1_otg = 0,
	.analog_oc = 0,
	.dack_polarity_high = 0,
	.dreq_polarity_high = 0,
};

static struct platform_device bfin_isp1760_device = {
	.name           = "isp1760",
	.id             = 0,
	.dev = {
		.platform_data = &isp1760_priv,
	},
	.num_resources  = ARRAY_SIZE(bfin_isp1760_resources),
	.resource       = bfin_isp1760_resources,
};
#endif

#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include <linux/usb/isp1362.h>

static struct resource isp1362_hcd_resources[] = {
	{
		.start = 0x2c060000,
		.end = 0x2c060000,
		.flags = IORESOURCE_MEM,
	}, {
		.start = 0x2c060004,
		.end = 0x2c060004,
		.flags = IORESOURCE_MEM,
	}, {
		.start = IRQ_PF8,
		.end = IRQ_PF8,
		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
	},
};

static struct isp1362_platform_data isp1362_priv = {
	.sel15Kres = 1,
	.clknotstop = 0,
	.oc_enable = 0,
	.int_act_high = 0,
	.int_edge_triggered = 0,
	.remote_wakeup_connected = 0,
	.no_power_switching = 1,
	.power_switching_mode = 0,
};

static struct platform_device isp1362_hcd_device = {
	.name = "isp1362-hcd",
	.id = 0,
	.dev = {
		.platform_data = &isp1362_priv,
	},
	.num_resources = ARRAY_SIZE(isp1362_hcd_resources),
	.resource = isp1362_hcd_resources,
};
#endif

#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
static struct resource net2272_bfin_resources[] = {
	{
		.start = 0x2C000000,
		.end = 0x2C000000 + 0x7F,
		.flags = IORESOURCE_MEM,
	}, {
		.start = IRQ_PF10,
		.end = IRQ_PF10,
		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
	},
};

static struct platform_device net2272_bfin_device = {
	.name = "net2272",
	.id = -1,
	.num_resources = ARRAY_SIZE(net2272_bfin_resources),
	.resource = net2272_bfin_resources,
};
#endif

/*
 *  USB-LAN EzExtender board
 *  Driver needs to know address, irq and flag pin.
 */
#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
#include <linux/smc91x.h>

static struct smc91x_platdata smc91x_info = {
	.flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
	.leda = RPC_LED_100_10,
	.ledb = RPC_LED_TX_RX,
};

static struct resource smc91x_resources[] = {
	{
		.name = "smc91x-regs",
		.start = 0x2C010300,
		.end = 0x2C010300 + 16,
		.flags = IORESOURCE_MEM,
	}, {
		.start = IRQ_PF9,
		.end = IRQ_PF9,
		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
	},
};
static struct platform_device smc91x_device = {
	.name = "smc91x",
	.id = 0,
	.num_resources = ARRAY_SIZE(smc91x_resources),
	.resource = smc91x_resources,
	.dev = {
		.platform_data = &smc91x_info,
	},
};
#endif

#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
static struct resource bfin_uart0_resources[] = {
	{
		.start = BFIN_UART_THR,
		.end = BFIN_UART_GCTL+2,
		.flags = IORESOURCE_MEM,
	}, {
		.start = IRQ_UART_RX,
		.end = IRQ_UART_RX+1,
		.flags = IORESOURCE_IRQ,
	}, {
		.start = IRQ_UART_ERROR,
		.end = IRQ_UART_ERROR,
		.flags = IORESOURCE_IRQ,
	}, {
		.start = CH_UART_TX,
		.end = CH_UART_TX,
		.flags = IORESOURCE_DMA,
	}, {
		.start = CH_UART_RX,
		.end = CH_UART_RX,
		.flags = IORESOURCE_DMA,
	},
};

/* Pin-mux list terminated by 0; handed to the serial driver. */
static unsigned short bfin_uart0_peripherals[] = {
	P_UART0_TX, P_UART0_RX, 0
};

static struct platform_device bfin_uart0_device = {
	.name = "bfin-uart",
	.id = 0,
	.num_resources = ARRAY_SIZE(bfin_uart0_resources),
	.resource = bfin_uart0_resources,
	.dev = {
		.platform_data = &bfin_uart0_peripherals, /* Passed to driver */
	},
};
#endif
#endif

#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
static struct resource bfin_sir0_resources[] = {
	{
		.start = 0xFFC00400,
		.end = 0xFFC004FF,
		.flags = IORESOURCE_MEM,
	}, {
		.start = IRQ_UART0_RX,
		.end = IRQ_UART0_RX+1,
		.flags = IORESOURCE_IRQ,
	}, {
		.start = CH_UART0_RX,
		.end = CH_UART0_RX+1,
		.flags = IORESOURCE_DMA,
	},
};

static struct platform_device bfin_sir0_device = {
	.name = "bfin_sir",
	.id = 0,
	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
	.resource = bfin_sir0_resources,
};
#endif
#endif

#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
/* Parallel NOR flash layout; partitions are laid out back-to-back. */
static struct mtd_partition ezkit_partitions[] = {
	{
		.name       = "bootloader(nor)",
		.size       = 0x40000,
		.offset     = 0,
	}, {
		.name       = "linux kernel(nor)",
		.size       = 0x1C0000,
		.offset     = MTDPART_OFS_APPEND,
	}, {
		.name       = "file system(nor)",
		.size       = 0x800000 - 0x40000 - 0x1C0000 - 0x2000 * 8,
		.offset     = MTDPART_OFS_APPEND,
	}, {
		.name       = "config(nor)",
		.size       = 0x2000 * 7,
		.offset     = MTDPART_OFS_APPEND,
	}, {
		.name       = "u-boot env(nor)",
		.size       = 0x2000,
		.offset     = MTDPART_OFS_APPEND,
	}
};

static struct physmap_flash_data ezkit_flash_data = {
	.width      = 2,
	.parts      = ezkit_partitions,
	.nr_parts   = ARRAY_SIZE(ezkit_partitions),
};

static struct resource ezkit_flash_resource = {
	.start = 0x20000000,
	.end   = 0x207fffff,
	.flags = IORESOURCE_MEM,
};

static struct platform_device ezkit_flash_device = {
	.name          = "physmap-flash",
	.id            = 0,
	.dev = {
		.platform_data = &ezkit_flash_data,
	},
	.num_resources = 1,
	.resource      = &ezkit_flash_resource,
};
#endif

#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
	|| defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
	.enable_dma = 0,
	.bits_per_word = 16,
};
#endif

#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
static struct bfin5xx_spi_chip spidev_chip_info = {
	.enable_dma = 0,
	.bits_per_word = 8,
};
#endif

#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
/* SPI (0) */
static struct resource bfin_spi0_resource[] = {
	[0] = {
		.start = SPI0_REGBASE,
		.end   = SPI0_REGBASE + 0xFF,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = CH_SPI,
		.end   = CH_SPI,
		.flags = IORESOURCE_DMA,
	},
	[2] = {
		.start = IRQ_SPI,
		.end   = IRQ_SPI,
		.flags = IORESOURCE_IRQ,
	}
};

/* SPI controller data */
static struct bfin5xx_spi_master bfin_spi0_info = {
	.num_chipselect = 8,
	.enable_dma = 1,  /* master has the ability to do dma transfer */
	.pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
};

static struct platform_device bfin_spi0_device = {
	.name = "bfin-spi",
	.id = 0, /* Bus number */
	.num_resources = ARRAY_SIZE(bfin_spi0_resource),
	.resource = bfin_spi0_resource,
	.dev = {
		.platform_data = &bfin_spi0_info, /* Passed to driver */
	},
};
#endif

static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
	|| defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
	{
		.modalias = "ad183x",
		.max_speed_hz = 3125000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 4,
		.platform_data = "ad1836", /* only includes chip name for the moment */
		.controller_data = &ad1836_spi_chip_info,
		.mode = SPI_MODE_3,
	},
#endif
#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
	{
		.modalias = "spidev",
		.max_speed_hz = 3125000,     /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 1,
		.controller_data = &spidev_chip_info,
	},
#endif
};

#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
#include <linux/input.h>
#include <linux/gpio_keys.h>

/* {keycode, gpio, active_low, description} for the four EZKIT pushbuttons */
static struct gpio_keys_button bfin_gpio_keys_table[] = {
	{BTN_0, GPIO_PF5, 1, "gpio-keys: BTN0"},
	{BTN_1, GPIO_PF6, 1, "gpio-keys: BTN1"},
	{BTN_2, GPIO_PF7, 1, "gpio-keys: BTN2"},
	{BTN_3, GPIO_PF8, 1, "gpio-keys: BTN3"},
};

static struct gpio_keys_platform_data bfin_gpio_keys_data = {
	.buttons        = bfin_gpio_keys_table,
	.nbuttons       = ARRAY_SIZE(bfin_gpio_keys_table),
};

static struct platform_device bfin_device_gpiokeys = {
	.name      = "gpio-keys",
	.dev = {
		.platform_data = &bfin_gpio_keys_data,
	},
};
#endif

#if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE)
#include <linux/i2c-gpio.h>

/* Bit-banged I2C on PF0 (SCL) / PF1 (SDA) */
static struct i2c_gpio_platform_data i2c_gpio_data = {
	.sda_pin		= GPIO_PF1,
	.scl_pin		= GPIO_PF0,
	.sda_is_open_drain	= 0,
	.scl_is_open_drain	= 0,
	.udelay			= 40,
};

static struct platform_device i2c_gpio_device = {
	.name		= "i2c-gpio",
	.id		= 0,
	.dev = {
		.platform_data = &i2c_gpio_data,
	},
};
#endif

/* Core-clock / voltage-level pairs for dynamic power management */
static const unsigned int cclk_vlev_datasheet[] =
{
	VRPAIR(VLEV_085, 250000000),
	VRPAIR(VLEV_090, 300000000),
	VRPAIR(VLEV_095, 313000000),
	VRPAIR(VLEV_100, 350000000),
	VRPAIR(VLEV_105, 400000000),
	VRPAIR(VLEV_110, 444000000),
	VRPAIR(VLEV_115, 450000000),
	VRPAIR(VLEV_120, 475000000),
	VRPAIR(VLEV_125, 500000000),
	VRPAIR(VLEV_130, 600000000),
};

static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = {
	.tuple_tab = cclk_vlev_datasheet,
	.tabsize = ARRAY_SIZE(cclk_vlev_datasheet),
	.vr_settling_time = 25 /* us */,
};

static struct platform_device bfin_dpmc = {
	.name = "bfin dpmc",
	.dev = {
		.platform_data = &bfin_dmpc_vreg_data,
	},
};

#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
static struct platform_device bfin_i2s = {
	.name = "bfin-i2s",
	.id = CONFIG_SND_BF5XX_SPORT_NUM,
	/* TODO: add platform data here */
};
#endif

#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
static struct platform_device bfin_tdm = {
	.name = "bfin-tdm",
	.id = CONFIG_SND_BF5XX_SPORT_NUM,
	/* TODO: add platform data here */
};
#endif

#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
static struct platform_device bfin_ac97 = {
	.name = "bfin-ac97",
	.id = CONFIG_SND_BF5XX_SPORT_NUM,
	/* TODO: add platform data here */
};
#endif

/* Everything registered in one shot by ezkit_init() */
static struct platform_device *ezkit_devices[] __initdata = {

	&bfin_dpmc,

#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
	&smc91x_device,
#endif

#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
	&net2272_bfin_device,
#endif

#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
	&bfin_isp1760_device,
#endif

#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
	&bfin_spi0_device,
#endif

#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
	&bfin_uart0_device,
#endif
#endif

#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
	&bfin_sir0_device,
#endif
#endif

#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
	&bfin_device_gpiokeys,
#endif

#if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE)
	&i2c_gpio_device,
#endif

#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
	&isp1362_hcd_device,
#endif

#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
	&ezkit_flash_device,
#endif

#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
	&bfin_i2s,
#endif

#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
	&bfin_tdm,
#endif

#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
	&bfin_ac97,
#endif
};

/*
 * Register all board devices, then perform the board-specific GPIO
 * setup (SMC91x reset line, AD183x codec power-up) and hand the SPI
 * slave table to the SPI core.
 */
static int __init ezkit_init(void)
{
	int ret;

	printk(KERN_INFO "%s(): registering device resources\n", __func__);

	ret = platform_add_devices(ezkit_devices, ARRAY_SIZE(ezkit_devices));
	if (ret < 0)
		return ret;

#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
	bfin_write_FIO0_DIR(bfin_read_FIO0_DIR() | (1 << 12));
	SSYNC();
#endif

#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
	bfin_write_FIO0_DIR(bfin_read_FIO0_DIR() | (1 << 15));
	bfin_write_FIO0_FLAG_S(1 << 15);
	SSYNC();
	/*
	 * This initialization lasts for approximately 4500 MCLKs.
	 * MCLK = 12.288MHz
	 */
	udelay(400);
#endif

	spi_register_board_info(bfin_spi_board_info,
				ARRAY_SIZE(bfin_spi_board_info));
	return 0;
}

arch_initcall(ezkit_init);

/* Console UART must exist before the normal initcall ordering. */
static struct platform_device *ezkit_early_devices[] __initdata = {
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
#ifdef CONFIG_SERIAL_BFIN_UART0
	&bfin_uart0_device,
#endif
#endif
};

void __init native_machine_early_platform_add_devices(void)
{
	printk(KERN_INFO "register early platform devices\n");
	early_platform_add_devices(ezkit_early_devices,
		ARRAY_SIZE(ezkit_early_devices));
}
gpl-2.0
djmatt604/android_kernel_samsung_note2jb
drivers/staging/iio/adc/ad7298_core.c
2699
7101
/*
 * AD7298 SPI ADC driver
 *
 * Copyright 2011 Analog Devices Inc.
 *
 * Licensed under the GPL-2.
 */

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
#include <linux/delay.h>

#include "../iio.h"
#include "../sysfs.h"
#include "../ring_generic.h"
#include "adc.h"

#include "ad7298.h"

/* Channel 0 is the on-chip temperature sensor; 1-8 are voltage inputs. */
static struct iio_chan_spec ad7298_channels[] = {
	IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
		 (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
		 9, AD7298_CH_TEMP, IIO_ST('s', 32, 32, 0), 0),
	IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
		 (1 << IIO_CHAN_INFO_SCALE_SHARED),
		 0, 0, IIO_ST('u', 12, 16, 0), 0),
	IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
		 (1 << IIO_CHAN_INFO_SCALE_SHARED),
		 1, 1, IIO_ST('u', 12, 16, 0), 0),
	IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 2, 0,
		 (1 << IIO_CHAN_INFO_SCALE_SHARED),
		 2, 2, IIO_ST('u', 12, 16, 0), 0),
	IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 3, 0,
		 (1 << IIO_CHAN_INFO_SCALE_SHARED),
		 3, 3, IIO_ST('u', 12, 16, 0), 0),
	IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 4, 0,
		 (1 << IIO_CHAN_INFO_SCALE_SHARED),
		 4, 4, IIO_ST('u', 12, 16, 0), 0),
	IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 5, 0,
		 (1 << IIO_CHAN_INFO_SCALE_SHARED),
		 5, 5, IIO_ST('u', 12, 16, 0), 0),
	IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 6, 0,
		 (1 << IIO_CHAN_INFO_SCALE_SHARED),
		 6, 6, IIO_ST('u', 12, 16, 0), 0),
	IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 7, 0,
		 (1 << IIO_CHAN_INFO_SCALE_SHARED),
		 7, 7, IIO_ST('u', 12, 16, 0), 0),
	IIO_CHAN_SOFT_TIMESTAMP(8),
};

/*
 * Single-shot conversion on voltage channel @ch.
 * Returns the raw big-endian-converted sample, or a negative errno.
 */
static int ad7298_scan_direct(struct ad7298_state *st, unsigned ch)
{
	int ret;

	st->tx_buf[0] = cpu_to_be16(AD7298_WRITE | st->ext_ref |
				    (AD7298_CH(0) >> ch));

	ret = spi_sync(st->spi, &st->scan_single_msg);
	if (ret)
		return ret;

	return be16_to_cpu(st->rx_buf[0]);
}

/*
 * Read the on-chip temperature sensor; result in milli degrees
 * Celsius is stored in *val.  Returns 0 or a negative errno.
 */
static int ad7298_scan_temp(struct ad7298_state *st, int *val)
{
	int tmp, ret;

	tmp = cpu_to_be16(AD7298_WRITE | AD7298_TSENSE |
			  AD7298_TAVG | st->ext_ref);

	ret = spi_write(st->spi, (u8 *)&tmp, 2);
	if (ret)
		return ret;

	tmp = 0;

	ret = spi_write(st->spi, (u8 *)&tmp, 2);
	if (ret)
		return ret;

	usleep_range(101, 1000); /* sleep > 100us */

	ret = spi_read(st->spi, (u8 *)&tmp, 2);
	if (ret)
		return ret;

	tmp = be16_to_cpu(tmp) & RES_MASK(AD7298_BITS);

	/*
	 * One LSB of the ADC corresponds to 0.25 deg C.
	 * The temperature reading is in 12-bit twos complement format
	 */
	if (tmp & (1 << (AD7298_BITS - 1))) {
		tmp = (4096 - tmp) * 250;
		tmp -= (2 * tmp);

	} else {
		tmp *= 250; /* temperature in milli degrees Celsius */
	}

	*val = tmp;

	return 0;
}

/* IIO read_raw hook: raw sample (m == 0) or one of the two scales. */
static int ad7298_read_raw(struct iio_dev *dev_info,
			   struct iio_chan_spec const *chan,
			   int *val,
			   int *val2,
			   long m)
{
	int ret;
	struct ad7298_state *st = iio_priv(dev_info);
	unsigned int scale_uv;

	switch (m) {
	case 0:
		mutex_lock(&dev_info->mlock);
		if (iio_ring_enabled(dev_info)) {
			/* temperature is not captured by the ring buffer */
			if (chan->address == AD7298_CH_TEMP)
				ret = -ENODEV;
			else
				ret = ad7298_scan_from_ring(dev_info,
							    chan->address);
		} else {
			if (chan->address == AD7298_CH_TEMP)
				ret = ad7298_scan_temp(st, val);
			else
				ret = ad7298_scan_direct(st, chan->address);
		}
		mutex_unlock(&dev_info->mlock);

		if (ret < 0)
			return ret;

		if (chan->address != AD7298_CH_TEMP)
			*val = ret & RES_MASK(AD7298_BITS);

		return IIO_VAL_INT;
	case (1 << IIO_CHAN_INFO_SCALE_SHARED):
		scale_uv = (st->int_vref_mv * 1000) >> AD7298_BITS;
		*val = scale_uv / 1000;
		*val2 = (scale_uv % 1000) * 1000;
		return IIO_VAL_INT_PLUS_MICRO;
	case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
		/* temperature channel already scaled to milli deg C */
		*val = 1;
		*val2 = 0;
		return IIO_VAL_INT_PLUS_MICRO;
	}
	return -EINVAL;
}

static const struct iio_info ad7298_info = {
	.read_raw = &ad7298_read_raw,
	.driver_module = THIS_MODULE,
};

static int __devinit ad7298_probe(struct spi_device *spi)
{
	struct ad7298_platform_data *pdata = spi->dev.platform_data;
	struct ad7298_state *st;
	int ret, regdone = 0;
	struct iio_dev *indio_dev = iio_allocate_device(sizeof(*st));

	if (indio_dev == NULL)
		return -ENOMEM;

	st = iio_priv(indio_dev);

	st->reg = regulator_get(&spi->dev, "vcc");
	if (!IS_ERR(st->reg)) {
		ret = regulator_enable(st->reg);
		if (ret)
			goto error_put_reg;
	}

	spi_set_drvdata(spi, indio_dev);

	st->spi = spi;

	indio_dev->name = spi_get_device_id(spi)->name;
	indio_dev->dev.parent = &spi->dev;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->channels = ad7298_channels;
	indio_dev->num_channels = ARRAY_SIZE(ad7298_channels);
	indio_dev->info = &ad7298_info;

	/* Setup default message */

	st->scan_single_xfer[0].tx_buf = &st->tx_buf[0];
	st->scan_single_xfer[0].len = 2;
	st->scan_single_xfer[0].cs_change = 1;
	st->scan_single_xfer[1].tx_buf = &st->tx_buf[1];
	st->scan_single_xfer[1].len = 2;
	st->scan_single_xfer[1].cs_change = 1;
	st->scan_single_xfer[2].rx_buf = &st->rx_buf[0];
	st->scan_single_xfer[2].len = 2;

	spi_message_init(&st->scan_single_msg);
	spi_message_add_tail(&st->scan_single_xfer[0], &st->scan_single_msg);
	spi_message_add_tail(&st->scan_single_xfer[1], &st->scan_single_msg);
	spi_message_add_tail(&st->scan_single_xfer[2], &st->scan_single_msg);

	if (pdata && pdata->vref_mv) {
		st->int_vref_mv = pdata->vref_mv;
		st->ext_ref = AD7298_EXTREF;
	} else {
		st->int_vref_mv = AD7298_INTREF_mV;
	}

	ret = ad7298_register_ring_funcs_and_init(indio_dev);
	if (ret)
		goto error_disable_reg;

	ret = iio_device_register(indio_dev);
	if (ret)
		goto error_disable_reg;
	regdone = 1;

	ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
					  &ad7298_channels[1], /* skip temp0 */
					  ARRAY_SIZE(ad7298_channels) - 1);
	if (ret)
		goto error_cleanup_ring;

	return 0;

error_cleanup_ring:
	ad7298_ring_cleanup(indio_dev);
error_disable_reg:
	if (!IS_ERR(st->reg))
		regulator_disable(st->reg);
error_put_reg:
	if (!IS_ERR(st->reg))
		regulator_put(st->reg);
	if (regdone)
		iio_device_unregister(indio_dev);
	else
		iio_free_device(indio_dev);
	return ret;
}

static int __devexit ad7298_remove(struct spi_device *spi)
{
	struct iio_dev *indio_dev = spi_get_drvdata(spi);
	struct ad7298_state *st = iio_priv(indio_dev);

	iio_ring_buffer_unregister(indio_dev->ring);
	ad7298_ring_cleanup(indio_dev);
	/*
	 * Unregister exactly once: the original called
	 * iio_device_unregister() a second time after the regulator
	 * teardown, double-dropping the device reference.
	 */
	iio_device_unregister(indio_dev);
	if (!IS_ERR(st->reg)) {
		regulator_disable(st->reg);
		regulator_put(st->reg);
	}

	return 0;
}

static const struct spi_device_id ad7298_id[] = {
	{"ad7298", 0},
	{}
};

static struct spi_driver ad7298_driver = {
	.driver = {
		.name	= "ad7298",
		.bus	= &spi_bus_type,
		.owner	= THIS_MODULE,
	},
	.probe		= ad7298_probe,
	.remove		= __devexit_p(ad7298_remove),
	.id_table	= ad7298_id,
};

static int __init ad7298_init(void)
{
	return spi_register_driver(&ad7298_driver);
}
module_init(ad7298_init);

static void __exit ad7298_exit(void)
{
	spi_unregister_driver(&ad7298_driver);
}
module_exit(ad7298_exit);

MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7298 ADC");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("spi:ad7298");
gpl-2.0
moguriso/isw11sc-kernel
net/caif/cfdbgl.c
2955
1599
/* * Copyright (C) ST-Ericsson AB 2010 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com * License terms: GNU General Public License (GPL) version 2 */ #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ #include <linux/stddef.h> #include <linux/slab.h> #include <net/caif/caif_layer.h> #include <net/caif/cfsrvl.h> #include <net/caif/cfpkt.h> #define container_obj(layr) ((struct cfsrvl *) layr) static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt); static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt); struct cflayer *cfdbgl_create(u8 channel_id, struct dev_info *dev_info) { struct cfsrvl *dbg = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); if (!dbg) { pr_warn("Out of memory\n"); return NULL; } caif_assert(offsetof(struct cfsrvl, layer) == 0); memset(dbg, 0, sizeof(struct cfsrvl)); cfsrvl_init(dbg, channel_id, dev_info, false); dbg->layer.receive = cfdbgl_receive; dbg->layer.transmit = cfdbgl_transmit; snprintf(dbg->layer.name, CAIF_LAYER_NAME_SZ - 1, "dbg%d", channel_id); return &dbg->layer; } static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt) { return layr->up->receive(layr->up, pkt); } static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt) { struct cfsrvl *service = container_obj(layr); struct caif_payload_info *info; int ret; if (!cfsrvl_ready(service, &ret)) return ret; /* Add info for MUX-layer to route the packet out */ info = cfpkt_info(pkt); info->channel_id = service->layer.id; info->dev_info = &service->dev_info; return layr->dn->transmit(layr->dn, pkt); }
gpl-2.0
dan82840/Netgear-RBR50
git_home/linux.git/arch/mips/cavium-octeon/executive/cvmx-l2c.c
3211
25450
/***********************license start*************** * Author: Cavium Networks * * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * * Copyright (c) 2003-2010 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this file; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * or visit http://www.gnu.org/licenses/. * * This file may also be available under a different license from Cavium. * Contact Cavium Networks for more information ***********************license end**************************************/ /* * Implementation of the Level 2 Cache (L2C) control, * measurement, and debugging facilities. */ #include <linux/compiler.h> #include <linux/irqflags.h> #include <asm/octeon/cvmx.h> #include <asm/octeon/cvmx-l2c.h> #include <asm/octeon/cvmx-spinlock.h> /* * This spinlock is used internally to ensure that only one core is * performing certain L2 operations at a time. * * NOTE: This only protects calls from within a single application - * if multiple applications or operating systems are running, then it * is up to the user program to coordinate between them. 
*/ cvmx_spinlock_t cvmx_l2c_spinlock; int cvmx_l2c_get_core_way_partition(uint32_t core) { uint32_t field; /* Validate the core number */ if (core >= cvmx_octeon_num_cores()) return -1; if (OCTEON_IS_MODEL(OCTEON_CN63XX)) return cvmx_read_csr(CVMX_L2C_WPAR_PPX(core)) & 0xffff; /* * Use the lower two bits of the coreNumber to determine the * bit offset of the UMSK[] field in the L2C_SPAR register. */ field = (core & 0x3) * 8; /* * Return the UMSK[] field from the appropriate L2C_SPAR * register based on the coreNumber. */ switch (core & 0xC) { case 0x0: return (cvmx_read_csr(CVMX_L2C_SPAR0) & (0xFF << field)) >> field; case 0x4: return (cvmx_read_csr(CVMX_L2C_SPAR1) & (0xFF << field)) >> field; case 0x8: return (cvmx_read_csr(CVMX_L2C_SPAR2) & (0xFF << field)) >> field; case 0xC: return (cvmx_read_csr(CVMX_L2C_SPAR3) & (0xFF << field)) >> field; } return 0; } int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask) { uint32_t field; uint32_t valid_mask; valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1; mask &= valid_mask; /* A UMSK setting which blocks all L2C Ways is an error on some chips */ if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX)) return -1; /* Validate the core number */ if (core >= cvmx_octeon_num_cores()) return -1; if (OCTEON_IS_MODEL(OCTEON_CN63XX)) { cvmx_write_csr(CVMX_L2C_WPAR_PPX(core), mask); return 0; } /* * Use the lower two bits of core to determine the bit offset of the * UMSK[] field in the L2C_SPAR register. */ field = (core & 0x3) * 8; /* * Assign the new mask setting to the UMSK[] field in the appropriate * L2C_SPAR register based on the core_num. 
* */ switch (core & 0xC) { case 0x0: cvmx_write_csr(CVMX_L2C_SPAR0, (cvmx_read_csr(CVMX_L2C_SPAR0) & ~(0xFF << field)) | mask << field); break; case 0x4: cvmx_write_csr(CVMX_L2C_SPAR1, (cvmx_read_csr(CVMX_L2C_SPAR1) & ~(0xFF << field)) | mask << field); break; case 0x8: cvmx_write_csr(CVMX_L2C_SPAR2, (cvmx_read_csr(CVMX_L2C_SPAR2) & ~(0xFF << field)) | mask << field); break; case 0xC: cvmx_write_csr(CVMX_L2C_SPAR3, (cvmx_read_csr(CVMX_L2C_SPAR3) & ~(0xFF << field)) | mask << field); break; } return 0; } int cvmx_l2c_set_hw_way_partition(uint32_t mask) { uint32_t valid_mask; valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1; mask &= valid_mask; /* A UMSK setting which blocks all L2C Ways is an error on some chips */ if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX)) return -1; if (OCTEON_IS_MODEL(OCTEON_CN63XX)) cvmx_write_csr(CVMX_L2C_WPAR_IOBX(0), mask); else cvmx_write_csr(CVMX_L2C_SPAR4, (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask); return 0; } int cvmx_l2c_get_hw_way_partition(void) { if (OCTEON_IS_MODEL(OCTEON_CN63XX)) return cvmx_read_csr(CVMX_L2C_WPAR_IOBX(0)) & 0xffff; else return cvmx_read_csr(CVMX_L2C_SPAR4) & (0xFF); } void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event, uint32_t clear_on_read) { if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX)) { union cvmx_l2c_pfctl pfctl; pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL); switch (counter) { case 0: pfctl.s.cnt0sel = event; pfctl.s.cnt0ena = 1; pfctl.s.cnt0rdclr = clear_on_read; break; case 1: pfctl.s.cnt1sel = event; pfctl.s.cnt1ena = 1; pfctl.s.cnt1rdclr = clear_on_read; break; case 2: pfctl.s.cnt2sel = event; pfctl.s.cnt2ena = 1; pfctl.s.cnt2rdclr = clear_on_read; break; case 3: default: pfctl.s.cnt3sel = event; pfctl.s.cnt3ena = 1; pfctl.s.cnt3rdclr = clear_on_read; break; } cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64); } else { union cvmx_l2c_tadx_prf l2c_tadx_prf; int tad; cvmx_dprintf("L2C performance counter events are different for this chip, mapping 
'event' to cvmx_l2c_tad_event_t\n"); if (clear_on_read) cvmx_dprintf("L2C counters don't support clear on read for this chip\n"); l2c_tadx_prf.u64 = cvmx_read_csr(CVMX_L2C_TADX_PRF(0)); switch (counter) { case 0: l2c_tadx_prf.s.cnt0sel = event; break; case 1: l2c_tadx_prf.s.cnt1sel = event; break; case 2: l2c_tadx_prf.s.cnt2sel = event; break; default: case 3: l2c_tadx_prf.s.cnt3sel = event; break; } for (tad = 0; tad < CVMX_L2C_TADS; tad++) cvmx_write_csr(CVMX_L2C_TADX_PRF(tad), l2c_tadx_prf.u64); } } uint64_t cvmx_l2c_read_perf(uint32_t counter) { switch (counter) { case 0: if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX)) return cvmx_read_csr(CVMX_L2C_PFC0); else { uint64_t counter = 0; int tad; for (tad = 0; tad < CVMX_L2C_TADS; tad++) counter += cvmx_read_csr(CVMX_L2C_TADX_PFC0(tad)); return counter; } case 1: if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX)) return cvmx_read_csr(CVMX_L2C_PFC1); else { uint64_t counter = 0; int tad; for (tad = 0; tad < CVMX_L2C_TADS; tad++) counter += cvmx_read_csr(CVMX_L2C_TADX_PFC1(tad)); return counter; } case 2: if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX)) return cvmx_read_csr(CVMX_L2C_PFC2); else { uint64_t counter = 0; int tad; for (tad = 0; tad < CVMX_L2C_TADS; tad++) counter += cvmx_read_csr(CVMX_L2C_TADX_PFC2(tad)); return counter; } case 3: default: if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX)) return cvmx_read_csr(CVMX_L2C_PFC3); else { uint64_t counter = 0; int tad; for (tad = 0; tad < CVMX_L2C_TADS; tad++) counter += cvmx_read_csr(CVMX_L2C_TADX_PFC3(tad)); return counter; } } } /** * @INTERNAL * Helper function use to fault in cache lines for L2 cache locking * * @addr: Address of base of memory region to read into L2 cache * @len: Length (in bytes) of region to fault in */ static void fault_in(uint64_t addr, int len) { char *ptr; /* * Adjust addr and length so we get all cache lines even for * small ranges spanning two cache 
lines. */ len += addr & CVMX_CACHE_LINE_MASK; addr &= ~CVMX_CACHE_LINE_MASK; ptr = cvmx_phys_to_ptr(addr); /* * Invalidate L1 cache to make sure all loads result in data * being in L2. */ CVMX_DCACHE_INVALIDATE; while (len > 0) { ACCESS_ONCE(*ptr); len -= CVMX_CACHE_LINE_SIZE; ptr += CVMX_CACHE_LINE_SIZE; } } int cvmx_l2c_lock_line(uint64_t addr) { if (OCTEON_IS_MODEL(OCTEON_CN63XX)) { int shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT; uint64_t assoc = cvmx_l2c_get_num_assoc(); uint64_t tag = addr >> shift; uint64_t index = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, cvmx_l2c_address_to_index(addr) << CVMX_L2C_IDX_ADDR_SHIFT); uint64_t way; union cvmx_l2c_tadx_tag l2c_tadx_tag; CVMX_CACHE_LCKL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, addr), 0); /* Make sure we were able to lock the line */ for (way = 0; way < assoc; way++) { CVMX_CACHE_LTGL2I(index | (way << shift), 0); /* make sure CVMX_L2C_TADX_TAG is updated */ CVMX_SYNC; l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0)); if (l2c_tadx_tag.s.valid && l2c_tadx_tag.s.tag == tag) break; } /* Check if a valid line is found */ if (way >= assoc) { /* cvmx_dprintf("ERROR: cvmx_l2c_lock_line: line not found for locking at 0x%llx address\n", (unsigned long long)addr); */ return -1; } /* Check if lock bit is not set */ if (!l2c_tadx_tag.s.lock) { /* cvmx_dprintf("ERROR: cvmx_l2c_lock_line: Not able to lock at 0x%llx address\n", (unsigned long long)addr); */ return -1; } return way; } else { int retval = 0; union cvmx_l2c_dbg l2cdbg; union cvmx_l2c_lckbase lckbase; union cvmx_l2c_lckoff lckoff; union cvmx_l2t_err l2t_err; cvmx_spinlock_lock(&cvmx_l2c_spinlock); l2cdbg.u64 = 0; lckbase.u64 = 0; lckoff.u64 = 0; /* Clear l2t error bits if set */ l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR); l2t_err.s.lckerr = 1; l2t_err.s.lckerr2 = 1; cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64); addr &= ~CVMX_CACHE_LINE_MASK; /* Set this core as debug core */ l2cdbg.s.ppnum = cvmx_get_core_num(); CVMX_SYNC; cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64); 
cvmx_read_csr(CVMX_L2C_DBG); lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */ cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64); cvmx_read_csr(CVMX_L2C_LCKOFF); if (((union cvmx_l2c_cfg)(cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) { int alias_shift = CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1; uint64_t addr_tmp = addr ^ (addr & ((1 << alias_shift) - 1)) >> CVMX_L2_SET_BITS; lckbase.s.lck_base = addr_tmp >> 7; } else { lckbase.s.lck_base = addr >> 7; } lckbase.s.lck_ena = 1; cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64); /* Make sure it gets there */ cvmx_read_csr(CVMX_L2C_LCKBASE); fault_in(addr, CVMX_CACHE_LINE_SIZE); lckbase.s.lck_ena = 0; cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64); /* Make sure it gets there */ cvmx_read_csr(CVMX_L2C_LCKBASE); /* Stop being debug core */ cvmx_write_csr(CVMX_L2C_DBG, 0); cvmx_read_csr(CVMX_L2C_DBG); l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR); if (l2t_err.s.lckerr || l2t_err.s.lckerr2) retval = 1; /* We were unable to lock the line */ cvmx_spinlock_unlock(&cvmx_l2c_spinlock); return retval; } } int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len) { int retval = 0; /* Round start/end to cache line boundaries */ len += start & CVMX_CACHE_LINE_MASK; start &= ~CVMX_CACHE_LINE_MASK; len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK; while (len) { retval += cvmx_l2c_lock_line(start); start += CVMX_CACHE_LINE_SIZE; len -= CVMX_CACHE_LINE_SIZE; } return retval; } void cvmx_l2c_flush(void) { uint64_t assoc, set; uint64_t n_assoc, n_set; n_set = cvmx_l2c_get_num_sets(); n_assoc = cvmx_l2c_get_num_assoc(); if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { uint64_t address; /* These may look like constants, but they aren't... 
*/ int assoc_shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT; int set_shift = CVMX_L2C_IDX_ADDR_SHIFT; for (set = 0; set < n_set; set++) { for (assoc = 0; assoc < n_assoc; assoc++) { address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, (assoc << assoc_shift) | (set << set_shift)); CVMX_CACHE_WBIL2I(address, 0); } } } else { for (set = 0; set < n_set; set++) for (assoc = 0; assoc < n_assoc; assoc++) cvmx_l2c_flush_line(assoc, set); } } int cvmx_l2c_unlock_line(uint64_t address) { if (OCTEON_IS_MODEL(OCTEON_CN63XX)) { int assoc; union cvmx_l2c_tag tag; uint32_t tag_addr; uint32_t index = cvmx_l2c_address_to_index(address); tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1)); /* * For 63XX, we can flush a line by using the physical * address directly, so finding the cache line used by * the address is only required to provide the proper * return value for the function. */ for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) { tag = cvmx_l2c_get_tag(assoc, index); if (tag.s.V && (tag.s.addr == tag_addr)) { CVMX_CACHE_WBIL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, address), 0); return tag.s.L; } } } else { int assoc; union cvmx_l2c_tag tag; uint32_t tag_addr; uint32_t index = cvmx_l2c_address_to_index(address); /* Compute portion of address that is stored in tag */ tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1)); for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) { tag = cvmx_l2c_get_tag(assoc, index); if (tag.s.V && (tag.s.addr == tag_addr)) { cvmx_l2c_flush_line(assoc, index); return tag.s.L; } } } return 0; } int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len) { int num_unlocked = 0; /* Round start/end to cache line boundaries */ len += start & CVMX_CACHE_LINE_MASK; start &= ~CVMX_CACHE_LINE_MASK; len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK; while (len > 0) { num_unlocked += cvmx_l2c_unlock_line(start); start += CVMX_CACHE_LINE_SIZE; len -= CVMX_CACHE_LINE_SIZE; } return 
num_unlocked; } /* * Internal l2c tag types. These are converted to a generic structure * that can be used on all chips. */ union __cvmx_l2c_tag { uint64_t u64; struct cvmx_l2c_tag_cn50xx { uint64_t reserved:40; uint64_t V:1; /* Line valid */ uint64_t D:1; /* Line dirty */ uint64_t L:1; /* Line locked */ uint64_t U:1; /* Use, LRU eviction */ uint64_t addr:20; /* Phys mem addr (33..14) */ } cn50xx; struct cvmx_l2c_tag_cn30xx { uint64_t reserved:41; uint64_t V:1; /* Line valid */ uint64_t D:1; /* Line dirty */ uint64_t L:1; /* Line locked */ uint64_t U:1; /* Use, LRU eviction */ uint64_t addr:19; /* Phys mem addr (33..15) */ } cn30xx; struct cvmx_l2c_tag_cn31xx { uint64_t reserved:42; uint64_t V:1; /* Line valid */ uint64_t D:1; /* Line dirty */ uint64_t L:1; /* Line locked */ uint64_t U:1; /* Use, LRU eviction */ uint64_t addr:18; /* Phys mem addr (33..16) */ } cn31xx; struct cvmx_l2c_tag_cn38xx { uint64_t reserved:43; uint64_t V:1; /* Line valid */ uint64_t D:1; /* Line dirty */ uint64_t L:1; /* Line locked */ uint64_t U:1; /* Use, LRU eviction */ uint64_t addr:17; /* Phys mem addr (33..17) */ } cn38xx; struct cvmx_l2c_tag_cn58xx { uint64_t reserved:44; uint64_t V:1; /* Line valid */ uint64_t D:1; /* Line dirty */ uint64_t L:1; /* Line locked */ uint64_t U:1; /* Use, LRU eviction */ uint64_t addr:16; /* Phys mem addr (33..18) */ } cn58xx; struct cvmx_l2c_tag_cn58xx cn56xx; /* 2048 sets */ struct cvmx_l2c_tag_cn31xx cn52xx; /* 512 sets */ }; /** * @INTERNAL * Function to read a L2C tag. This code make the current core * the 'debug core' for the L2. This code must only be executed by * 1 core at a time. * * @assoc: Association (way) of the tag to dump * @index: Index of the cacheline * * Returns The Octeon model specific tag structure. This is * translated by a wrapper function to a generic form that is * easier for applications to use. 
*/ static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index) { uint64_t debug_tag_addr = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, (index << 7) + 96); uint64_t core = cvmx_get_core_num(); union __cvmx_l2c_tag tag_val; uint64_t dbg_addr = CVMX_L2C_DBG; unsigned long flags; union cvmx_l2c_dbg debug_val; debug_val.u64 = 0; /* * For low core count parts, the core number is always small * enough to stay in the correct field and not set any * reserved bits. */ debug_val.s.ppnum = core; debug_val.s.l2t = 1; debug_val.s.set = assoc; local_irq_save(flags); /* * Make sure core is quiet (no prefetches, etc.) before * entering debug mode. */ CVMX_SYNC; /* Flush L1 to make sure debug load misses L1 */ CVMX_DCACHE_INVALIDATE; /* * The following must be done in assembly as when in debug * mode all data loads from L2 return special debug data, not * normal memory contents. Also, interrupts must be disabled, * since if an interrupt occurs while in debug mode the ISR * will get debug data from all its memory * reads instead of * the contents of memory. 
*/ asm volatile ( ".set push\n\t" ".set mips64\n\t" ".set noreorder\n\t" "sd %[dbg_val], 0(%[dbg_addr])\n\t" /* Enter debug mode, wait for store */ "ld $0, 0(%[dbg_addr])\n\t" "ld %[tag_val], 0(%[tag_addr])\n\t" /* Read L2C tag data */ "sd $0, 0(%[dbg_addr])\n\t" /* Exit debug mode, wait for store */ "ld $0, 0(%[dbg_addr])\n\t" "cache 9, 0($0)\n\t" /* Invalidate dcache to discard debug data */ ".set pop" : [tag_val] "=r" (tag_val) : [dbg_addr] "r" (dbg_addr), [dbg_val] "r" (debug_val), [tag_addr] "r" (debug_tag_addr) : "memory"); local_irq_restore(flags); return tag_val; } union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index) { union cvmx_l2c_tag tag; tag.u64 = 0; if ((int)association >= cvmx_l2c_get_num_assoc()) { cvmx_dprintf("ERROR: cvmx_l2c_get_tag association out of range\n"); return tag; } if ((int)index >= cvmx_l2c_get_num_sets()) { cvmx_dprintf("ERROR: cvmx_l2c_get_tag index out of range (arg: %d, max: %d)\n", (int)index, cvmx_l2c_get_num_sets()); return tag; } if (OCTEON_IS_MODEL(OCTEON_CN63XX)) { union cvmx_l2c_tadx_tag l2c_tadx_tag; uint64_t address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, (association << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) | (index << CVMX_L2C_IDX_ADDR_SHIFT)); /* * Use L2 cache Index load tag cache instruction, as * hardware loads the virtual tag for the L2 cache * block with the contents of L2C_TAD0_TAG * register. */ CVMX_CACHE_LTGL2I(address, 0); CVMX_SYNC; /* make sure CVMX_L2C_TADX_TAG is updated */ l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0)); tag.s.V = l2c_tadx_tag.s.valid; tag.s.D = l2c_tadx_tag.s.dirty; tag.s.L = l2c_tadx_tag.s.lock; tag.s.U = l2c_tadx_tag.s.use; tag.s.addr = l2c_tadx_tag.s.tag; } else { union __cvmx_l2c_tag tmp_tag; /* __read_l2_tag is intended for internal use only */ tmp_tag = __read_l2_tag(association, index); /* * Convert all tag structure types to generic version, * as it can represent all models. 
*/ if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) { tag.s.V = tmp_tag.cn58xx.V; tag.s.D = tmp_tag.cn58xx.D; tag.s.L = tmp_tag.cn58xx.L; tag.s.U = tmp_tag.cn58xx.U; tag.s.addr = tmp_tag.cn58xx.addr; } else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) { tag.s.V = tmp_tag.cn38xx.V; tag.s.D = tmp_tag.cn38xx.D; tag.s.L = tmp_tag.cn38xx.L; tag.s.U = tmp_tag.cn38xx.U; tag.s.addr = tmp_tag.cn38xx.addr; } else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) { tag.s.V = tmp_tag.cn31xx.V; tag.s.D = tmp_tag.cn31xx.D; tag.s.L = tmp_tag.cn31xx.L; tag.s.U = tmp_tag.cn31xx.U; tag.s.addr = tmp_tag.cn31xx.addr; } else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) { tag.s.V = tmp_tag.cn30xx.V; tag.s.D = tmp_tag.cn30xx.D; tag.s.L = tmp_tag.cn30xx.L; tag.s.U = tmp_tag.cn30xx.U; tag.s.addr = tmp_tag.cn30xx.addr; } else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) { tag.s.V = tmp_tag.cn50xx.V; tag.s.D = tmp_tag.cn50xx.D; tag.s.L = tmp_tag.cn50xx.L; tag.s.U = tmp_tag.cn50xx.U; tag.s.addr = tmp_tag.cn50xx.addr; } else { cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__); } } return tag; } uint32_t cvmx_l2c_address_to_index(uint64_t addr) { uint64_t idx = addr >> CVMX_L2C_IDX_ADDR_SHIFT; int indxalias = 0; if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { union cvmx_l2c_ctl l2c_ctl; l2c_ctl.u64 = cvmx_read_csr(CVMX_L2C_CTL); indxalias = !l2c_ctl.s.disidxalias; } else { union cvmx_l2c_cfg l2c_cfg; l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG); indxalias = l2c_cfg.s.idxalias; } if (indxalias) { if (OCTEON_IS_MODEL(OCTEON_CN63XX)) { uint32_t a_14_12 = (idx / (CVMX_L2C_MEMBANK_SELECT_SIZE/(1<<CVMX_L2C_IDX_ADDR_SHIFT))) & 0x7; idx ^= idx / cvmx_l2c_get_num_sets(); idx ^= a_14_12; } else { idx ^= ((addr & CVMX_L2C_ALIAS_MASK) >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT); } } idx &= CVMX_L2C_IDX_MASK; return idx; } int cvmx_l2c_get_cache_size_bytes(void) { return cvmx_l2c_get_num_sets() * cvmx_l2c_get_num_assoc() * CVMX_CACHE_LINE_SIZE; } /** * Return log base 2 of the number of sets in the L2 cache 
* Returns */ int cvmx_l2c_get_set_bits(void) { int l2_set_bits; if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)) l2_set_bits = 11; /* 2048 sets */ else if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)) l2_set_bits = 10; /* 1024 sets */ else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) l2_set_bits = 9; /* 512 sets */ else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) l2_set_bits = 8; /* 256 sets */ else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) l2_set_bits = 7; /* 128 sets */ else { cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__); l2_set_bits = 11; /* 2048 sets */ } return l2_set_bits; } /* Return the number of sets in the L2 Cache */ int cvmx_l2c_get_num_sets(void) { return 1 << cvmx_l2c_get_set_bits(); } /* Return the number of associations in the L2 Cache */ int cvmx_l2c_get_num_assoc(void) { int l2_assoc; if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN38XX)) l2_assoc = 8; else if (OCTEON_IS_MODEL(OCTEON_CN63XX)) l2_assoc = 16; else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN30XX)) l2_assoc = 4; else { cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__); l2_assoc = 8; } /* Check to see if part of the cache is disabled */ if (OCTEON_IS_MODEL(OCTEON_CN63XX)) { union cvmx_mio_fus_dat3 mio_fus_dat3; mio_fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3); /* * cvmx_mio_fus_dat3.s.l2c_crip fuses map as follows * <2> will be not used for 63xx * <1> disables 1/2 ways * <0> disables 1/4 ways * They are cumulative, so for 63xx: * <1> <0> * 0 0 16-way 2MB cache * 0 1 12-way 1.5MB cache * 1 0 8-way 1MB cache * 1 1 4-way 512KB cache */ if (mio_fus_dat3.s.l2c_crip == 3) l2_assoc = 4; else if (mio_fus_dat3.s.l2c_crip == 2) l2_assoc = 8; else if (mio_fus_dat3.s.l2c_crip == 1) l2_assoc = 12; } else { union cvmx_l2d_fus3 val; val.u64 = cvmx_read_csr(CVMX_L2D_FUS3); /* * Using 
shifts here, as bit position names are * different for each model but they all mean the * same. */ if ((val.u64 >> 35) & 0x1) l2_assoc = l2_assoc >> 2; else if ((val.u64 >> 34) & 0x1) l2_assoc = l2_assoc >> 1; } return l2_assoc; } /** * Flush a line from the L2 cache * This should only be called from one core at a time, as this routine * sets the core to the 'debug' core in order to flush the line. * * @assoc: Association (or way) to flush * @index: Index to flush */ void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index) { /* Check the range of the index. */ if (index > (uint32_t)cvmx_l2c_get_num_sets()) { cvmx_dprintf("ERROR: cvmx_l2c_flush_line index out of range.\n"); return; } /* Check the range of association. */ if (assoc > (uint32_t)cvmx_l2c_get_num_assoc()) { cvmx_dprintf("ERROR: cvmx_l2c_flush_line association out of range.\n"); return; } if (OCTEON_IS_MODEL(OCTEON_CN63XX)) { uint64_t address; /* Create the address based on index and association. * Bits<20:17> select the way of the cache block involved in * the operation * Bits<16:7> of the effect address select the index */ address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, (assoc << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) | (index << CVMX_L2C_IDX_ADDR_SHIFT)); CVMX_CACHE_WBIL2I(address, 0); } else { union cvmx_l2c_dbg l2cdbg; l2cdbg.u64 = 0; if (!OCTEON_IS_MODEL(OCTEON_CN30XX)) l2cdbg.s.ppnum = cvmx_get_core_num(); l2cdbg.s.finv = 1; l2cdbg.s.set = assoc; cvmx_spinlock_lock(&cvmx_l2c_spinlock); /* * Enter debug mode, and make sure all other writes * complete before we enter debug mode */ CVMX_SYNC; cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64); cvmx_read_csr(CVMX_L2C_DBG); CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, index * CVMX_CACHE_LINE_SIZE), 0); /* Exit debug mode */ CVMX_SYNC; cvmx_write_csr(CVMX_L2C_DBG, 0); cvmx_read_csr(CVMX_L2C_DBG); cvmx_spinlock_unlock(&cvmx_l2c_spinlock); } }
gpl-2.0
defconoi/L-Kernel-Mako
drivers/staging/iio/industrialio-event.c
4235
11499
/* Industrial I/O event handling
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Based on elements of hwmon and input subsystems.
 */

#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include "iio.h"
#include "iio_core.h"
#include "sysfs.h"
#include "events.h"

/**
 * struct iio_event_interface - chrdev interface for an event line
 * @wait:		wait queue to allow blocking reads of events
 * @det_events:		list of detected events
 * @dev_attr_list:	list of event interface sysfs attribute
 * @flags:		file operations related flags including busy flag.
 * @group:		event interface sysfs attribute group
 *
 * Note: ev_int->wait.lock does double duty throughout this file — it is
 * both the waitqueue lock and the lock protecting @det_events and @flags.
 */
struct iio_event_interface {
	wait_queue_head_t	wait;
	DECLARE_KFIFO(det_events, struct iio_event_data, 16);

	struct list_head	dev_attr_list;
	unsigned long		flags;
	struct attribute_group	group;
};

/*
 * Push one event (code + timestamp) into the fifo and wake any poller,
 * but only while userspace holds the event chrdev open (busy bit set);
 * otherwise the event is silently dropped.
 *
 * NOTE(review): uses plain spin_lock (not irqsave) on wait.lock — looks
 * like callers are expected not to be in hard-IRQ context; confirm
 * against the drivers that call iio_push_event().
 */
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
{
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	struct iio_event_data ev;
	int copied;

	/* Does anyone care? */
	spin_lock(&ev_int->wait.lock);
	if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {

		ev.id = ev_code;
		ev.timestamp = timestamp;

		/* kfifo_put returns number of elements stored (0 = full). */
		copied = kfifo_put(&ev_int->det_events, &ev);
		if (copied != 0)
			wake_up_locked_poll(&ev_int->wait, POLLIN);
	}
	spin_unlock(&ev_int->wait.lock);

	return 0;
}
EXPORT_SYMBOL(iio_push_event);

/**
 * iio_event_poll() - poll the event queue to find out if it has data
 */
static unsigned int iio_event_poll(struct file *filep,
			     struct poll_table_struct *wait)
{
	struct iio_event_interface *ev_int = filep->private_data;
	unsigned int events = 0;

	poll_wait(filep, &ev_int->wait, wait);

	spin_lock(&ev_int->wait.lock);
	if (!kfifo_is_empty(&ev_int->det_events))
		events = POLLIN | POLLRDNORM;
	spin_unlock(&ev_int->wait.lock);

	return events;
}

/*
 * Blocking (or O_NONBLOCK) read of queued events.  Returns -EINVAL for
 * undersized buffers, -EAGAIN when non-blocking and empty, otherwise the
 * number of bytes copied.
 *
 * NOTE(review): kfifo_to_user() (a copy_to_user under the hood) runs with
 * the wait.lock spinlock held; a page fault here could sleep in atomic
 * context — flagging for confirmation rather than restructuring.
 */
static ssize_t iio_event_chrdev_read(struct file *filep,
				     char __user *buf,
				     size_t count,
				     loff_t *f_ps)
{
	struct iio_event_interface *ev_int = filep->private_data;
	unsigned int copied;
	int ret;

	if (count < sizeof(struct iio_event_data))
		return -EINVAL;

	spin_lock(&ev_int->wait.lock);
	if (kfifo_is_empty(&ev_int->det_events)) {
		if (filep->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto error_unlock;
		}
		/* Blocking on device; waiting for something to be there */
		ret = wait_event_interruptible_locked(ev_int->wait,
					!kfifo_is_empty(&ev_int->det_events));
		if (ret)
			goto error_unlock;
		/* Single access device so no one else can get the data */
	}

	ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);

error_unlock:
	spin_unlock(&ev_int->wait.lock);

	return ret ? ret : copied;
}

/* Release: clear the busy bit so the chrdev can be reopened, and drop any
 * queued events so a fresh open starts with an empty fifo. */
static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_event_interface *ev_int = filep->private_data;

	spin_lock(&ev_int->wait.lock);
	__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
	/*
	 * In order to maintain a clean state for reopening,
	 * clear out any awaiting events. The mask will prevent
	 * any new __iio_push_event calls running.
	 */
	kfifo_reset_out(&ev_int->det_events);
	spin_unlock(&ev_int->wait.lock);

	return 0;
}

static const struct file_operations iio_event_chrdev_fileops = {
	.read =  iio_event_chrdev_read,
	.poll =  iio_event_poll,
	.release = iio_event_chrdev_release,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

/* Hand userspace an anon fd for the event stream.  The busy bit enforces
 * single-open: a second open attempt gets -EBUSY until release. */
int iio_event_getfd(struct iio_dev *indio_dev)
{
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	int fd;

	if (ev_int == NULL)
		return -ENODEV;

	spin_lock(&ev_int->wait.lock);
	if (__test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
		spin_unlock(&ev_int->wait.lock);
		return -EBUSY;
	}
	spin_unlock(&ev_int->wait.lock);
	fd = anon_inode_getfd("iio:event",
				&iio_event_chrdev_fileops, ev_int, O_RDONLY);
	if (fd < 0) {
		/* Roll back the busy bit if fd creation failed. */
		spin_lock(&ev_int->wait.lock);
		__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
		spin_unlock(&ev_int->wait.lock);
	}
	return fd;
}

/* sysfs name fragments, indexed by the event type / direction enums. */
static const char * const iio_ev_type_text[] = {
	[IIO_EV_TYPE_THRESH] = "thresh",
	[IIO_EV_TYPE_MAG] = "mag",
	[IIO_EV_TYPE_ROC] = "roc",
	[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
	[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
};

static const char * const iio_ev_dir_text[] = {
	[IIO_EV_DIR_EITHER] = "either",
	[IIO_EV_DIR_RISING] = "rising",
	[IIO_EV_DIR_FALLING] = "falling"
};

/* sysfs store for <event>_en: parse a boolean and forward it to the
 * driver's write_event_config callback.
 * NOTE(review): unlike iio_ev_value_store() below, the callback is not
 * NULL-checked before the call — confirm all registered drivers with
 * event attrs provide write_event_config. */
static ssize_t iio_ev_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret;
	bool val;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	ret = indio_dev->info->write_event_config(indio_dev,
						  this_attr->address,
						  val);
	return (ret < 0) ? ret : len;
}

/* sysfs show for <event>_en: report the enabled state from the driver. */
static ssize_t iio_ev_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val = indio_dev->info->read_event_config(indio_dev,
						     this_attr->address);

	if (val < 0)
		return val;
	else
		return sprintf(buf, "%d\n", val);
}

/* sysfs show for <event>_value: read the event threshold/value. */
static ssize_t iio_ev_value_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, ret;

	ret = indio_dev->info->read_event_value(indio_dev,
						this_attr->address, &val);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", val);
}

/* sysfs store for <event>_value: parse and forward the new value. */
static ssize_t iio_ev_value_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	unsigned long val;
	int ret;

	if (!indio_dev->info->write_event_value)
		return -EINVAL;

	ret = strict_strtoul(buf, 10, &val);
	if (ret)
		return ret;

	ret = indio_dev->info->write_event_value(indio_dev, this_attr->address,
						 val);
	if (ret < 0)
		return ret;

	return len;
}

/* Create the _en and _value sysfs attributes for every event a channel
 * advertises in event_mask.  Returns the number of attributes added, or a
 * negative errno.  Bit index i encodes type (i / IIO_EV_DIR_MAX) and
 * direction (i % IIO_EV_DIR_MAX). */
static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
				      struct iio_chan_spec const *chan)
{
	int ret = 0, i, attrcount = 0;
	u64 mask = 0;
	char *postfix;
	if (!chan->event_mask)
		return 0;

	for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
		postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		if (chan->modified)
			mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel,
						  i/IIO_EV_DIR_MAX,
						  i%IIO_EV_DIR_MAX);
		else if (chan->differential)
			/* NOTE(review): here the i%DIR and i/DIR terms appear
			 * in the opposite order to the modified/unmodified
			 * branches — verify against the IIO_EVENT_CODE macro
			 * parameter order before assuming it is intentional. */
			mask = IIO_EVENT_CODE(chan->type,
					      0,
					      0,
					      i%IIO_EV_DIR_MAX,
					      i/IIO_EV_DIR_MAX,
					      0,
					      chan->channel,
					      chan->channel2);
		else
			mask = IIO_UNMOD_EVENT_CODE(chan->type,
						    chan->channel,
						    i/IIO_EV_DIR_MAX,
						    i%IIO_EV_DIR_MAX);

		/* (cosmetic inconsistency: '&' on iio_ev_state_show here,
		 * plain function name for iio_ev_value_show below — both
		 * decay to the same pointer) */
		ret = __iio_add_chan_devattr(postfix,
					     chan,
					     &iio_ev_state_show,
					     iio_ev_state_store,
					     mask,
					     0,
					     &indio_dev->dev,
					     &indio_dev->event_interface->
					     dev_attr_list);
		kfree(postfix);
		if (ret)
			goto error_ret;
		attrcount++;
		postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		ret = __iio_add_chan_devattr(postfix, chan,
					     iio_ev_value_show,
					     iio_ev_value_store,
					     mask,
					     0,
					     &indio_dev->dev,
					     &indio_dev->event_interface->
					     dev_attr_list);
		kfree(postfix);
		if (ret)
			goto error_ret;
		attrcount++;
	}
	ret = attrcount;
error_ret:
	return ret;
}

/* Free every dynamically created event attribute (name + holder struct). */
static inline void __iio_remove_event_config_attrs(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	list_for_each_entry_safe(p, n,
				 &indio_dev->event_interface->
				 dev_attr_list, l) {
		kfree(p->dev_attr.attr.name);
		kfree(p);
	}
}

/* Walk all channels and create their event attributes; on any failure the
 * partially built list is torn down.  Returns total attribute count. */
static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
{
	int j, ret, attrcount = 0;

	INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);
	/* Dynically created from the channels array */
	for (j = 0; j < indio_dev->num_channels; j++) {
		ret = iio_device_add_event_sysfs(indio_dev,
						 &indio_dev->channels[j]);
		if (ret < 0)
			goto error_clear_attrs;
		attrcount += ret;
	}
	return attrcount;

error_clear_attrs:
	__iio_remove_event_config_attrs(indio_dev);

	return ret;
}

/* True if any channel advertises at least one event. */
static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
{
	int j;

	for (j = 0; j < indio_dev->num_channels; j++)
		if (indio_dev->channels[j].event_mask != 0)
			return true;
	return false;
}

/* One-time init of the fifo and waitqueue inside the event interface. */
static void iio_setup_ev_int(struct iio_event_interface *ev_int)
{
	INIT_KFIFO(ev_int->det_events);
	init_waitqueue_head(&ev_int->wait);
}

static const char *iio_event_group_name = "events";

/* Build the complete "events" sysfs group: driver-supplied static attrs
 * plus per-channel dynamic attrs, then hang it on the device's groups
 * array.  Returns 0 or a negative errno (allocation failures roll back
 * everything built so far). */
int iio_device_register_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	int ret = 0, attrcount_orig = 0, attrcount, attrn;
	struct attribute **attr;

	/* Nothing to do when the driver defines no events at all. */
	if (!(indio_dev->info->event_attrs ||
	      iio_check_for_dynamic_events(indio_dev)))
		return 0;

	indio_dev->event_interface =
		kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
	if (indio_dev->event_interface == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	iio_setup_ev_int(indio_dev->event_interface);
	if (indio_dev->info->event_attrs != NULL) {
		/* Count the driver's static event attributes. */
		attr = indio_dev->info->event_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	if (indio_dev->channels) {
		ret = __iio_add_event_config_attrs(indio_dev);
		if (ret < 0)
			goto error_free_setup_event_lines;
		attrcount += ret;
	}

	indio_dev->event_interface->group.name = iio_event_group_name;
	/* +1 for the NULL terminator the attribute array requires. */
	indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
							  sizeof(indio_dev->event_interface->group.attrs[0]),
							  GFP_KERNEL);
	if (indio_dev->event_interface->group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_setup_event_lines;
	}
	if (indio_dev->info->event_attrs)
		memcpy(indio_dev->event_interface->group.attrs,
		       indio_dev->info->event_attrs->attrs,
		       sizeof(indio_dev->event_interface->group.attrs[0])
		       *attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p,
			    &indio_dev->event_interface->dev_attr_list,
			    l)
		indio_dev->event_interface->group.attrs[attrn++] =
			&p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] =
		&indio_dev->event_interface->group;

	return 0;

error_free_setup_event_lines:
	__iio_remove_event_config_attrs(indio_dev);
	kfree(indio_dev->event_interface);
error_ret:

	return ret;
}

/* Free everything iio_device_register_eventset() allocated.  Safe to call
 * when no event interface was created (early return on NULL). */
void iio_device_unregister_eventset(struct iio_dev *indio_dev)
{
	if (indio_dev->event_interface == NULL)
		return;
	__iio_remove_event_config_attrs(indio_dev);
	kfree(indio_dev->event_interface->group.attrs);
	kfree(indio_dev->event_interface);
}
gpl-2.0
PerLycke/android_kernel_moto_shamu
arch/arm/mach-footbridge/cats-pci.c
4747
1397
/* * linux/arch/arm/mach-footbridge/cats-pci.c * * PCI bios-type initialisation for PCI machines * * Bits taken from various places. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/init.h> #include <asm/irq.h> #include <asm/mach/pci.h> #include <asm/mach-types.h> /* cats host-specific stuff */ static int irqmap_cats[] __initdata = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 }; static u8 cats_no_swizzle(struct pci_dev *dev, u8 *pin) { return 0; } static int __init cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { if (dev->irq >= 255) return -1; /* not a valid interrupt. */ if (dev->irq >= 128) return dev->irq & 0x1f; if (dev->irq >= 1 && dev->irq <= 4) return irqmap_cats[dev->irq - 1]; if (dev->irq != 0) printk("PCI: device %02x:%02x has unknown irq line %x\n", dev->bus->number, dev->devfn, dev->irq); return -1; } /* * why not the standard PCI swizzle? does this prevent 4-port tulip * cards being used (ie, pci-pci bridge based cards)? */ static struct hw_pci cats_pci __initdata = { .swizzle = cats_no_swizzle, .map_irq = cats_map_irq, .nr_controllers = 1, .ops = &dc21285_ops, .setup = dc21285_setup, .preinit = dc21285_preinit, .postinit = dc21285_postinit, }; static int __init cats_pci_init(void) { if (machine_is_cats()) pci_common_init(&cats_pci); return 0; } subsys_initcall(cats_pci_init);
gpl-2.0
javilonas/Hammerhead
drivers/pcmcia/cistpl.c
11659
35786
/*
 * cistpl.c -- 16-bit PCMCIA Card Information Structure parser
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * The initial developer of the original code is David A. Hinds
 * <dahinds@users.sourceforge.net>.  Portions created by David A. Hinds
 * are Copyright (C) 1999 David A. Hinds.  All Rights Reserved.
 *
 * (C) 1999	David A. Hinds
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>

#include <pcmcia/ss.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/cistpl.h>
#include "cs_internal.h"

/*
 * Lookup tables for the CIS "extended speed/power byte" encoding:
 * bits 3..6 select a mantissa (tenths), bits 0..2 select a power-of-ten
 * exponent.  Used by the SPEED_CVT/POWER_CVT macros below.
 */
static const u_char mantissa[] = {
    10, 12, 13, 15, 20, 25, 30, 35,
    40, 45, 50, 55, 60, 70, 80, 90
};

static const u_int exponent[] = {
    1, 10, 100, 1000, 10000, 100000, 1000000, 10000000
};

/* Convert an extended speed byte to a time in nanoseconds */
#define SPEED_CVT(v) \
    (mantissa[(((v)>>3)&15)-1] * exponent[(v)&7] / 10)
/* Convert a power byte to a current in 0.1 microamps */
#define POWER_CVT(v) \
    (mantissa[((v)>>3)&15] * exponent[(v)&7] / 10)
#define POWER_SCALE(v)		(exponent[(v)&7])

/* Upper limit on reasonable # of tuples */
#define MAX_TUPLES		200

/* Bits in IRQInfo1 field */
#define IRQ_INFO2_VALID		0x10

/* 16-bit CIS?
*/ static int cis_width; module_param(cis_width, int, 0444); void release_cis_mem(struct pcmcia_socket *s) { mutex_lock(&s->ops_mutex); if (s->cis_mem.flags & MAP_ACTIVE) { s->cis_mem.flags &= ~MAP_ACTIVE; s->ops->set_mem_map(s, &s->cis_mem); if (s->cis_mem.res) { release_resource(s->cis_mem.res); kfree(s->cis_mem.res); s->cis_mem.res = NULL; } iounmap(s->cis_virt); s->cis_virt = NULL; } mutex_unlock(&s->ops_mutex); } /** * set_cis_map() - map the card memory at "card_offset" into virtual space. * * If flags & MAP_ATTRIB, map the attribute space, otherwise * map the memory space. * * Must be called with ops_mutex held. */ static void __iomem *set_cis_map(struct pcmcia_socket *s, unsigned int card_offset, unsigned int flags) { pccard_mem_map *mem = &s->cis_mem; int ret; if (!(s->features & SS_CAP_STATIC_MAP) && (mem->res == NULL)) { mem->res = pcmcia_find_mem_region(0, s->map_size, s->map_size, 0, s); if (mem->res == NULL) { dev_printk(KERN_NOTICE, &s->dev, "cs: unable to map card memory!\n"); return NULL; } s->cis_virt = NULL; } if (!(s->features & SS_CAP_STATIC_MAP) && (!s->cis_virt)) s->cis_virt = ioremap(mem->res->start, s->map_size); mem->card_start = card_offset; mem->flags = flags; ret = s->ops->set_mem_map(s, mem); if (ret) { iounmap(s->cis_virt); s->cis_virt = NULL; return NULL; } if (s->features & SS_CAP_STATIC_MAP) { if (s->cis_virt) iounmap(s->cis_virt); s->cis_virt = ioremap(mem->static_start, s->map_size); } return s->cis_virt; } /* Bits in attr field */ #define IS_ATTR 1 #define IS_INDIRECT 8 /** * pcmcia_read_cis_mem() - low-level function to read CIS memory * * must be called with ops_mutex held */ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr, u_int len, void *ptr) { void __iomem *sys, *end; unsigned char *buf = ptr; dev_dbg(&s->dev, "pcmcia_read_cis_mem(%d, %#x, %u)\n", attr, addr, len); if (attr & IS_INDIRECT) { /* Indirect accesses use a bunch of special registers at fixed locations in common memory */ u_char flags = 
ICTRL0_COMMON|ICTRL0_AUTOINC|ICTRL0_BYTEGRAN; if (attr & IS_ATTR) { addr *= 2; flags = ICTRL0_AUTOINC; } sys = set_cis_map(s, 0, MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0)); if (!sys) { dev_dbg(&s->dev, "could not map memory\n"); memset(ptr, 0xff, len); return -1; } writeb(flags, sys+CISREG_ICTRL0); writeb(addr & 0xff, sys+CISREG_IADDR0); writeb((addr>>8) & 0xff, sys+CISREG_IADDR1); writeb((addr>>16) & 0xff, sys+CISREG_IADDR2); writeb((addr>>24) & 0xff, sys+CISREG_IADDR3); for ( ; len > 0; len--, buf++) *buf = readb(sys+CISREG_IDATA0); } else { u_int inc = 1, card_offset, flags; if (addr > CISTPL_MAX_CIS_SIZE) dev_dbg(&s->dev, "attempt to read CIS mem at addr %#x", addr); flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0); if (attr) { flags |= MAP_ATTRIB; inc++; addr *= 2; } card_offset = addr & ~(s->map_size-1); while (len) { sys = set_cis_map(s, card_offset, flags); if (!sys) { dev_dbg(&s->dev, "could not map memory\n"); memset(ptr, 0xff, len); return -1; } end = sys + s->map_size; sys = sys + (addr & (s->map_size-1)); for ( ; len > 0; len--, buf++, sys += inc) { if (sys == end) break; *buf = readb(sys); } card_offset += s->map_size; addr = 0; } } dev_dbg(&s->dev, " %#2.2x %#2.2x %#2.2x %#2.2x ...\n", *(u_char *)(ptr+0), *(u_char *)(ptr+1), *(u_char *)(ptr+2), *(u_char *)(ptr+3)); return 0; } /** * pcmcia_write_cis_mem() - low-level function to write CIS memory * * Probably only useful for writing one-byte registers. Must be called * with ops_mutex held. */ int pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr, u_int len, void *ptr) { void __iomem *sys, *end; unsigned char *buf = ptr; dev_dbg(&s->dev, "pcmcia_write_cis_mem(%d, %#x, %u)\n", attr, addr, len); if (attr & IS_INDIRECT) { /* Indirect accesses use a bunch of special registers at fixed locations in common memory */ u_char flags = ICTRL0_COMMON|ICTRL0_AUTOINC|ICTRL0_BYTEGRAN; if (attr & IS_ATTR) { addr *= 2; flags = ICTRL0_AUTOINC; } sys = set_cis_map(s, 0, MAP_ACTIVE | ((cis_width) ? 
MAP_16BIT : 0)); if (!sys) { dev_dbg(&s->dev, "could not map memory\n"); return -EINVAL; } writeb(flags, sys+CISREG_ICTRL0); writeb(addr & 0xff, sys+CISREG_IADDR0); writeb((addr>>8) & 0xff, sys+CISREG_IADDR1); writeb((addr>>16) & 0xff, sys+CISREG_IADDR2); writeb((addr>>24) & 0xff, sys+CISREG_IADDR3); for ( ; len > 0; len--, buf++) writeb(*buf, sys+CISREG_IDATA0); } else { u_int inc = 1, card_offset, flags; flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0); if (attr & IS_ATTR) { flags |= MAP_ATTRIB; inc++; addr *= 2; } card_offset = addr & ~(s->map_size-1); while (len) { sys = set_cis_map(s, card_offset, flags); if (!sys) { dev_dbg(&s->dev, "could not map memory\n"); return -EINVAL; } end = sys + s->map_size; sys = sys + (addr & (s->map_size-1)); for ( ; len > 0; len--, buf++, sys += inc) { if (sys == end) break; writeb(*buf, sys); } card_offset += s->map_size; addr = 0; } } return 0; } /** * read_cis_cache() - read CIS memory or its associated cache * * This is a wrapper around read_cis_mem, with the same interface, * but which caches information, for cards whose CIS may not be * readable all the time. 
*/ static int read_cis_cache(struct pcmcia_socket *s, int attr, u_int addr, size_t len, void *ptr) { struct cis_cache_entry *cis; int ret = 0; if (s->state & SOCKET_CARDBUS) return -EINVAL; mutex_lock(&s->ops_mutex); if (s->fake_cis) { if (s->fake_cis_len >= addr+len) memcpy(ptr, s->fake_cis+addr, len); else { memset(ptr, 0xff, len); ret = -EINVAL; } mutex_unlock(&s->ops_mutex); return ret; } list_for_each_entry(cis, &s->cis_cache, node) { if (cis->addr == addr && cis->len == len && cis->attr == attr) { memcpy(ptr, cis->cache, len); mutex_unlock(&s->ops_mutex); return 0; } } ret = pcmcia_read_cis_mem(s, attr, addr, len, ptr); if (ret == 0) { /* Copy data into the cache */ cis = kmalloc(sizeof(struct cis_cache_entry) + len, GFP_KERNEL); if (cis) { cis->addr = addr; cis->len = len; cis->attr = attr; memcpy(cis->cache, ptr, len); list_add(&cis->node, &s->cis_cache); } } mutex_unlock(&s->ops_mutex); return ret; } static void remove_cis_cache(struct pcmcia_socket *s, int attr, u_int addr, u_int len) { struct cis_cache_entry *cis; mutex_lock(&s->ops_mutex); list_for_each_entry(cis, &s->cis_cache, node) if (cis->addr == addr && cis->len == len && cis->attr == attr) { list_del(&cis->node); kfree(cis); break; } mutex_unlock(&s->ops_mutex); } /** * destroy_cis_cache() - destroy the CIS cache * @s: pcmcia_socket for which CIS cache shall be destroyed * * This destroys the CIS cache but keeps any fake CIS alive. Must be * called with ops_mutex held. */ void destroy_cis_cache(struct pcmcia_socket *s) { struct list_head *l, *n; struct cis_cache_entry *cis; list_for_each_safe(l, n, &s->cis_cache) { cis = list_entry(l, struct cis_cache_entry, node); list_del(&cis->node); kfree(cis); } } /** * verify_cis_cache() - does the CIS match what is in the CIS cache? 
*/ int verify_cis_cache(struct pcmcia_socket *s) { struct cis_cache_entry *cis; char *buf; int ret; if (s->state & SOCKET_CARDBUS) return -EINVAL; buf = kmalloc(256, GFP_KERNEL); if (buf == NULL) { dev_printk(KERN_WARNING, &s->dev, "no memory for verifying CIS\n"); return -ENOMEM; } mutex_lock(&s->ops_mutex); list_for_each_entry(cis, &s->cis_cache, node) { int len = cis->len; if (len > 256) len = 256; ret = pcmcia_read_cis_mem(s, cis->attr, cis->addr, len, buf); if (ret || memcmp(buf, cis->cache, len) != 0) { kfree(buf); mutex_unlock(&s->ops_mutex); return -1; } } kfree(buf); mutex_unlock(&s->ops_mutex); return 0; } /** * pcmcia_replace_cis() - use a replacement CIS instead of the card's CIS * * For really bad cards, we provide a facility for uploading a * replacement CIS. */ int pcmcia_replace_cis(struct pcmcia_socket *s, const u8 *data, const size_t len) { if (len > CISTPL_MAX_CIS_SIZE) { dev_printk(KERN_WARNING, &s->dev, "replacement CIS too big\n"); return -EINVAL; } mutex_lock(&s->ops_mutex); kfree(s->fake_cis); s->fake_cis = kmalloc(len, GFP_KERNEL); if (s->fake_cis == NULL) { dev_printk(KERN_WARNING, &s->dev, "no memory to replace CIS\n"); mutex_unlock(&s->ops_mutex); return -ENOMEM; } s->fake_cis_len = len; memcpy(s->fake_cis, data, len); dev_info(&s->dev, "Using replacement CIS\n"); mutex_unlock(&s->ops_mutex); return 0; } /* The high-level CIS tuple services */ typedef struct tuple_flags { u_int link_space:4; u_int has_link:1; u_int mfc_fn:3; u_int space:4; } tuple_flags; #define LINK_SPACE(f) (((tuple_flags *)(&(f)))->link_space) #define HAS_LINK(f) (((tuple_flags *)(&(f)))->has_link) #define MFC_FN(f) (((tuple_flags *)(&(f)))->mfc_fn) #define SPACE(f) (((tuple_flags *)(&(f)))->space) int pccard_get_first_tuple(struct pcmcia_socket *s, unsigned int function, tuple_t *tuple) { if (!s) return -EINVAL; if (!(s->state & SOCKET_PRESENT) || (s->state & SOCKET_CARDBUS)) return -ENODEV; tuple->TupleLink = tuple->Flags = 0; /* Assume presence of a LONGLINK_C to 
address 0 */ tuple->CISOffset = tuple->LinkOffset = 0; SPACE(tuple->Flags) = HAS_LINK(tuple->Flags) = 1; if ((s->functions > 1) && !(tuple->Attributes & TUPLE_RETURN_COMMON)) { cisdata_t req = tuple->DesiredTuple; tuple->DesiredTuple = CISTPL_LONGLINK_MFC; if (pccard_get_next_tuple(s, function, tuple) == 0) { tuple->DesiredTuple = CISTPL_LINKTARGET; if (pccard_get_next_tuple(s, function, tuple) != 0) return -ENOSPC; } else tuple->CISOffset = tuple->TupleLink = 0; tuple->DesiredTuple = req; } return pccard_get_next_tuple(s, function, tuple); } static int follow_link(struct pcmcia_socket *s, tuple_t *tuple) { u_char link[5]; u_int ofs; int ret; if (MFC_FN(tuple->Flags)) { /* Get indirect link from the MFC tuple */ ret = read_cis_cache(s, LINK_SPACE(tuple->Flags), tuple->LinkOffset, 5, link); if (ret) return -1; ofs = get_unaligned_le32(link + 1); SPACE(tuple->Flags) = (link[0] == CISTPL_MFC_ATTR); /* Move to the next indirect link */ tuple->LinkOffset += 5; MFC_FN(tuple->Flags)--; } else if (HAS_LINK(tuple->Flags)) { ofs = tuple->LinkOffset; SPACE(tuple->Flags) = LINK_SPACE(tuple->Flags); HAS_LINK(tuple->Flags) = 0; } else return -1; if (SPACE(tuple->Flags)) { /* This is ugly, but a common CIS error is to code the long link offset incorrectly, so we check the right spot... */ ret = read_cis_cache(s, SPACE(tuple->Flags), ofs, 5, link); if (ret) return -1; if ((link[0] == CISTPL_LINKTARGET) && (link[1] >= 3) && (strncmp(link+2, "CIS", 3) == 0)) return ofs; remove_cis_cache(s, SPACE(tuple->Flags), ofs, 5); /* Then, we try the wrong spot... 
*/ ofs = ofs >> 1; } ret = read_cis_cache(s, SPACE(tuple->Flags), ofs, 5, link); if (ret) return -1; if ((link[0] == CISTPL_LINKTARGET) && (link[1] >= 3) && (strncmp(link+2, "CIS", 3) == 0)) return ofs; remove_cis_cache(s, SPACE(tuple->Flags), ofs, 5); return -1; } int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, tuple_t *tuple) { u_char link[2], tmp; int ofs, i, attr; int ret; if (!s) return -EINVAL; if (!(s->state & SOCKET_PRESENT) || (s->state & SOCKET_CARDBUS)) return -ENODEV; link[1] = tuple->TupleLink; ofs = tuple->CISOffset + tuple->TupleLink; attr = SPACE(tuple->Flags); for (i = 0; i < MAX_TUPLES; i++) { if (link[1] == 0xff) link[0] = CISTPL_END; else { ret = read_cis_cache(s, attr, ofs, 2, link); if (ret) return -1; if (link[0] == CISTPL_NULL) { ofs++; continue; } } /* End of chain? Follow long link if possible */ if (link[0] == CISTPL_END) { ofs = follow_link(s, tuple); if (ofs < 0) return -ENOSPC; attr = SPACE(tuple->Flags); ret = read_cis_cache(s, attr, ofs, 2, link); if (ret) return -1; } /* Is this a link tuple? 
Make a note of it */ if ((link[0] == CISTPL_LONGLINK_A) || (link[0] == CISTPL_LONGLINK_C) || (link[0] == CISTPL_LONGLINK_MFC) || (link[0] == CISTPL_LINKTARGET) || (link[0] == CISTPL_INDIRECT) || (link[0] == CISTPL_NO_LINK)) { switch (link[0]) { case CISTPL_LONGLINK_A: HAS_LINK(tuple->Flags) = 1; LINK_SPACE(tuple->Flags) = attr | IS_ATTR; ret = read_cis_cache(s, attr, ofs+2, 4, &tuple->LinkOffset); if (ret) return -1; break; case CISTPL_LONGLINK_C: HAS_LINK(tuple->Flags) = 1; LINK_SPACE(tuple->Flags) = attr & ~IS_ATTR; ret = read_cis_cache(s, attr, ofs+2, 4, &tuple->LinkOffset); if (ret) return -1; break; case CISTPL_INDIRECT: HAS_LINK(tuple->Flags) = 1; LINK_SPACE(tuple->Flags) = IS_ATTR | IS_INDIRECT; tuple->LinkOffset = 0; break; case CISTPL_LONGLINK_MFC: tuple->LinkOffset = ofs + 3; LINK_SPACE(tuple->Flags) = attr; if (function == BIND_FN_ALL) { /* Follow all the MFC links */ ret = read_cis_cache(s, attr, ofs+2, 1, &tmp); if (ret) return -1; MFC_FN(tuple->Flags) = tmp; } else { /* Follow exactly one of the links */ MFC_FN(tuple->Flags) = 1; tuple->LinkOffset += function * 5; } break; case CISTPL_NO_LINK: HAS_LINK(tuple->Flags) = 0; break; } if ((tuple->Attributes & TUPLE_RETURN_LINK) && (tuple->DesiredTuple == RETURN_FIRST_TUPLE)) break; } else if (tuple->DesiredTuple == RETURN_FIRST_TUPLE) break; if (link[0] == tuple->DesiredTuple) break; ofs += link[1] + 2; } if (i == MAX_TUPLES) { dev_dbg(&s->dev, "cs: overrun in pcmcia_get_next_tuple\n"); return -ENOSPC; } tuple->TupleCode = link[0]; tuple->TupleLink = link[1]; tuple->CISOffset = ofs + 2; return 0; } int pccard_get_tuple_data(struct pcmcia_socket *s, tuple_t *tuple) { u_int len; int ret; if (!s) return -EINVAL; if (tuple->TupleLink < tuple->TupleOffset) return -ENOSPC; len = tuple->TupleLink - tuple->TupleOffset; tuple->TupleDataLen = tuple->TupleLink; if (len == 0) return 0; ret = read_cis_cache(s, SPACE(tuple->Flags), tuple->CISOffset + tuple->TupleOffset, min(len, (u_int) tuple->TupleDataMax), 
tuple->TupleData); if (ret) return -1; return 0; } /* Parsing routines for individual tuples */ static int parse_device(tuple_t *tuple, cistpl_device_t *device) { int i; u_char scale; u_char *p, *q; p = (u_char *)tuple->TupleData; q = p + tuple->TupleDataLen; device->ndev = 0; for (i = 0; i < CISTPL_MAX_DEVICES; i++) { if (*p == 0xff) break; device->dev[i].type = (*p >> 4); device->dev[i].wp = (*p & 0x08) ? 1 : 0; switch (*p & 0x07) { case 0: device->dev[i].speed = 0; break; case 1: device->dev[i].speed = 250; break; case 2: device->dev[i].speed = 200; break; case 3: device->dev[i].speed = 150; break; case 4: device->dev[i].speed = 100; break; case 7: if (++p == q) return -EINVAL; device->dev[i].speed = SPEED_CVT(*p); while (*p & 0x80) if (++p == q) return -EINVAL; break; default: return -EINVAL; } if (++p == q) return -EINVAL; if (*p == 0xff) break; scale = *p & 7; if (scale == 7) return -EINVAL; device->dev[i].size = ((*p >> 3) + 1) * (512 << (scale*2)); device->ndev++; if (++p == q) break; } return 0; } static int parse_checksum(tuple_t *tuple, cistpl_checksum_t *csum) { u_char *p; if (tuple->TupleDataLen < 5) return -EINVAL; p = (u_char *) tuple->TupleData; csum->addr = tuple->CISOffset + get_unaligned_le16(p) - 2; csum->len = get_unaligned_le16(p + 2); csum->sum = *(p + 4); return 0; } static int parse_longlink(tuple_t *tuple, cistpl_longlink_t *link) { if (tuple->TupleDataLen < 4) return -EINVAL; link->addr = get_unaligned_le32(tuple->TupleData); return 0; } static int parse_longlink_mfc(tuple_t *tuple, cistpl_longlink_mfc_t *link) { u_char *p; int i; p = (u_char *)tuple->TupleData; link->nfn = *p; p++; if (tuple->TupleDataLen <= link->nfn*5) return -EINVAL; for (i = 0; i < link->nfn; i++) { link->fn[i].space = *p; p++; link->fn[i].addr = get_unaligned_le32(p); p += 4; } return 0; } static int parse_strings(u_char *p, u_char *q, int max, char *s, u_char *ofs, u_char *found) { int i, j, ns; if (p == q) return -EINVAL; ns = 0; j = 0; for (i = 0; i < max; i++) { 
if (*p == 0xff) break; ofs[i] = j; ns++; for (;;) { s[j++] = (*p == 0xff) ? '\0' : *p; if ((*p == '\0') || (*p == 0xff)) break; if (++p == q) return -EINVAL; } if ((*p == 0xff) || (++p == q)) break; } if (found) { *found = ns; return 0; } return (ns == max) ? 0 : -EINVAL; } static int parse_vers_1(tuple_t *tuple, cistpl_vers_1_t *vers_1) { u_char *p, *q; p = (u_char *)tuple->TupleData; q = p + tuple->TupleDataLen; vers_1->major = *p; p++; vers_1->minor = *p; p++; if (p >= q) return -EINVAL; return parse_strings(p, q, CISTPL_VERS_1_MAX_PROD_STRINGS, vers_1->str, vers_1->ofs, &vers_1->ns); } static int parse_altstr(tuple_t *tuple, cistpl_altstr_t *altstr) { u_char *p, *q; p = (u_char *)tuple->TupleData; q = p + tuple->TupleDataLen; return parse_strings(p, q, CISTPL_MAX_ALTSTR_STRINGS, altstr->str, altstr->ofs, &altstr->ns); } static int parse_jedec(tuple_t *tuple, cistpl_jedec_t *jedec) { u_char *p, *q; int nid; p = (u_char *)tuple->TupleData; q = p + tuple->TupleDataLen; for (nid = 0; nid < CISTPL_MAX_DEVICES; nid++) { if (p > q-2) break; jedec->id[nid].mfr = p[0]; jedec->id[nid].info = p[1]; p += 2; } jedec->nid = nid; return 0; } static int parse_manfid(tuple_t *tuple, cistpl_manfid_t *m) { if (tuple->TupleDataLen < 4) return -EINVAL; m->manf = get_unaligned_le16(tuple->TupleData); m->card = get_unaligned_le16(tuple->TupleData + 2); return 0; } static int parse_funcid(tuple_t *tuple, cistpl_funcid_t *f) { u_char *p; if (tuple->TupleDataLen < 2) return -EINVAL; p = (u_char *)tuple->TupleData; f->func = p[0]; f->sysinit = p[1]; return 0; } static int parse_funce(tuple_t *tuple, cistpl_funce_t *f) { u_char *p; int i; if (tuple->TupleDataLen < 1) return -EINVAL; p = (u_char *)tuple->TupleData; f->type = p[0]; for (i = 1; i < tuple->TupleDataLen; i++) f->data[i-1] = p[i]; return 0; } static int parse_config(tuple_t *tuple, cistpl_config_t *config) { int rasz, rmsz, i; u_char *p; p = (u_char *)tuple->TupleData; rasz = *p & 0x03; rmsz = (*p & 0x3c) >> 2; if 
(tuple->TupleDataLen < rasz+rmsz+4) return -EINVAL; config->last_idx = *(++p); p++; config->base = 0; for (i = 0; i <= rasz; i++) config->base += p[i] << (8*i); p += rasz+1; for (i = 0; i < 4; i++) config->rmask[i] = 0; for (i = 0; i <= rmsz; i++) config->rmask[i>>2] += p[i] << (8*(i%4)); config->subtuples = tuple->TupleDataLen - (rasz+rmsz+4); return 0; } /* The following routines are all used to parse the nightmarish * config table entries. */ static u_char *parse_power(u_char *p, u_char *q, cistpl_power_t *pwr) { int i; u_int scale; if (p == q) return NULL; pwr->present = *p; pwr->flags = 0; p++; for (i = 0; i < 7; i++) if (pwr->present & (1<<i)) { if (p == q) return NULL; pwr->param[i] = POWER_CVT(*p); scale = POWER_SCALE(*p); while (*p & 0x80) { if (++p == q) return NULL; if ((*p & 0x7f) < 100) pwr->param[i] += (*p & 0x7f) * scale / 100; else if (*p == 0x7d) pwr->flags |= CISTPL_POWER_HIGHZ_OK; else if (*p == 0x7e) pwr->param[i] = 0; else if (*p == 0x7f) pwr->flags |= CISTPL_POWER_HIGHZ_REQ; else return NULL; } p++; } return p; } static u_char *parse_timing(u_char *p, u_char *q, cistpl_timing_t *timing) { u_char scale; if (p == q) return NULL; scale = *p; if ((scale & 3) != 3) { if (++p == q) return NULL; timing->wait = SPEED_CVT(*p); timing->waitscale = exponent[scale & 3]; } else timing->wait = 0; scale >>= 2; if ((scale & 7) != 7) { if (++p == q) return NULL; timing->ready = SPEED_CVT(*p); timing->rdyscale = exponent[scale & 7]; } else timing->ready = 0; scale >>= 3; if (scale != 7) { if (++p == q) return NULL; timing->reserved = SPEED_CVT(*p); timing->rsvscale = exponent[scale]; } else timing->reserved = 0; p++; return p; } static u_char *parse_io(u_char *p, u_char *q, cistpl_io_t *io) { int i, j, bsz, lsz; if (p == q) return NULL; io->flags = *p; if (!(*p & 0x80)) { io->nwin = 1; io->win[0].base = 0; io->win[0].len = (1 << (io->flags & CISTPL_IO_LINES_MASK)); return p+1; } if (++p == q) return NULL; io->nwin = (*p & 0x0f) + 1; bsz = (*p & 0x30) >> 4; if 
(bsz == 3) bsz++; lsz = (*p & 0xc0) >> 6; if (lsz == 3) lsz++; p++; for (i = 0; i < io->nwin; i++) { io->win[i].base = 0; io->win[i].len = 1; for (j = 0; j < bsz; j++, p++) { if (p == q) return NULL; io->win[i].base += *p << (j*8); } for (j = 0; j < lsz; j++, p++) { if (p == q) return NULL; io->win[i].len += *p << (j*8); } } return p; } static u_char *parse_mem(u_char *p, u_char *q, cistpl_mem_t *mem) { int i, j, asz, lsz, has_ha; u_int len, ca, ha; if (p == q) return NULL; mem->nwin = (*p & 0x07) + 1; lsz = (*p & 0x18) >> 3; asz = (*p & 0x60) >> 5; has_ha = (*p & 0x80); if (++p == q) return NULL; for (i = 0; i < mem->nwin; i++) { len = ca = ha = 0; for (j = 0; j < lsz; j++, p++) { if (p == q) return NULL; len += *p << (j*8); } for (j = 0; j < asz; j++, p++) { if (p == q) return NULL; ca += *p << (j*8); } if (has_ha) for (j = 0; j < asz; j++, p++) { if (p == q) return NULL; ha += *p << (j*8); } mem->win[i].len = len << 8; mem->win[i].card_addr = ca << 8; mem->win[i].host_addr = ha << 8; } return p; } static u_char *parse_irq(u_char *p, u_char *q, cistpl_irq_t *irq) { if (p == q) return NULL; irq->IRQInfo1 = *p; p++; if (irq->IRQInfo1 & IRQ_INFO2_VALID) { if (p+2 > q) return NULL; irq->IRQInfo2 = (p[1]<<8) + p[0]; p += 2; } return p; } static int parse_cftable_entry(tuple_t *tuple, cistpl_cftable_entry_t *entry) { u_char *p, *q, features; p = tuple->TupleData; q = p + tuple->TupleDataLen; entry->index = *p & 0x3f; entry->flags = 0; if (*p & 0x40) entry->flags |= CISTPL_CFTABLE_DEFAULT; if (*p & 0x80) { if (++p == q) return -EINVAL; if (*p & 0x10) entry->flags |= CISTPL_CFTABLE_BVDS; if (*p & 0x20) entry->flags |= CISTPL_CFTABLE_WP; if (*p & 0x40) entry->flags |= CISTPL_CFTABLE_RDYBSY; if (*p & 0x80) entry->flags |= CISTPL_CFTABLE_MWAIT; entry->interface = *p & 0x0f; } else entry->interface = 0; /* Process optional features */ if (++p == q) return -EINVAL; features = *p; p++; /* Power options */ if ((features & 3) > 0) { p = parse_power(p, q, &entry->vcc); if (p == 
NULL) return -EINVAL; } else entry->vcc.present = 0; if ((features & 3) > 1) { p = parse_power(p, q, &entry->vpp1); if (p == NULL) return -EINVAL; } else entry->vpp1.present = 0; if ((features & 3) > 2) { p = parse_power(p, q, &entry->vpp2); if (p == NULL) return -EINVAL; } else entry->vpp2.present = 0; /* Timing options */ if (features & 0x04) { p = parse_timing(p, q, &entry->timing); if (p == NULL) return -EINVAL; } else { entry->timing.wait = 0; entry->timing.ready = 0; entry->timing.reserved = 0; } /* I/O window options */ if (features & 0x08) { p = parse_io(p, q, &entry->io); if (p == NULL) return -EINVAL; } else entry->io.nwin = 0; /* Interrupt options */ if (features & 0x10) { p = parse_irq(p, q, &entry->irq); if (p == NULL) return -EINVAL; } else entry->irq.IRQInfo1 = 0; switch (features & 0x60) { case 0x00: entry->mem.nwin = 0; break; case 0x20: entry->mem.nwin = 1; entry->mem.win[0].len = get_unaligned_le16(p) << 8; entry->mem.win[0].card_addr = 0; entry->mem.win[0].host_addr = 0; p += 2; if (p > q) return -EINVAL; break; case 0x40: entry->mem.nwin = 1; entry->mem.win[0].len = get_unaligned_le16(p) << 8; entry->mem.win[0].card_addr = get_unaligned_le16(p + 2) << 8; entry->mem.win[0].host_addr = 0; p += 4; if (p > q) return -EINVAL; break; case 0x60: p = parse_mem(p, q, &entry->mem); if (p == NULL) return -EINVAL; break; } /* Misc features */ if (features & 0x80) { if (p == q) return -EINVAL; entry->flags |= (*p << 8); while (*p & 0x80) if (++p == q) return -EINVAL; p++; } entry->subtuples = q-p; return 0; } static int parse_device_geo(tuple_t *tuple, cistpl_device_geo_t *geo) { u_char *p, *q; int n; p = (u_char *)tuple->TupleData; q = p + tuple->TupleDataLen; for (n = 0; n < CISTPL_MAX_DEVICES; n++) { if (p > q-6) break; geo->geo[n].buswidth = p[0]; geo->geo[n].erase_block = 1 << (p[1]-1); geo->geo[n].read_block = 1 << (p[2]-1); geo->geo[n].write_block = 1 << (p[3]-1); geo->geo[n].partition = 1 << (p[4]-1); geo->geo[n].interleave = 1 << (p[5]-1); p += 6; 
} geo->ngeo = n; return 0; } static int parse_vers_2(tuple_t *tuple, cistpl_vers_2_t *v2) { u_char *p, *q; if (tuple->TupleDataLen < 10) return -EINVAL; p = tuple->TupleData; q = p + tuple->TupleDataLen; v2->vers = p[0]; v2->comply = p[1]; v2->dindex = get_unaligned_le16(p + 2); v2->vspec8 = p[6]; v2->vspec9 = p[7]; v2->nhdr = p[8]; p += 9; return parse_strings(p, q, 2, v2->str, &v2->vendor, NULL); } static int parse_org(tuple_t *tuple, cistpl_org_t *org) { u_char *p, *q; int i; p = tuple->TupleData; q = p + tuple->TupleDataLen; if (p == q) return -EINVAL; org->data_org = *p; if (++p == q) return -EINVAL; for (i = 0; i < 30; i++) { org->desc[i] = *p; if (*p == '\0') break; if (++p == q) return -EINVAL; } return 0; } static int parse_format(tuple_t *tuple, cistpl_format_t *fmt) { u_char *p; if (tuple->TupleDataLen < 10) return -EINVAL; p = tuple->TupleData; fmt->type = p[0]; fmt->edc = p[1]; fmt->offset = get_unaligned_le32(p + 2); fmt->length = get_unaligned_le32(p + 6); return 0; } int pcmcia_parse_tuple(tuple_t *tuple, cisparse_t *parse) { int ret = 0; if (tuple->TupleDataLen > tuple->TupleDataMax) return -EINVAL; switch (tuple->TupleCode) { case CISTPL_DEVICE: case CISTPL_DEVICE_A: ret = parse_device(tuple, &parse->device); break; case CISTPL_CHECKSUM: ret = parse_checksum(tuple, &parse->checksum); break; case CISTPL_LONGLINK_A: case CISTPL_LONGLINK_C: ret = parse_longlink(tuple, &parse->longlink); break; case CISTPL_LONGLINK_MFC: ret = parse_longlink_mfc(tuple, &parse->longlink_mfc); break; case CISTPL_VERS_1: ret = parse_vers_1(tuple, &parse->version_1); break; case CISTPL_ALTSTR: ret = parse_altstr(tuple, &parse->altstr); break; case CISTPL_JEDEC_A: case CISTPL_JEDEC_C: ret = parse_jedec(tuple, &parse->jedec); break; case CISTPL_MANFID: ret = parse_manfid(tuple, &parse->manfid); break; case CISTPL_FUNCID: ret = parse_funcid(tuple, &parse->funcid); break; case CISTPL_FUNCE: ret = parse_funce(tuple, &parse->funce); break; case CISTPL_CONFIG: ret = 
parse_config(tuple, &parse->config); break; case CISTPL_CFTABLE_ENTRY: ret = parse_cftable_entry(tuple, &parse->cftable_entry); break; case CISTPL_DEVICE_GEO: case CISTPL_DEVICE_GEO_A: ret = parse_device_geo(tuple, &parse->device_geo); break; case CISTPL_VERS_2: ret = parse_vers_2(tuple, &parse->vers_2); break; case CISTPL_ORG: ret = parse_org(tuple, &parse->org); break; case CISTPL_FORMAT: case CISTPL_FORMAT_A: ret = parse_format(tuple, &parse->format); break; case CISTPL_NO_LINK: case CISTPL_LINKTARGET: ret = 0; break; default: ret = -EINVAL; break; } if (ret) pr_debug("parse_tuple failed %d\n", ret); return ret; } EXPORT_SYMBOL(pcmcia_parse_tuple); /** * pccard_validate_cis() - check whether card has a sensible CIS * @s: the struct pcmcia_socket we are to check * @info: returns the number of tuples in the (valid) CIS, or 0 * * This tries to determine if a card has a sensible CIS. In @info, it * returns the number of tuples in the CIS, or 0 if the CIS looks bad. The * checks include making sure several critical tuples are present and * valid; seeing if the total number of tuples is reasonable; and * looking for tuples that use reserved codes. * * The function returns 0 on success. */ int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *info) { tuple_t *tuple; cisparse_t *p; unsigned int count = 0; int ret, reserved, dev_ok = 0, ident_ok = 0; if (!s) return -EINVAL; if (s->functions) { WARN_ON(1); return -EINVAL; } /* We do not want to validate the CIS cache... 
*/ mutex_lock(&s->ops_mutex); destroy_cis_cache(s); mutex_unlock(&s->ops_mutex); tuple = kmalloc(sizeof(*tuple), GFP_KERNEL); if (tuple == NULL) { dev_warn(&s->dev, "no memory to validate CIS\n"); return -ENOMEM; } p = kmalloc(sizeof(*p), GFP_KERNEL); if (p == NULL) { kfree(tuple); dev_warn(&s->dev, "no memory to validate CIS\n"); return -ENOMEM; } count = reserved = 0; tuple->DesiredTuple = RETURN_FIRST_TUPLE; tuple->Attributes = TUPLE_RETURN_COMMON; ret = pccard_get_first_tuple(s, BIND_FN_ALL, tuple); if (ret != 0) goto done; /* First tuple should be DEVICE; we should really have either that or a CFTABLE_ENTRY of some sort */ if ((tuple->TupleCode == CISTPL_DEVICE) || (!pccard_read_tuple(s, BIND_FN_ALL, CISTPL_CFTABLE_ENTRY, p)) || (!pccard_read_tuple(s, BIND_FN_ALL, CISTPL_CFTABLE_ENTRY_CB, p))) dev_ok++; /* All cards should have a MANFID tuple, and/or a VERS_1 or VERS_2 tuple, for card identification. Certain old D-Link and Linksys cards have only a broken VERS_2 tuple; hence the bogus test. 
*/ if ((pccard_read_tuple(s, BIND_FN_ALL, CISTPL_MANFID, p) == 0) || (pccard_read_tuple(s, BIND_FN_ALL, CISTPL_VERS_1, p) == 0) || (pccard_read_tuple(s, BIND_FN_ALL, CISTPL_VERS_2, p) != -ENOSPC)) ident_ok++; if (!dev_ok && !ident_ok) goto done; for (count = 1; count < MAX_TUPLES; count++) { ret = pccard_get_next_tuple(s, BIND_FN_ALL, tuple); if (ret != 0) break; if (((tuple->TupleCode > 0x23) && (tuple->TupleCode < 0x40)) || ((tuple->TupleCode > 0x47) && (tuple->TupleCode < 0x80)) || ((tuple->TupleCode > 0x90) && (tuple->TupleCode < 0xff))) reserved++; } if ((count == MAX_TUPLES) || (reserved > 5) || ((!dev_ok || !ident_ok) && (count > 10))) count = 0; ret = 0; done: /* invalidate CIS cache on failure */ if (!dev_ok || !ident_ok || !count) { mutex_lock(&s->ops_mutex); destroy_cis_cache(s); mutex_unlock(&s->ops_mutex); ret = -EIO; } if (info) *info = count; kfree(tuple); kfree(p); return ret; } #define to_socket(_dev) container_of(_dev, struct pcmcia_socket, dev) static ssize_t pccard_extract_cis(struct pcmcia_socket *s, char *buf, loff_t off, size_t count) { tuple_t tuple; int status, i; loff_t pointer = 0; ssize_t ret = 0; u_char *tuplebuffer; u_char *tempbuffer; tuplebuffer = kmalloc(sizeof(u_char) * 256, GFP_KERNEL); if (!tuplebuffer) return -ENOMEM; tempbuffer = kmalloc(sizeof(u_char) * 258, GFP_KERNEL); if (!tempbuffer) { ret = -ENOMEM; goto free_tuple; } memset(&tuple, 0, sizeof(tuple_t)); tuple.Attributes = TUPLE_RETURN_LINK | TUPLE_RETURN_COMMON; tuple.DesiredTuple = RETURN_FIRST_TUPLE; tuple.TupleOffset = 0; status = pccard_get_first_tuple(s, BIND_FN_ALL, &tuple); while (!status) { tuple.TupleData = tuplebuffer; tuple.TupleDataMax = 255; memset(tuplebuffer, 0, sizeof(u_char) * 255); status = pccard_get_tuple_data(s, &tuple); if (status) break; if (off < (pointer + 2 + tuple.TupleDataLen)) { tempbuffer[0] = tuple.TupleCode & 0xff; tempbuffer[1] = tuple.TupleLink & 0xff; for (i = 0; i < tuple.TupleDataLen; i++) tempbuffer[i + 2] = tuplebuffer[i] & 0xff; for 
(i = 0; i < (2 + tuple.TupleDataLen); i++) { if (((i + pointer) >= off) && (i + pointer) < (off + count)) { buf[ret] = tempbuffer[i]; ret++; } } } pointer += 2 + tuple.TupleDataLen; if (pointer >= (off + count)) break; if (tuple.TupleCode == CISTPL_END) break; status = pccard_get_next_tuple(s, BIND_FN_ALL, &tuple); } kfree(tempbuffer); free_tuple: kfree(tuplebuffer); return ret; } static ssize_t pccard_show_cis(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { unsigned int size = 0x200; if (off >= size) count = 0; else { struct pcmcia_socket *s; unsigned int chains = 1; if (off + count > size) count = size - off; s = to_socket(container_of(kobj, struct device, kobj)); if (!(s->state & SOCKET_PRESENT)) return -ENODEV; if (!s->functions && pccard_validate_cis(s, &chains)) return -EIO; if (!chains) return -ENODATA; count = pccard_extract_cis(s, buf, off, count); } return count; } static ssize_t pccard_store_cis(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct pcmcia_socket *s; int error; s = to_socket(container_of(kobj, struct device, kobj)); if (off) return -EINVAL; if (count >= CISTPL_MAX_CIS_SIZE) return -EINVAL; if (!(s->state & SOCKET_PRESENT)) return -ENODEV; error = pcmcia_replace_cis(s, buf, count); if (error) return -EIO; pcmcia_parse_uevents(s, PCMCIA_UEVENT_REQUERY); return count; } struct bin_attribute pccard_cis_attr = { .attr = { .name = "cis", .mode = S_IRUGO | S_IWUSR }, .size = 0x200, .read = pccard_show_cis, .write = pccard_store_cis, };
gpl-2.0
felipito/linux-stable
block/blk-mq-sysfs.c
140
10731
#include <linux/kernel.h> #include <linux/module.h> #include <linux/backing-dev.h> #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/smp.h> #include <linux/blk-mq.h> #include "blk-mq.h" #include "blk-mq-tag.h" static void blk_mq_sysfs_release(struct kobject *kobj) { } struct blk_mq_ctx_sysfs_entry { struct attribute attr; ssize_t (*show)(struct blk_mq_ctx *, char *); ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t); }; struct blk_mq_hw_ctx_sysfs_entry { struct attribute attr; ssize_t (*show)(struct blk_mq_hw_ctx *, char *); ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t); }; static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr, char *page) { struct blk_mq_ctx_sysfs_entry *entry; struct blk_mq_ctx *ctx; struct request_queue *q; ssize_t res; entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr); ctx = container_of(kobj, struct blk_mq_ctx, kobj); q = ctx->queue; if (!entry->show) return -EIO; res = -ENOENT; mutex_lock(&q->sysfs_lock); if (!blk_queue_dying(q)) res = entry->show(ctx, page); mutex_unlock(&q->sysfs_lock); return res; } static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr, const char *page, size_t length) { struct blk_mq_ctx_sysfs_entry *entry; struct blk_mq_ctx *ctx; struct request_queue *q; ssize_t res; entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr); ctx = container_of(kobj, struct blk_mq_ctx, kobj); q = ctx->queue; if (!entry->store) return -EIO; res = -ENOENT; mutex_lock(&q->sysfs_lock); if (!blk_queue_dying(q)) res = entry->store(ctx, page, length); mutex_unlock(&q->sysfs_lock); return res; } static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj, struct attribute *attr, char *page) { struct blk_mq_hw_ctx_sysfs_entry *entry; struct blk_mq_hw_ctx *hctx; struct request_queue *q; ssize_t res; entry = container_of(attr, struct 
blk_mq_hw_ctx_sysfs_entry, attr); hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); q = hctx->queue; if (!entry->show) return -EIO; res = -ENOENT; mutex_lock(&q->sysfs_lock); if (!blk_queue_dying(q)) res = entry->show(hctx, page); mutex_unlock(&q->sysfs_lock); return res; } static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj, struct attribute *attr, const char *page, size_t length) { struct blk_mq_hw_ctx_sysfs_entry *entry; struct blk_mq_hw_ctx *hctx; struct request_queue *q; ssize_t res; entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr); hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); q = hctx->queue; if (!entry->store) return -EIO; res = -ENOENT; mutex_lock(&q->sysfs_lock); if (!blk_queue_dying(q)) res = entry->store(hctx, page, length); mutex_unlock(&q->sysfs_lock); return res; } static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page) { return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]); } static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page) { return sprintf(page, "%lu\n", ctx->rq_merged); } static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page) { return sprintf(page, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]); } static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg) { char *start_page = page; struct request *rq; page += sprintf(page, "%s:\n", msg); list_for_each_entry(rq, list, queuelist) page += sprintf(page, "\t%p\n", rq); return page - start_page; } static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page) { ssize_t ret; spin_lock(&ctx->lock); ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending"); spin_unlock(&ctx->lock); return ret; } static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx, char *page) { return sprintf(page, "%lu\n", hctx->queued); } static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page) { return sprintf(page, 
"%lu\n", hctx->run); } static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx, char *page) { char *start_page = page; int i; page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]); for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) { unsigned long d = 1U << (i - 1); page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]); } return page - start_page; } static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx, char *page) { ssize_t ret; spin_lock(&hctx->lock); ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending"); spin_unlock(&hctx->lock); return ret; } static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page) { return blk_mq_tag_sysfs_show(hctx->tags, page); } static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page) { return sprintf(page, "%u\n", atomic_read(&hctx->nr_active)); } static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) { unsigned int i, first = 1; ssize_t ret = 0; blk_mq_disable_hotplug(); for_each_cpu(i, hctx->cpumask) { if (first) ret += sprintf(ret + page, "%u", i); else ret += sprintf(ret + page, ", %u", i); first = 0; } blk_mq_enable_hotplug(); ret += sprintf(ret + page, "\n"); return ret; } static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = { .attr = {.name = "dispatched", .mode = S_IRUGO }, .show = blk_mq_sysfs_dispatched_show, }; static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = { .attr = {.name = "merged", .mode = S_IRUGO }, .show = blk_mq_sysfs_merged_show, }; static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = { .attr = {.name = "completed", .mode = S_IRUGO }, .show = blk_mq_sysfs_completed_show, }; static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = { .attr = {.name = "rq_list", .mode = S_IRUGO }, .show = blk_mq_sysfs_rq_list_show, }; static struct attribute *default_ctx_attrs[] = { &blk_mq_sysfs_dispatched.attr, &blk_mq_sysfs_merged.attr, &blk_mq_sysfs_completed.attr, 
&blk_mq_sysfs_rq_list.attr, NULL, }; static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = { .attr = {.name = "queued", .mode = S_IRUGO }, .show = blk_mq_hw_sysfs_queued_show, }; static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = { .attr = {.name = "run", .mode = S_IRUGO }, .show = blk_mq_hw_sysfs_run_show, }; static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = { .attr = {.name = "dispatched", .mode = S_IRUGO }, .show = blk_mq_hw_sysfs_dispatched_show, }; static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = { .attr = {.name = "active", .mode = S_IRUGO }, .show = blk_mq_hw_sysfs_active_show, }; static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = { .attr = {.name = "pending", .mode = S_IRUGO }, .show = blk_mq_hw_sysfs_rq_list_show, }; static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = { .attr = {.name = "tags", .mode = S_IRUGO }, .show = blk_mq_hw_sysfs_tags_show, }; static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = { .attr = {.name = "cpu_list", .mode = S_IRUGO }, .show = blk_mq_hw_sysfs_cpus_show, }; static struct attribute *default_hw_ctx_attrs[] = { &blk_mq_hw_sysfs_queued.attr, &blk_mq_hw_sysfs_run.attr, &blk_mq_hw_sysfs_dispatched.attr, &blk_mq_hw_sysfs_pending.attr, &blk_mq_hw_sysfs_tags.attr, &blk_mq_hw_sysfs_cpus.attr, &blk_mq_hw_sysfs_active.attr, NULL, }; static const struct sysfs_ops blk_mq_sysfs_ops = { .show = blk_mq_sysfs_show, .store = blk_mq_sysfs_store, }; static const struct sysfs_ops blk_mq_hw_sysfs_ops = { .show = blk_mq_hw_sysfs_show, .store = blk_mq_hw_sysfs_store, }; static struct kobj_type blk_mq_ktype = { .sysfs_ops = &blk_mq_sysfs_ops, .release = blk_mq_sysfs_release, }; static struct kobj_type blk_mq_ctx_ktype = { .sysfs_ops = &blk_mq_sysfs_ops, .default_attrs = default_ctx_attrs, .release = blk_mq_sysfs_release, }; static struct kobj_type blk_mq_hw_ktype = { .sysfs_ops = &blk_mq_hw_sysfs_ops, .default_attrs = default_hw_ctx_attrs, .release = 
blk_mq_sysfs_release, }; static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) { struct blk_mq_ctx *ctx; int i; if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) return; hctx_for_each_ctx(hctx, ctx, i) kobject_del(&ctx->kobj); kobject_del(&hctx->kobj); } static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) { struct request_queue *q = hctx->queue; struct blk_mq_ctx *ctx; int i, ret; if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) return 0; ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num); if (ret) return ret; hctx_for_each_ctx(hctx, ctx, i) { ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu); if (ret) break; } return ret; } void blk_mq_unregister_disk(struct gendisk *disk) { struct request_queue *q = disk->queue; struct blk_mq_hw_ctx *hctx; struct blk_mq_ctx *ctx; int i, j; queue_for_each_hw_ctx(q, hctx, i) { blk_mq_unregister_hctx(hctx); hctx_for_each_ctx(hctx, ctx, j) kobject_put(&ctx->kobj); kobject_put(&hctx->kobj); } kobject_uevent(&q->mq_kobj, KOBJ_REMOVE); kobject_del(&q->mq_kobj); kobject_put(&q->mq_kobj); kobject_put(&disk_to_dev(disk)->kobj); } static void blk_mq_sysfs_init(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; struct blk_mq_ctx *ctx; int i, j; kobject_init(&q->mq_kobj, &blk_mq_ktype); queue_for_each_hw_ctx(q, hctx, i) { kobject_init(&hctx->kobj, &blk_mq_hw_ktype); hctx_for_each_ctx(hctx, ctx, j) kobject_init(&ctx->kobj, &blk_mq_ctx_ktype); } } int blk_mq_register_disk(struct gendisk *disk) { struct device *dev = disk_to_dev(disk); struct request_queue *q = disk->queue; struct blk_mq_hw_ctx *hctx; int ret, i; blk_mq_sysfs_init(q); ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); if (ret < 0) return ret; kobject_uevent(&q->mq_kobj, KOBJ_ADD); queue_for_each_hw_ctx(q, hctx, i) { hctx->flags |= BLK_MQ_F_SYSFS_UP; ret = blk_mq_register_hctx(hctx); if (ret) break; } if (ret) { blk_mq_unregister_disk(disk); return ret; } return 0; } void 
blk_mq_sysfs_unregister(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; int i; queue_for_each_hw_ctx(q, hctx, i) blk_mq_unregister_hctx(hctx); } int blk_mq_sysfs_register(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; int i, ret = 0; queue_for_each_hw_ctx(q, hctx, i) { ret = blk_mq_register_hctx(hctx); if (ret) break; } return ret; }
gpl-2.0
NKMSKV/kernel-salsa
drivers/rtc/rtc-mv.c
140
4154
/* * Driver for the RTC in Marvell SoCs. * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/rtc.h> #include <linux/bcd.h> #include <linux/io.h> #include <linux/platform_device.h> #define RTC_TIME_REG_OFFS 0 #define RTC_SECONDS_OFFS 0 #define RTC_MINUTES_OFFS 8 #define RTC_HOURS_OFFS 16 #define RTC_WDAY_OFFS 24 #define RTC_HOURS_12H_MODE (1 << 22) /* 12 hours mode */ #define RTC_DATE_REG_OFFS 4 #define RTC_MDAY_OFFS 0 #define RTC_MONTH_OFFS 8 #define RTC_YEAR_OFFS 16 struct rtc_plat_data { struct rtc_device *rtc; void __iomem *ioaddr; }; static int mv_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct rtc_plat_data *pdata = dev_get_drvdata(dev); void __iomem *ioaddr = pdata->ioaddr; u32 rtc_reg; rtc_reg = (bin2bcd(tm->tm_sec) << RTC_SECONDS_OFFS) | (bin2bcd(tm->tm_min) << RTC_MINUTES_OFFS) | (bin2bcd(tm->tm_hour) << RTC_HOURS_OFFS) | (bin2bcd(tm->tm_wday) << RTC_WDAY_OFFS); writel(rtc_reg, ioaddr + RTC_TIME_REG_OFFS); rtc_reg = (bin2bcd(tm->tm_mday) << RTC_MDAY_OFFS) | (bin2bcd(tm->tm_mon + 1) << RTC_MONTH_OFFS) | (bin2bcd(tm->tm_year % 100) << RTC_YEAR_OFFS); writel(rtc_reg, ioaddr + RTC_DATE_REG_OFFS); return 0; } static int mv_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct rtc_plat_data *pdata = dev_get_drvdata(dev); void __iomem *ioaddr = pdata->ioaddr; u32 rtc_time, rtc_date; unsigned int year, month, day, hour, minute, second, wday; rtc_time = readl(ioaddr + RTC_TIME_REG_OFFS); rtc_date = readl(ioaddr + RTC_DATE_REG_OFFS); second = rtc_time & 0x7f; minute = (rtc_time >> RTC_MINUTES_OFFS) & 0x7f; hour = (rtc_time >> RTC_HOURS_OFFS) & 0x3f; /* assume 24 hours mode */ wday = (rtc_time >> RTC_WDAY_OFFS) & 0x7; day = rtc_date & 0x3f; month = (rtc_date >> RTC_MONTH_OFFS) & 0x3f; year = (rtc_date >> RTC_YEAR_OFFS) & 0xff; tm->tm_sec = 
bcd2bin(second); tm->tm_min = bcd2bin(minute); tm->tm_hour = bcd2bin(hour); tm->tm_mday = bcd2bin(day); tm->tm_wday = bcd2bin(wday); tm->tm_mon = bcd2bin(month) - 1; /* hw counts from year 2000, but tm_year is relative to 1900 */ tm->tm_year = bcd2bin(year) + 100; return rtc_valid_tm(tm); } static const struct rtc_class_ops mv_rtc_ops = { .read_time = mv_rtc_read_time, .set_time = mv_rtc_set_time, }; static int __init mv_rtc_probe(struct platform_device *pdev) { struct resource *res; struct rtc_plat_data *pdata; resource_size_t size; u32 rtc_time; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; size = resource_size(res); if (!devm_request_mem_region(&pdev->dev, res->start, size, pdev->name)) return -EBUSY; pdata->ioaddr = devm_ioremap(&pdev->dev, res->start, size); if (!pdata->ioaddr) return -ENOMEM; /* make sure the 24 hours mode is enabled */ rtc_time = readl(pdata->ioaddr + RTC_TIME_REG_OFFS); if (rtc_time & RTC_HOURS_12H_MODE) { dev_err(&pdev->dev, "24 Hours mode not supported.\n"); return -EINVAL; } platform_set_drvdata(pdev, pdata); pdata->rtc = rtc_device_register(pdev->name, &pdev->dev, &mv_rtc_ops, THIS_MODULE); if (IS_ERR(pdata->rtc)) return PTR_ERR(pdata->rtc); return 0; } static int __exit mv_rtc_remove(struct platform_device *pdev) { struct rtc_plat_data *pdata = platform_get_drvdata(pdev); rtc_device_unregister(pdata->rtc); return 0; } static struct platform_driver mv_rtc_driver = { .remove = __exit_p(mv_rtc_remove), .driver = { .name = "rtc-mv", .owner = THIS_MODULE, }, }; static __init int mv_init(void) { return platform_driver_probe(&mv_rtc_driver, mv_rtc_probe); } static __exit void mv_exit(void) { platform_driver_unregister(&mv_rtc_driver); } module_init(mv_init); module_exit(mv_exit); MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>"); MODULE_DESCRIPTION("Marvell RTC driver"); MODULE_LICENSE("GPL"); 
MODULE_ALIAS("platform:rtc-mv");
gpl-2.0
theplaymate/ferra
drivers/acpi/acpica/nssearch.c
140
13965
/******************************************************************************* * * Module Name: nssearch - Namespace search * ******************************************************************************/ /* * Copyright (C) 2000 - 2008, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #define _COMPONENT ACPI_NAMESPACE ACPI_MODULE_NAME("nssearch") /* Local prototypes */ static acpi_status acpi_ns_search_parent_tree(u32 target_name, struct acpi_namespace_node *node, acpi_object_type type, struct acpi_namespace_node **return_node); /******************************************************************************* * * FUNCTION: acpi_ns_search_one_scope * * PARAMETERS: target_name - Ascii ACPI name to search for * parent_node - Starting node where search will begin * Type - Object type to match * return_node - Where the matched Named obj is returned * * RETURN: Status * * DESCRIPTION: Search a single level of the namespace. Performs a * simple search of the specified level, and does not add * entries or search parents. * * * Named object lists are built (and subsequently dumped) in the * order in which the names are encountered during the namespace load; * * All namespace searching is linear in this implementation, but * could be easily modified to support any improved search * algorithm. However, the linear search was chosen for simplicity * and because the trees are small and the other interpreter * execution overhead is relatively high. * * Note: CPU execution analysis has shown that the AML interpreter spends * a very small percentage of its time searching the namespace. Therefore, * the linear search seems to be sufficient, as there would seem to be * little value in improving the search. 
* ******************************************************************************/ acpi_status acpi_ns_search_one_scope(u32 target_name, struct acpi_namespace_node *parent_node, acpi_object_type type, struct acpi_namespace_node **return_node) { struct acpi_namespace_node *node; ACPI_FUNCTION_TRACE(ns_search_one_scope); #ifdef ACPI_DEBUG_OUTPUT if (ACPI_LV_NAMES & acpi_dbg_level) { char *scope_name; scope_name = acpi_ns_get_external_pathname(parent_node); if (scope_name) { ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "Searching %s (%p) For [%4.4s] (%s)\n", scope_name, parent_node, ACPI_CAST_PTR(char, &target_name), acpi_ut_get_type_name(type))); ACPI_FREE(scope_name); } } #endif /* * Search for name at this namespace level, which is to say that we * must search for the name among the children of this object */ node = parent_node->child; while (node) { /* Check for match against the name */ if (node->name.integer == target_name) { /* Resolve a control method alias if any */ if (acpi_ns_get_type(node) == ACPI_TYPE_LOCAL_METHOD_ALIAS) { node = ACPI_CAST_PTR(struct acpi_namespace_node, node->object); } /* Found matching entry */ ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "Name [%4.4s] (%s) %p found in scope [%4.4s] %p\n", ACPI_CAST_PTR(char, &target_name), acpi_ut_get_type_name(node->type), node, acpi_ut_get_node_name(parent_node), parent_node)); *return_node = node; return_ACPI_STATUS(AE_OK); } /* * The last entry in the list points back to the parent, * so a flag is used to indicate the end-of-list */ if (node->flags & ANOBJ_END_OF_PEER_LIST) { /* Searched entire list, we are done */ break; } /* Didn't match name, move on to the next peer object */ node = node->peer; } /* Searched entire namespace level, not found */ ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "Name [%4.4s] (%s) not found in search in scope [%4.4s] %p first child %p\n", ACPI_CAST_PTR(char, &target_name), acpi_ut_get_type_name(type), acpi_ut_get_node_name(parent_node), parent_node, parent_node->child)); 
return_ACPI_STATUS(AE_NOT_FOUND); } /******************************************************************************* * * FUNCTION: acpi_ns_search_parent_tree * * PARAMETERS: target_name - Ascii ACPI name to search for * Node - Starting node where search will begin * Type - Object type to match * return_node - Where the matched Node is returned * * RETURN: Status * * DESCRIPTION: Called when a name has not been found in the current namespace * level. Before adding it or giving up, ACPI scope rules require * searching enclosing scopes in cases identified by acpi_ns_local(). * * "A name is located by finding the matching name in the current * name space, and then in the parent name space. If the parent * name space does not contain the name, the search continues * recursively until either the name is found or the name space * does not have a parent (the root of the name space). This * indicates that the name is not found" (From ACPI Specification, * section 5.3) * ******************************************************************************/ static acpi_status acpi_ns_search_parent_tree(u32 target_name, struct acpi_namespace_node *node, acpi_object_type type, struct acpi_namespace_node **return_node) { acpi_status status; struct acpi_namespace_node *parent_node; ACPI_FUNCTION_TRACE(ns_search_parent_tree); parent_node = acpi_ns_get_parent_node(node); /* * If there is no parent (i.e., we are at the root) or type is "local", * we won't be searching the parent tree. 
*/ if (!parent_node) { ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "[%4.4s] has no parent\n", ACPI_CAST_PTR(char, &target_name))); return_ACPI_STATUS(AE_NOT_FOUND); } if (acpi_ns_local(type)) { ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "[%4.4s] type [%s] must be local to this scope (no parent search)\n", ACPI_CAST_PTR(char, &target_name), acpi_ut_get_type_name(type))); return_ACPI_STATUS(AE_NOT_FOUND); } /* Search the parent tree */ ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "Searching parent [%4.4s] for [%4.4s]\n", acpi_ut_get_node_name(parent_node), ACPI_CAST_PTR(char, &target_name))); /* * Search parents until target is found or we have backed up to the root */ while (parent_node) { /* * Search parent scope. Use TYPE_ANY because we don't care about the * object type at this point, we only care about the existence of * the actual name we are searching for. Typechecking comes later. */ status = acpi_ns_search_one_scope(target_name, parent_node, ACPI_TYPE_ANY, return_node); if (ACPI_SUCCESS(status)) { return_ACPI_STATUS(status); } /* Not found here, go up another level (until we reach the root) */ parent_node = acpi_ns_get_parent_node(parent_node); } /* Not found in parent tree */ return_ACPI_STATUS(AE_NOT_FOUND); } /******************************************************************************* * * FUNCTION: acpi_ns_search_and_enter * * PARAMETERS: target_name - Ascii ACPI name to search for (4 chars) * walk_state - Current state of the walk * Node - Starting node where search will begin * interpreter_mode - Add names only in ACPI_MODE_LOAD_PASS_x. * Otherwise,search only. * Type - Object type to match * Flags - Flags describing the search restrictions * return_node - Where the Node is returned * * RETURN: Status * * DESCRIPTION: Search for a name segment in a single namespace level, * optionally adding it if it is not found. If the passed * Type is not Any and the type previously stored in the * entry was Any (i.e. unknown), update the stored type. * * In ACPI_IMODE_EXECUTE, search only. 
 * In other modes, search and add if not found.
 *
 * NOTE: target_name is a 4-character ACPI name seg packed into a u32
 * (the %4.4s / ACPI_CAST_PTR(char, &target_name) usage below relies on
 * that layout).
 *
 ******************************************************************************/

acpi_status
acpi_ns_search_and_enter(u32 target_name,
			 struct acpi_walk_state *walk_state,
			 struct acpi_namespace_node *node,
			 acpi_interpreter_mode interpreter_mode,
			 acpi_object_type type,
			 u32 flags, struct acpi_namespace_node **return_node)
{
	acpi_status status;
	struct acpi_namespace_node *new_node;

	ACPI_FUNCTION_TRACE(ns_search_and_enter);

	/* Parameter validation */

	if (!node || !target_name || !return_node) {
		ACPI_ERROR((AE_INFO,
			    "Null parameter: Node %p Name %X ReturnNode %p",
			    node, target_name, return_node));
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/*
	 * Name must consist of valid ACPI characters. We will repair the name if
	 * necessary because we don't want to abort because of this, but we want
	 * all namespace names to be printable. A warning message is appropriate.
	 *
	 * This issue came up because there are in fact machines that exhibit
	 * this problem, and we want to be able to enable ACPI support for them,
	 * even though there are a few bad names.
	 */
	if (!acpi_ut_valid_acpi_name(target_name)) {
		target_name =
		    acpi_ut_repair_name(ACPI_CAST_PTR(char, &target_name));

		/* Report warning only if in strict mode or debug mode */

		if (!acpi_gbl_enable_interpreter_slack) {
			ACPI_WARNING((AE_INFO,
				      "Found bad character(s) in name, repaired: [%4.4s]\n",
				      ACPI_CAST_PTR(char, &target_name)));
		} else {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Found bad character(s) in name, repaired: [%4.4s]\n",
					  ACPI_CAST_PTR(char, &target_name)));
		}
	}

	/* Try to find the name in the namespace level specified by the caller */

	*return_node = ACPI_ENTRY_NOT_FOUND;
	status = acpi_ns_search_one_scope(target_name, node, type, return_node);
	if (status != AE_NOT_FOUND) {
		/*
		 * If we found it AND the request specifies that a find is an error,
		 * return the error
		 */
		if ((status == AE_OK) && (flags & ACPI_NS_ERROR_IF_FOUND)) {
			status = AE_ALREADY_EXISTS;
		}

		/* Either found it or there was an error: finished either way */

		return_ACPI_STATUS(status);
	}

	/*
	 * The name was not found. If we are NOT performing the first pass
	 * (name entry) of loading the namespace, search the parent tree (all the
	 * way to the root if necessary.) We don't want to perform the parent
	 * search when the namespace is actually being loaded. We want to perform
	 * the search when namespace references are being resolved (load pass 2)
	 * and during the execution phase.
	 */
	if ((interpreter_mode != ACPI_IMODE_LOAD_PASS1) &&
	    (flags & ACPI_NS_SEARCH_PARENT)) {
		/*
		 * Not found at this level - search parent tree according to the
		 * ACPI specification
		 */
		status =
		    acpi_ns_search_parent_tree(target_name, node, type,
					       return_node);
		if (ACPI_SUCCESS(status)) {
			return_ACPI_STATUS(status);
		}
	}

	/* In execute mode, just search, never add names. Exit now */

	if (interpreter_mode == ACPI_IMODE_EXECUTE) {
		ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
				  "%4.4s Not found in %p [Not adding]\n",
				  ACPI_CAST_PTR(char, &target_name), node));

		return_ACPI_STATUS(AE_NOT_FOUND);
	}

	/* Create the new named object */

	new_node = acpi_ns_create_node(target_name);
	if (!new_node) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}
#ifdef ACPI_ASL_COMPILER
	/*
	 * Node is an object defined by an External() statement
	 */
	if (flags & ACPI_NS_EXTERNAL) {
		new_node->flags |= ANOBJ_IS_EXTERNAL;
	}
#endif

	if (flags & ACPI_NS_TEMPORARY) {
		new_node->flags |= ANOBJ_TEMPORARY;
	}

	/* Install the new object into the parent's list of children */

	acpi_ns_install_node(walk_state, node, new_node, type);
	*return_node = new_node;

	return_ACPI_STATUS(AE_OK);
}
gpl-2.0
csimmonds/rowboat-kernel
drivers/staging/winbond/wbhal.c
140
16043
#include "os_common.h"
#include "wbhal_f.h"
#include "wblinux_f.h"

/*
 * Program a new current MAC address into the adapter.
 *
 * Copies the 6-byte address into pHwData->CurrentMacAddress, packs it
 * into two little-endian 32-bit words (the second word keeps only the
 * low 16 bits, i.e. address bytes 4-5) and burst-writes both words to
 * register offset 0x03e8 (presumably the hardware MAC-address register
 * block -- not confirmable from this file). No-op once the device has
 * been surprise-removed.
 */
void hal_set_ethernet_address( phw_data_t pHwData, u8 *current_address )
{
	u32 ltmp[2];

	if( pHwData->SurpriseRemove ) return;

	memcpy( pHwData->CurrentMacAddress, current_address, ETH_LENGTH_OF_ADDRESS );

	ltmp[0]= cpu_to_le32( *(u32 *)pHwData->CurrentMacAddress );
	ltmp[1]= cpu_to_le32( *(u32 *)(pHwData->CurrentMacAddress + 4) ) & 0xffff;

	Wb35Reg_BurstWrite( pHwData, 0x03e8, ltmp, 2, AUTO_INCREMENT );
}

/*
 * Copy the factory-permanent MAC address (cached in
 * pHwData->PermanentMacAddress) into the caller's 6-byte buffer.
 * Does nothing after surprise-removal, leaving the buffer untouched.
 */
void hal_get_permanent_address( phw_data_t pHwData, u8 *pethernet_address )
{
	if( pHwData->SurpriseRemove ) return;

	memcpy( pethernet_address, pHwData->PermanentMacAddress, 6 );
}

/*
 * Periodic LED state machine, run as a kernel timer callback (data is
 * the wbsoft_priv pointer cast to unsigned long; the timer is armed in
 * hal_init_hardware and re-armed at the end of this function with a
 * TimeInterval chosen per blink pattern).
 *
 * LEDSet selects the board's LED wiring (bits of SoftwareSet); LEDgray /
 * LEDgray2 are brightness ramp tables used for "gray" (fading) blink
 * effects. LED state is written through register 0x03bc via the cached
 * reg->U1BC_LEDConfigure value.
 */
static void hal_led_control(unsigned long data)
{
	struct wbsoft_priv *adapter = (struct wbsoft_priv *) data;
	phw_data_t pHwData = &adapter->sHwData;
	struct wb35_reg *reg = &pHwData->reg;
	u32 LEDSet = (pHwData->SoftwareSet & HAL_LED_SET_MASK) >> HAL_LED_SET_SHIFT;
	u8 LEDgray[20] = { 0,3,4,6,8,10,11,12,13,14,15,14,13,12,11,10,8,6,4,2 };
	u8 LEDgray2[30] = { 7,8,9,10,11,12,13,14,15,0,0,0,0,0,0,0,0,0,0,0,0,0,15,14,13,12,11,10,9,8 };
	u32 TimeInterval = 500, ltmp, ltmp2;
	ltmp=0;

	if( pHwData->SurpriseRemove ) return;

	if( pHwData->LED_control ) {
		ltmp2 = pHwData->LED_control & 0xff;
		if( ltmp2 == 5 ) // 5 is WPS mode
		{
			TimeInterval = 100;
			ltmp2 = (pHwData->LED_control>>8) & 0xff;
			switch( ltmp2 )
			{
				case 1: // [0.2 On][0.1 Off]...
					pHwData->LED_Blinking %= 3;
					ltmp = 0x1010; // Led 1 & 0 Green and Red
					if( pHwData->LED_Blinking == 2 ) // Turn off
						ltmp = 0;
					break;
				case 2: // [0.1 On][0.1 Off]...
					pHwData->LED_Blinking %= 2;
					ltmp = 0x0010; // Led 0 red color
					if( pHwData->LED_Blinking ) // Turn off
						ltmp = 0;
					break;
				case 3: // [0.1 On][0.1 Off][0.1 On][0.1 Off][0.1 On][0.1 Off][0.1 On][0.1 Off][0.1 On][0.1 Off][0.5 Off]...
pHwData->LED_Blinking %= 15; ltmp = 0x0010; // Led 0 red color if( (pHwData->LED_Blinking >= 9) || (pHwData->LED_Blinking%2) ) // Turn off 0.6 sec ltmp = 0; break; case 4: // [300 On][ off ] ltmp = 0x1000; // Led 1 Green color if( pHwData->LED_Blinking >= 3000 ) ltmp = 0; // led maybe on after 300sec * 32bit counter overlap. break; } pHwData->LED_Blinking++; reg->U1BC_LEDConfigure = ltmp; if( LEDSet != 7 ) // Only 111 mode has 2 LEDs on PCB. { reg->U1BC_LEDConfigure |= (ltmp &0xff)<<8; // Copy LED result to each LED control register reg->U1BC_LEDConfigure |= (ltmp &0xff00)>>8; } Wb35Reg_Write( pHwData, 0x03bc, reg->U1BC_LEDConfigure ); } } else if( pHwData->CurrentRadioSw || pHwData->CurrentRadioHw ) // If radio off { if( reg->U1BC_LEDConfigure & 0x1010 ) { reg->U1BC_LEDConfigure &= ~0x1010; Wb35Reg_Write( pHwData, 0x03bc, reg->U1BC_LEDConfigure ); } } else { switch( LEDSet ) { case 4: // [100] Only 1 Led be placed on PCB and use pin 21 of IC. Use LED_0 for showing if( !pHwData->LED_LinkOn ) // Blink only if not Link On { // Blinking if scanning is on progress if( pHwData->LED_Scanning ) { if( pHwData->LED_Blinking == 0 ) { reg->U1BC_LEDConfigure |= 0x10; Wb35Reg_Write( pHwData, 0x03bc, reg->U1BC_LEDConfigure ); // LED_0 On pHwData->LED_Blinking = 1; TimeInterval = 300; } else { reg->U1BC_LEDConfigure &= ~0x10; Wb35Reg_Write( pHwData, 0x03bc, reg->U1BC_LEDConfigure ); // LED_0 Off pHwData->LED_Blinking = 0; TimeInterval = 300; } } else { //Turn Off LED_0 if( reg->U1BC_LEDConfigure & 0x10 ) { reg->U1BC_LEDConfigure &= ~0x10; Wb35Reg_Write( pHwData, 0x03bc, reg->U1BC_LEDConfigure ); // LED_0 Off } } } else { // Turn On LED_0 if( (reg->U1BC_LEDConfigure & 0x10) == 0 ) { reg->U1BC_LEDConfigure |= 0x10; Wb35Reg_Write( pHwData, 0x03bc, reg->U1BC_LEDConfigure ); // LED_0 Off } } break; case 6: // [110] Only 1 Led be placed on PCB and use pin 21 of IC. 
Use LED_0 for showing if( !pHwData->LED_LinkOn ) // Blink only if not Link On { // Blinking if scanning is on progress if( pHwData->LED_Scanning ) { if( pHwData->LED_Blinking == 0 ) { reg->U1BC_LEDConfigure &= ~0xf; reg->U1BC_LEDConfigure |= 0x10; Wb35Reg_Write( pHwData, 0x03bc, reg->U1BC_LEDConfigure ); // LED_0 On pHwData->LED_Blinking = 1; TimeInterval = 300; } else { reg->U1BC_LEDConfigure &= ~0x1f; Wb35Reg_Write( pHwData, 0x03bc, reg->U1BC_LEDConfigure ); // LED_0 Off pHwData->LED_Blinking = 0; TimeInterval = 300; } } else { // 20060901 Gray blinking if in disconnect state and not scanning ltmp = reg->U1BC_LEDConfigure; reg->U1BC_LEDConfigure &= ~0x1f; if( LEDgray2[(pHwData->LED_Blinking%30)] ) { reg->U1BC_LEDConfigure |= 0x10; reg->U1BC_LEDConfigure |= LEDgray2[ (pHwData->LED_Blinking%30) ]; } pHwData->LED_Blinking++; if( reg->U1BC_LEDConfigure != ltmp ) Wb35Reg_Write( pHwData, 0x03bc, reg->U1BC_LEDConfigure ); // LED_0 Off TimeInterval = 100; } } else { // Turn On LED_0 if( (reg->U1BC_LEDConfigure & 0x10) == 0 ) { reg->U1BC_LEDConfigure |= 0x10; Wb35Reg_Write( pHwData, 0x03bc, reg->U1BC_LEDConfigure ); // LED_0 Off } } break; case 5: // [101] Only 1 Led be placed on PCB and use LED_1 for showing if( !pHwData->LED_LinkOn ) // Blink only if not Link On { // Blinking if scanning is on progress if( pHwData->LED_Scanning ) { if( pHwData->LED_Blinking == 0 ) { reg->U1BC_LEDConfigure |= 0x1000; Wb35Reg_Write( pHwData, 0x03bc, reg->U1BC_LEDConfigure ); // LED_1 On pHwData->LED_Blinking = 1; TimeInterval = 300; } else { reg->U1BC_LEDConfigure &= ~0x1000; Wb35Reg_Write( pHwData, 0x03bc, reg->U1BC_LEDConfigure ); // LED_1 Off pHwData->LED_Blinking = 0; TimeInterval = 300; } } else { //Turn Off LED_1 if( reg->U1BC_LEDConfigure & 0x1000 ) { reg->U1BC_LEDConfigure &= ~0x1000; Wb35Reg_Write( pHwData, 0x03bc, reg->U1BC_LEDConfigure ); // LED_1 Off } } } else { // Is transmitting/receiving ?? 
if( (adapter->RxByteCount != pHwData->RxByteCountLast ) || (adapter->TxByteCount != pHwData->TxByteCountLast ) ) { if( (reg->U1BC_LEDConfigure & 0x3000) != 0x3000 ) { reg->U1BC_LEDConfigure |= 0x3000; Wb35Reg_Write( pHwData, 0x03bc, reg->U1BC_LEDConfigure ); // LED_1 On } // Update variable pHwData->RxByteCountLast = adapter->RxByteCount; pHwData->TxByteCountLast = adapter->TxByteCount; TimeInterval = 200; } else { // Turn On LED_1 and blinking if transmitting/receiving if( (reg->U1BC_LEDConfigure & 0x3000) != 0x1000 ) { reg->U1BC_LEDConfigure &= ~0x3000; reg->U1BC_LEDConfigure |= 0x1000; Wb35Reg_Write( pHwData, 0x03bc, reg->U1BC_LEDConfigure ); // LED_1 On } } } break; default: // Default setting. 2 LED be placed on PCB. LED_0: Link On LED_1 Active if( (reg->U1BC_LEDConfigure & 0x3000) != 0x3000 ) { reg->U1BC_LEDConfigure |= 0x3000;// LED_1 is always on and event enable Wb35Reg_Write( pHwData, 0x03bc, reg->U1BC_LEDConfigure ); } if( pHwData->LED_Blinking ) { // Gray blinking reg->U1BC_LEDConfigure &= ~0x0f; reg->U1BC_LEDConfigure |= 0x10; reg->U1BC_LEDConfigure |= LEDgray[ (pHwData->LED_Blinking-1)%20 ]; Wb35Reg_Write( pHwData, 0x03bc, reg->U1BC_LEDConfigure ); pHwData->LED_Blinking += 2; if( pHwData->LED_Blinking < 40 ) TimeInterval = 100; else { pHwData->LED_Blinking = 0; // Stop blinking reg->U1BC_LEDConfigure &= ~0x0f; Wb35Reg_Write( pHwData, 0x03bc, reg->U1BC_LEDConfigure ); } break; } if( pHwData->LED_LinkOn ) { if( !(reg->U1BC_LEDConfigure & 0x10) ) // Check the LED_0 { //Try to turn ON LED_0 after gray blinking reg->U1BC_LEDConfigure |= 0x10; pHwData->LED_Blinking = 1; //Start blinking TimeInterval = 50; } } else { if( reg->U1BC_LEDConfigure & 0x10 ) // Check the LED_0 { reg->U1BC_LEDConfigure &= ~0x10; Wb35Reg_Write( pHwData, 0x03bc, reg->U1BC_LEDConfigure ); } } break; } //20060828.1 Active send null packet to avoid AP disconnect if( pHwData->LED_LinkOn ) { pHwData->NullPacketCount += TimeInterval; if( pHwData->NullPacketCount >= 
DEFAULT_NULL_PACKET_COUNT ) { pHwData->NullPacketCount = 0; } } } pHwData->time_count += TimeInterval; Wb35Tx_CurrentTime(adapter, pHwData->time_count); // 20060928 add pHwData->LEDTimer.expires = jiffies + msecs_to_jiffies(TimeInterval); add_timer(&pHwData->LEDTimer); } u8 hal_init_hardware(struct ieee80211_hw *hw) { struct wbsoft_priv *priv = hw->priv; phw_data_t pHwData = &priv->sHwData; u16 SoftwareSet; // Initial the variable pHwData->MaxReceiveLifeTime = DEFAULT_MSDU_LIFE_TIME; // Setting Rx maximum MSDU life time pHwData->FragmentThreshold = DEFAULT_FRAGMENT_THRESHOLD; // Setting default fragment threshold pHwData->InitialResource = 1; if( Wb35Reg_initial(pHwData)) { pHwData->InitialResource = 2; if (Wb35Tx_initial(pHwData)) { pHwData->InitialResource = 3; if (Wb35Rx_initial(pHwData)) { pHwData->InitialResource = 4; init_timer(&pHwData->LEDTimer); pHwData->LEDTimer.function = hal_led_control; pHwData->LEDTimer.data = (unsigned long) priv; pHwData->LEDTimer.expires = jiffies + msecs_to_jiffies(1000); add_timer(&pHwData->LEDTimer); // // For restrict to vendor's hardware // SoftwareSet = hal_software_set( pHwData ); #ifdef Vendor2 // Try to make sure the EEPROM contain SoftwareSet >>= 8; if( SoftwareSet != 0x82 ) return false; #endif Wb35Rx_start(hw); Wb35Tx_EP2VM_start(priv); return true; } } } pHwData->SurpriseRemove = 1; return false; } void hal_halt(phw_data_t pHwData, void *ppa_data) { switch( pHwData->InitialResource ) { case 4: case 3: del_timer_sync(&pHwData->LEDTimer); msleep(100); // Wait for Timer DPC exit 940623.2 Wb35Rx_destroy( pHwData ); // Release the Rx case 2: Wb35Tx_destroy( pHwData ); // Release the Tx case 1: Wb35Reg_destroy( pHwData ); // Release the Wb35 Regisster resources } } //--------------------------------------------------------------------------------------------------- void hal_set_beacon_period( phw_data_t pHwData, u16 beacon_period ) { u32 tmp; if( pHwData->SurpriseRemove ) return; pHwData->BeaconPeriod = beacon_period; tmp = 
pHwData->BeaconPeriod << 16; tmp |= pHwData->ProbeDelay; Wb35Reg_Write( pHwData, 0x0848, tmp ); } static void hal_set_current_channel_ex( phw_data_t pHwData, ChanInfo channel ) { struct wb35_reg *reg = &pHwData->reg; if( pHwData->SurpriseRemove ) return; printk("Going to channel: %d/%d\n", channel.band, channel.ChanNo); RFSynthesizer_SwitchingChannel( pHwData, channel );// Switch channel pHwData->Channel = channel.ChanNo; pHwData->band = channel.band; #ifdef _PE_STATE_DUMP_ WBDEBUG(("Set channel is %d, band =%d\n", pHwData->Channel, pHwData->band)); #endif reg->M28_MacControl &= ~0xff; // Clean channel information field reg->M28_MacControl |= channel.ChanNo; Wb35Reg_WriteWithCallbackValue( pHwData, 0x0828, reg->M28_MacControl, (s8 *)&channel, sizeof(ChanInfo)); } //--------------------------------------------------------------------------------------------------- void hal_set_current_channel( phw_data_t pHwData, ChanInfo channel ) { hal_set_current_channel_ex( pHwData, channel ); } //--------------------------------------------------------------------------------------------------- void hal_set_accept_broadcast( phw_data_t pHwData, u8 enable ) { struct wb35_reg *reg = &pHwData->reg; if( pHwData->SurpriseRemove ) return; reg->M00_MacControl &= ~0x02000000;//The HW value if (enable) reg->M00_MacControl |= 0x02000000;//The HW value Wb35Reg_Write( pHwData, 0x0800, reg->M00_MacControl ); } //for wep key error detection, we need to accept broadcast packets to be received temporary. 
void hal_set_accept_promiscuous( phw_data_t pHwData, u8 enable) { struct wb35_reg *reg = &pHwData->reg; if (pHwData->SurpriseRemove) return; if (enable) { reg->M00_MacControl |= 0x00400000; Wb35Reg_Write( pHwData, 0x0800, reg->M00_MacControl ); } else { reg->M00_MacControl&=~0x00400000; Wb35Reg_Write( pHwData, 0x0800, reg->M00_MacControl ); } } void hal_set_accept_multicast( phw_data_t pHwData, u8 enable ) { struct wb35_reg *reg = &pHwData->reg; if( pHwData->SurpriseRemove ) return; reg->M00_MacControl &= ~0x01000000;//The HW value if (enable) reg->M00_MacControl |= 0x01000000;//The HW value Wb35Reg_Write( pHwData, 0x0800, reg->M00_MacControl ); } void hal_set_accept_beacon( phw_data_t pHwData, u8 enable ) { struct wb35_reg *reg = &pHwData->reg; if( pHwData->SurpriseRemove ) return; // 20040108 debug if( !enable )//Due to SME and MLME are not suitable for 35 return; reg->M00_MacControl &= ~0x04000000;//The HW value if( enable ) reg->M00_MacControl |= 0x04000000;//The HW value Wb35Reg_Write( pHwData, 0x0800, reg->M00_MacControl ); } //--------------------------------------------------------------------------------------------------- void hal_stop( phw_data_t pHwData ) { struct wb35_reg *reg = &pHwData->reg; pHwData->Wb35Rx.rx_halt = 1; Wb35Rx_stop( pHwData ); pHwData->Wb35Tx.tx_halt = 1; Wb35Tx_stop( pHwData ); reg->D00_DmaControl &= ~0xc0000000;//Tx Off, Rx Off Wb35Reg_Write( pHwData, 0x0400, reg->D00_DmaControl ); } unsigned char hal_idle(phw_data_t pHwData) { struct wb35_reg *reg = &pHwData->reg; PWBUSB pWbUsb = &pHwData->WbUsb; if( !pHwData->SurpriseRemove && ( pWbUsb->DetectCount || reg->EP0vm_state!=VM_STOP ) ) return false; return true; } //--------------------------------------------------------------------------------------------------- void hal_set_phy_type( phw_data_t pHwData, u8 PhyType ) { pHwData->phy_type = PhyType; } void hal_set_radio_mode( phw_data_t pHwData, unsigned char radio_off) { struct wb35_reg *reg = &pHwData->reg; if( 
pHwData->SurpriseRemove ) return; if (radio_off) //disable Baseband receive off { pHwData->CurrentRadioSw = 1; // off reg->M24_MacControl &= 0xffffffbf; } else { pHwData->CurrentRadioSw = 0; // on reg->M24_MacControl |= 0x00000040; } Wb35Reg_Write( pHwData, 0x0824, reg->M24_MacControl ); } u8 hal_get_antenna_number( phw_data_t pHwData ) { struct wb35_reg *reg = &pHwData->reg; if ((reg->BB2C & BIT(11)) == 0) return 0; else return 1; } //---------------------------------------------------------------------------------------------------- //0 : radio on; 1: radio off u8 hal_get_hw_radio_off( phw_data_t pHwData ) { struct wb35_reg *reg = &pHwData->reg; if( pHwData->SurpriseRemove ) return 1; //read the bit16 of register U1B0 Wb35Reg_Read( pHwData, 0x3b0, &reg->U1B0 ); if ((reg->U1B0 & 0x00010000)) { pHwData->CurrentRadioHw = 1; return 1; } else { pHwData->CurrentRadioHw = 0; return 0; } } unsigned char hal_get_dxx_reg( phw_data_t pHwData, u16 number, u32 * pValue ) { if( number < 0x1000 ) number += 0x1000; return Wb35Reg_ReadSync( pHwData, number, pValue ); } unsigned char hal_set_dxx_reg( phw_data_t pHwData, u16 number, u32 value ) { unsigned char ret; if( number < 0x1000 ) number += 0x1000; ret = Wb35Reg_WriteSync( pHwData, number, value ); return ret; } void hal_set_rf_power(phw_data_t pHwData, u8 PowerIndex) { RFSynthesizer_SetPowerIndex( pHwData, PowerIndex ); }
gpl-2.0
MinsooCha/linux
drivers/crypto/ccp/ccp-ops.c
396
52812
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>

#include "ccp-dev.h"

/* Location of an operation's source/destination buffer:
 * system memory (DMA), the key storage block (KSB), or CCP-local memory.
 */
enum ccp_memtype {
	CCP_MEMTYPE_SYSTEM = 0,
	CCP_MEMTYPE_KSB,
	CCP_MEMTYPE_LOCAL,
	CCP_MEMTYPE__LAST,
};

/* A single DMA-mapped region: base address plus current offset/length. */
struct ccp_dma_info {
	dma_addr_t address;
	unsigned int offset;
	unsigned int length;
	enum dma_data_direction dir;
};

/* A driver-owned bounce buffer (from a dma_pool for small sizes),
 * kept DMA-mapped for the duration of an operation.
 */
struct ccp_dm_workarea {
	struct device *dev;
	struct dma_pool *dma_pool;
	unsigned int length;

	u8 *address;		/* CPU-visible buffer */
	struct ccp_dma_info dma;	/* its DMA mapping */
};

/* Walk state over a caller-supplied scatterlist, tracking how much of
 * the current sg entry (sg_used) and of the whole request (bytes_left)
 * has been consumed.
 */
struct ccp_sg_workarea {
	struct scatterlist *sg;
	int nents;

	struct scatterlist *dma_sg;
	struct device *dma_dev;
	unsigned int dma_count;
	enum dma_data_direction dma_dir;

	unsigned int sg_used;

	u64 bytes_left;
};

/* Pairing of a scatterlist walker with its bounce buffer. */
struct ccp_data {
	struct ccp_sg_workarea sg_wa;
	struct ccp_dm_workarea dm_wa;
};

/* Tagged source/destination descriptor: DMA region or KSB slot index. */
struct ccp_mem {
	enum ccp_memtype type;
	union {
		struct ccp_dma_info dma;
		u32 ksb;
	} u;
};

/* Per-engine operation parameters (one struct per CCP engine type). */
struct ccp_aes_op {
	enum ccp_aes_type type;
	enum ccp_aes_mode mode;
	enum ccp_aes_action action;
};

struct ccp_xts_aes_op {
	enum ccp_aes_action action;
	enum ccp_xts_aes_unit_size unit_size;
};

struct ccp_sha_op {
	enum ccp_sha_type type;
	u64 msg_bits;	/* total message length in bits, needed for final block */
};

struct ccp_rsa_op {
	u32 mod_size;
	u32 input_len;
};

struct ccp_passthru_op {
	enum ccp_passthru_bitwise bit_mod;
	enum ccp_passthru_byteswap byte_swap;
};

struct ccp_ecc_op {
	enum ccp_ecc_function function;
};

/* One hardware command: queue, job id, control flags (ioc/soc/init/eom
 * are written into the request registers by the ccp_perform_* helpers),
 * source/destination, and the engine-specific parameter union.
 */
struct ccp_op {
	struct ccp_cmd_queue *cmd_q;

	u32 jobid;
	u32 ioc;
	u32 soc;
	u32 ksb_key;
	u32 ksb_ctx;
	u32 init;
	u32 eom;

	struct ccp_mem src;
	struct ccp_mem dst;

	union {
		struct ccp_aes_op aes;
		struct ccp_xts_aes_op xts;
		struct ccp_sha_op sha;
		struct ccp_rsa_op rsa;
		struct ccp_passthru_op passthru;
		struct ccp_ecc_op ecc;
	} u;
};

/* SHA initial context values */
static const __be32 ccp_sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
	cpu_to_be32(SHA1_H4), 0, 0, 0,
};

static const __be32 ccp_sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};

/* The CCP cannot perform zero-length sha operations so the caller
 * is required to buffer data for the final operation. However, a
 * sha operation for a message with a total length of zero is valid
 * so known values are required to supply the result.
*/ static const u8 ccp_sha1_zero[CCP_SHA_CTXSIZE] = { 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8, 0x07, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; static const u8 ccp_sha224_zero[CCP_SHA_CTXSIZE] = { 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47, 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2, 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4, 0x2f, 0x00, 0x00, 0x00, 0x00, }; static const u8 ccp_sha256_zero[CCP_SHA_CTXSIZE] = { 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55, }; static u32 ccp_addr_lo(struct ccp_dma_info *info) { return lower_32_bits(info->address + info->offset); } static u32 ccp_addr_hi(struct ccp_dma_info *info) { return upper_32_bits(info->address + info->offset) & 0x0000ffff; } static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count) { struct ccp_cmd_queue *cmd_q = op->cmd_q; struct ccp_device *ccp = cmd_q->ccp; void __iomem *cr_addr; u32 cr0, cmd; unsigned int i; int ret = 0; /* We could read a status register to see how many free slots * are actually available, but reading that register resets it * and you could lose some error information. 
*/ cmd_q->free_slots--; cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT) | (op->jobid << REQ0_JOBID_SHIFT) | REQ0_WAIT_FOR_WRITE; if (op->soc) cr0 |= REQ0_STOP_ON_COMPLETE | REQ0_INT_ON_COMPLETE; if (op->ioc || !cmd_q->free_slots) cr0 |= REQ0_INT_ON_COMPLETE; /* Start at CMD_REQ1 */ cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR; mutex_lock(&ccp->req_mutex); /* Write CMD_REQ1 through CMD_REQx first */ for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR) iowrite32(*(cr + i), cr_addr); /* Tell the CCP to start */ wmb(); iowrite32(cr0, ccp->io_regs + CMD_REQ0); mutex_unlock(&ccp->req_mutex); if (cr0 & REQ0_INT_ON_COMPLETE) { /* Wait for the job to complete */ ret = wait_event_interruptible(cmd_q->int_queue, cmd_q->int_rcvd); if (ret || cmd_q->cmd_error) { /* On error delete all related jobs from the queue */ cmd = (cmd_q->id << DEL_Q_ID_SHIFT) | op->jobid; iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB); if (!ret) ret = -EIO; } else if (op->soc) { /* Delete just head job from the queue on SoC */ cmd = DEL_Q_ACTIVE | (cmd_q->id << DEL_Q_ID_SHIFT) | op->jobid; iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB); } cmd_q->free_slots = CMD_Q_DEPTH(cmd_q->q_status); cmd_q->int_rcvd = 0; } return ret; } static int ccp_perform_aes(struct ccp_op *op) { u32 cr[6]; /* Fill out the register contents for REQ1 through REQ6 */ cr[0] = (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT) | (op->u.aes.type << REQ1_AES_TYPE_SHIFT) | (op->u.aes.mode << REQ1_AES_MODE_SHIFT) | (op->u.aes.action << REQ1_AES_ACTION_SHIFT) | (op->ksb_key << REQ1_KEY_KSB_SHIFT); cr[1] = op->src.u.dma.length - 1; cr[2] = ccp_addr_lo(&op->src.u.dma); cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT) | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) | ccp_addr_hi(&op->src.u.dma); cr[4] = ccp_addr_lo(&op->dst.u.dma); cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) | ccp_addr_hi(&op->dst.u.dma); if (op->u.aes.mode == CCP_AES_MODE_CFB) cr[0] |= ((0x7f) << REQ1_AES_CFB_SIZE_SHIFT); if (op->eom) cr[0] |= REQ1_EOM; if (op->init) cr[0] |= REQ1_INIT; return 
ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); } static int ccp_perform_xts_aes(struct ccp_op *op) { u32 cr[6]; /* Fill out the register contents for REQ1 through REQ6 */ cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT) | (op->u.xts.action << REQ1_AES_ACTION_SHIFT) | (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT) | (op->ksb_key << REQ1_KEY_KSB_SHIFT); cr[1] = op->src.u.dma.length - 1; cr[2] = ccp_addr_lo(&op->src.u.dma); cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT) | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) | ccp_addr_hi(&op->src.u.dma); cr[4] = ccp_addr_lo(&op->dst.u.dma); cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) | ccp_addr_hi(&op->dst.u.dma); if (op->eom) cr[0] |= REQ1_EOM; if (op->init) cr[0] |= REQ1_INIT; return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); } static int ccp_perform_sha(struct ccp_op *op) { u32 cr[6]; /* Fill out the register contents for REQ1 through REQ6 */ cr[0] = (CCP_ENGINE_SHA << REQ1_ENGINE_SHIFT) | (op->u.sha.type << REQ1_SHA_TYPE_SHIFT) | REQ1_INIT; cr[1] = op->src.u.dma.length - 1; cr[2] = ccp_addr_lo(&op->src.u.dma); cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT) | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) | ccp_addr_hi(&op->src.u.dma); if (op->eom) { cr[0] |= REQ1_EOM; cr[4] = lower_32_bits(op->u.sha.msg_bits); cr[5] = upper_32_bits(op->u.sha.msg_bits); } else { cr[4] = 0; cr[5] = 0; } return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); } static int ccp_perform_rsa(struct ccp_op *op) { u32 cr[6]; /* Fill out the register contents for REQ1 through REQ6 */ cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT) | (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT) | (op->ksb_key << REQ1_KEY_KSB_SHIFT) | REQ1_EOM; cr[1] = op->u.rsa.input_len - 1; cr[2] = ccp_addr_lo(&op->src.u.dma); cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT) | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) | ccp_addr_hi(&op->src.u.dma); cr[4] = ccp_addr_lo(&op->dst.u.dma); cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) | ccp_addr_hi(&op->dst.u.dma); return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); } static int 
ccp_perform_passthru(struct ccp_op *op) { u32 cr[6]; /* Fill out the register contents for REQ1 through REQ6 */ cr[0] = (CCP_ENGINE_PASSTHRU << REQ1_ENGINE_SHIFT) | (op->u.passthru.bit_mod << REQ1_PT_BW_SHIFT) | (op->u.passthru.byte_swap << REQ1_PT_BS_SHIFT); if (op->src.type == CCP_MEMTYPE_SYSTEM) cr[1] = op->src.u.dma.length - 1; else cr[1] = op->dst.u.dma.length - 1; if (op->src.type == CCP_MEMTYPE_SYSTEM) { cr[2] = ccp_addr_lo(&op->src.u.dma); cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) | ccp_addr_hi(&op->src.u.dma); if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP) cr[3] |= (op->ksb_key << REQ4_KSB_SHIFT); } else { cr[2] = op->src.u.ksb * CCP_KSB_BYTES; cr[3] = (CCP_MEMTYPE_KSB << REQ4_MEMTYPE_SHIFT); } if (op->dst.type == CCP_MEMTYPE_SYSTEM) { cr[4] = ccp_addr_lo(&op->dst.u.dma); cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) | ccp_addr_hi(&op->dst.u.dma); } else { cr[4] = op->dst.u.ksb * CCP_KSB_BYTES; cr[5] = (CCP_MEMTYPE_KSB << REQ6_MEMTYPE_SHIFT); } if (op->eom) cr[0] |= REQ1_EOM; return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); } static int ccp_perform_ecc(struct ccp_op *op) { u32 cr[6]; /* Fill out the register contents for REQ1 through REQ6 */ cr[0] = REQ1_ECC_AFFINE_CONVERT | (CCP_ENGINE_ECC << REQ1_ENGINE_SHIFT) | (op->u.ecc.function << REQ1_ECC_FUNCTION_SHIFT) | REQ1_EOM; cr[1] = op->src.u.dma.length - 1; cr[2] = ccp_addr_lo(&op->src.u.dma); cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) | ccp_addr_hi(&op->src.u.dma); cr[4] = ccp_addr_lo(&op->dst.u.dma); cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) | ccp_addr_hi(&op->dst.u.dma); return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); } static u32 ccp_alloc_ksb(struct ccp_device *ccp, unsigned int count) { int start; for (;;) { mutex_lock(&ccp->ksb_mutex); start = (u32)bitmap_find_next_zero_area(ccp->ksb, ccp->ksb_count, ccp->ksb_start, count, 0); if (start <= ccp->ksb_count) { bitmap_set(ccp->ksb, start, count); mutex_unlock(&ccp->ksb_mutex); break; } ccp->ksb_avail = 0; 
mutex_unlock(&ccp->ksb_mutex); /* Wait for KSB entries to become available */ if (wait_event_interruptible(ccp->ksb_queue, ccp->ksb_avail)) return 0; } return KSB_START + start; } static void ccp_free_ksb(struct ccp_device *ccp, unsigned int start, unsigned int count) { if (!start) return; mutex_lock(&ccp->ksb_mutex); bitmap_clear(ccp->ksb, start - KSB_START, count); ccp->ksb_avail = 1; mutex_unlock(&ccp->ksb_mutex); wake_up_interruptible_all(&ccp->ksb_queue); } static u32 ccp_gen_jobid(struct ccp_device *ccp) { return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK; } static void ccp_sg_free(struct ccp_sg_workarea *wa) { if (wa->dma_count) dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir); wa->dma_count = 0; } static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, struct scatterlist *sg, u64 len, enum dma_data_direction dma_dir) { memset(wa, 0, sizeof(*wa)); wa->sg = sg; if (!sg) return 0; wa->nents = sg_nents_for_len(sg, len); if (wa->nents < 0) return wa->nents; wa->bytes_left = len; wa->sg_used = 0; if (len == 0) return 0; if (dma_dir == DMA_NONE) return 0; wa->dma_sg = sg; wa->dma_dev = dev; wa->dma_dir = dma_dir; wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir); if (!wa->dma_count) return -ENOMEM; return 0; } static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len) { unsigned int nbytes = min_t(u64, len, wa->bytes_left); if (!wa->sg) return; wa->sg_used += nbytes; wa->bytes_left -= nbytes; if (wa->sg_used == wa->sg->length) { wa->sg = sg_next(wa->sg); wa->sg_used = 0; } } static void ccp_dm_free(struct ccp_dm_workarea *wa) { if (wa->length <= CCP_DMAPOOL_MAX_SIZE) { if (wa->address) dma_pool_free(wa->dma_pool, wa->address, wa->dma.address); } else { if (wa->dma.address) dma_unmap_single(wa->dev, wa->dma.address, wa->length, wa->dma.dir); kfree(wa->address); } wa->address = NULL; wa->dma.address = 0; } static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa, struct ccp_cmd_queue 
*cmd_q, unsigned int len, enum dma_data_direction dir) { memset(wa, 0, sizeof(*wa)); if (!len) return 0; wa->dev = cmd_q->ccp->dev; wa->length = len; if (len <= CCP_DMAPOOL_MAX_SIZE) { wa->dma_pool = cmd_q->dma_pool; wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL, &wa->dma.address); if (!wa->address) return -ENOMEM; wa->dma.length = CCP_DMAPOOL_MAX_SIZE; memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE); } else { wa->address = kzalloc(len, GFP_KERNEL); if (!wa->address) return -ENOMEM; wa->dma.address = dma_map_single(wa->dev, wa->address, len, dir); if (!wa->dma.address) return -ENOMEM; wa->dma.length = len; } wa->dma.dir = dir; return 0; } static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset, struct scatterlist *sg, unsigned int sg_offset, unsigned int len) { WARN_ON(!wa->address); scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len, 0); } static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset, struct scatterlist *sg, unsigned int sg_offset, unsigned int len) { WARN_ON(!wa->address); scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len, 1); } static void ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa, struct scatterlist *sg, unsigned int len, unsigned int se_len, bool sign_extend) { unsigned int nbytes, sg_offset, dm_offset, ksb_len, i; u8 buffer[CCP_REVERSE_BUF_SIZE]; BUG_ON(se_len > sizeof(buffer)); sg_offset = len; dm_offset = 0; nbytes = len; while (nbytes) { ksb_len = min_t(unsigned int, nbytes, se_len); sg_offset -= ksb_len; scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 0); for (i = 0; i < ksb_len; i++) wa->address[dm_offset + i] = buffer[ksb_len - i - 1]; dm_offset += ksb_len; nbytes -= ksb_len; if ((ksb_len != se_len) && sign_extend) { /* Must sign-extend to nearest sign-extend length */ if (wa->address[dm_offset - 1] & 0x80) memset(wa->address + dm_offset, 0xff, se_len - ksb_len); } } } static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa, struct 
scatterlist *sg, unsigned int len) { unsigned int nbytes, sg_offset, dm_offset, ksb_len, i; u8 buffer[CCP_REVERSE_BUF_SIZE]; sg_offset = 0; dm_offset = len; nbytes = len; while (nbytes) { ksb_len = min_t(unsigned int, nbytes, sizeof(buffer)); dm_offset -= ksb_len; for (i = 0; i < ksb_len; i++) buffer[ksb_len - i - 1] = wa->address[dm_offset + i]; scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 1); sg_offset += ksb_len; nbytes -= ksb_len; } } static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q) { ccp_dm_free(&data->dm_wa); ccp_sg_free(&data->sg_wa); } static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q, struct scatterlist *sg, u64 sg_len, unsigned int dm_len, enum dma_data_direction dir) { int ret; memset(data, 0, sizeof(*data)); ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len, dir); if (ret) goto e_err; ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir); if (ret) goto e_err; return 0; e_err: ccp_free_data(data, cmd_q); return ret; } static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from) { struct ccp_sg_workarea *sg_wa = &data->sg_wa; struct ccp_dm_workarea *dm_wa = &data->dm_wa; unsigned int buf_count, nbytes; /* Clear the buffer if setting it */ if (!from) memset(dm_wa->address, 0, dm_wa->length); if (!sg_wa->sg) return 0; /* Perform the copy operation * nbytes will always be <= UINT_MAX because dm_wa->length is * an unsigned int */ nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length); scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used, nbytes, from); /* Update the structures and generate the count */ buf_count = 0; while (sg_wa->bytes_left && (buf_count < dm_wa->length)) { nbytes = min(sg_wa->sg->length - sg_wa->sg_used, dm_wa->length - buf_count); nbytes = min_t(u64, sg_wa->bytes_left, nbytes); buf_count += nbytes; ccp_update_sg_workarea(sg_wa, nbytes); } return buf_count; } static unsigned int ccp_fill_queue_buf(struct ccp_data *data) { 
return ccp_queue_buf(data, 0); } static unsigned int ccp_empty_queue_buf(struct ccp_data *data) { return ccp_queue_buf(data, 1); } static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, struct ccp_op *op, unsigned int block_size, bool blocksize_op) { unsigned int sg_src_len, sg_dst_len, op_len; /* The CCP can only DMA from/to one address each per operation. This * requires that we find the smallest DMA area between the source * and destination. The resulting len values will always be <= UINT_MAX * because the dma length is an unsigned int. */ sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used; sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len); if (dst) { sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used; sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len); op_len = min(sg_src_len, sg_dst_len); } else { op_len = sg_src_len; } /* The data operation length will be at least block_size in length * or the smaller of available sg room remaining for the source or * the destination */ op_len = max(op_len, block_size); /* Unless we have to buffer data, there's no reason to wait */ op->soc = 0; if (sg_src_len < block_size) { /* Not enough data in the sg element, so it * needs to be buffered into a blocksize chunk */ int cp_len = ccp_fill_queue_buf(src); op->soc = 1; op->src.u.dma.address = src->dm_wa.dma.address; op->src.u.dma.offset = 0; op->src.u.dma.length = (blocksize_op) ? 
block_size : cp_len; } else { /* Enough data in the sg element, but we need to * adjust for any previously copied data */ op->src.u.dma.address = sg_dma_address(src->sg_wa.sg); op->src.u.dma.offset = src->sg_wa.sg_used; op->src.u.dma.length = op_len & ~(block_size - 1); ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length); } if (dst) { if (sg_dst_len < block_size) { /* Not enough room in the sg element or we're on the * last piece of data (when using padding), so the * output needs to be buffered into a blocksize chunk */ op->soc = 1; op->dst.u.dma.address = dst->dm_wa.dma.address; op->dst.u.dma.offset = 0; op->dst.u.dma.length = op->src.u.dma.length; } else { /* Enough room in the sg element, but we need to * adjust for any previously used area */ op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg); op->dst.u.dma.offset = dst->sg_wa.sg_used; op->dst.u.dma.length = op->src.u.dma.length; } } } static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst, struct ccp_op *op) { op->init = 0; if (dst) { if (op->dst.u.dma.address == dst->dm_wa.dma.address) ccp_empty_queue_buf(dst); else ccp_update_sg_workarea(&dst->sg_wa, op->dst.u.dma.length); } } static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q, struct ccp_dm_workarea *wa, u32 jobid, u32 ksb, u32 byte_swap, bool from) { struct ccp_op op; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = jobid; op.eom = 1; if (from) { op.soc = 1; op.src.type = CCP_MEMTYPE_KSB; op.src.u.ksb = ksb; op.dst.type = CCP_MEMTYPE_SYSTEM; op.dst.u.dma.address = wa->dma.address; op.dst.u.dma.length = wa->length; } else { op.src.type = CCP_MEMTYPE_SYSTEM; op.src.u.dma.address = wa->dma.address; op.src.u.dma.length = wa->length; op.dst.type = CCP_MEMTYPE_KSB; op.dst.u.ksb = ksb; } op.u.passthru.byte_swap = byte_swap; return ccp_perform_passthru(&op); } static int ccp_copy_to_ksb(struct ccp_cmd_queue *cmd_q, struct ccp_dm_workarea *wa, u32 jobid, u32 ksb, u32 byte_swap) { return ccp_copy_to_from_ksb(cmd_q, wa, 
jobid, ksb, byte_swap, false); } static int ccp_copy_from_ksb(struct ccp_cmd_queue *cmd_q, struct ccp_dm_workarea *wa, u32 jobid, u32 ksb, u32 byte_swap) { return ccp_copy_to_from_ksb(cmd_q, wa, jobid, ksb, byte_swap, true); } static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_aes_engine *aes = &cmd->u.aes; struct ccp_dm_workarea key, ctx; struct ccp_data src; struct ccp_op op; unsigned int dm_offset; int ret; if (!((aes->key_len == AES_KEYSIZE_128) || (aes->key_len == AES_KEYSIZE_192) || (aes->key_len == AES_KEYSIZE_256))) return -EINVAL; if (aes->src_len & (AES_BLOCK_SIZE - 1)) return -EINVAL; if (aes->iv_len != AES_BLOCK_SIZE) return -EINVAL; if (!aes->key || !aes->iv || !aes->src) return -EINVAL; if (aes->cmac_final) { if (aes->cmac_key_len != AES_BLOCK_SIZE) return -EINVAL; if (!aes->cmac_key) return -EINVAL; } BUILD_BUG_ON(CCP_AES_KEY_KSB_COUNT != 1); BUILD_BUG_ON(CCP_AES_CTX_KSB_COUNT != 1); ret = -EIO; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = ccp_gen_jobid(cmd_q->ccp); op.ksb_key = cmd_q->ksb_key; op.ksb_ctx = cmd_q->ksb_ctx; op.init = 1; op.u.aes.type = aes->type; op.u.aes.mode = aes->mode; op.u.aes.action = aes->action; /* All supported key sizes fit in a single (32-byte) KSB entry * and must be in little endian format. Use the 256-bit byte * swap passthru option to convert from big endian to little * endian. */ ret = ccp_init_dm_workarea(&key, cmd_q, CCP_AES_KEY_KSB_COUNT * CCP_KSB_BYTES, DMA_TO_DEVICE); if (ret) return ret; dm_offset = CCP_KSB_BYTES - aes->key_len; ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_key; } /* The AES context fits in a single (32-byte) KSB entry and * must be in little endian format. Use the 256-bit byte swap * passthru option to convert from big endian to little endian. 
*/ ret = ccp_init_dm_workarea(&ctx, cmd_q, CCP_AES_CTX_KSB_COUNT * CCP_KSB_BYTES, DMA_BIDIRECTIONAL); if (ret) goto e_key; dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE; ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_ctx; } /* Send data to the CCP AES engine */ ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len, AES_BLOCK_SIZE, DMA_TO_DEVICE); if (ret) goto e_ctx; while (src.sg_wa.bytes_left) { ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true); if (aes->cmac_final && !src.sg_wa.bytes_left) { op.eom = 1; /* Push the K1/K2 key to the CCP now */ ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_src; } ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0, aes->cmac_key_len); ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_src; } } ret = ccp_perform_aes(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_src; } ccp_process_data(&src, NULL, &op); } /* Retrieve the AES context - convert from LE to BE using * 32-byte (256-bit) byteswapping */ ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_src; } /* ...but we only need AES_BLOCK_SIZE bytes */ dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE; ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); e_src: ccp_free_data(&src, cmd_q); e_ctx: ccp_dm_free(&ctx); e_key: ccp_dm_free(&key); return ret; } static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_aes_engine *aes = &cmd->u.aes; struct ccp_dm_workarea key, ctx; struct ccp_data src, dst; struct ccp_op op; unsigned int dm_offset; bool in_place = false; int ret; if (aes->mode == CCP_AES_MODE_CMAC) 
return ccp_run_aes_cmac_cmd(cmd_q, cmd); if (!((aes->key_len == AES_KEYSIZE_128) || (aes->key_len == AES_KEYSIZE_192) || (aes->key_len == AES_KEYSIZE_256))) return -EINVAL; if (((aes->mode == CCP_AES_MODE_ECB) || (aes->mode == CCP_AES_MODE_CBC) || (aes->mode == CCP_AES_MODE_CFB)) && (aes->src_len & (AES_BLOCK_SIZE - 1))) return -EINVAL; if (!aes->key || !aes->src || !aes->dst) return -EINVAL; if (aes->mode != CCP_AES_MODE_ECB) { if (aes->iv_len != AES_BLOCK_SIZE) return -EINVAL; if (!aes->iv) return -EINVAL; } BUILD_BUG_ON(CCP_AES_KEY_KSB_COUNT != 1); BUILD_BUG_ON(CCP_AES_CTX_KSB_COUNT != 1); ret = -EIO; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = ccp_gen_jobid(cmd_q->ccp); op.ksb_key = cmd_q->ksb_key; op.ksb_ctx = cmd_q->ksb_ctx; op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1; op.u.aes.type = aes->type; op.u.aes.mode = aes->mode; op.u.aes.action = aes->action; /* All supported key sizes fit in a single (32-byte) KSB entry * and must be in little endian format. Use the 256-bit byte * swap passthru option to convert from big endian to little * endian. */ ret = ccp_init_dm_workarea(&key, cmd_q, CCP_AES_KEY_KSB_COUNT * CCP_KSB_BYTES, DMA_TO_DEVICE); if (ret) return ret; dm_offset = CCP_KSB_BYTES - aes->key_len; ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_key; } /* The AES context fits in a single (32-byte) KSB entry and * must be in little endian format. Use the 256-bit byte swap * passthru option to convert from big endian to little endian. 
*/ ret = ccp_init_dm_workarea(&ctx, cmd_q, CCP_AES_CTX_KSB_COUNT * CCP_KSB_BYTES, DMA_BIDIRECTIONAL); if (ret) goto e_key; if (aes->mode != CCP_AES_MODE_ECB) { /* Load the AES context - conver to LE */ dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE; ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_ctx; } } /* Prepare the input and output data workareas. For in-place * operations we need to set the dma direction to BIDIRECTIONAL * and copy the src workarea to the dst workarea. */ if (sg_virt(aes->src) == sg_virt(aes->dst)) in_place = true; ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len, AES_BLOCK_SIZE, in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); if (ret) goto e_ctx; if (in_place) { dst = src; } else { ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len, AES_BLOCK_SIZE, DMA_FROM_DEVICE); if (ret) goto e_src; } /* Send data to the CCP AES engine */ while (src.sg_wa.bytes_left) { ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true); if (!src.sg_wa.bytes_left) { op.eom = 1; /* Since we don't retrieve the AES context in ECB * mode we have to wait for the operation to complete * on the last piece of data */ if (aes->mode == CCP_AES_MODE_ECB) op.soc = 1; } ret = ccp_perform_aes(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } ccp_process_data(&src, &dst, &op); } if (aes->mode != CCP_AES_MODE_ECB) { /* Retrieve the AES context - convert from LE to BE using * 32-byte (256-bit) byteswapping */ ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } /* ...but we only need AES_BLOCK_SIZE bytes */ dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE; ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); } e_dst: if (!in_place) ccp_free_data(&dst, cmd_q); e_src: ccp_free_data(&src, cmd_q); e_ctx: 
ccp_dm_free(&ctx); e_key: ccp_dm_free(&key); return ret; } static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_xts_aes_engine *xts = &cmd->u.xts; struct ccp_dm_workarea key, ctx; struct ccp_data src, dst; struct ccp_op op; unsigned int unit_size, dm_offset; bool in_place = false; int ret; switch (xts->unit_size) { case CCP_XTS_AES_UNIT_SIZE_16: unit_size = 16; break; case CCP_XTS_AES_UNIT_SIZE_512: unit_size = 512; break; case CCP_XTS_AES_UNIT_SIZE_1024: unit_size = 1024; break; case CCP_XTS_AES_UNIT_SIZE_2048: unit_size = 2048; break; case CCP_XTS_AES_UNIT_SIZE_4096: unit_size = 4096; break; default: return -EINVAL; } if (xts->key_len != AES_KEYSIZE_128) return -EINVAL; if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1))) return -EINVAL; if (xts->iv_len != AES_BLOCK_SIZE) return -EINVAL; if (!xts->key || !xts->iv || !xts->src || !xts->dst) return -EINVAL; BUILD_BUG_ON(CCP_XTS_AES_KEY_KSB_COUNT != 1); BUILD_BUG_ON(CCP_XTS_AES_CTX_KSB_COUNT != 1); ret = -EIO; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = ccp_gen_jobid(cmd_q->ccp); op.ksb_key = cmd_q->ksb_key; op.ksb_ctx = cmd_q->ksb_ctx; op.init = 1; op.u.xts.action = xts->action; op.u.xts.unit_size = xts->unit_size; /* All supported key sizes fit in a single (32-byte) KSB entry * and must be in little endian format. Use the 256-bit byte * swap passthru option to convert from big endian to little * endian. 
*/ ret = ccp_init_dm_workarea(&key, cmd_q, CCP_XTS_AES_KEY_KSB_COUNT * CCP_KSB_BYTES, DMA_TO_DEVICE); if (ret) return ret; dm_offset = CCP_KSB_BYTES - AES_KEYSIZE_128; ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len); ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len); ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_key; } /* The AES context fits in a single (32-byte) KSB entry and * for XTS is already in little endian format so no byte swapping * is needed. */ ret = ccp_init_dm_workarea(&ctx, cmd_q, CCP_XTS_AES_CTX_KSB_COUNT * CCP_KSB_BYTES, DMA_BIDIRECTIONAL); if (ret) goto e_key; ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len); ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, CCP_PASSTHRU_BYTESWAP_NOOP); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_ctx; } /* Prepare the input and output data workareas. For in-place * operations we need to set the dma direction to BIDIRECTIONAL * and copy the src workarea to the dst workarea. */ if (sg_virt(xts->src) == sg_virt(xts->dst)) in_place = true; ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len, unit_size, in_place ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); if (ret) goto e_ctx; if (in_place) { dst = src; } else { ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len, unit_size, DMA_FROM_DEVICE); if (ret) goto e_src; } /* Send data to the CCP AES engine */ while (src.sg_wa.bytes_left) { ccp_prepare_data(&src, &dst, &op, unit_size, true); if (!src.sg_wa.bytes_left) op.eom = 1; ret = ccp_perform_xts_aes(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } ccp_process_data(&src, &dst, &op); } /* Retrieve the AES context - convert from LE to BE using * 32-byte (256-bit) byteswapping */ ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } /* ...but we only need AES_BLOCK_SIZE bytes */ dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE; ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len); e_dst: if (!in_place) ccp_free_data(&dst, cmd_q); e_src: ccp_free_data(&src, cmd_q); e_ctx: ccp_dm_free(&ctx); e_key: ccp_dm_free(&key); return ret; } static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_sha_engine *sha = &cmd->u.sha; struct ccp_dm_workarea ctx; struct ccp_data src; struct ccp_op op; int ret; if (sha->ctx_len != CCP_SHA_CTXSIZE) return -EINVAL; if (!sha->ctx) return -EINVAL; if (!sha->final && (sha->src_len & (CCP_SHA_BLOCKSIZE - 1))) return -EINVAL; if (!sha->src_len) { const u8 *sha_zero; /* Not final, just return */ if (!sha->final) return 0; /* CCP can't do a zero length sha operation so the caller * must buffer the data. */ if (sha->msg_bits) return -EINVAL; /* A sha operation for a message with a total length of zero, * return known result. 
*/ switch (sha->type) { case CCP_SHA_TYPE_1: sha_zero = ccp_sha1_zero; break; case CCP_SHA_TYPE_224: sha_zero = ccp_sha224_zero; break; case CCP_SHA_TYPE_256: sha_zero = ccp_sha256_zero; break; default: return -EINVAL; } scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0, sha->ctx_len, 1); return 0; } if (!sha->src) return -EINVAL; BUILD_BUG_ON(CCP_SHA_KSB_COUNT != 1); memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = ccp_gen_jobid(cmd_q->ccp); op.ksb_ctx = cmd_q->ksb_ctx; op.u.sha.type = sha->type; op.u.sha.msg_bits = sha->msg_bits; /* The SHA context fits in a single (32-byte) KSB entry and * must be in little endian format. Use the 256-bit byte swap * passthru option to convert from big endian to little endian. */ ret = ccp_init_dm_workarea(&ctx, cmd_q, CCP_SHA_KSB_COUNT * CCP_KSB_BYTES, DMA_BIDIRECTIONAL); if (ret) return ret; if (sha->first) { const __be32 *init; switch (sha->type) { case CCP_SHA_TYPE_1: init = ccp_sha1_init; break; case CCP_SHA_TYPE_224: init = ccp_sha224_init; break; case CCP_SHA_TYPE_256: init = ccp_sha256_init; break; default: ret = -EINVAL; goto e_ctx; } memcpy(ctx.address, init, CCP_SHA_CTXSIZE); } else { ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len); } ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_ctx; } /* Send data to the CCP SHA engine */ ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len, CCP_SHA_BLOCKSIZE, DMA_TO_DEVICE); if (ret) goto e_ctx; while (src.sg_wa.bytes_left) { ccp_prepare_data(&src, NULL, &op, CCP_SHA_BLOCKSIZE, false); if (sha->final && !src.sg_wa.bytes_left) op.eom = 1; ret = ccp_perform_sha(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_data; } ccp_process_data(&src, NULL, &op); } /* Retrieve the SHA context - convert from LE to BE using * 32-byte (256-bit) byteswapping to BE */ ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { 
cmd->engine_error = cmd_q->cmd_error; goto e_data; } ccp_get_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len); if (sha->final && sha->opad) { /* HMAC operation, recursively perform final SHA */ struct ccp_cmd hmac_cmd; struct scatterlist sg; u64 block_size, digest_size; u8 *hmac_buf; switch (sha->type) { case CCP_SHA_TYPE_1: block_size = SHA1_BLOCK_SIZE; digest_size = SHA1_DIGEST_SIZE; break; case CCP_SHA_TYPE_224: block_size = SHA224_BLOCK_SIZE; digest_size = SHA224_DIGEST_SIZE; break; case CCP_SHA_TYPE_256: block_size = SHA256_BLOCK_SIZE; digest_size = SHA256_DIGEST_SIZE; break; default: ret = -EINVAL; goto e_data; } if (sha->opad_len != block_size) { ret = -EINVAL; goto e_data; } hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL); if (!hmac_buf) { ret = -ENOMEM; goto e_data; } sg_init_one(&sg, hmac_buf, block_size + digest_size); scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0); memcpy(hmac_buf + block_size, ctx.address, digest_size); memset(&hmac_cmd, 0, sizeof(hmac_cmd)); hmac_cmd.engine = CCP_ENGINE_SHA; hmac_cmd.u.sha.type = sha->type; hmac_cmd.u.sha.ctx = sha->ctx; hmac_cmd.u.sha.ctx_len = sha->ctx_len; hmac_cmd.u.sha.src = &sg; hmac_cmd.u.sha.src_len = block_size + digest_size; hmac_cmd.u.sha.opad = NULL; hmac_cmd.u.sha.opad_len = 0; hmac_cmd.u.sha.first = 1; hmac_cmd.u.sha.final = 1; hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3; ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd); if (ret) cmd->engine_error = hmac_cmd.engine_error; kfree(hmac_buf); } e_data: ccp_free_data(&src, cmd_q); e_ctx: ccp_dm_free(&ctx); return ret; } static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_rsa_engine *rsa = &cmd->u.rsa; struct ccp_dm_workarea exp, src; struct ccp_data dst; struct ccp_op op; unsigned int ksb_count, i_len, o_len; int ret; if (rsa->key_size > CCP_RSA_MAX_WIDTH) return -EINVAL; if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst) return -EINVAL; /* The RSA modulus must precede the message being acted 
upon, so * it must be copied to a DMA area where the message and the * modulus can be concatenated. Therefore the input buffer * length required is twice the output buffer length (which * must be a multiple of 256-bits). */ o_len = ((rsa->key_size + 255) / 256) * 32; i_len = o_len * 2; ksb_count = o_len / CCP_KSB_BYTES; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = ccp_gen_jobid(cmd_q->ccp); op.ksb_key = ccp_alloc_ksb(cmd_q->ccp, ksb_count); if (!op.ksb_key) return -EIO; /* The RSA exponent may span multiple (32-byte) KSB entries and must * be in little endian format. Reverse copy each 32-byte chunk * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk) * and each byte within that chunk and do not perform any byte swap * operations on the passthru operation. */ ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE); if (ret) goto e_ksb; ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len, CCP_KSB_BYTES, false); ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key, CCP_PASSTHRU_BYTESWAP_NOOP); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_exp; } /* Concatenate the modulus and the message. Both the modulus and * the operands must be in little endian format. Since the input * is in big endian format it must be converted. 
*/ ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE); if (ret) goto e_exp; ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len, CCP_KSB_BYTES, false); src.address += o_len; /* Adjust the address for the copy operation */ ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len, CCP_KSB_BYTES, false); src.address -= o_len; /* Reset the address to original value */ /* Prepare the output area for the operation */ ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->mod_len, o_len, DMA_FROM_DEVICE); if (ret) goto e_src; op.soc = 1; op.src.u.dma.address = src.dma.address; op.src.u.dma.offset = 0; op.src.u.dma.length = i_len; op.dst.u.dma.address = dst.dm_wa.dma.address; op.dst.u.dma.offset = 0; op.dst.u.dma.length = o_len; op.u.rsa.mod_size = rsa->key_size; op.u.rsa.input_len = i_len; ret = ccp_perform_rsa(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } ccp_reverse_get_dm_area(&dst.dm_wa, rsa->dst, rsa->mod_len); e_dst: ccp_free_data(&dst, cmd_q); e_src: ccp_dm_free(&src); e_exp: ccp_dm_free(&exp); e_ksb: ccp_free_ksb(cmd_q->ccp, op.ksb_key, ksb_count); return ret; } static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_passthru_engine *pt = &cmd->u.passthru; struct ccp_dm_workarea mask; struct ccp_data src, dst; struct ccp_op op; bool in_place = false; unsigned int i; int ret; if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1))) return -EINVAL; if (!pt->src || !pt->dst) return -EINVAL; if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { if (pt->mask_len != CCP_PASSTHRU_MASKSIZE) return -EINVAL; if (!pt->mask) return -EINVAL; } BUILD_BUG_ON(CCP_PASSTHRU_KSB_COUNT != 1); memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = ccp_gen_jobid(cmd_q->ccp); if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { /* Load the mask */ op.ksb_key = cmd_q->ksb_key; ret = ccp_init_dm_workarea(&mask, cmd_q, CCP_PASSTHRU_KSB_COUNT * CCP_KSB_BYTES, DMA_TO_DEVICE); if (ret) return ret; ccp_set_dm_area(&mask, 0, pt->mask, 
0, pt->mask_len); ret = ccp_copy_to_ksb(cmd_q, &mask, op.jobid, op.ksb_key, CCP_PASSTHRU_BYTESWAP_NOOP); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_mask; } } /* Prepare the input and output data workareas. For in-place * operations we need to set the dma direction to BIDIRECTIONAL * and copy the src workarea to the dst workarea. */ if (sg_virt(pt->src) == sg_virt(pt->dst)) in_place = true; ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len, CCP_PASSTHRU_MASKSIZE, in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); if (ret) goto e_mask; if (in_place) { dst = src; } else { ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len, CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE); if (ret) goto e_src; } /* Send data to the CCP Passthru engine * Because the CCP engine works on a single source and destination * dma address at a time, each entry in the source scatterlist * (after the dma_map_sg call) must be less than or equal to the * (remaining) length in the destination scatterlist entry and the * length must be a multiple of CCP_PASSTHRU_BLOCKSIZE */ dst.sg_wa.sg_used = 0; for (i = 1; i <= src.sg_wa.dma_count; i++) { if (!dst.sg_wa.sg || (dst.sg_wa.sg->length < src.sg_wa.sg->length)) { ret = -EINVAL; goto e_dst; } if (i == src.sg_wa.dma_count) { op.eom = 1; op.soc = 1; } op.src.type = CCP_MEMTYPE_SYSTEM; op.src.u.dma.address = sg_dma_address(src.sg_wa.sg); op.src.u.dma.offset = 0; op.src.u.dma.length = sg_dma_len(src.sg_wa.sg); op.dst.type = CCP_MEMTYPE_SYSTEM; op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg); op.dst.u.dma.offset = dst.sg_wa.sg_used; op.dst.u.dma.length = op.src.u.dma.length; ret = ccp_perform_passthru(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } dst.sg_wa.sg_used += src.sg_wa.sg->length; if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) { dst.sg_wa.sg = sg_next(dst.sg_wa.sg); dst.sg_wa.sg_used = 0; } src.sg_wa.sg = sg_next(src.sg_wa.sg); } e_dst: if (!in_place) ccp_free_data(&dst, cmd_q); e_src: ccp_free_data(&src, cmd_q); 
e_mask: if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) ccp_dm_free(&mask); return ret; } static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_ecc_engine *ecc = &cmd->u.ecc; struct ccp_dm_workarea src, dst; struct ccp_op op; int ret; u8 *save; if (!ecc->u.mm.operand_1 || (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES)) return -EINVAL; if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) if (!ecc->u.mm.operand_2 || (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES)) return -EINVAL; if (!ecc->u.mm.result || (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES)) return -EINVAL; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = ccp_gen_jobid(cmd_q->ccp); /* Concatenate the modulus and the operands. Both the modulus and * the operands must be in little endian format. Since the input * is in big endian format it must be converted and placed in a * fixed length buffer. */ ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE, DMA_TO_DEVICE); if (ret) return ret; /* Save the workarea address since it is updated in order to perform * the concatenation */ save = src.address; /* Copy the ECC modulus */ ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len, CCP_ECC_OPERAND_SIZE, false); src.address += CCP_ECC_OPERAND_SIZE; /* Copy the first operand */ ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1, ecc->u.mm.operand_1_len, CCP_ECC_OPERAND_SIZE, false); src.address += CCP_ECC_OPERAND_SIZE; if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) { /* Copy the second operand */ ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2, ecc->u.mm.operand_2_len, CCP_ECC_OPERAND_SIZE, false); src.address += CCP_ECC_OPERAND_SIZE; } /* Restore the workarea address */ src.address = save; /* Prepare the output area for the operation */ ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE, DMA_FROM_DEVICE); if (ret) goto e_src; op.soc = 1; op.src.u.dma.address = src.dma.address; op.src.u.dma.offset = 0; op.src.u.dma.length = src.length; 
op.dst.u.dma.address = dst.dma.address; op.dst.u.dma.offset = 0; op.dst.u.dma.length = dst.length; op.u.ecc.function = cmd->u.ecc.function; ret = ccp_perform_ecc(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } ecc->ecc_result = le16_to_cpup( (const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET)); if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) { ret = -EIO; goto e_dst; } /* Save the ECC result */ ccp_reverse_get_dm_area(&dst, ecc->u.mm.result, CCP_ECC_MODULUS_BYTES); e_dst: ccp_dm_free(&dst); e_src: ccp_dm_free(&src); return ret; } static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_ecc_engine *ecc = &cmd->u.ecc; struct ccp_dm_workarea src, dst; struct ccp_op op; int ret; u8 *save; if (!ecc->u.pm.point_1.x || (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) || !ecc->u.pm.point_1.y || (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES)) return -EINVAL; if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) { if (!ecc->u.pm.point_2.x || (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) || !ecc->u.pm.point_2.y || (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES)) return -EINVAL; } else { if (!ecc->u.pm.domain_a || (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES)) return -EINVAL; if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) if (!ecc->u.pm.scalar || (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES)) return -EINVAL; } if (!ecc->u.pm.result.x || (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) || !ecc->u.pm.result.y || (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES)) return -EINVAL; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = ccp_gen_jobid(cmd_q->ccp); /* Concatenate the modulus and the operands. Both the modulus and * the operands must be in little endian format. Since the input * is in big endian format it must be converted and placed in a * fixed length buffer. 
*/ ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE, DMA_TO_DEVICE); if (ret) return ret; /* Save the workarea address since it is updated in order to perform * the concatenation */ save = src.address; /* Copy the ECC modulus */ ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len, CCP_ECC_OPERAND_SIZE, false); src.address += CCP_ECC_OPERAND_SIZE; /* Copy the first point X and Y coordinate */ ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x, ecc->u.pm.point_1.x_len, CCP_ECC_OPERAND_SIZE, false); src.address += CCP_ECC_OPERAND_SIZE; ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y, ecc->u.pm.point_1.y_len, CCP_ECC_OPERAND_SIZE, false); src.address += CCP_ECC_OPERAND_SIZE; /* Set the first point Z coordianate to 1 */ *src.address = 0x01; src.address += CCP_ECC_OPERAND_SIZE; if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) { /* Copy the second point X and Y coordinate */ ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x, ecc->u.pm.point_2.x_len, CCP_ECC_OPERAND_SIZE, false); src.address += CCP_ECC_OPERAND_SIZE; ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y, ecc->u.pm.point_2.y_len, CCP_ECC_OPERAND_SIZE, false); src.address += CCP_ECC_OPERAND_SIZE; /* Set the second point Z coordianate to 1 */ *src.address = 0x01; src.address += CCP_ECC_OPERAND_SIZE; } else { /* Copy the Domain "a" parameter */ ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a, ecc->u.pm.domain_a_len, CCP_ECC_OPERAND_SIZE, false); src.address += CCP_ECC_OPERAND_SIZE; if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) { /* Copy the scalar value */ ccp_reverse_set_dm_area(&src, ecc->u.pm.scalar, ecc->u.pm.scalar_len, CCP_ECC_OPERAND_SIZE, false); src.address += CCP_ECC_OPERAND_SIZE; } } /* Restore the workarea address */ src.address = save; /* Prepare the output area for the operation */ ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE, DMA_FROM_DEVICE); if (ret) goto e_src; op.soc = 1; op.src.u.dma.address = src.dma.address; op.src.u.dma.offset = 0; op.src.u.dma.length = 
src.length; op.dst.u.dma.address = dst.dma.address; op.dst.u.dma.offset = 0; op.dst.u.dma.length = dst.length; op.u.ecc.function = cmd->u.ecc.function; ret = ccp_perform_ecc(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } ecc->ecc_result = le16_to_cpup( (const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET)); if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) { ret = -EIO; goto e_dst; } /* Save the workarea address since it is updated as we walk through * to copy the point math result */ save = dst.address; /* Save the ECC result X and Y coordinates */ ccp_reverse_get_dm_area(&dst, ecc->u.pm.result.x, CCP_ECC_MODULUS_BYTES); dst.address += CCP_ECC_OUTPUT_SIZE; ccp_reverse_get_dm_area(&dst, ecc->u.pm.result.y, CCP_ECC_MODULUS_BYTES); dst.address += CCP_ECC_OUTPUT_SIZE; /* Restore the workarea address */ dst.address = save; e_dst: ccp_dm_free(&dst); e_src: ccp_dm_free(&src); return ret; } static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_ecc_engine *ecc = &cmd->u.ecc; ecc->ecc_result = 0; if (!ecc->mod || (ecc->mod_len > CCP_ECC_MODULUS_BYTES)) return -EINVAL; switch (ecc->function) { case CCP_ECC_FUNCTION_MMUL_384BIT: case CCP_ECC_FUNCTION_MADD_384BIT: case CCP_ECC_FUNCTION_MINV_384BIT: return ccp_run_ecc_mm_cmd(cmd_q, cmd); case CCP_ECC_FUNCTION_PADD_384BIT: case CCP_ECC_FUNCTION_PMUL_384BIT: case CCP_ECC_FUNCTION_PDBL_384BIT: return ccp_run_ecc_pm_cmd(cmd_q, cmd); default: return -EINVAL; } } int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { int ret; cmd->engine_error = 0; cmd_q->cmd_error = 0; cmd_q->int_rcvd = 0; cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status)); switch (cmd->engine) { case CCP_ENGINE_AES: ret = ccp_run_aes_cmd(cmd_q, cmd); break; case CCP_ENGINE_XTS_AES_128: ret = ccp_run_xts_aes_cmd(cmd_q, cmd); break; case CCP_ENGINE_SHA: ret = ccp_run_sha_cmd(cmd_q, cmd); break; case CCP_ENGINE_RSA: ret = ccp_run_rsa_cmd(cmd_q, cmd); break; case CCP_ENGINE_PASSTHRU: ret = 
ccp_run_passthru_cmd(cmd_q, cmd); break; case CCP_ENGINE_ECC: ret = ccp_run_ecc_cmd(cmd_q, cmd); break; default: ret = -EINVAL; } return ret; }
gpl-2.0
nexusexperience/nx_kernel_xiaomi_msm8916
drivers/platform/msm/ipa/ipa_flt.c
396
37215
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "ipa_i.h" #define IPA_FLT_TABLE_WORD_SIZE (4) #define IPA_FLT_ENTRY_MEMORY_ALLIGNMENT (0x3) #define IPA_FLT_BIT_MASK (0x1) #define IPA_FLT_TABLE_INDEX_NOT_FOUND (-1) #define IPA_FLT_STATUS_OF_ADD_FAILED (-1) #define IPA_FLT_STATUS_OF_DEL_FAILED (-1) #define IPA_FLT_STATUS_OF_MDFY_FAILED (-1) static int ipa_generate_hw_rule_from_eq( const struct ipa_ipfltri_rule_eq *attrib, u8 **buf) { int num_offset_meq_32 = attrib->num_offset_meq_32; int num_ihl_offset_range_16 = attrib->num_ihl_offset_range_16; int num_ihl_offset_meq_32 = attrib->num_ihl_offset_meq_32; int num_offset_meq_128 = attrib->num_offset_meq_128; int i; if (attrib->tos_eq_present) { *buf = ipa_write_8(attrib->tos_eq, *buf); *buf = ipa_pad_to_32(*buf); } if (attrib->protocol_eq_present) { *buf = ipa_write_8(attrib->protocol_eq, *buf); *buf = ipa_pad_to_32(*buf); } if (num_offset_meq_32) { *buf = ipa_write_8(attrib->offset_meq_32[0].offset, *buf); *buf = ipa_write_32(attrib->offset_meq_32[0].mask, *buf); *buf = ipa_write_32(attrib->offset_meq_32[0].value, *buf); *buf = ipa_pad_to_32(*buf); num_offset_meq_32--; } if (num_offset_meq_32) { *buf = ipa_write_8(attrib->offset_meq_32[1].offset, *buf); *buf = ipa_write_32(attrib->offset_meq_32[1].mask, *buf); *buf = ipa_write_32(attrib->offset_meq_32[1].value, *buf); *buf = ipa_pad_to_32(*buf); num_offset_meq_32--; } if (num_ihl_offset_range_16) { *buf = ipa_write_8(attrib->ihl_offset_range_16[0].offset, *buf); *buf = 
ipa_write_16(attrib->ihl_offset_range_16[0].range_high, *buf); *buf = ipa_write_16(attrib->ihl_offset_range_16[0].range_low, *buf); *buf = ipa_pad_to_32(*buf); num_ihl_offset_range_16--; } if (num_ihl_offset_range_16) { *buf = ipa_write_8(attrib->ihl_offset_range_16[1].offset, *buf); *buf = ipa_write_16(attrib->ihl_offset_range_16[1].range_high, *buf); *buf = ipa_write_16(attrib->ihl_offset_range_16[1].range_low, *buf); *buf = ipa_pad_to_32(*buf); num_ihl_offset_range_16--; } if (attrib->ihl_offset_eq_16_present) { *buf = ipa_write_8(attrib->ihl_offset_eq_16.offset, *buf); *buf = ipa_write_16(attrib->ihl_offset_eq_16.value, *buf); *buf = ipa_pad_to_32(*buf); } if (attrib->ihl_offset_eq_32_present) { *buf = ipa_write_8(attrib->ihl_offset_eq_32.offset, *buf); *buf = ipa_write_32(attrib->ihl_offset_eq_32.value, *buf); *buf = ipa_pad_to_32(*buf); } if (num_ihl_offset_meq_32) { *buf = ipa_write_8(attrib->ihl_offset_meq_32[0].offset, *buf); *buf = ipa_write_32(attrib->ihl_offset_meq_32[0].mask, *buf); *buf = ipa_write_32(attrib->ihl_offset_meq_32[0].value, *buf); *buf = ipa_pad_to_32(*buf); num_ihl_offset_meq_32--; } /* TODO check layout of 16 byte mask and value */ if (num_offset_meq_128) { *buf = ipa_write_8(attrib->offset_meq_128[0].offset, *buf); for (i = 0; i < 16; i++) *buf = ipa_write_8(attrib->offset_meq_128[0].mask[i], *buf); for (i = 0; i < 16; i++) *buf = ipa_write_8(attrib->offset_meq_128[0].value[i], *buf); *buf = ipa_pad_to_32(*buf); num_offset_meq_128--; } if (num_offset_meq_128) { *buf = ipa_write_8(attrib->offset_meq_128[1].offset, *buf); for (i = 0; i < 16; i++) *buf = ipa_write_8(attrib->offset_meq_128[1].mask[i], *buf); for (i = 0; i < 16; i++) *buf = ipa_write_8(attrib->offset_meq_128[1].value[i], *buf); *buf = ipa_pad_to_32(*buf); num_offset_meq_128--; } if (attrib->tc_eq_present) { *buf = ipa_write_8(attrib->tc_eq, *buf); *buf = ipa_pad_to_32(*buf); } if (attrib->fl_eq_present) { *buf = ipa_write_32(attrib->fl_eq, *buf); *buf = ipa_pad_to_32(*buf); 
} if (num_ihl_offset_meq_32) { *buf = ipa_write_8(attrib->ihl_offset_meq_32[1].offset, *buf); *buf = ipa_write_32(attrib->ihl_offset_meq_32[1].mask, *buf); *buf = ipa_write_32(attrib->ihl_offset_meq_32[1].value, *buf); *buf = ipa_pad_to_32(*buf); num_ihl_offset_meq_32--; } if (attrib->metadata_meq32_present) { *buf = ipa_write_8(attrib->metadata_meq32.offset, *buf); *buf = ipa_write_32(attrib->metadata_meq32.mask, *buf); *buf = ipa_write_32(attrib->metadata_meq32.value, *buf); *buf = ipa_pad_to_32(*buf); } if (attrib->ipv4_frag_eq_present) *buf = ipa_pad_to_32(*buf); return 0; } /** * ipa_generate_flt_hw_rule() - generates the filtering hardware rule * @ip: the ip address family type * @entry: routing entry * @buf: output buffer, buf == NULL means * caller wants to know the size of the rule as seen * by HW so they did not pass a valid buffer, we will use a * scratch buffer instead. * With this scheme we are going to * generate the rule twice, once to know size using scratch * buffer and second to write the rule to the actual caller * supplied buffer which is of required size * * Returns: 0 on success, negative on failure * * caller needs to hold any needed locks to ensure integrity * */ static int ipa_generate_flt_hw_rule(enum ipa_ip_type ip, struct ipa_flt_entry *entry, u8 *buf) { struct ipa_flt_rule_hw_hdr *hdr; const struct ipa_flt_rule *rule = (const struct ipa_flt_rule *)&entry->rule; u16 en_rule = 0; u32 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE/4]; u8 *start; if (buf == NULL) { memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE); buf = (u8 *)tmp; } start = buf; hdr = (struct ipa_flt_rule_hw_hdr *)buf; hdr->u.hdr.action = entry->rule.action; hdr->u.hdr.retain_hdr = entry->rule.retain_hdr; hdr->u.hdr.to_uc = entry->rule.to_uc; if (entry->rt_tbl) hdr->u.hdr.rt_tbl_idx = entry->rt_tbl->idx; else hdr->u.hdr.rt_tbl_idx = entry->rule.rt_tbl_idx; hdr->u.hdr.rsvd = 0; buf += sizeof(struct ipa_flt_rule_hw_hdr); if (rule->eq_attrib_type) { if 
(ipa_generate_hw_rule_from_eq(&rule->eq_attrib, &buf)) { IPAERR("fail to generate hw rule\n"); return -EPERM; } en_rule = rule->eq_attrib.rule_eq_bitmap; } else { if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) { IPAERR("fail to generate hw rule\n"); return -EPERM; } } IPADBG("en_rule 0x%x, action=%d, rt_idx=%d, uc=%d, retain_hdr=%d\n", en_rule, hdr->u.hdr.action, hdr->u.hdr.rt_tbl_idx, hdr->u.hdr.to_uc, hdr->u.hdr.retain_hdr); hdr->u.hdr.en_rule = en_rule; ipa_write_32(hdr->u.word, (u8 *)hdr); if (entry->hw_len == 0) { entry->hw_len = buf - start; } else if (entry->hw_len != (buf - start)) { IPAERR("hw_len differs b/w passes passed=%x calc=%td\n", entry->hw_len, (buf - start)); return -EPERM; } return 0; } /** * ipa_get_flt_hw_tbl_size() - returns the size of HW filtering table * @ip: the ip address family type * @hdr_sz: header size * * Returns: 0 on success, negative on failure * * caller needs to hold any needed locks to ensure integrity * */ static int ipa_get_flt_hw_tbl_size(enum ipa_ip_type ip, u32 *hdr_sz) { struct ipa_flt_tbl *tbl; struct ipa_flt_entry *entry; u32 total_sz = 0; u32 rule_set_sz; int i; *hdr_sz = 0; tbl = &ipa_ctx->glob_flt_tbl[ip]; rule_set_sz = 0; list_for_each_entry(entry, &tbl->head_flt_rule_list, link) { if (ipa_generate_flt_hw_rule(ip, entry, NULL)) { IPAERR("failed to find HW FLT rule size\n"); return -EPERM; } IPADBG("glob ip %d len %d\n", ip, entry->hw_len); rule_set_sz += entry->hw_len; } if (rule_set_sz) { tbl->sz = rule_set_sz + IPA_FLT_TABLE_WORD_SIZE; /* this rule-set uses a word in header block */ *hdr_sz += IPA_FLT_TABLE_WORD_SIZE; if (!tbl->in_sys) { /* add the terminator */ total_sz += (rule_set_sz + IPA_FLT_TABLE_WORD_SIZE); total_sz = (total_sz + IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) & ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT; } } for (i = 0; i < IPA_NUM_PIPES; i++) { tbl = &ipa_ctx->flt_tbl[i][ip]; rule_set_sz = 0; list_for_each_entry(entry, &tbl->head_flt_rule_list, link) { if (ipa_generate_flt_hw_rule(ip, entry, NULL)) 
{ IPAERR("failed to find HW FLT rule size\n"); return -EPERM; } IPADBG("pipe %d len %d\n", i, entry->hw_len); rule_set_sz += entry->hw_len; } if (rule_set_sz) { tbl->sz = rule_set_sz + IPA_FLT_TABLE_WORD_SIZE; /* this rule-set uses a word in header block */ *hdr_sz += IPA_FLT_TABLE_WORD_SIZE; if (!tbl->in_sys) { /* add the terminator */ total_sz += (rule_set_sz + IPA_FLT_TABLE_WORD_SIZE); total_sz = (total_sz + IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) & ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT; } } } *hdr_sz += IPA_FLT_TABLE_WORD_SIZE; total_sz += *hdr_sz; IPADBG("FLT HW TBL SZ %d HDR SZ %d IP %d\n", total_sz, *hdr_sz, ip); return total_sz; } static int ipa_generate_flt_hw_tbl_common(enum ipa_ip_type ip, u8 *base, u8 *hdr, u32 body_start_offset, u8 *hdr2, u32 *hdr_top) { struct ipa_flt_tbl *tbl; struct ipa_flt_entry *entry; int i; u32 offset; u8 *body; struct ipa_mem_buffer flt_tbl_mem; u8 *ftbl_membody; *hdr_top = 0; body = base; #define IPA_WRITE_FLT_HDR(idx, val) { \ if (idx <= 5) { \ *((u32 *)hdr + 1 + idx) = val; \ } else if (idx >= 6 && idx <= 10) { \ WARN_ON(1); \ } else if (idx >= 11 && idx <= 19) { \ *((u32 *)hdr2 + idx - 11) = val; \ } else { \ WARN_ON(1); \ } \ } tbl = &ipa_ctx->glob_flt_tbl[ip]; if (!list_empty(&tbl->head_flt_rule_list)) { *hdr_top |= IPA_FLT_BIT_MASK; if (!tbl->in_sys) { offset = body - base + body_start_offset; if (offset & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) { IPAERR("offset is not word multiple %d\n", offset); goto proc_err; } offset &= ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT; /* rule is at an offset from base */ offset |= IPA_FLT_BIT_MASK; if (hdr2) *(u32 *)hdr = offset; else hdr = ipa_write_32(offset, hdr); /* generate the rule-set */ list_for_each_entry(entry, &tbl->head_flt_rule_list, link) { if (ipa_generate_flt_hw_rule(ip, entry, body)) { IPAERR("failed to gen HW FLT rule\n"); goto proc_err; } body += entry->hw_len; } /* write the rule-set terminator */ body = ipa_write_32(0, body); if ((long)body & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) /* advance body to 
next word boundary */ body = body + (IPA_FLT_TABLE_WORD_SIZE - ((long)body & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT)); } else { WARN_ON(tbl->sz == 0); /* allocate memory for the flt tbl */ flt_tbl_mem.size = tbl->sz; flt_tbl_mem.base = dma_alloc_coherent(ipa_ctx->pdev, flt_tbl_mem.size, &flt_tbl_mem.phys_base, GFP_KERNEL); if (!flt_tbl_mem.base) { IPAERR("fail to alloc DMA buff of size %d\n", flt_tbl_mem.size); WARN_ON(1); goto proc_err; } WARN_ON(flt_tbl_mem.phys_base & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT); ftbl_membody = flt_tbl_mem.base; memset(flt_tbl_mem.base, 0, flt_tbl_mem.size); if (hdr2) *(u32 *)hdr = flt_tbl_mem.phys_base; else hdr = ipa_write_32(flt_tbl_mem.phys_base, hdr); /* generate the rule-set */ list_for_each_entry(entry, &tbl->head_flt_rule_list, link) { if (ipa_generate_flt_hw_rule(ip, entry, ftbl_membody)) { IPAERR("failed to gen HW FLT rule\n"); WARN_ON(1); } ftbl_membody += entry->hw_len; } /* write the rule-set terminator */ ftbl_membody = ipa_write_32(0, ftbl_membody); if (tbl->curr_mem.phys_base) { WARN_ON(tbl->prev_mem.phys_base); tbl->prev_mem = tbl->curr_mem; } tbl->curr_mem = flt_tbl_mem; } } for (i = 0; i < IPA_NUM_PIPES; i++) { tbl = &ipa_ctx->flt_tbl[i][ip]; if (!list_empty(&tbl->head_flt_rule_list)) { /* pipe "i" is at bit "i+1" */ *hdr_top |= (1 << (i + 1)); if (!tbl->in_sys) { offset = body - base + body_start_offset; if (offset & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) { IPAERR("ofst is not word multiple %d\n", offset); goto proc_err; } offset &= ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT; /* rule is at an offset from base */ offset |= IPA_FLT_BIT_MASK; if (hdr2) IPA_WRITE_FLT_HDR(i, offset) else hdr = ipa_write_32(offset, hdr); /* generate the rule-set */ list_for_each_entry(entry, &tbl->head_flt_rule_list, link) { if (ipa_generate_flt_hw_rule(ip, entry, body)) { IPAERR("fail gen FLT rule\n"); goto proc_err; } body += entry->hw_len; } /* write the rule-set terminator */ body = ipa_write_32(0, body); if ((long)body & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) /* 
advance body to next word boundary */ body = body + (IPA_FLT_TABLE_WORD_SIZE - ((long)body & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT)); } else { WARN_ON(tbl->sz == 0); /* allocate memory for the flt tbl */ flt_tbl_mem.size = tbl->sz; flt_tbl_mem.base = dma_alloc_coherent(ipa_ctx->pdev, flt_tbl_mem.size, &flt_tbl_mem.phys_base, GFP_KERNEL); if (!flt_tbl_mem.base) { IPAERR("fail alloc DMA buff size %d\n", flt_tbl_mem.size); WARN_ON(1); goto proc_err; } WARN_ON(flt_tbl_mem.phys_base & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT); ftbl_membody = flt_tbl_mem.base; memset(flt_tbl_mem.base, 0, flt_tbl_mem.size); if (hdr2) IPA_WRITE_FLT_HDR(i, flt_tbl_mem.phys_base) else hdr = ipa_write_32( flt_tbl_mem.phys_base, hdr); /* generate the rule-set */ list_for_each_entry(entry, &tbl->head_flt_rule_list, link) { if (ipa_generate_flt_hw_rule(ip, entry, ftbl_membody)) { IPAERR("fail gen FLT rule\n"); WARN_ON(1); } ftbl_membody += entry->hw_len; } /* write the rule-set terminator */ ftbl_membody = ipa_write_32(0, ftbl_membody); if (tbl->curr_mem.phys_base) { WARN_ON(tbl->prev_mem.phys_base); tbl->prev_mem = tbl->curr_mem; } tbl->curr_mem = flt_tbl_mem; } } } return 0; proc_err: return -EPERM; } /** * ipa_generate_flt_hw_tbl() - generates the filtering hardware table * @ip: [in] the ip address family type * @mem: [out] buffer to put the filtering table * * Returns: 0 on success, negative on failure */ static int ipa_generate_flt_hw_tbl_v1(enum ipa_ip_type ip, struct ipa_mem_buffer *mem) { u32 hdr_top = 0; u32 hdr_sz; u8 *hdr; u8 *body; u8 *base; mem->size = ipa_get_flt_hw_tbl_size(ip, &hdr_sz); mem->size = IPA_HW_TABLE_ALIGNMENT(mem->size); if (mem->size == 0) { IPAERR("flt tbl empty ip=%d\n", ip); goto error; } mem->base = dma_alloc_coherent(ipa_ctx->pdev, mem->size, &mem->phys_base, GFP_KERNEL); if (!mem->base) { IPAERR("fail to alloc DMA buff of size %d\n", mem->size); goto error; } memset(mem->base, 0, mem->size); /* build the flt tbl in the DMA buffer to submit to IPA HW */ base = hdr = (u8 
*)mem->base; body = base + hdr_sz; /* write a dummy header to move cursor */ hdr = ipa_write_32(hdr_top, hdr); if (ipa_generate_flt_hw_tbl_common(ip, body, hdr, hdr_sz, 0, &hdr_top)) { IPAERR("fail to generate FLT HW table\n"); goto proc_err; } /* now write the hdr_top */ ipa_write_32(hdr_top, base); IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size); return 0; proc_err: dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base); error: return -EPERM; } static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip) { struct ipa_flt_tbl *tbl; int i; tbl = &ipa_ctx->glob_flt_tbl[ip]; if (tbl->prev_mem.phys_base) { IPADBG("reaping glob flt tbl (prev) ip=%d\n", ip); dma_free_coherent(ipa_ctx->pdev, tbl->prev_mem.size, tbl->prev_mem.base, tbl->prev_mem.phys_base); memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem)); } if (list_empty(&tbl->head_flt_rule_list)) { if (tbl->curr_mem.phys_base) { IPADBG("reaping glob flt tbl (curr) ip=%d\n", ip); dma_free_coherent(ipa_ctx->pdev, tbl->curr_mem.size, tbl->curr_mem.base, tbl->curr_mem.phys_base); memset(&tbl->curr_mem, 0, sizeof(tbl->curr_mem)); } } for (i = 0; i < IPA_NUM_PIPES; i++) { tbl = &ipa_ctx->flt_tbl[i][ip]; if (tbl->prev_mem.phys_base) { IPADBG("reaping flt tbl (prev) pipe=%d ip=%d\n", i, ip); dma_free_coherent(ipa_ctx->pdev, tbl->prev_mem.size, tbl->prev_mem.base, tbl->prev_mem.phys_base); memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem)); } if (list_empty(&tbl->head_flt_rule_list)) { if (tbl->curr_mem.phys_base) { IPADBG("reaping flt tbl (curr) pipe=%d ip=%d\n", i, ip); dma_free_coherent(ipa_ctx->pdev, tbl->curr_mem.size, tbl->curr_mem.base, tbl->curr_mem.phys_base); memset(&tbl->curr_mem, 0, sizeof(tbl->curr_mem)); } } } } int __ipa_commit_flt_v1(enum ipa_ip_type ip) { struct ipa_desc desc = { 0 }; struct ipa_mem_buffer *mem; void *cmd; struct ipa_ip_v4_filter_init *v4; struct ipa_ip_v6_filter_init *v6; u16 avail; u16 size; mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL); if (!mem) { IPAERR("failed to 
alloc memory object\n"); goto fail_alloc_mem; } if (ip == IPA_IP_v4) { avail = ipa_ctx->ip4_flt_tbl_lcl ? IPA_MEM_v1_RAM_V4_FLT_SIZE : IPA_MEM_PART(v4_flt_size_ddr); size = sizeof(struct ipa_ip_v4_filter_init); } else { avail = ipa_ctx->ip6_flt_tbl_lcl ? IPA_MEM_v1_RAM_V6_FLT_SIZE : IPA_MEM_PART(v6_flt_size_ddr); size = sizeof(struct ipa_ip_v6_filter_init); } cmd = kmalloc(size, GFP_KERNEL); if (!cmd) { IPAERR("failed to alloc immediate command object\n"); goto fail_alloc_cmd; } if (ipa_generate_flt_hw_tbl_v1(ip, mem)) { IPAERR("fail to generate FLT HW TBL ip %d\n", ip); goto fail_hw_tbl_gen; } if (mem->size > avail) { IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail); goto fail_send_cmd; } if (ip == IPA_IP_v4) { v4 = (struct ipa_ip_v4_filter_init *)cmd; desc.opcode = IPA_IP_V4_FILTER_INIT; v4->ipv4_rules_addr = mem->phys_base; v4->size_ipv4_rules = mem->size; v4->ipv4_addr = IPA_MEM_v1_RAM_V4_FLT_OFST; } else { v6 = (struct ipa_ip_v6_filter_init *)cmd; desc.opcode = IPA_IP_V6_FILTER_INIT; v6->ipv6_rules_addr = mem->phys_base; v6->size_ipv6_rules = mem->size; v6->ipv6_addr = IPA_MEM_v1_RAM_V6_FLT_OFST; } desc.pyld = cmd; desc.len = size; desc.type = IPA_IMM_CMD_DESC; IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size); if (ipa_send_cmd(1, &desc)) { IPAERR("fail to send immediate command\n"); goto fail_send_cmd; } __ipa_reap_sys_flt_tbls(ip); dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base); kfree(cmd); kfree(mem); return 0; fail_send_cmd: if (mem->phys_base) dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base); fail_hw_tbl_gen: kfree(cmd); fail_alloc_cmd: kfree(mem); fail_alloc_mem: return -EPERM; } static int ipa_generate_flt_hw_tbl_v2(enum ipa_ip_type ip, struct ipa_mem_buffer *mem, struct ipa_mem_buffer *head1, struct ipa_mem_buffer *head2) { int i; u32 hdr_sz; int num_words; u32 *entr; u32 body_start_offset; u32 hdr_top; if (ip == IPA_IP_v4) body_start_offset = IPA_MEM_PART(apps_v4_flt_ofst) - 
IPA_MEM_PART(v4_flt_ofst); else body_start_offset = IPA_MEM_PART(apps_v6_flt_ofst) - IPA_MEM_PART(v6_flt_ofst); num_words = 7; head1->size = num_words * 4; head1->base = dma_alloc_coherent(ipa_ctx->pdev, head1->size, &head1->phys_base, GFP_KERNEL); if (!head1->base) { IPAERR("fail to alloc DMA buff of size %d\n", head1->size); goto err; } entr = (u32 *)head1->base; for (i = 0; i < num_words; i++) { *entr = ipa_ctx->empty_rt_tbl_mem.phys_base; entr++; } num_words = 9; head2->size = num_words * 4; head2->base = dma_alloc_coherent(ipa_ctx->pdev, head2->size, &head2->phys_base, GFP_KERNEL); if (!head2->base) { IPAERR("fail to alloc DMA buff of size %d\n", head2->size); goto head_err; } entr = (u32 *)head2->base; for (i = 0; i < num_words; i++) { *entr = ipa_ctx->empty_rt_tbl_mem.phys_base; entr++; } mem->size = ipa_get_flt_hw_tbl_size(ip, &hdr_sz); mem->size -= hdr_sz; mem->size = IPA_HW_TABLE_ALIGNMENT(mem->size); if (mem->size) { mem->base = dma_alloc_coherent(ipa_ctx->pdev, mem->size, &mem->phys_base, GFP_KERNEL); if (!mem->base) { IPAERR("fail to alloc DMA buff of size %d\n", mem->size); goto body_err; } memset(mem->base, 0, mem->size); } if (ipa_generate_flt_hw_tbl_common(ip, mem->base, head1->base, body_start_offset, head2->base, &hdr_top)) { IPAERR("fail to generate FLT HW table\n"); goto proc_err; } IPADBG("HEAD1\n"); IPA_DUMP_BUFF(head1->base, head1->phys_base, head1->size); IPADBG("HEAD2\n"); IPA_DUMP_BUFF(head2->base, head2->phys_base, head2->size); if (mem->size) { IPADBG("BODY\n"); IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size); } return 0; proc_err: if (mem->size) dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base); body_err: dma_free_coherent(ipa_ctx->pdev, head2->size, head2->base, head2->phys_base); head_err: dma_free_coherent(ipa_ctx->pdev, head1->size, head1->base, head1->phys_base); err: return -EPERM; } int __ipa_commit_flt_v2(enum ipa_ip_type ip) { struct ipa_desc *desc; struct ipa_hw_imm_cmd_dma_shared_mem *cmd; struct 
ipa_mem_buffer body; struct ipa_mem_buffer head1; struct ipa_mem_buffer head2; int rc = 0; u32 local_addrb; u32 local_addrh; bool lcl; int num_desc = 0; int i; u16 avail; desc = kzalloc(16 * sizeof(*desc), GFP_ATOMIC); if (desc == NULL) { IPAERR("fail to alloc desc blob ip %d\n", ip); rc = -ENOMEM; goto fail_desc; } cmd = kzalloc(16 * sizeof(*cmd), GFP_ATOMIC); if (cmd == NULL) { IPAERR("fail to alloc cmd blob ip %d\n", ip); rc = -ENOMEM; goto fail_imm; } if (ip == IPA_IP_v4) { avail = ipa_ctx->ip4_flt_tbl_lcl ? IPA_MEM_PART(apps_v4_flt_size) : IPA_MEM_PART(v4_flt_size_ddr); local_addrh = ipa_ctx->smem_restricted_bytes + IPA_MEM_PART(v4_flt_ofst) + 4; local_addrb = ipa_ctx->smem_restricted_bytes + IPA_MEM_PART(apps_v4_flt_ofst); lcl = ipa_ctx->ip4_flt_tbl_lcl; } else { avail = ipa_ctx->ip6_flt_tbl_lcl ? IPA_MEM_PART(apps_v6_flt_size) : IPA_MEM_PART(v6_flt_size_ddr); local_addrh = ipa_ctx->smem_restricted_bytes + IPA_MEM_PART(v6_flt_ofst) + 4; local_addrb = ipa_ctx->smem_restricted_bytes + IPA_MEM_PART(apps_v6_flt_ofst); lcl = ipa_ctx->ip6_flt_tbl_lcl; } if (ipa_generate_flt_hw_tbl_v2(ip, &body, &head1, &head2)) { IPAERR("fail to generate FLT HW TBL ip %d\n", ip); rc = -EFAULT; goto fail_gen; } if (body.size > avail) { IPAERR("tbl too big, needed %d avail %d\n", body.size, avail); goto fail_send_cmd; } cmd[num_desc].size = 4; cmd[num_desc].system_addr = head1.phys_base; cmd[num_desc].local_addr = local_addrh; desc[num_desc].opcode = IPA_DMA_SHARED_MEM; desc[num_desc].pyld = &cmd[num_desc]; desc[num_desc].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); desc[num_desc++].type = IPA_IMM_CMD_DESC; for (i = 0; i < 6; i++) { if (ipa_ctx->skip_ep_cfg_shadow[i]) { IPADBG("skip %d\n", i); continue; } if (ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS) == i || ipa_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS) == i || ipa_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD) == i) { IPADBG("skip %d\n", i); continue; } if (ip == IPA_IP_v4) { local_addrh = ipa_ctx->smem_restricted_bytes + 
IPA_MEM_PART(v4_flt_ofst) + 8 + i * 4; } else { local_addrh = ipa_ctx->smem_restricted_bytes + IPA_MEM_PART(v6_flt_ofst) + 8 + i * 4; } cmd[num_desc].size = 4; cmd[num_desc].system_addr = head1.phys_base + 4 + i * 4; cmd[num_desc].local_addr = local_addrh; desc[num_desc].opcode = IPA_DMA_SHARED_MEM; desc[num_desc].pyld = &cmd[num_desc]; desc[num_desc].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); desc[num_desc++].type = IPA_IMM_CMD_DESC; } for (i = 11; i < IPA_NUM_PIPES; i++) { if (ipa_ctx->skip_ep_cfg_shadow[i]) { IPADBG("skip %d\n", i); continue; } if (ip == IPA_IP_v4) { local_addrh = ipa_ctx->smem_restricted_bytes + IPA_MEM_PART(v4_flt_ofst) + 13 * 4 + (i - 11) * 4; } else { local_addrh = ipa_ctx->smem_restricted_bytes + IPA_MEM_PART(v6_flt_ofst) + 13 * 4 + (i - 11) * 4; } cmd[num_desc].size = 4; cmd[num_desc].system_addr = head2.phys_base + (i - 11) * 4; cmd[num_desc].local_addr = local_addrh; desc[num_desc].opcode = IPA_DMA_SHARED_MEM; desc[num_desc].pyld = &cmd[num_desc]; desc[num_desc].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); desc[num_desc++].type = IPA_IMM_CMD_DESC; } if (lcl) { cmd[num_desc].size = body.size; cmd[num_desc].system_addr = body.phys_base; cmd[num_desc].local_addr = local_addrb; desc[num_desc].opcode = IPA_DMA_SHARED_MEM; desc[num_desc].pyld = &cmd[num_desc]; desc[num_desc].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); desc[num_desc++].type = IPA_IMM_CMD_DESC; if (ipa_send_cmd(num_desc, desc)) { IPAERR("fail to send immediate command\n"); rc = -EFAULT; goto fail_send_cmd; } } else { if (ipa_send_cmd(num_desc, desc)) { IPAERR("fail to send immediate command\n"); rc = -EFAULT; goto fail_send_cmd; } } __ipa_reap_sys_flt_tbls(ip); fail_send_cmd: if (body.size) dma_free_coherent(ipa_ctx->pdev, body.size, body.base, body.phys_base); dma_free_coherent(ipa_ctx->pdev, head1.size, head1.base, head1.phys_base); dma_free_coherent(ipa_ctx->pdev, head2.size, head2.base, head2.phys_base); fail_gen: kfree(cmd); fail_imm: kfree(desc); 
fail_desc: return rc; } static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip, const struct ipa_flt_rule *rule, u8 add_rear, u32 *rule_hdl) { struct ipa_flt_entry *entry; struct ipa_rt_tbl *rt_tbl = NULL; int id; if (rule->action != IPA_PASS_TO_EXCEPTION) { if (!rule->eq_attrib_type) { if (!rule->rt_tbl_hdl) { IPAERR("invalid RT tbl\n"); goto error; } rt_tbl = ipa_id_find(rule->rt_tbl_hdl); if (rt_tbl == NULL) { IPAERR("RT tbl not found\n"); goto error; } if (rt_tbl->cookie != IPA_COOKIE) { IPAERR("RT table cookie is invalid\n"); goto error; } } else { if (rule->rt_tbl_idx > ((ip == IPA_IP_v4) ? IPA_MEM_PART(v4_modem_rt_index_hi) : IPA_MEM_PART(v6_modem_rt_index_hi))) { IPAERR("invalid RT tbl\n"); goto error; } } } entry = kmem_cache_zalloc(ipa_ctx->flt_rule_cache, GFP_KERNEL); if (!entry) { IPAERR("failed to alloc FLT rule object\n"); goto error; } INIT_LIST_HEAD(&entry->link); entry->rule = *rule; entry->cookie = IPA_COOKIE; entry->rt_tbl = rt_tbl; entry->tbl = tbl; if (add_rear) { if (tbl->sticky_rear) list_add_tail(&entry->link, tbl->head_flt_rule_list.prev); else list_add_tail(&entry->link, &tbl->head_flt_rule_list); } else { list_add(&entry->link, &tbl->head_flt_rule_list); } tbl->rule_cnt++; if (entry->rt_tbl) entry->rt_tbl->ref_cnt++; id = ipa_id_alloc(entry); if (id < 0) { IPAERR("failed to add to tree\n"); WARN_ON(1); } *rule_hdl = id; entry->id = id; IPADBG("add flt rule rule_cnt=%d\n", tbl->rule_cnt); return 0; error: return -EPERM; } static int __ipa_del_flt_rule(u32 rule_hdl) { struct ipa_flt_entry *entry; int id; entry = ipa_id_find(rule_hdl); if (entry == NULL) { IPAERR("lookup failed\n"); return -EINVAL; } if (entry->cookie != IPA_COOKIE) { IPAERR("bad params\n"); return -EINVAL; } id = entry->id; list_del(&entry->link); entry->tbl->rule_cnt--; if (entry->rt_tbl) entry->rt_tbl->ref_cnt--; IPADBG("del flt rule rule_cnt=%d\n", entry->tbl->rule_cnt); entry->cookie = 0; kmem_cache_free(ipa_ctx->flt_rule_cache, entry); /* remove the 
handle from the database */ ipa_id_remove(id); return 0; } static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule, enum ipa_ip_type ip) { struct ipa_flt_entry *entry; struct ipa_rt_tbl *rt_tbl = NULL; entry = ipa_id_find(frule->rule_hdl); if (entry == NULL) { IPAERR("lookup failed\n"); goto error; } if (entry->cookie != IPA_COOKIE) { IPAERR("bad params\n"); goto error; } if (entry->rt_tbl) entry->rt_tbl->ref_cnt--; if (frule->rule.action != IPA_PASS_TO_EXCEPTION) { if (!frule->rule.eq_attrib_type) { if (!frule->rule.rt_tbl_hdl) { IPAERR("invalid RT tbl\n"); goto error; } rt_tbl = ipa_id_find(frule->rule.rt_tbl_hdl); if (rt_tbl == NULL) { IPAERR("RT tbl not found\n"); goto error; } if (rt_tbl->cookie != IPA_COOKIE) { IPAERR("RT table cookie is invalid\n"); goto error; } } else { if (frule->rule.rt_tbl_idx > ((ip == IPA_IP_v4) ? IPA_MEM_PART(v4_modem_rt_index_hi) : IPA_MEM_PART(v6_modem_rt_index_hi))) { IPAERR("invalid RT tbl\n"); goto error; } } } entry->rule = frule->rule; entry->rt_tbl = rt_tbl; if (entry->rt_tbl) entry->rt_tbl->ref_cnt++; entry->hw_len = 0; return 0; error: return -EPERM; } static int __ipa_add_global_flt_rule(enum ipa_ip_type ip, const struct ipa_flt_rule *rule, u8 add_rear, u32 *rule_hdl) { struct ipa_flt_tbl *tbl; if (rule == NULL || rule_hdl == NULL) { IPAERR("bad parms rule=%p rule_hdl=%p\n", rule, rule_hdl); return -EINVAL; } tbl = &ipa_ctx->glob_flt_tbl[ip]; IPADBG("add global flt rule ip=%d\n", ip); return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl); } static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep, const struct ipa_flt_rule *rule, u8 add_rear, u32 *rule_hdl) { struct ipa_flt_tbl *tbl; int ipa_ep_idx; if (rule == NULL || rule_hdl == NULL || ep >= IPA_CLIENT_MAX) { IPAERR("bad parms rule=%p rule_hdl=%p ep=%d\n", rule, rule_hdl, ep); return -EINVAL; } ipa_ep_idx = ipa_get_ep_mapping(ep); if (ipa_ep_idx == IPA_FLT_TABLE_INDEX_NOT_FOUND) { IPAERR("ep not valid ep=%d\n", ep); return -EINVAL; } 
if (ipa_ctx->ep[ipa_ep_idx].valid == 0) IPADBG("ep not connected ep_idx=%d\n", ipa_ep_idx); tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][ip]; IPADBG("add ep flt rule ip=%d ep=%d\n", ip, ep); return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl); } /** * ipa_add_flt_rule() - Add the specified filtering rules to SW and optionally * commit to IPA HW * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules) { int i; int result; if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) { IPAERR("bad parm\n"); return -EINVAL; } mutex_lock(&ipa_ctx->lock); for (i = 0; i < rules->num_rules; i++) { if (rules->global) result = __ipa_add_global_flt_rule(rules->ip, &rules->rules[i].rule, rules->rules[i].at_rear, &rules->rules[i].flt_rule_hdl); else result = __ipa_add_ep_flt_rule(rules->ip, rules->ep, &rules->rules[i].rule, rules->rules[i].at_rear, &rules->rules[i].flt_rule_hdl); if (result) { IPAERR("failed to add flt rule %d\n", i); rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED; } else { rules->rules[i].status = 0; } } if (rules->commit) if (ipa_ctx->ctrl->ipa_commit_flt(rules->ip)) { result = -EPERM; goto bail; } result = 0; bail: mutex_unlock(&ipa_ctx->lock); return result; } EXPORT_SYMBOL(ipa_add_flt_rule); /** * ipa_del_flt_rule() - Remove the specified filtering rules from SW and * optionally commit to IPA HW * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls) { int i; int result; if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) { IPAERR("bad parm\n"); return -EINVAL; } mutex_lock(&ipa_ctx->lock); for (i = 0; i < hdls->num_hdls; i++) { if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) { IPAERR("failed to del rt rule %i\n", i); hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED; } else { hdls->hdl[i].status = 0; } } if (hdls->commit) if 
(ipa_ctx->ctrl->ipa_commit_flt(hdls->ip)) { result = -EPERM; goto bail; } result = 0; bail: mutex_unlock(&ipa_ctx->lock); return result; } EXPORT_SYMBOL(ipa_del_flt_rule); /** * ipa_mdfy_flt_rule() - Modify the specified filtering rules in SW and optionally * commit to IPA HW * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ int ipa_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls) { int i; int result; if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) { IPAERR("bad parm\n"); return -EINVAL; } mutex_lock(&ipa_ctx->lock); for (i = 0; i < hdls->num_rules; i++) { if (__ipa_mdfy_flt_rule(&hdls->rules[i], hdls->ip)) { IPAERR("failed to mdfy rt rule %i\n", i); hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED; } else { hdls->rules[i].status = 0; } } if (hdls->commit) if (ipa_ctx->ctrl->ipa_commit_flt(hdls->ip)) { result = -EPERM; goto bail; } result = 0; bail: mutex_unlock(&ipa_ctx->lock); return result; } EXPORT_SYMBOL(ipa_mdfy_flt_rule); /** * ipa_commit_flt() - Commit the current SW filtering table of specified type to * IPA HW * @ip: [in] the family of routing tables * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ int ipa_commit_flt(enum ipa_ip_type ip) { int result; if (ip >= IPA_IP_MAX) { IPAERR("bad parm\n"); return -EINVAL; } mutex_lock(&ipa_ctx->lock); if (ipa_ctx->ctrl->ipa_commit_flt(ip)) { result = -EPERM; goto bail; } result = 0; bail: mutex_unlock(&ipa_ctx->lock); return result; } EXPORT_SYMBOL(ipa_commit_flt); /** * ipa_reset_flt() - Reset the current SW filtering table of specified type * (does not commit to HW) * @ip: [in] the family of routing tables * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ int ipa_reset_flt(enum ipa_ip_type ip) { struct ipa_flt_tbl *tbl; struct ipa_flt_entry *entry; struct ipa_flt_entry *next; int i; int id; if (ip >= IPA_IP_MAX) { IPAERR("bad parm\n"); 
return -EINVAL; } tbl = &ipa_ctx->glob_flt_tbl[ip]; mutex_lock(&ipa_ctx->lock); IPADBG("reset flt ip=%d\n", ip); list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list, link) { if (ipa_id_find(entry->id) == NULL) { WARN_ON(1); mutex_unlock(&ipa_ctx->lock); return -EFAULT; } if ((ip == IPA_IP_v4 && entry->rule.attrib.attrib_mask == IPA_FLT_PROTOCOL && entry->rule.attrib.u.v4.protocol == IPA_INVALID_L4_PROTOCOL) || (ip == IPA_IP_v6 && entry->rule.attrib.attrib_mask == IPA_FLT_NEXT_HDR && entry->rule.attrib.u.v6.next_hdr == IPA_INVALID_L4_PROTOCOL)) continue; list_del(&entry->link); entry->tbl->rule_cnt--; if (entry->rt_tbl) entry->rt_tbl->ref_cnt--; entry->cookie = 0; id = entry->id; kmem_cache_free(ipa_ctx->flt_rule_cache, entry); /* remove the handle from the database */ ipa_id_remove(id); } for (i = 0; i < IPA_NUM_PIPES; i++) { tbl = &ipa_ctx->flt_tbl[i][ip]; list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list, link) { if (ipa_id_find(entry->id) == NULL) { WARN_ON(1); mutex_unlock(&ipa_ctx->lock); return -EFAULT; } list_del(&entry->link); entry->tbl->rule_cnt--; if (entry->rt_tbl) entry->rt_tbl->ref_cnt--; entry->cookie = 0; id = entry->id; kmem_cache_free(ipa_ctx->flt_rule_cache, entry); /* remove the handle from the database */ ipa_id_remove(id); } } mutex_unlock(&ipa_ctx->lock); return 0; } EXPORT_SYMBOL(ipa_reset_flt); void ipa_install_dflt_flt_rules(u32 ipa_ep_idx) { struct ipa_flt_tbl *tbl; struct ipa_ep_context *ep = &ipa_ctx->ep[ipa_ep_idx]; struct ipa_flt_rule rule; memset(&rule, 0, sizeof(rule)); mutex_lock(&ipa_ctx->lock); tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4]; tbl->sticky_rear = true; rule.action = IPA_PASS_TO_EXCEPTION; __ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, false, &ep->dflt_flt4_rule_hdl); ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v4); tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6]; tbl->sticky_rear = true; rule.action = IPA_PASS_TO_EXCEPTION; __ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, false, &ep->dflt_flt6_rule_hdl); 
ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v6); mutex_unlock(&ipa_ctx->lock); } void ipa_delete_dflt_flt_rules(u32 ipa_ep_idx) { struct ipa_ep_context *ep = &ipa_ctx->ep[ipa_ep_idx]; mutex_lock(&ipa_ctx->lock); if (ep->dflt_flt4_rule_hdl) { __ipa_del_flt_rule(ep->dflt_flt4_rule_hdl); ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v4); ep->dflt_flt4_rule_hdl = 0; } if (ep->dflt_flt6_rule_hdl) { __ipa_del_flt_rule(ep->dflt_flt6_rule_hdl); ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v6); ep->dflt_flt6_rule_hdl = 0; } mutex_unlock(&ipa_ctx->lock); }
gpl-2.0
bagnz0r/GT-I8160_Kernel
drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_block_allocator.c
908
11850
/* * Copyright (C) 2010-2012 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. * * A copy of the licence is included with the program, and can also be obtained from Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "mali_kernel_common.h" #include "mali_kernel_core.h" #include "mali_kernel_memory_engine.h" #include "mali_block_allocator.h" #include "mali_osk.h" #define MALI_BLOCK_SIZE (256UL * 1024UL) /* 256 kB, remember to keep the ()s */ typedef struct block_info { struct block_info * next; } block_info; /* The structure used as the handle produced by block_allocator_allocate, * and removed by block_allocator_release */ typedef struct block_allocator_allocation { /* The list will be released in reverse order */ block_info *last_allocated; mali_allocation_engine * engine; mali_memory_allocation * descriptor; u32 start_offset; u32 mapping_length; } block_allocator_allocation; typedef struct block_allocator { _mali_osk_lock_t *mutex; block_info * all_blocks; block_info * first_free; u32 base; u32 cpu_usage_adjust; u32 num_blocks; } block_allocator; MALI_STATIC_INLINE u32 get_phys(block_allocator * info, block_info * block); static mali_physical_memory_allocation_result block_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info); static void block_allocator_release(void * ctx, void * handle); static mali_physical_memory_allocation_result block_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block); static void block_allocator_release_page_table_block( mali_page_table_block *page_table_block ); static void block_allocator_destroy(mali_physical_memory_allocator * 
allocator); static u32 block_allocator_stat(mali_physical_memory_allocator * allocator); mali_physical_memory_allocator * mali_block_allocator_create(u32 base_address, u32 cpu_usage_adjust, u32 size, const char *name) { mali_physical_memory_allocator * allocator; block_allocator * info; u32 usable_size; u32 num_blocks; usable_size = size & ~(MALI_BLOCK_SIZE - 1); MALI_DEBUG_PRINT(3, ("Mali block allocator create for region starting at 0x%08X length 0x%08X\n", base_address, size)); MALI_DEBUG_PRINT(4, ("%d usable bytes\n", usable_size)); num_blocks = usable_size / MALI_BLOCK_SIZE; MALI_DEBUG_PRINT(4, ("which becomes %d blocks\n", num_blocks)); if (usable_size == 0) { MALI_DEBUG_PRINT(1, ("Memory block of size %d is unusable\n", size)); return NULL; } allocator = _mali_osk_malloc(sizeof(mali_physical_memory_allocator)); if (NULL != allocator) { info = _mali_osk_malloc(sizeof(block_allocator)); if (NULL != info) { info->mutex = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED, 0, _MALI_OSK_LOCK_ORDER_MEM_INFO); if (NULL != info->mutex) { info->all_blocks = _mali_osk_malloc(sizeof(block_info) * num_blocks); if (NULL != info->all_blocks) { u32 i; info->first_free = NULL; info->num_blocks = num_blocks; info->base = base_address; info->cpu_usage_adjust = cpu_usage_adjust; for ( i = 0; i < num_blocks; i++) { info->all_blocks[i].next = info->first_free; info->first_free = &info->all_blocks[i]; } allocator->allocate = block_allocator_allocate; allocator->allocate_page_table_block = block_allocator_allocate_page_table_block; allocator->destroy = block_allocator_destroy; allocator->stat = block_allocator_stat; allocator->ctx = info; allocator->name = name; return allocator; } _mali_osk_lock_term(info->mutex); } _mali_osk_free(info); } _mali_osk_free(allocator); } return NULL; } static void block_allocator_destroy(mali_physical_memory_allocator * allocator) { block_allocator * info; MALI_DEBUG_ASSERT_POINTER(allocator); MALI_DEBUG_ASSERT_POINTER(allocator->ctx); info = 
(block_allocator*)allocator->ctx; _mali_osk_free(info->all_blocks); _mali_osk_lock_term(info->mutex); _mali_osk_free(info); _mali_osk_free(allocator); } MALI_STATIC_INLINE u32 get_phys(block_allocator * info, block_info * block) { return info->base + ((block - info->all_blocks) * MALI_BLOCK_SIZE); } static mali_physical_memory_allocation_result block_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info) { block_allocator * info; u32 left; block_info * last_allocated = NULL; mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_NONE; block_allocator_allocation *ret_allocation; MALI_DEBUG_ASSERT_POINTER(ctx); MALI_DEBUG_ASSERT_POINTER(descriptor); MALI_DEBUG_ASSERT_POINTER(offset); MALI_DEBUG_ASSERT_POINTER(alloc_info); info = (block_allocator*)ctx; left = descriptor->size - *offset; MALI_DEBUG_ASSERT(0 != left); if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE; ret_allocation = _mali_osk_malloc( sizeof(block_allocator_allocation) ); if ( NULL == ret_allocation ) { /* Failure; try another allocator by returning MALI_MEM_ALLOC_NONE */ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW); return result; } ret_allocation->start_offset = *offset; ret_allocation->mapping_length = 0; while ((left > 0) && (info->first_free)) { block_info * block; u32 phys_addr; u32 padding; u32 current_mapping_size; block = info->first_free; info->first_free = info->first_free->next; block->next = last_allocated; last_allocated = block; phys_addr = get_phys(info, block); padding = *offset & (MALI_BLOCK_SIZE-1); if (MALI_BLOCK_SIZE - padding < left) { current_mapping_size = MALI_BLOCK_SIZE - padding; } else { current_mapping_size = left; } if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, phys_addr + padding, info->cpu_usage_adjust, current_mapping_size)) { 
MALI_DEBUG_PRINT(1, ("Mapping of physical memory failed\n")); result = MALI_MEM_ALLOC_INTERNAL_FAILURE; mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->start_offset, ret_allocation->mapping_length, (_mali_osk_mem_mapregion_flags_t)0); /* release all memory back to the pool */ while (last_allocated) { /* This relinks every block we've just allocated back into the free-list */ block = last_allocated->next; last_allocated->next = info->first_free; info->first_free = last_allocated; last_allocated = block; } break; } *offset += current_mapping_size; left -= current_mapping_size; ret_allocation->mapping_length += current_mapping_size; } _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW); if (last_allocated) { if (left) result = MALI_MEM_ALLOC_PARTIAL; else result = MALI_MEM_ALLOC_FINISHED; /* Record all the information about this allocation */ ret_allocation->last_allocated = last_allocated; ret_allocation->engine = engine; ret_allocation->descriptor = descriptor; alloc_info->ctx = info; alloc_info->handle = ret_allocation; alloc_info->release = block_allocator_release; } else { /* Free the allocation information - nothing to be passed back */ _mali_osk_free( ret_allocation ); } return result; } static void block_allocator_release(void * ctx, void * handle) { block_allocator * info; block_info * block, * next; block_allocator_allocation *allocation; MALI_DEBUG_ASSERT_POINTER(ctx); MALI_DEBUG_ASSERT_POINTER(handle); info = (block_allocator*)ctx; allocation = (block_allocator_allocation*)handle; block = allocation->last_allocated; MALI_DEBUG_ASSERT_POINTER(block); if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) { MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n")); return; } /* unmap */ mali_allocation_engine_unmap_physical(allocation->engine, allocation->descriptor, allocation->start_offset, allocation->mapping_length, (_mali_osk_mem_mapregion_flags_t)0); while (block) { 
MALI_DEBUG_ASSERT(!((block < info->all_blocks) || (block > (info->all_blocks + info->num_blocks)))); next = block->next; /* relink into free-list */ block->next = info->first_free; info->first_free = block; /* advance the loop */ block = next; } _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW); _mali_osk_free( allocation ); } static mali_physical_memory_allocation_result block_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block) { block_allocator * info; mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_INTERNAL_FAILURE; MALI_DEBUG_ASSERT_POINTER(ctx); MALI_DEBUG_ASSERT_POINTER(block); info = (block_allocator*)ctx; if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE; if (NULL != info->first_free) { void * virt; u32 phys; u32 size; block_info * alloc; alloc = info->first_free; phys = get_phys(info, alloc); /* Does not modify info or alloc */ size = MALI_BLOCK_SIZE; /* Must be multiple of MALI_MMU_PAGE_SIZE */ virt = _mali_osk_mem_mapioregion( phys, size, "Mali block allocator page tables" ); /* Failure of _mali_osk_mem_mapioregion will result in MALI_MEM_ALLOC_INTERNAL_FAILURE, * because it's unlikely another allocator will be able to map in. 
*/ if ( NULL != virt ) { block->ctx = info; /* same as incoming ctx */ block->handle = alloc; block->phys_base = phys; block->size = size; block->release = block_allocator_release_page_table_block; block->mapping = virt; info->first_free = alloc->next; alloc->next = NULL; /* Could potentially link many blocks together instead */ result = MALI_MEM_ALLOC_FINISHED; } } else result = MALI_MEM_ALLOC_NONE; _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW); return result; } static void block_allocator_release_page_table_block( mali_page_table_block *page_table_block ) { block_allocator * info; block_info * block, * next; MALI_DEBUG_ASSERT_POINTER( page_table_block ); info = (block_allocator*)page_table_block->ctx; block = (block_info*)page_table_block->handle; MALI_DEBUG_ASSERT_POINTER(info); MALI_DEBUG_ASSERT_POINTER(block); if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) { MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n")); return; } /* Unmap all the physical memory at once */ _mali_osk_mem_unmapioregion( page_table_block->phys_base, page_table_block->size, page_table_block->mapping ); /** @note This loop handles the case where more than one block_info was linked. * Probably unnecessary for page table block releasing. */ while (block) { next = block->next; MALI_DEBUG_ASSERT(!((block < info->all_blocks) || (block > (info->all_blocks + info->num_blocks)))); block->next = info->first_free; info->first_free = block; block = next; } _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW); } static u32 block_allocator_stat(mali_physical_memory_allocator * allocator) { block_allocator * info; block_info *block; u32 free_blocks = 0; MALI_DEBUG_ASSERT_POINTER(allocator); info = (block_allocator*)allocator->ctx; block = info->first_free; while(block) { free_blocks++; block = block->next; } return (info->num_blocks - free_blocks) * MALI_BLOCK_SIZE; }
gpl-2.0
xb446909/personalprojects
crosstool/source/linux-4.1.2/net/sched/cls_basic.c
908
6920
/* * net/sched/cls_basic.c Basic Packet Classifier. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Thomas Graf <tgraf@suug.ch> */ #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/rtnetlink.h> #include <linux/skbuff.h> #include <net/netlink.h> #include <net/act_api.h> #include <net/pkt_cls.h> struct basic_head { u32 hgenerator; struct list_head flist; struct rcu_head rcu; }; struct basic_filter { u32 handle; struct tcf_exts exts; struct tcf_ematch_tree ematches; struct tcf_result res; struct tcf_proto *tp; struct list_head link; struct rcu_head rcu; }; static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { int r; struct basic_head *head = rcu_dereference_bh(tp->root); struct basic_filter *f; list_for_each_entry_rcu(f, &head->flist, link) { if (!tcf_em_tree_match(skb, &f->ematches, NULL)) continue; *res = f->res; r = tcf_exts_exec(skb, &f->exts, res); if (r < 0) continue; return r; } return -1; } static unsigned long basic_get(struct tcf_proto *tp, u32 handle) { unsigned long l = 0UL; struct basic_head *head = rtnl_dereference(tp->root); struct basic_filter *f; if (head == NULL) return 0UL; list_for_each_entry(f, &head->flist, link) { if (f->handle == handle) { l = (unsigned long) f; break; } } return l; } static int basic_init(struct tcf_proto *tp) { struct basic_head *head; head = kzalloc(sizeof(*head), GFP_KERNEL); if (head == NULL) return -ENOBUFS; INIT_LIST_HEAD(&head->flist); rcu_assign_pointer(tp->root, head); return 0; } static void basic_delete_filter(struct rcu_head *head) { struct basic_filter *f = container_of(head, struct basic_filter, rcu); tcf_exts_destroy(&f->exts); 
tcf_em_tree_destroy(&f->ematches); kfree(f); } static bool basic_destroy(struct tcf_proto *tp, bool force) { struct basic_head *head = rtnl_dereference(tp->root); struct basic_filter *f, *n; if (!force && !list_empty(&head->flist)) return false; list_for_each_entry_safe(f, n, &head->flist, link) { list_del_rcu(&f->link); tcf_unbind_filter(tp, &f->res); call_rcu(&f->rcu, basic_delete_filter); } RCU_INIT_POINTER(tp->root, NULL); kfree_rcu(head, rcu); return true; } static int basic_delete(struct tcf_proto *tp, unsigned long arg) { struct basic_filter *f = (struct basic_filter *) arg; list_del_rcu(&f->link); tcf_unbind_filter(tp, &f->res); call_rcu(&f->rcu, basic_delete_filter); return 0; } static const struct nla_policy basic_policy[TCA_BASIC_MAX + 1] = { [TCA_BASIC_CLASSID] = { .type = NLA_U32 }, [TCA_BASIC_EMATCHES] = { .type = NLA_NESTED }, }; static int basic_set_parms(struct net *net, struct tcf_proto *tp, struct basic_filter *f, unsigned long base, struct nlattr **tb, struct nlattr *est, bool ovr) { int err; struct tcf_exts e; struct tcf_ematch_tree t; tcf_exts_init(&e, TCA_BASIC_ACT, TCA_BASIC_POLICE); err = tcf_exts_validate(net, tp, tb, est, &e, ovr); if (err < 0) return err; err = tcf_em_tree_validate(tp, tb[TCA_BASIC_EMATCHES], &t); if (err < 0) goto errout; if (tb[TCA_BASIC_CLASSID]) { f->res.classid = nla_get_u32(tb[TCA_BASIC_CLASSID]); tcf_bind_filter(tp, &f->res, base); } tcf_exts_change(tp, &f->exts, &e); tcf_em_tree_change(tp, &f->ematches, &t); f->tp = tp; return 0; errout: tcf_exts_destroy(&e); return err; } static int basic_change(struct net *net, struct sk_buff *in_skb, struct tcf_proto *tp, unsigned long base, u32 handle, struct nlattr **tca, unsigned long *arg, bool ovr) { int err; struct basic_head *head = rtnl_dereference(tp->root); struct nlattr *tb[TCA_BASIC_MAX + 1]; struct basic_filter *fold = (struct basic_filter *) *arg; struct basic_filter *fnew; if (tca[TCA_OPTIONS] == NULL) return -EINVAL; err = nla_parse_nested(tb, TCA_BASIC_MAX, 
tca[TCA_OPTIONS], basic_policy); if (err < 0) return err; if (fold != NULL) { if (handle && fold->handle != handle) return -EINVAL; } fnew = kzalloc(sizeof(*fnew), GFP_KERNEL); if (!fnew) return -ENOBUFS; tcf_exts_init(&fnew->exts, TCA_BASIC_ACT, TCA_BASIC_POLICE); err = -EINVAL; if (handle) { fnew->handle = handle; } else if (fold) { fnew->handle = fold->handle; } else { unsigned int i = 0x80000000; do { if (++head->hgenerator == 0x7FFFFFFF) head->hgenerator = 1; } while (--i > 0 && basic_get(tp, head->hgenerator)); if (i <= 0) { pr_err("Insufficient number of handles\n"); goto errout; } fnew->handle = head->hgenerator; } err = basic_set_parms(net, tp, fnew, base, tb, tca[TCA_RATE], ovr); if (err < 0) goto errout; *arg = (unsigned long)fnew; if (fold) { list_replace_rcu(&fold->link, &fnew->link); tcf_unbind_filter(tp, &fold->res); call_rcu(&fold->rcu, basic_delete_filter); } else { list_add_rcu(&fnew->link, &head->flist); } return 0; errout: kfree(fnew); return err; } static void basic_walk(struct tcf_proto *tp, struct tcf_walker *arg) { struct basic_head *head = rtnl_dereference(tp->root); struct basic_filter *f; list_for_each_entry(f, &head->flist, link) { if (arg->count < arg->skip) goto skip; if (arg->fn(tp, (unsigned long) f, arg) < 0) { arg->stop = 1; break; } skip: arg->count++; } } static int basic_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, struct sk_buff *skb, struct tcmsg *t) { struct basic_filter *f = (struct basic_filter *) fh; struct nlattr *nest; if (f == NULL) return skb->len; t->tcm_handle = f->handle; nest = nla_nest_start(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; if (f->res.classid && nla_put_u32(skb, TCA_BASIC_CLASSID, f->res.classid)) goto nla_put_failure; if (tcf_exts_dump(skb, &f->exts) < 0 || tcf_em_tree_dump(skb, &f->ematches, TCA_BASIC_EMATCHES) < 0) goto nla_put_failure; nla_nest_end(skb, nest); if (tcf_exts_dump_stats(skb, &f->exts) < 0) goto nla_put_failure; return skb->len; nla_put_failure: 
nla_nest_cancel(skb, nest); return -1; } static struct tcf_proto_ops cls_basic_ops __read_mostly = { .kind = "basic", .classify = basic_classify, .init = basic_init, .destroy = basic_destroy, .get = basic_get, .change = basic_change, .delete = basic_delete, .walk = basic_walk, .dump = basic_dump, .owner = THIS_MODULE, }; static int __init init_basic(void) { return register_tcf_proto_ops(&cls_basic_ops); } static void __exit exit_basic(void) { unregister_tcf_proto_ops(&cls_basic_ops); } module_init(init_basic) module_exit(exit_basic) MODULE_LICENSE("GPL");
gpl-2.0
slz/samsung-exhibit-ii-kernel
arch/arm/mach-integrator/cpu.c
908
5250
/* * linux/arch/arm/mach-integrator/cpu.c * * Copyright (C) 2001-2002 Deep Blue Solutions Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * CPU support functions */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/cpufreq.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/init.h> #include <linux/io.h> #include <mach/hardware.h> #include <mach/platform.h> #include <asm/mach-types.h> #include <asm/hardware/icst.h> static struct cpufreq_driver integrator_driver; #define CM_ID IO_ADDRESS(INTEGRATOR_HDR_ID) #define CM_OSC IO_ADDRESS(INTEGRATOR_HDR_OSC) #define CM_STAT IO_ADDRESS(INTEGRATOR_HDR_STAT) #define CM_LOCK IO_ADDRESS(INTEGRATOR_HDR_LOCK) static const struct icst_params lclk_params = { .ref = 24000000, .vco_max = ICST525_VCO_MAX_5V, .vco_min = ICST525_VCO_MIN, .vd_min = 8, .vd_max = 132, .rd_min = 24, .rd_max = 24, .s2div = icst525_s2div, .idx2s = icst525_idx2s, }; static const struct icst_params cclk_params = { .ref = 24000000, .vco_max = ICST525_VCO_MAX_5V, .vco_min = ICST525_VCO_MIN, .vd_min = 12, .vd_max = 160, .rd_min = 24, .rd_max = 24, .s2div = icst525_s2div, .idx2s = icst525_idx2s, }; /* * Validate the speed policy. 
*/ static int integrator_verify_policy(struct cpufreq_policy *policy) { struct icst_vco vco; cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq); vco = icst_hz_to_vco(&cclk_params, policy->max * 1000); policy->max = icst_hz(&cclk_params, vco) / 1000; vco = icst_hz_to_vco(&cclk_params, policy->min * 1000); policy->min = icst_hz(&cclk_params, vco) / 1000; cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq); return 0; } static int integrator_set_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { cpumask_t cpus_allowed; int cpu = policy->cpu; struct icst_vco vco; struct cpufreq_freqs freqs; u_int cm_osc; /* * Save this threads cpus_allowed mask. */ cpus_allowed = current->cpus_allowed; /* * Bind to the specified CPU. When this call returns, * we should be running on the right CPU. */ set_cpus_allowed(current, cpumask_of_cpu(cpu)); BUG_ON(cpu != smp_processor_id()); /* get current setting */ cm_osc = __raw_readl(CM_OSC); if (machine_is_integrator()) { vco.s = (cm_osc >> 8) & 7; } else if (machine_is_cintegrator()) { vco.s = 1; } vco.v = cm_osc & 255; vco.r = 22; freqs.old = icst_hz(&cclk_params, vco) / 1000; /* icst_hz_to_vco rounds down -- so we need the next * larger freq in case of CPUFREQ_RELATION_L. 
*/ if (relation == CPUFREQ_RELATION_L) target_freq += 999; if (target_freq > policy->max) target_freq = policy->max; vco = icst_hz_to_vco(&cclk_params, target_freq * 1000); freqs.new = icst_hz(&cclk_params, vco) / 1000; freqs.cpu = policy->cpu; if (freqs.old == freqs.new) { set_cpus_allowed(current, cpus_allowed); return 0; } cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); cm_osc = __raw_readl(CM_OSC); if (machine_is_integrator()) { cm_osc &= 0xfffff800; cm_osc |= vco.s << 8; } else if (machine_is_cintegrator()) { cm_osc &= 0xffffff00; } cm_osc |= vco.v; __raw_writel(0xa05f, CM_LOCK); __raw_writel(cm_osc, CM_OSC); __raw_writel(0, CM_LOCK); /* * Restore the CPUs allowed mask. */ set_cpus_allowed(current, cpus_allowed); cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); return 0; } static unsigned int integrator_get(unsigned int cpu) { cpumask_t cpus_allowed; unsigned int current_freq; u_int cm_osc; struct icst_vco vco; cpus_allowed = current->cpus_allowed; set_cpus_allowed(current, cpumask_of_cpu(cpu)); BUG_ON(cpu != smp_processor_id()); /* detect memory etc. 
*/ cm_osc = __raw_readl(CM_OSC); if (machine_is_integrator()) { vco.s = (cm_osc >> 8) & 7; } else if (machine_is_cintegrator()) { vco.s = 1; } vco.v = cm_osc & 255; vco.r = 22; current_freq = icst_hz(&cclk_params, vco) / 1000; /* current freq */ set_cpus_allowed(current, cpus_allowed); return current_freq; } static int integrator_cpufreq_init(struct cpufreq_policy *policy) { /* set default policy and cpuinfo */ policy->cpuinfo.max_freq = 160000; policy->cpuinfo.min_freq = 12000; policy->cpuinfo.transition_latency = 1000000; /* 1 ms, assumed */ policy->cur = policy->min = policy->max = integrator_get(policy->cpu); return 0; } static struct cpufreq_driver integrator_driver = { .verify = integrator_verify_policy, .target = integrator_set_target, .get = integrator_get, .init = integrator_cpufreq_init, .name = "integrator", }; static int __init integrator_cpu_init(void) { return cpufreq_register_driver(&integrator_driver); } static void __exit integrator_cpu_exit(void) { cpufreq_unregister_driver(&integrator_driver); } MODULE_AUTHOR ("Russell M. King"); MODULE_DESCRIPTION ("cpufreq driver for ARM Integrator CPUs"); MODULE_LICENSE ("GPL"); module_init(integrator_cpu_init); module_exit(integrator_cpu_exit);
gpl-2.0
drowningchild/msm-2.6.38
drivers/staging/rt2860/common/cmm_data_usb.c
1164
31321
/* ************************************************************************* * Ralink Tech Inc. * 5F., No.36, Taiyuan St., Jhubei City, * Hsinchu County 302, * Taiwan, R.O.C. * * (c) Copyright 2002-2007, Ralink Technology, Inc. * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the * * Free Software Foundation, Inc., * * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * * ************************************************************************* */ /* All functions in this file must be USB-depended, or you should out your function in other files. */ #ifdef RTMP_MAC_USB #include "../rt_config.h" /* We can do copy the frame into pTxContext when match following conditions. 
=> => => */ static inline int RtmpUSBCanDoWrite(struct rt_rtmp_adapter *pAd, u8 QueIdx, struct rt_ht_tx_context *pHTTXContext) { int canWrite = NDIS_STATUS_RESOURCES; if (((pHTTXContext->CurWritePosition) < pHTTXContext->NextBulkOutPosition) && (pHTTXContext->CurWritePosition + LOCAL_TXBUF_SIZE) > pHTTXContext->NextBulkOutPosition) { DBGPRINT(RT_DEBUG_ERROR, ("RtmpUSBCanDoWrite c1!\n")); RTUSB_SET_BULK_FLAG(pAd, (fRTUSB_BULK_OUT_DATA_NORMAL << QueIdx)); } else if ((pHTTXContext->CurWritePosition == 8) && (pHTTXContext->NextBulkOutPosition < LOCAL_TXBUF_SIZE)) { DBGPRINT(RT_DEBUG_ERROR, ("RtmpUSBCanDoWrite c2!\n")); RTUSB_SET_BULK_FLAG(pAd, (fRTUSB_BULK_OUT_DATA_NORMAL << QueIdx)); } else if (pHTTXContext->bCurWriting == TRUE) { DBGPRINT(RT_DEBUG_ERROR, ("RtmpUSBCanDoWrite c3!\n")); } else { canWrite = NDIS_STATUS_SUCCESS; } return canWrite; } u16 RtmpUSB_WriteSubTxResource(struct rt_rtmp_adapter *pAd, struct rt_tx_blk *pTxBlk, IN BOOLEAN bIsLast, u16 * FreeNumber) { /* Dummy function. Should be removed in the future. */ return 0; } u16 RtmpUSB_WriteFragTxResource(struct rt_rtmp_adapter *pAd, struct rt_tx_blk *pTxBlk, u8 fragNum, u16 * FreeNumber) { struct rt_ht_tx_context *pHTTXContext; u16 hwHdrLen; /* The hwHdrLen consist of 802.11 header length plus the header padding length. */ u32 fillOffset; struct rt_txinfo *pTxInfo; struct rt_txwi *pTxWI; u8 *pWirelessPacket = NULL; u8 QueIdx; int Status; unsigned long IrqFlags; u32 USBDMApktLen = 0, DMAHdrLen, padding; BOOLEAN TxQLastRound = FALSE; /* */ /* get Tx Ring Resource & Dma Buffer address */ /* */ QueIdx = pTxBlk->QueIdx; pHTTXContext = &pAd->TxContext[QueIdx]; RTMP_IRQ_LOCK(&pAd->TxContextQueueLock[QueIdx], IrqFlags); pHTTXContext = &pAd->TxContext[QueIdx]; fillOffset = pHTTXContext->CurWritePosition; if (fragNum == 0) { /* Check if we have enough space for this bulk-out batch. 
*/ Status = RtmpUSBCanDoWrite(pAd, QueIdx, pHTTXContext); if (Status == NDIS_STATUS_SUCCESS) { pHTTXContext->bCurWriting = TRUE; /* Reserve space for 8 bytes padding. */ if ((pHTTXContext->ENextBulkOutPosition == pHTTXContext->CurWritePosition)) { pHTTXContext->ENextBulkOutPosition += 8; pHTTXContext->CurWritePosition += 8; fillOffset += 8; } pTxBlk->Priv = 0; pHTTXContext->CurWriteRealPos = pHTTXContext->CurWritePosition; } else { RTMP_IRQ_UNLOCK(&pAd->TxContextQueueLock[QueIdx], IrqFlags); RELEASE_NDIS_PACKET(pAd, pTxBlk->pPacket, NDIS_STATUS_FAILURE); return (Status); } } else { /* For sub-sequent frames of this bulk-out batch. Just copy it to our bulk-out buffer. */ Status = ((pHTTXContext->bCurWriting == TRUE) ? NDIS_STATUS_SUCCESS : NDIS_STATUS_FAILURE); if (Status == NDIS_STATUS_SUCCESS) { fillOffset += pTxBlk->Priv; } else { RTMP_IRQ_UNLOCK(&pAd->TxContextQueueLock[QueIdx], IrqFlags); RELEASE_NDIS_PACKET(pAd, pTxBlk->pPacket, NDIS_STATUS_FAILURE); return (Status); } } NdisZeroMemory((u8 *)(&pTxBlk->HeaderBuf[0]), TXINFO_SIZE); pTxInfo = (struct rt_txinfo *)(&pTxBlk->HeaderBuf[0]); pTxWI = (struct rt_txwi *) (&pTxBlk->HeaderBuf[TXINFO_SIZE]); pWirelessPacket = &pHTTXContext->TransferBuffer->field.WirelessPacket[fillOffset]; /* copy TXWI + WLAN Header + LLC into DMA Header Buffer */ /*hwHdrLen = ROUND_UP(pTxBlk->MpduHeaderLen, 4); */ hwHdrLen = pTxBlk->MpduHeaderLen + pTxBlk->HdrPadLen; /* Build our URB for USBD */ DMAHdrLen = TXWI_SIZE + hwHdrLen; USBDMApktLen = DMAHdrLen + pTxBlk->SrcBufLen; padding = (4 - (USBDMApktLen % 4)) & 0x03; /* round up to 4 byte alignment */ USBDMApktLen += padding; pTxBlk->Priv += (TXINFO_SIZE + USBDMApktLen); /* For TxInfo, the length of USBDMApktLen = TXWI_SIZE + 802.11 header + payload */ RTMPWriteTxInfo(pAd, pTxInfo, (u16)(USBDMApktLen), FALSE, FIFO_EDCA, FALSE /*NextValid */ , FALSE); if (fragNum == pTxBlk->TotalFragNum) { pTxInfo->USBDMATxburst = 0; if ((pHTTXContext->CurWritePosition + pTxBlk->Priv + 3906) > 
MAX_TXBULK_LIMIT) { pTxInfo->SwUseLastRound = 1; TxQLastRound = TRUE; } } else { pTxInfo->USBDMATxburst = 1; } NdisMoveMemory(pWirelessPacket, pTxBlk->HeaderBuf, TXINFO_SIZE + TXWI_SIZE + hwHdrLen); pWirelessPacket += (TXINFO_SIZE + TXWI_SIZE + hwHdrLen); pHTTXContext->CurWriteRealPos += (TXINFO_SIZE + TXWI_SIZE + hwHdrLen); RTMP_IRQ_UNLOCK(&pAd->TxContextQueueLock[QueIdx], IrqFlags); NdisMoveMemory(pWirelessPacket, pTxBlk->pSrcBufData, pTxBlk->SrcBufLen); /* Zero the last padding. */ pWirelessPacket += pTxBlk->SrcBufLen; NdisZeroMemory(pWirelessPacket, padding + 8); if (fragNum == pTxBlk->TotalFragNum) { RTMP_IRQ_LOCK(&pAd->TxContextQueueLock[QueIdx], IrqFlags); /* Update the pHTTXContext->CurWritePosition. 3906 used to prevent the NextBulkOut is a A-RALINK/A-MSDU Frame. */ pHTTXContext->CurWritePosition += pTxBlk->Priv; if (TxQLastRound == TRUE) pHTTXContext->CurWritePosition = 8; pHTTXContext->CurWriteRealPos = pHTTXContext->CurWritePosition; /* Finally, set bCurWriting as FALSE */ pHTTXContext->bCurWriting = FALSE; RTMP_IRQ_UNLOCK(&pAd->TxContextQueueLock[QueIdx], IrqFlags); /* succeed and release the skb buffer */ RELEASE_NDIS_PACKET(pAd, pTxBlk->pPacket, NDIS_STATUS_SUCCESS); } return (Status); } u16 RtmpUSB_WriteSingleTxResource(struct rt_rtmp_adapter *pAd, struct rt_tx_blk *pTxBlk, IN BOOLEAN bIsLast, u16 * FreeNumber) { struct rt_ht_tx_context *pHTTXContext; u16 hwHdrLen; u32 fillOffset; struct rt_txinfo *pTxInfo; struct rt_txwi *pTxWI; u8 *pWirelessPacket; u8 QueIdx; unsigned long IrqFlags; int Status; u32 USBDMApktLen = 0, DMAHdrLen, padding; BOOLEAN bTxQLastRound = FALSE; /* For USB, didn't need PCI_MAP_SINGLE() */ /*SrcBufPA = PCI_MAP_SINGLE(pAd, (char *) pTxBlk->pSrcBufData, pTxBlk->SrcBufLen, PCI_DMA_TODEVICE); */ /* */ /* get Tx Ring Resource & Dma Buffer address */ /* */ QueIdx = pTxBlk->QueIdx; RTMP_IRQ_LOCK(&pAd->TxContextQueueLock[QueIdx], IrqFlags); pHTTXContext = &pAd->TxContext[QueIdx]; fillOffset = pHTTXContext->CurWritePosition; /* Check 
ring full. */ Status = RtmpUSBCanDoWrite(pAd, QueIdx, pHTTXContext); if (Status == NDIS_STATUS_SUCCESS) { pHTTXContext->bCurWriting = TRUE; pTxInfo = (struct rt_txinfo *)(&pTxBlk->HeaderBuf[0]); pTxWI = (struct rt_txwi *) (&pTxBlk->HeaderBuf[TXINFO_SIZE]); /* Reserve space for 8 bytes padding. */ if ((pHTTXContext->ENextBulkOutPosition == pHTTXContext->CurWritePosition)) { pHTTXContext->ENextBulkOutPosition += 8; pHTTXContext->CurWritePosition += 8; fillOffset += 8; } pHTTXContext->CurWriteRealPos = pHTTXContext->CurWritePosition; pWirelessPacket = &pHTTXContext->TransferBuffer->field. WirelessPacket[fillOffset]; /* copy TXWI + WLAN Header + LLC into DMA Header Buffer */ /*hwHdrLen = ROUND_UP(pTxBlk->MpduHeaderLen, 4); */ hwHdrLen = pTxBlk->MpduHeaderLen + pTxBlk->HdrPadLen; /* Build our URB for USBD */ DMAHdrLen = TXWI_SIZE + hwHdrLen; USBDMApktLen = DMAHdrLen + pTxBlk->SrcBufLen; padding = (4 - (USBDMApktLen % 4)) & 0x03; /* round up to 4 byte alignment */ USBDMApktLen += padding; pTxBlk->Priv = (TXINFO_SIZE + USBDMApktLen); /* For TxInfo, the length of USBDMApktLen = TXWI_SIZE + 802.11 header + payload */ RTMPWriteTxInfo(pAd, pTxInfo, (u16)(USBDMApktLen), FALSE, FIFO_EDCA, FALSE /*NextValid */ , FALSE); if ((pHTTXContext->CurWritePosition + 3906 + pTxBlk->Priv) > MAX_TXBULK_LIMIT) { pTxInfo->SwUseLastRound = 1; bTxQLastRound = TRUE; } NdisMoveMemory(pWirelessPacket, pTxBlk->HeaderBuf, TXINFO_SIZE + TXWI_SIZE + hwHdrLen); pWirelessPacket += (TXINFO_SIZE + TXWI_SIZE + hwHdrLen); /* We unlock it here to prevent the first 8 bytes maybe over-writed issue. */ /* 1. First we got CurWritePosition but the first 8 bytes still not write to the pTxcontext. */ /* 2. An interrupt break our routine and handle bulk-out complete. */ /* 3. In the bulk-out compllete, it need to do another bulk-out, */ /* if the ENextBulkOutPosition is just the same as CurWritePosition, it will save the first 8 bytes from CurWritePosition, */ /* but the payload still not copyed. 
the pTxContext->SavedPad[] will save as allzero. and set the bCopyPad = TRUE. */ /* 4. Interrupt complete. */ /* 5. Our interrupted routine go back and fill the first 8 bytes to pTxContext. */ /* 6. Next time when do bulk-out, it found the bCopyPad==TRUE and will copy the SavedPad[] to pTxContext->NextBulkOutPosition. */ /* and the packet will wrong. */ pHTTXContext->CurWriteRealPos += (TXINFO_SIZE + TXWI_SIZE + hwHdrLen); RTMP_IRQ_UNLOCK(&pAd->TxContextQueueLock[QueIdx], IrqFlags); NdisMoveMemory(pWirelessPacket, pTxBlk->pSrcBufData, pTxBlk->SrcBufLen); pWirelessPacket += pTxBlk->SrcBufLen; NdisZeroMemory(pWirelessPacket, padding + 8); RTMP_IRQ_LOCK(&pAd->TxContextQueueLock[QueIdx], IrqFlags); pHTTXContext->CurWritePosition += pTxBlk->Priv; if (bTxQLastRound) pHTTXContext->CurWritePosition = 8; pHTTXContext->CurWriteRealPos = pHTTXContext->CurWritePosition; pHTTXContext->bCurWriting = FALSE; } RTMP_IRQ_UNLOCK(&pAd->TxContextQueueLock[QueIdx], IrqFlags); /* succeed and release the skb buffer */ RELEASE_NDIS_PACKET(pAd, pTxBlk->pPacket, NDIS_STATUS_SUCCESS); return (Status); } u16 RtmpUSB_WriteMultiTxResource(struct rt_rtmp_adapter *pAd, struct rt_tx_blk *pTxBlk, u8 frameNum, u16 * FreeNumber) { struct rt_ht_tx_context *pHTTXContext; u16 hwHdrLen; /* The hwHdrLen consist of 802.11 header length plus the header padding length. */ u32 fillOffset; struct rt_txinfo *pTxInfo; struct rt_txwi *pTxWI; u8 *pWirelessPacket = NULL; u8 QueIdx; int Status; unsigned long IrqFlags; /*u32 USBDMApktLen = 0, DMAHdrLen, padding; */ /* */ /* get Tx Ring Resource & Dma Buffer address */ /* */ QueIdx = pTxBlk->QueIdx; pHTTXContext = &pAd->TxContext[QueIdx]; RTMP_IRQ_LOCK(&pAd->TxContextQueueLock[QueIdx], IrqFlags); if (frameNum == 0) { /* Check if we have enough space for this bulk-out batch. 
*/ Status = RtmpUSBCanDoWrite(pAd, QueIdx, pHTTXContext); if (Status == NDIS_STATUS_SUCCESS) { pHTTXContext->bCurWriting = TRUE; pTxInfo = (struct rt_txinfo *)(&pTxBlk->HeaderBuf[0]); pTxWI = (struct rt_txwi *) (&pTxBlk->HeaderBuf[TXINFO_SIZE]); /* Reserve space for 8 bytes padding. */ if ((pHTTXContext->ENextBulkOutPosition == pHTTXContext->CurWritePosition)) { pHTTXContext->CurWritePosition += 8; pHTTXContext->ENextBulkOutPosition += 8; } fillOffset = pHTTXContext->CurWritePosition; pHTTXContext->CurWriteRealPos = pHTTXContext->CurWritePosition; pWirelessPacket = &pHTTXContext->TransferBuffer->field. WirelessPacket[fillOffset]; /* */ /* Copy TXINFO + TXWI + WLAN Header + LLC into DMA Header Buffer */ /* */ if (pTxBlk->TxFrameType == TX_AMSDU_FRAME) /*hwHdrLen = ROUND_UP(pTxBlk->MpduHeaderLen-LENGTH_AMSDU_SUBFRAMEHEAD, 4)+LENGTH_AMSDU_SUBFRAMEHEAD; */ hwHdrLen = pTxBlk->MpduHeaderLen - LENGTH_AMSDU_SUBFRAMEHEAD + pTxBlk->HdrPadLen + LENGTH_AMSDU_SUBFRAMEHEAD; else if (pTxBlk->TxFrameType == TX_RALINK_FRAME) /*hwHdrLen = ROUND_UP(pTxBlk->MpduHeaderLen-LENGTH_ARALINK_HEADER_FIELD, 4)+LENGTH_ARALINK_HEADER_FIELD; */ hwHdrLen = pTxBlk->MpduHeaderLen - LENGTH_ARALINK_HEADER_FIELD + pTxBlk->HdrPadLen + LENGTH_ARALINK_HEADER_FIELD; else /*hwHdrLen = ROUND_UP(pTxBlk->MpduHeaderLen, 4); */ hwHdrLen = pTxBlk->MpduHeaderLen + pTxBlk->HdrPadLen; /* Update the pTxBlk->Priv. */ pTxBlk->Priv = TXINFO_SIZE + TXWI_SIZE + hwHdrLen; /* pTxInfo->USBDMApktLen now just a temp value and will to correct latter. */ RTMPWriteTxInfo(pAd, pTxInfo, (u16)(pTxBlk->Priv), FALSE, FIFO_EDCA, FALSE /*NextValid */ , FALSE); /* Copy it. */ NdisMoveMemory(pWirelessPacket, pTxBlk->HeaderBuf, pTxBlk->Priv); pHTTXContext->CurWriteRealPos += pTxBlk->Priv; pWirelessPacket += pTxBlk->Priv; } } else { /* For sub-sequent frames of this bulk-out batch. Just copy it to our bulk-out buffer. */ Status = ((pHTTXContext->bCurWriting == TRUE) ? 
NDIS_STATUS_SUCCESS : NDIS_STATUS_FAILURE); if (Status == NDIS_STATUS_SUCCESS) { fillOffset = (pHTTXContext->CurWritePosition + pTxBlk->Priv); pWirelessPacket = &pHTTXContext->TransferBuffer->field. WirelessPacket[fillOffset]; /*hwHdrLen = pTxBlk->MpduHeaderLen; */ NdisMoveMemory(pWirelessPacket, pTxBlk->HeaderBuf, pTxBlk->MpduHeaderLen); pWirelessPacket += (pTxBlk->MpduHeaderLen); pTxBlk->Priv += pTxBlk->MpduHeaderLen; } else { /* It should not happened now unless we are going to shutdown. */ DBGPRINT(RT_DEBUG_ERROR, ("WriteMultiTxResource():bCurWriting is FALSE when handle sub-sequent frames.\n")); Status = NDIS_STATUS_FAILURE; } } /* We unlock it here to prevent the first 8 bytes maybe over-write issue. */ /* 1. First we got CurWritePosition but the first 8 bytes still not write to the pTxContext. */ /* 2. An interrupt break our routine and handle bulk-out complete. */ /* 3. In the bulk-out compllete, it need to do another bulk-out, */ /* if the ENextBulkOutPosition is just the same as CurWritePosition, it will save the first 8 bytes from CurWritePosition, */ /* but the payload still not copyed. the pTxContext->SavedPad[] will save as allzero. and set the bCopyPad = TRUE. */ /* 4. Interrupt complete. */ /* 5. Our interrupted routine go back and fill the first 8 bytes to pTxContext. */ /* 6. Next time when do bulk-out, it found the bCopyPad==TRUE and will copy the SavedPad[] to pTxContext->NextBulkOutPosition. */ /* and the packet will wrong. 
*/ RTMP_IRQ_UNLOCK(&pAd->TxContextQueueLock[QueIdx], IrqFlags); if (Status != NDIS_STATUS_SUCCESS) { DBGPRINT(RT_DEBUG_ERROR, ("WriteMultiTxResource: CWPos = %ld, NBOutPos = %ld.\n", pHTTXContext->CurWritePosition, pHTTXContext->NextBulkOutPosition)); goto done; } /* Copy the frame content into DMA buffer and update the pTxBlk->Priv */ NdisMoveMemory(pWirelessPacket, pTxBlk->pSrcBufData, pTxBlk->SrcBufLen); pWirelessPacket += pTxBlk->SrcBufLen; pTxBlk->Priv += pTxBlk->SrcBufLen; done: /* Release the skb buffer here */ RELEASE_NDIS_PACKET(pAd, pTxBlk->pPacket, NDIS_STATUS_SUCCESS); return (Status); } void RtmpUSB_FinalWriteTxResource(struct rt_rtmp_adapter *pAd, struct rt_tx_blk *pTxBlk, u16 totalMPDUSize, u16 TxIdx) { u8 QueIdx; struct rt_ht_tx_context *pHTTXContext; u32 fillOffset; struct rt_txinfo *pTxInfo; struct rt_txwi *pTxWI; u32 USBDMApktLen, padding; unsigned long IrqFlags; u8 *pWirelessPacket; QueIdx = pTxBlk->QueIdx; pHTTXContext = &pAd->TxContext[QueIdx]; RTMP_IRQ_LOCK(&pAd->TxContextQueueLock[QueIdx], IrqFlags); if (pHTTXContext->bCurWriting == TRUE) { fillOffset = pHTTXContext->CurWritePosition; if (((pHTTXContext->ENextBulkOutPosition == pHTTXContext->CurWritePosition) || ((pHTTXContext->ENextBulkOutPosition - 8) == pHTTXContext->CurWritePosition)) && (pHTTXContext->bCopySavePad == TRUE)) pWirelessPacket = (u8 *)(&pHTTXContext->SavedPad[0]); else pWirelessPacket = (u8 *)(&pHTTXContext->TransferBuffer->field. 
WirelessPacket[fillOffset]); /* */ /* Update TxInfo->USBDMApktLen , */ /* the length = TXWI_SIZE + 802.11_hdr + 802.11_hdr_pad + payload_of_all_batch_frames + Bulk-Out-padding */ /* */ pTxInfo = (struct rt_txinfo *)(pWirelessPacket); /* Calculate the bulk-out padding */ USBDMApktLen = pTxBlk->Priv - TXINFO_SIZE; padding = (4 - (USBDMApktLen % 4)) & 0x03; /* round up to 4 byte alignment */ USBDMApktLen += padding; pTxInfo->USBDMATxPktLen = USBDMApktLen; /* */ /* Update TXWI->MPDUtotalByteCount , */ /* the length = 802.11 header + payload_of_all_batch_frames */ pTxWI = (struct rt_txwi *) (pWirelessPacket + TXINFO_SIZE); pTxWI->MPDUtotalByteCount = totalMPDUSize; /* */ /* Update the pHTTXContext->CurWritePosition */ /* */ pHTTXContext->CurWritePosition += (TXINFO_SIZE + USBDMApktLen); if ((pHTTXContext->CurWritePosition + 3906) > MAX_TXBULK_LIMIT) { /* Add 3906 for prevent the NextBulkOut packet size is a A-RALINK/A-MSDU Frame. */ pHTTXContext->CurWritePosition = 8; pTxInfo->SwUseLastRound = 1; } pHTTXContext->CurWriteRealPos = pHTTXContext->CurWritePosition; /* */ /* Zero the last padding. */ /* */ pWirelessPacket = (&pHTTXContext->TransferBuffer->field. WirelessPacket[fillOffset + pTxBlk->Priv]); NdisZeroMemory(pWirelessPacket, padding + 8); /* Finally, set bCurWriting as FALSE */ pHTTXContext->bCurWriting = FALSE; } else { /* It should not happened now unless we are going to shutdown. */ DBGPRINT(RT_DEBUG_ERROR, ("FinalWriteTxResource():bCurWriting is FALSE when handle last frames.\n")); } RTMP_IRQ_UNLOCK(&pAd->TxContextQueueLock[QueIdx], IrqFlags); } void RtmpUSBDataLastTxIdx(struct rt_rtmp_adapter *pAd, u8 QueIdx, u16 TxIdx) { /* DO nothing for USB. */ } /* When can do bulk-out: 1. TxSwFreeIdx < TX_RING_SIZE; It means has at least one Ring entity is ready for bulk-out, kick it out. 2. If TxSwFreeIdx == TX_RING_SIZE Check if the CurWriting flag is FALSE, if it's FALSE, we can do kick out. 
*/ void RtmpUSBDataKickOut(struct rt_rtmp_adapter *pAd, struct rt_tx_blk *pTxBlk, u8 QueIdx) { RTUSB_SET_BULK_FLAG(pAd, (fRTUSB_BULK_OUT_DATA_NORMAL << QueIdx)); RTUSBKickBulkOut(pAd); } /* Must be run in Interrupt context This function handle RT2870 specific TxDesc and cpu index update and kick the packet out. */ int RtmpUSBMgmtKickOut(struct rt_rtmp_adapter *pAd, u8 QueIdx, void *pPacket, u8 *pSrcBufVA, u32 SrcBufLen) { struct rt_txinfo *pTxInfo; unsigned long BulkOutSize; u8 padLen; u8 *pDest; unsigned long SwIdx = pAd->MgmtRing.TxCpuIdx; struct rt_tx_context *pMLMEContext = (struct rt_tx_context *)pAd->MgmtRing.Cell[SwIdx].AllocVa; unsigned long IrqFlags; pTxInfo = (struct rt_txinfo *)(pSrcBufVA); /* Build our URB for USBD */ BulkOutSize = SrcBufLen; BulkOutSize = (BulkOutSize + 3) & (~3); RTMPWriteTxInfo(pAd, pTxInfo, (u16)(BulkOutSize - TXINFO_SIZE), TRUE, EpToQueue[MGMTPIPEIDX], FALSE, FALSE); BulkOutSize += 4; /* Always add 4 extra bytes at every packet. */ /* If BulkOutSize is multiple of BulkOutMaxPacketSize, add extra 4 bytes again. */ if ((BulkOutSize % pAd->BulkOutMaxPacketSize) == 0) BulkOutSize += 4; padLen = BulkOutSize - SrcBufLen; ASSERT((padLen <= RTMP_PKT_TAIL_PADDING)); /* Now memzero all extra padding bytes. */ pDest = (u8 *)(pSrcBufVA + SrcBufLen); skb_put(GET_OS_PKT_TYPE(pPacket), padLen); NdisZeroMemory(pDest, padLen); RTMP_IRQ_LOCK(&pAd->MLMEBulkOutLock, IrqFlags); pAd->MgmtRing.Cell[pAd->MgmtRing.TxCpuIdx].pNdisPacket = pPacket; pMLMEContext->TransferBuffer = (struct rt_tx_buffer *)(GET_OS_PKT_DATAPTR(pPacket)); /* Length in TxInfo should be 8 less than bulkout size. */ pMLMEContext->BulkOutSize = BulkOutSize; pMLMEContext->InUse = TRUE; pMLMEContext->bWaitingBulkOut = TRUE; /*for debug */ /*hex_dump("RtmpUSBMgmtKickOut", &pMLMEContext->TransferBuffer->field.WirelessPacket[0], (pMLMEContext->BulkOutSize > 16 ? 
16 : pMLMEContext->BulkOutSize)); */ /*pAd->RalinkCounters.KickTxCount++; */ /*pAd->RalinkCounters.OneSecTxDoneCount++; */ /*if (pAd->MgmtRing.TxSwFreeIdx == MGMT_RING_SIZE) */ /* needKickOut = TRUE; */ /* Decrease the TxSwFreeIdx and Increase the TX_CTX_IDX */ pAd->MgmtRing.TxSwFreeIdx--; INC_RING_INDEX(pAd->MgmtRing.TxCpuIdx, MGMT_RING_SIZE); RTMP_IRQ_UNLOCK(&pAd->MLMEBulkOutLock, IrqFlags); RTUSB_SET_BULK_FLAG(pAd, fRTUSB_BULK_OUT_MLME); /*if (needKickOut) */ RTUSBKickBulkOut(pAd); return 0; } void RtmpUSBNullFrameKickOut(struct rt_rtmp_adapter *pAd, u8 QueIdx, u8 * pNullFrame, u32 frameLen) { if (pAd->NullContext.InUse == FALSE) { struct rt_tx_context *pNullContext; struct rt_txinfo *pTxInfo; struct rt_txwi * pTxWI; u8 *pWirelessPkt; pNullContext = &(pAd->NullContext); /* Set the in use bit */ pNullContext->InUse = TRUE; pWirelessPkt = (u8 *)& pNullContext->TransferBuffer->field. WirelessPacket[0]; RTMPZeroMemory(&pWirelessPkt[0], 100); pTxInfo = (struct rt_txinfo *)& pWirelessPkt[0]; RTMPWriteTxInfo(pAd, pTxInfo, (u16)(sizeof(struct rt_header_802_11) + TXWI_SIZE), TRUE, EpToQueue[MGMTPIPEIDX], FALSE, FALSE); pTxInfo->QSEL = FIFO_EDCA; pTxWI = (struct rt_txwi *) & pWirelessPkt[TXINFO_SIZE]; RTMPWriteTxWI(pAd, pTxWI, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, 0, BSSID_WCID, (sizeof(struct rt_header_802_11)), 0, 0, (u8)pAd->CommonCfg.MlmeTransmit.field.MCS, IFS_HTTXOP, FALSE, &pAd->CommonCfg.MlmeTransmit); RTMPMoveMemory(&pWirelessPkt[TXWI_SIZE + TXINFO_SIZE], &pAd->NullFrame, sizeof(struct rt_header_802_11)); pAd->NullContext.BulkOutSize = TXINFO_SIZE + TXWI_SIZE + sizeof(pAd->NullFrame) + 4; /* Fill out frame length information for global Bulk out arbitor */ /*pNullContext->BulkOutSize = TransferBufferLength; */ DBGPRINT(RT_DEBUG_TRACE, ("SYNC - send NULL Frame @%d Mbps...\n", RateIdToMbps[pAd->CommonCfg.TxRate])); RTUSB_SET_BULK_FLAG(pAd, fRTUSB_BULK_OUT_DATA_NULL); /* Kick bulk out */ RTUSBKickBulkOut(pAd); } } /* 
======================================================================== Routine Description: Get a received packet. Arguments: pAd device control block pSaveRxD receive descriptor information *pbReschedule need reschedule flag *pRxPending pending received packet flag Return Value: the recieved packet Note: ======================================================================== */ void *GetPacketFromRxRing(struct rt_rtmp_adapter *pAd, OUT PRT28XX_RXD_STRUC pSaveRxD, OUT BOOLEAN * pbReschedule, IN u32 * pRxPending) { struct rt_rx_context *pRxContext; void *pSkb; u8 *pData; unsigned long ThisFrameLen; unsigned long RxBufferLength; struct rt_rxwi * pRxWI; pRxContext = &pAd->RxContext[pAd->NextRxBulkInReadIndex]; if ((pRxContext->Readable == FALSE) || (pRxContext->InUse == TRUE)) return NULL; RxBufferLength = pRxContext->BulkInOffset - pAd->ReadPosition; if (RxBufferLength < (RT2870_RXDMALEN_FIELD_SIZE + sizeof(struct rt_rxwi) + sizeof(struct rt_rxinfo))) { goto label_null; } pData = &pRxContext->TransferBuffer[pAd->ReadPosition]; /* 4KB */ /* The RXDMA field is 4 bytes, now just use the first 2 bytes. The Length including the (RXWI + MSDU + Padding) */ ThisFrameLen = *pData + (*(pData + 1) << 8); if (ThisFrameLen == 0) { DBGPRINT(RT_DEBUG_TRACE, ("BIRIdx(%d): RXDMALen is zero.[%ld], BulkInBufLen = %ld)\n", pAd->NextRxBulkInReadIndex, ThisFrameLen, pRxContext->BulkInOffset)); goto label_null; } if ((ThisFrameLen & 0x3) != 0) { DBGPRINT(RT_DEBUG_ERROR, ("BIRIdx(%d): RXDMALen not multiple of 4.[%ld], BulkInBufLen = %ld)\n", pAd->NextRxBulkInReadIndex, ThisFrameLen, pRxContext->BulkInOffset)); goto label_null; } if ((ThisFrameLen + 8) > RxBufferLength) /* 8 for (RT2870_RXDMALEN_FIELD_SIZE + sizeof(struct rt_rxinfo)) */ { DBGPRINT(RT_DEBUG_TRACE, ("BIRIdx(%d):FrameLen(0x%lx) outranges. BulkInLen=0x%lx, remaining RxBufLen=0x%lx, ReadPos=0x%lx\n", pAd->NextRxBulkInReadIndex, ThisFrameLen, pRxContext->BulkInOffset, RxBufferLength, pAd->ReadPosition)); /* error frame. 
finish this loop */ goto label_null; } /* skip USB frame length field */ pData += RT2870_RXDMALEN_FIELD_SIZE; pRxWI = (struct rt_rxwi *) pData; if (pRxWI->MPDUtotalByteCount > ThisFrameLen) { DBGPRINT(RT_DEBUG_ERROR, ("%s():pRxWIMPDUtotalByteCount(%d) large than RxDMALen(%ld)\n", __FUNCTION__, pRxWI->MPDUtotalByteCount, ThisFrameLen)); goto label_null; } /* allocate a rx packet */ pSkb = dev_alloc_skb(ThisFrameLen); if (pSkb == NULL) { DBGPRINT(RT_DEBUG_ERROR, ("%s():Cannot Allocate sk buffer for this Bulk-In buffer!\n", __FUNCTION__)); goto label_null; } /* copy the rx packet */ memcpy(skb_put(pSkb, ThisFrameLen), pData, ThisFrameLen); RTPKT_TO_OSPKT(pSkb)->dev = get_netdev_from_bssid(pAd, BSS0); RTMP_SET_PACKET_SOURCE(OSPKT_TO_RTPKT(pSkb), PKTSRC_NDIS); /* copy RxD */ *pSaveRxD = *(struct rt_rxinfo *) (pData + ThisFrameLen); /* update next packet read position. */ pAd->ReadPosition += (ThisFrameLen + RT2870_RXDMALEN_FIELD_SIZE + RXINFO_SIZE); /* 8 for (RT2870_RXDMALEN_FIELD_SIZE + sizeof(struct rt_rxinfo)) */ return pSkb; label_null: return NULL; } /* ======================================================================== Routine Description: Check Rx descriptor, return NDIS_STATUS_FAILURE if any error dound Arguments: pRxD Pointer to the Rx descriptor Return Value: NDIS_STATUS_SUCCESS No err NDIS_STATUS_FAILURE Error Note: ======================================================================== */ int RTMPCheckRxError(struct rt_rtmp_adapter *pAd, struct rt_header_802_11 * pHeader, struct rt_rxwi * pRxWI, IN PRT28XX_RXD_STRUC pRxINFO) { struct rt_cipher_key *pWpaKey; int dBm; if (pAd->bPromiscuous == TRUE) return (NDIS_STATUS_SUCCESS); if (pRxINFO == NULL) return (NDIS_STATUS_FAILURE); /* Phy errors & CRC errors */ if (pRxINFO->Crc) { /* Check RSSI for Noise Hist statistic collection. 
*/ dBm = (int)(pRxWI->RSSI0) - pAd->BbpRssiToDbmDelta; if (dBm <= -87) pAd->StaCfg.RPIDensity[0] += 1; else if (dBm <= -82) pAd->StaCfg.RPIDensity[1] += 1; else if (dBm <= -77) pAd->StaCfg.RPIDensity[2] += 1; else if (dBm <= -72) pAd->StaCfg.RPIDensity[3] += 1; else if (dBm <= -67) pAd->StaCfg.RPIDensity[4] += 1; else if (dBm <= -62) pAd->StaCfg.RPIDensity[5] += 1; else if (dBm <= -57) pAd->StaCfg.RPIDensity[6] += 1; else if (dBm > -57) pAd->StaCfg.RPIDensity[7] += 1; return (NDIS_STATUS_FAILURE); } /* Add Rx size to channel load counter, we should ignore error counts */ pAd->StaCfg.CLBusyBytes += (pRxWI->MPDUtotalByteCount + 14); /* Drop ToDs promiscous frame, it is opened due to CCX 2 channel load statistics */ if (pHeader->FC.ToDs) { DBGPRINT_RAW(RT_DEBUG_ERROR, ("Err;FC.ToDs\n")); return NDIS_STATUS_FAILURE; } /* Paul 04-03 for OFDM Rx length issue */ if (pRxWI->MPDUtotalByteCount > MAX_AGGREGATION_SIZE) { DBGPRINT_RAW(RT_DEBUG_ERROR, ("received packet too long\n")); return NDIS_STATUS_FAILURE; } /* Drop not U2M frames, cant's drop here because we will drop beacon in this case */ /* I am kind of doubting the U2M bit operation */ /* if (pRxD->U2M == 0) */ /* return(NDIS_STATUS_FAILURE); */ /* drop decyption fail frame */ if (pRxINFO->Decrypted && pRxINFO->CipherErr) { if (((pRxINFO->CipherErr & 1) == 1) && pAd->CommonCfg.bWirelessEvent && INFRA_ON(pAd)) RTMPSendWirelessEvent(pAd, IW_ICV_ERROR_EVENT_FLAG, pAd->MacTab.Content[BSSID_WCID]. Addr, BSS0, 0); if (((pRxINFO->CipherErr & 2) == 2) && pAd->CommonCfg.bWirelessEvent && INFRA_ON(pAd)) RTMPSendWirelessEvent(pAd, IW_MIC_ERROR_EVENT_FLAG, pAd->MacTab.Content[BSSID_WCID]. 
Addr, BSS0, 0); /* */ /* MIC Error */ /* */ if ((pRxINFO->CipherErr == 2) && pRxINFO->MyBss) { pWpaKey = &pAd->SharedKey[BSS0][pRxWI->KeyIndex]; RTMPReportMicError(pAd, pWpaKey); DBGPRINT_RAW(RT_DEBUG_ERROR, ("Rx MIC Value error\n")); } if (pRxINFO->Decrypted && (pAd->SharedKey[BSS0][pRxWI->KeyIndex].CipherAlg == CIPHER_AES) && (pHeader->Sequence == pAd->FragFrame.Sequence)) { /* */ /* Acceptable since the First FragFrame no CipherErr problem. */ /* */ return (NDIS_STATUS_SUCCESS); } return (NDIS_STATUS_FAILURE); } return (NDIS_STATUS_SUCCESS); } void RtmpUsbStaAsicForceWakeupTimeout(void *SystemSpecific1, void *FunctionContext, void *SystemSpecific2, void *SystemSpecific3) { struct rt_rtmp_adapter *pAd = (struct rt_rtmp_adapter *)FunctionContext; if (pAd && pAd->Mlme.AutoWakeupTimerRunning) { AsicSendCommandToMcu(pAd, 0x31, 0xff, 0x00, 0x02); OPSTATUS_CLEAR_FLAG(pAd, fOP_STATUS_DOZE); pAd->Mlme.AutoWakeupTimerRunning = FALSE; } } void RT28xxUsbStaAsicForceWakeup(struct rt_rtmp_adapter *pAd, IN BOOLEAN bFromTx) { BOOLEAN Canceled; if (pAd->Mlme.AutoWakeupTimerRunning) RTMPCancelTimer(&pAd->Mlme.AutoWakeupTimer, &Canceled); AsicSendCommandToMcu(pAd, 0x31, 0xff, 0x00, 0x02); OPSTATUS_CLEAR_FLAG(pAd, fOP_STATUS_DOZE); } void RT28xxUsbStaAsicSleepThenAutoWakeup(struct rt_rtmp_adapter *pAd, u16 TbttNumToNextWakeUp) { /* we have decided to SLEEP, so at least do it for a BEACON period. */ if (TbttNumToNextWakeUp == 0) TbttNumToNextWakeUp = 1; RTMPSetTimer(&pAd->Mlme.AutoWakeupTimer, AUTO_WAKEUP_TIMEOUT); pAd->Mlme.AutoWakeupTimerRunning = TRUE; AsicSendCommandToMcu(pAd, 0x30, 0xff, 0xff, 0x02); /* send POWER-SAVE command to MCU. Timeout 40us. */ OPSTATUS_SET_FLAG(pAd, fOP_STATUS_DOZE); } #endif /* RTMP_MAC_USB // */
gpl-2.0
pio-masaki/at100-kernel
arch/sh/drivers/pci/fixups-landisk.c
2444
1502
/* * arch/sh/drivers/pci/fixups-landisk.c * * PCI initialization for the I-O DATA Device, Inc. LANDISK board * * Copyright (C) 2006 kogiidena * Copyright (C) 2010 Nobuhiro Iwamatsu * * May be copied or modified under the terms of the GNU General Public * License. See linux/COPYING for more information. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pci.h> #include "pci-sh4.h" #define PCIMCR_MRSET_OFF 0xBFFFFFFF #define PCIMCR_RFSH_OFF 0xFFFFFFFB int pcibios_map_platform_irq(struct pci_dev *pdev, u8 slot, u8 pin) { /* * slot0: pin1-4 = irq5,6,7,8 * slot1: pin1-4 = irq6,7,8,5 * slot2: pin1-4 = irq7,8,5,6 * slot3: pin1-4 = irq8,5,6,7 */ int irq = ((slot + pin - 1) & 0x3) + 5; if ((slot | (pin - 1)) > 0x3) { printk(KERN_WARNING "PCI: Bad IRQ mapping request for slot %d pin %c\n", slot, pin - 1 + 'A'); return -1; } return irq; } int pci_fixup_pcic(struct pci_channel *chan) { unsigned long bcr1, mcr; bcr1 = __raw_readl(SH7751_BCR1); bcr1 |= 0x40080000; /* Enable Bit 19 BREQEN, set PCIC to slave */ pci_write_reg(chan, bcr1, SH4_PCIBCR1); mcr = __raw_readl(SH7751_MCR); mcr = (mcr & PCIMCR_MRSET_OFF) & PCIMCR_RFSH_OFF; pci_write_reg(chan, mcr, SH4_PCIMCR); pci_write_reg(chan, 0x0c000000, SH7751_PCICONF5); pci_write_reg(chan, 0xd0000000, SH7751_PCICONF6); pci_write_reg(chan, 0x0c000000, SH4_PCILAR0); pci_write_reg(chan, 0x00000000, SH4_PCILAR1); return 0; }
gpl-2.0
CyanogenMod/android_kernel_asus_tf300t
drivers/staging/iio/Documentation/generic_buffer.c
2700
7944
/* Industrialio buffer test code. * * Copyright (c) 2008 Jonathan Cameron * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is primarily intended as an example application. * Reads the current buffer setup from sysfs and starts a short capture * from the specified device, pretty printing the result after appropriate * conversion. * * Command line parameters * generic_buffer -n <device_name> -t <trigger_name> * If trigger name is not specified the program assumes you want a dataready * trigger associated with the device and goes looking for it. * */ #include <unistd.h> #include <dirent.h> #include <fcntl.h> #include <stdio.h> #include <errno.h> #include <sys/stat.h> #include <sys/dir.h> #include <linux/types.h> #include <string.h> #include <poll.h> #include "iio_utils.h" /** * size_from_channelarray() - calculate the storage size of a scan * @channels: the channel info array * @num_channels: size of the channel info array * * Has the side effect of filling the channels[i].location values used * in processing the buffer output. 
**/ int size_from_channelarray(struct iio_channel_info *channels, int num_channels) { int bytes = 0; int i = 0; while (i < num_channels) { if (bytes % channels[i].bytes == 0) channels[i].location = bytes; else channels[i].location = bytes - bytes%channels[i].bytes + channels[i].bytes; bytes = channels[i].location + channels[i].bytes; i++; } return bytes; } void print2byte(int input, struct iio_channel_info *info) { /* shift before conversion to avoid sign extension of left aligned data */ input = input >> info->shift; if (info->is_signed) { int16_t val = input; val &= (1 << info->bits_used) - 1; val = (int16_t)(val << (16 - info->bits_used)) >> (16 - info->bits_used); printf("%05f ", val, (float)(val + info->offset)*info->scale); } else { uint16_t val = input; val &= (1 << info->bits_used) - 1; printf("%05f ", ((float)val + info->offset)*info->scale); } } /** * process_scan() - print out the values in SI units * @data: pointer to the start of the scan * @infoarray: information about the channels. Note * size_from_channelarray must have been called first to fill the * location offsets. 
* @num_channels: the number of active channels **/ void process_scan(char *data, struct iio_channel_info *infoarray, int num_channels) { int k; for (k = 0; k < num_channels; k++) switch (infoarray[k].bytes) { /* only a few cases implemented so far */ case 2: print2byte(*(uint16_t *)(data + infoarray[k].location), &infoarray[k]); break; case 8: if (infoarray[k].is_signed) { int64_t val = *(int64_t *) (data + infoarray[k].location); if ((val >> infoarray[k].bits_used) & 1) val = (val & infoarray[k].mask) | ~infoarray[k].mask; /* special case for timestamp */ if (infoarray[k].scale == 1.0f && infoarray[k].offset == 0.0f) printf(" %lld", val); else printf("%05f ", ((float)val + infoarray[k].offset)* infoarray[k].scale); } break; default: break; } printf("\n"); } int main(int argc, char **argv) { unsigned long num_loops = 2; unsigned long timedelay = 1000000; unsigned long buf_len = 128; int ret, c, i, j, toread; FILE *fp_ev; int fp; int num_channels; char *trigger_name = NULL, *device_name = NULL; char *dev_dir_name, *buf_dir_name; int datardytrigger = 1; char *data; ssize_t read_size; int dev_num, trig_num; char *buffer_access; int scan_size; int noevents = 0; char *dummy; struct iio_channel_info *infoarray; while ((c = getopt(argc, argv, "l:w:c:et:n:")) != -1) { switch (c) { case 'n': device_name = optarg; break; case 't': trigger_name = optarg; datardytrigger = 0; break; case 'e': noevents = 1; break; case 'c': num_loops = strtoul(optarg, &dummy, 10); break; case 'w': timedelay = strtoul(optarg, &dummy, 10); break; case 'l': buf_len = strtoul(optarg, &dummy, 10); break; case '?': return -1; } } if (device_name == NULL) return -1; /* Find the device requested */ dev_num = find_type_by_name(device_name, "device"); if (dev_num < 0) { printf("Failed to find the %s\n", device_name); ret = -ENODEV; goto error_ret; } printf("iio device number being used is %d\n", dev_num); asprintf(&dev_dir_name, "%sdevice%d", iio_dir, dev_num); if (trigger_name == NULL) { /* * Build the 
trigger name. If it is device associated it's * name is <device_name>_dev[n] where n matches the device * number found above */ ret = asprintf(&trigger_name, "%s-dev%d", device_name, dev_num); if (ret < 0) { ret = -ENOMEM; goto error_ret; } } /* Verify the trigger exists */ trig_num = find_type_by_name(trigger_name, "trigger"); if (trig_num < 0) { printf("Failed to find the trigger %s\n", trigger_name); ret = -ENODEV; goto error_free_triggername; } printf("iio trigger number being used is %d\n", trig_num); /* * Parse the files in scan_elements to identify what channels are * present */ ret = build_channel_array(dev_dir_name, &infoarray, &num_channels); if (ret) { printf("Problem reading scan element information\n"); goto error_free_triggername; } /* * Construct the directory name for the associated buffer. * As we know that the lis3l02dq has only one buffer this may * be built rather than found. */ ret = asprintf(&buf_dir_name, "%sdevice%d:buffer0", iio_dir, dev_num); if (ret < 0) { ret = -ENOMEM; goto error_free_triggername; } printf("%s %s\n", dev_dir_name, trigger_name); /* Set the device trigger to be the data rdy trigger found above */ ret = write_sysfs_string_and_verify("trigger/current_trigger", dev_dir_name, trigger_name); if (ret < 0) { printf("Failed to write current_trigger file\n"); goto error_free_buf_dir_name; } /* Setup ring buffer parameters */ ret = write_sysfs_int("length", buf_dir_name, buf_len); if (ret < 0) goto error_free_buf_dir_name; /* Enable the buffer */ ret = write_sysfs_int("enable", buf_dir_name, 1); if (ret < 0) goto error_free_buf_dir_name; scan_size = size_from_channelarray(infoarray, num_channels); data = malloc(scan_size*buf_len); if (!data) { ret = -ENOMEM; goto error_free_buf_dir_name; } ret = asprintf(&buffer_access, "/dev/device%d:buffer0", dev_num); if (ret < 0) { ret = -ENOMEM; goto error_free_data; } /* Attempt to open non blocking the access dev */ fp = open(buffer_access, O_RDONLY | O_NONBLOCK); if (fp == -1) { /*If it 
isn't there make the node */ printf("Failed to open %s\n", buffer_access); ret = -errno; goto error_free_buffer_access; } /* Wait for events 10 times */ for (j = 0; j < num_loops; j++) { if (!noevents) { struct pollfd pfd = { .fd = fp, .events = POLLIN, }; poll(&pfd, 1, -1); toread = buf_len; } else { usleep(timedelay); toread = 64; } read_size = read(fp, data, toread*scan_size); if (read_size == -EAGAIN) { printf("nothing available\n"); continue; } for (i = 0; i < read_size/scan_size; i++) process_scan(data + scan_size*i, infoarray, num_channels); } /* Stop the ring buffer */ ret = write_sysfs_int("enable", buf_dir_name, 0); if (ret < 0) goto error_close_buffer_access; /* Disconnect from the trigger - just write a dummy name.*/ write_sysfs_string("trigger/current_trigger", dev_dir_name, "NULL"); error_close_buffer_access: close(fp); error_free_data: free(data); error_free_buffer_access: free(buffer_access); error_free_buf_dir_name: free(buf_dir_name); error_free_triggername: if (datardytrigger) free(trigger_name); error_ret: return ret; }
gpl-2.0
htc-mirror/pyramid-ics-crc-3.0.16-c764bfb
drivers/staging/cx25821/cx25821-medusa-video.c
3212
22220
/* * Driver for the Conexant CX25821 PCIe bridge * * Copyright (C) 2009 Conexant Systems Inc. * Authors <shu.lin@conexant.com>, <hiep.huynh@conexant.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "cx25821.h" #include "cx25821-medusa-video.h" #include "cx25821-biffuncs.h" /* * medusa_enable_bluefield_output() * * Enable the generation of blue filed output if no video * */ static void medusa_enable_bluefield_output(struct cx25821_dev *dev, int channel, int enable) { int ret_val = 1; u32 value = 0; u32 tmp = 0; int out_ctrl = OUT_CTRL1; int out_ctrl_ns = OUT_CTRL_NS; switch (channel) { default: case VDEC_A: break; case VDEC_B: out_ctrl = VDEC_B_OUT_CTRL1; out_ctrl_ns = VDEC_B_OUT_CTRL_NS; break; case VDEC_C: out_ctrl = VDEC_C_OUT_CTRL1; out_ctrl_ns = VDEC_C_OUT_CTRL_NS; break; case VDEC_D: out_ctrl = VDEC_D_OUT_CTRL1; out_ctrl_ns = VDEC_D_OUT_CTRL_NS; break; case VDEC_E: out_ctrl = VDEC_E_OUT_CTRL1; out_ctrl_ns = VDEC_E_OUT_CTRL_NS; return; case VDEC_F: out_ctrl = VDEC_F_OUT_CTRL1; out_ctrl_ns = VDEC_F_OUT_CTRL_NS; return; case VDEC_G: out_ctrl = VDEC_G_OUT_CTRL1; out_ctrl_ns = VDEC_G_OUT_CTRL_NS; return; case VDEC_H: out_ctrl = VDEC_H_OUT_CTRL1; out_ctrl_ns = VDEC_H_OUT_CTRL_NS; return; } value = cx25821_i2c_read(&dev->i2c_bus[0], out_ctrl, &tmp); value &= 0xFFFFFF7F; /* clear 
BLUE_FIELD_EN */ if (enable) value |= 0x00000080; /* set BLUE_FIELD_EN */ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], out_ctrl, value); value = cx25821_i2c_read(&dev->i2c_bus[0], out_ctrl_ns, &tmp); value &= 0xFFFFFF7F; if (enable) value |= 0x00000080; /* set BLUE_FIELD_EN */ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], out_ctrl_ns, value); } static int medusa_initialize_ntsc(struct cx25821_dev *dev) { int ret_val = 0; int i = 0; u32 value = 0; u32 tmp = 0; mutex_lock(&dev->lock); for (i = 0; i < MAX_DECODERS; i++) { /* set video format NTSC-M */ value = cx25821_i2c_read(&dev->i2c_bus[0], MODE_CTRL + (0x200 * i), &tmp); value &= 0xFFFFFFF0; /* enable the fast locking mode bit[16] */ value |= 0x10001; ret_val = cx25821_i2c_write(&dev->i2c_bus[0], MODE_CTRL + (0x200 * i), value); /* resolution NTSC 720x480 */ value = cx25821_i2c_read(&dev->i2c_bus[0], HORIZ_TIM_CTRL + (0x200 * i), &tmp); value &= 0x00C00C00; value |= 0x612D0074; ret_val = cx25821_i2c_write(&dev->i2c_bus[0], HORIZ_TIM_CTRL + (0x200 * i), value); value = cx25821_i2c_read(&dev->i2c_bus[0], VERT_TIM_CTRL + (0x200 * i), &tmp); value &= 0x00C00C00; value |= 0x1C1E001A; /* vblank_cnt + 2 to get camera ID */ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], VERT_TIM_CTRL + (0x200 * i), value); /* chroma subcarrier step size */ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], SC_STEP_SIZE + (0x200 * i), 0x43E00000); /* enable VIP optional active */ value = cx25821_i2c_read(&dev->i2c_bus[0], OUT_CTRL_NS + (0x200 * i), &tmp); value &= 0xFFFBFFFF; value |= 0x00040000; ret_val = cx25821_i2c_write(&dev->i2c_bus[0], OUT_CTRL_NS + (0x200 * i), value); /* enable VIP optional active (VIP_OPT_AL) for direct output. 
*/ value = cx25821_i2c_read(&dev->i2c_bus[0], OUT_CTRL1 + (0x200 * i), &tmp); value &= 0xFFFBFFFF; value |= 0x00040000; ret_val = cx25821_i2c_write(&dev->i2c_bus[0], OUT_CTRL1 + (0x200 * i), value); /* * clear VPRES_VERT_EN bit, fixes the chroma run away problem * when the input switching rate < 16 fields */ value = cx25821_i2c_read(&dev->i2c_bus[0], MISC_TIM_CTRL + (0x200 * i), &tmp); /* disable special play detection */ value = setBitAtPos(value, 14); value = clearBitAtPos(value, 15); ret_val = cx25821_i2c_write(&dev->i2c_bus[0], MISC_TIM_CTRL + (0x200 * i), value); /* set vbi_gate_en to 0 */ value = cx25821_i2c_read(&dev->i2c_bus[0], DFE_CTRL1 + (0x200 * i), &tmp); value = clearBitAtPos(value, 29); ret_val = cx25821_i2c_write(&dev->i2c_bus[0], DFE_CTRL1 + (0x200 * i), value); /* Enable the generation of blue field output if no video */ medusa_enable_bluefield_output(dev, i, 1); } for (i = 0; i < MAX_ENCODERS; i++) { /* NTSC hclock */ value = cx25821_i2c_read(&dev->i2c_bus[0], DENC_A_REG_1 + (0x100 * i), &tmp); value &= 0xF000FC00; value |= 0x06B402D0; ret_val = cx25821_i2c_write(&dev->i2c_bus[0], DENC_A_REG_1 + (0x100 * i), value); /* burst begin and burst end */ value = cx25821_i2c_read(&dev->i2c_bus[0], DENC_A_REG_2 + (0x100 * i), &tmp); value &= 0xFF000000; value |= 0x007E9054; ret_val = cx25821_i2c_write(&dev->i2c_bus[0], DENC_A_REG_2 + (0x100 * i), value); value = cx25821_i2c_read(&dev->i2c_bus[0], DENC_A_REG_3 + (0x100 * i), &tmp); value &= 0xFC00FE00; value |= 0x00EC00F0; ret_val = cx25821_i2c_write(&dev->i2c_bus[0], DENC_A_REG_3 + (0x100 * i), value); /* set NTSC vblank, no phase alternation, 7.5 IRE pedestal */ value = cx25821_i2c_read(&dev->i2c_bus[0], DENC_A_REG_4 + (0x100 * i), &tmp); value &= 0x00FCFFFF; value |= 0x13020000; ret_val = cx25821_i2c_write(&dev->i2c_bus[0], DENC_A_REG_4 + (0x100 * i), value); value = cx25821_i2c_read(&dev->i2c_bus[0], DENC_A_REG_5 + (0x100 * i), &tmp); value &= 0xFFFF0000; value |= 0x0000E575; ret_val = 
cx25821_i2c_write(&dev->i2c_bus[0], DENC_A_REG_5 + (0x100 * i), value); ret_val = cx25821_i2c_write(&dev->i2c_bus[0], DENC_A_REG_6 + (0x100 * i), 0x009A89C1); /* Subcarrier Increment */ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], DENC_A_REG_7 + (0x100 * i), 0x21F07C1F); } /* set picture resolutions */ /* 0 - 720 */ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], HSCALE_CTRL, 0x0); /* 0 - 480 */ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], VSCALE_CTRL, 0x0); /* set Bypass input format to NTSC 525 lines */ value = cx25821_i2c_read(&dev->i2c_bus[0], BYP_AB_CTRL, &tmp); value |= 0x00080200; ret_val = cx25821_i2c_write(&dev->i2c_bus[0], BYP_AB_CTRL, value); mutex_unlock(&dev->lock); return ret_val; } static int medusa_PALCombInit(struct cx25821_dev *dev, int dec) { int ret_val = -1; u32 value = 0, tmp = 0; /* Setup for 2D threshold */ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], COMB_2D_HFS_CFG + (0x200 * dec), 0x20002861); ret_val = cx25821_i2c_write(&dev->i2c_bus[0], COMB_2D_HFD_CFG + (0x200 * dec), 0x20002861); ret_val = cx25821_i2c_write(&dev->i2c_bus[0], COMB_2D_LF_CFG + (0x200 * dec), 0x200A1023); /* Setup flat chroma and luma thresholds */ value = cx25821_i2c_read(&dev->i2c_bus[0], COMB_FLAT_THRESH_CTRL + (0x200 * dec), &tmp); value &= 0x06230000; ret_val = cx25821_i2c_write(&dev->i2c_bus[0], COMB_FLAT_THRESH_CTRL + (0x200 * dec), value); /* set comb 2D blend */ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], COMB_2D_BLEND + (0x200 * dec), 0x210F0F0F); /* COMB MISC CONTROL */ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], COMB_MISC_CTRL + (0x200 * dec), 0x41120A7F); return ret_val; } static int medusa_initialize_pal(struct cx25821_dev *dev) { int ret_val = 0; int i = 0; u32 value = 0; u32 tmp = 0; mutex_lock(&dev->lock); for (i = 0; i < MAX_DECODERS; i++) { /* set video format PAL-BDGHI */ value = cx25821_i2c_read(&dev->i2c_bus[0], MODE_CTRL + (0x200 * i), &tmp); value &= 0xFFFFFFF0; /* enable the fast locking mode bit[16] */ value |= 0x10004; ret_val = 
cx25821_i2c_write(&dev->i2c_bus[0], MODE_CTRL + (0x200 * i), value); /* resolution PAL 720x576 */ value = cx25821_i2c_read(&dev->i2c_bus[0], HORIZ_TIM_CTRL + (0x200 * i), &tmp); value &= 0x00C00C00; value |= 0x632D007D; ret_val = cx25821_i2c_write(&dev->i2c_bus[0], HORIZ_TIM_CTRL + (0x200 * i), value); /* vblank656_cnt=x26, vactive_cnt=240h, vblank_cnt=x24 */ value = cx25821_i2c_read(&dev->i2c_bus[0], VERT_TIM_CTRL + (0x200 * i), &tmp); value &= 0x00C00C00; value |= 0x28240026; /* vblank_cnt + 2 to get camera ID */ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], VERT_TIM_CTRL + (0x200 * i), value); /* chroma subcarrier step size */ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], SC_STEP_SIZE + (0x200 * i), 0x5411E2D0); /* enable VIP optional active */ value = cx25821_i2c_read(&dev->i2c_bus[0], OUT_CTRL_NS + (0x200 * i), &tmp); value &= 0xFFFBFFFF; value |= 0x00040000; ret_val = cx25821_i2c_write(&dev->i2c_bus[0], OUT_CTRL_NS + (0x200 * i), value); /* enable VIP optional active (VIP_OPT_AL) for direct output. 
*/ value = cx25821_i2c_read(&dev->i2c_bus[0], OUT_CTRL1 + (0x200 * i), &tmp); value &= 0xFFFBFFFF; value |= 0x00040000; ret_val = cx25821_i2c_write(&dev->i2c_bus[0], OUT_CTRL1 + (0x200 * i), value); /* * clear VPRES_VERT_EN bit, fixes the chroma run away problem * when the input switching rate < 16 fields */ value = cx25821_i2c_read(&dev->i2c_bus[0], MISC_TIM_CTRL + (0x200 * i), &tmp); /* disable special play detection */ value = setBitAtPos(value, 14); value = clearBitAtPos(value, 15); ret_val = cx25821_i2c_write(&dev->i2c_bus[0], MISC_TIM_CTRL + (0x200 * i), value); /* set vbi_gate_en to 0 */ value = cx25821_i2c_read(&dev->i2c_bus[0], DFE_CTRL1 + (0x200 * i), &tmp); value = clearBitAtPos(value, 29); ret_val = cx25821_i2c_write(&dev->i2c_bus[0], DFE_CTRL1 + (0x200 * i), value); medusa_PALCombInit(dev, i); /* Enable the generation of blue field output if no video */ medusa_enable_bluefield_output(dev, i, 1); } for (i = 0; i < MAX_ENCODERS; i++) { /* PAL hclock */ value = cx25821_i2c_read(&dev->i2c_bus[0], DENC_A_REG_1 + (0x100 * i), &tmp); value &= 0xF000FC00; value |= 0x06C002D0; ret_val = cx25821_i2c_write(&dev->i2c_bus[0], DENC_A_REG_1 + (0x100 * i), value); /* burst begin and burst end */ value = cx25821_i2c_read(&dev->i2c_bus[0], DENC_A_REG_2 + (0x100 * i), &tmp); value &= 0xFF000000; value |= 0x007E9754; ret_val = cx25821_i2c_write(&dev->i2c_bus[0], DENC_A_REG_2 + (0x100 * i), value); /* hblank and vactive */ value = cx25821_i2c_read(&dev->i2c_bus[0], DENC_A_REG_3 + (0x100 * i), &tmp); value &= 0xFC00FE00; value |= 0x00FC0120; ret_val = cx25821_i2c_write(&dev->i2c_bus[0], DENC_A_REG_3 + (0x100 * i), value); /* set PAL vblank, phase alternation, 0 IRE pedestal */ value = cx25821_i2c_read(&dev->i2c_bus[0], DENC_A_REG_4 + (0x100 * i), &tmp); value &= 0x00FCFFFF; value |= 0x14010000; ret_val = cx25821_i2c_write(&dev->i2c_bus[0], DENC_A_REG_4 + (0x100 * i), value); value = cx25821_i2c_read(&dev->i2c_bus[0], DENC_A_REG_5 + (0x100 * i), &tmp); value &= 0xFFFF0000; 
value |= 0x0000F078; ret_val = cx25821_i2c_write(&dev->i2c_bus[0], DENC_A_REG_5 + (0x100 * i), value); ret_val = cx25821_i2c_write(&dev->i2c_bus[0], DENC_A_REG_6 + (0x100 * i), 0x00A493CF); /* Subcarrier Increment */ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], DENC_A_REG_7 + (0x100 * i), 0x2A098ACB); } /* set picture resolutions */ /* 0 - 720 */ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], HSCALE_CTRL, 0x0); /* 0 - 576 */ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], VSCALE_CTRL, 0x0); /* set Bypass input format to PAL 625 lines */ value = cx25821_i2c_read(&dev->i2c_bus[0], BYP_AB_CTRL, &tmp); value &= 0xFFF7FDFF; ret_val = cx25821_i2c_write(&dev->i2c_bus[0], BYP_AB_CTRL, value); mutex_unlock(&dev->lock); return ret_val; } int medusa_set_videostandard(struct cx25821_dev *dev) { int status = STATUS_SUCCESS; u32 value = 0, tmp = 0; if (dev->tvnorm & V4L2_STD_PAL_BG || dev->tvnorm & V4L2_STD_PAL_DK) status = medusa_initialize_pal(dev); else status = medusa_initialize_ntsc(dev); /* Enable DENC_A output */ value = cx25821_i2c_read(&dev->i2c_bus[0], DENC_A_REG_4, &tmp); value = setBitAtPos(value, 4); status = cx25821_i2c_write(&dev->i2c_bus[0], DENC_A_REG_4, value); /* Enable DENC_B output */ value = cx25821_i2c_read(&dev->i2c_bus[0], DENC_B_REG_4, &tmp); value = setBitAtPos(value, 4); status = cx25821_i2c_write(&dev->i2c_bus[0], DENC_B_REG_4, value); return status; } void medusa_set_resolution(struct cx25821_dev *dev, int width, int decoder_select) { int decoder = 0; int decoder_count = 0; int ret_val = 0; u32 hscale = 0x0; u32 vscale = 0x0; const int MAX_WIDTH = 720; mutex_lock(&dev->lock); /* validate the width - cannot be negative */ if (width > MAX_WIDTH) { pr_info("%s(): width %d > MAX_WIDTH %d ! 
resetting to MAX_WIDTH\n", __func__, width, MAX_WIDTH); width = MAX_WIDTH; } if (decoder_select <= 7 && decoder_select >= 0) { decoder = decoder_select; decoder_count = decoder_select + 1; } else { decoder = 0; decoder_count = _num_decoders; } switch (width) { case 320: hscale = 0x13E34B; vscale = 0x0; break; case 352: hscale = 0x10A273; vscale = 0x0; break; case 176: hscale = 0x3115B2; vscale = 0x1E00; break; case 160: hscale = 0x378D84; vscale = 0x1E00; break; default: /* 720 */ hscale = 0x0; vscale = 0x0; break; } for (; decoder < decoder_count; decoder++) { /* write scaling values for each decoder */ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], HSCALE_CTRL + (0x200 * decoder), hscale); ret_val = cx25821_i2c_write(&dev->i2c_bus[0], VSCALE_CTRL + (0x200 * decoder), vscale); } mutex_unlock(&dev->lock); } static void medusa_set_decoderduration(struct cx25821_dev *dev, int decoder, int duration) { int ret_val = 0; u32 fld_cnt = 0; u32 tmp = 0; u32 disp_cnt_reg = DISP_AB_CNT; mutex_lock(&dev->lock); /* no support */ if (decoder < VDEC_A && decoder > VDEC_H) { mutex_unlock(&dev->lock); return; } switch (decoder) { default: break; case VDEC_C: case VDEC_D: disp_cnt_reg = DISP_CD_CNT; break; case VDEC_E: case VDEC_F: disp_cnt_reg = DISP_EF_CNT; break; case VDEC_G: case VDEC_H: disp_cnt_reg = DISP_GH_CNT; break; } _display_field_cnt[decoder] = duration; /* update hardware */ fld_cnt = cx25821_i2c_read(&dev->i2c_bus[0], disp_cnt_reg, &tmp); if (!(decoder % 2)) { /* EVEN decoder */ fld_cnt &= 0xFFFF0000; fld_cnt |= duration; } else { fld_cnt &= 0x0000FFFF; fld_cnt |= ((u32) duration) << 16; } ret_val = cx25821_i2c_write(&dev->i2c_bus[0], disp_cnt_reg, fld_cnt); mutex_unlock(&dev->lock); } /* Map to Medusa register setting */ static int mapM(int srcMin, int srcMax, int srcVal, int dstMin, int dstMax, int *dstVal) { int numerator; int denominator; int quotient; if ((srcMin == srcMax) || (srcVal < srcMin) || (srcVal > srcMax)) return -1; /* * This is the overall expression 
used: * *dstVal = * (srcVal - srcMin)*(dstMax - dstMin) / (srcMax - srcMin) + dstMin; * but we need to account for rounding so below we use the modulus * operator to find the remainder and increment if necessary. */ numerator = (srcVal - srcMin) * (dstMax - dstMin); denominator = srcMax - srcMin; quotient = numerator / denominator; if (2 * (numerator % denominator) >= denominator) quotient++; *dstVal = quotient + dstMin; return 0; } static unsigned long convert_to_twos(long numeric, unsigned long bits_len) { unsigned char temp; if (numeric >= 0) return numeric; else { temp = ~(abs(numeric) & 0xFF); temp += 1; return temp; } } int medusa_set_brightness(struct cx25821_dev *dev, int brightness, int decoder) { int ret_val = 0; int value = 0; u32 val = 0, tmp = 0; mutex_lock(&dev->lock); if ((brightness > VIDEO_PROCAMP_MAX) || (brightness < VIDEO_PROCAMP_MIN)) { mutex_unlock(&dev->lock); return -1; } ret_val = mapM(VIDEO_PROCAMP_MIN, VIDEO_PROCAMP_MAX, brightness, SIGNED_BYTE_MIN, SIGNED_BYTE_MAX, &value); value = convert_to_twos(value, 8); val = cx25821_i2c_read(&dev->i2c_bus[0], VDEC_A_BRITE_CTRL + (0x200 * decoder), &tmp); val &= 0xFFFFFF00; ret_val |= cx25821_i2c_write(&dev->i2c_bus[0], VDEC_A_BRITE_CTRL + (0x200 * decoder), val | value); mutex_unlock(&dev->lock); return ret_val; } int medusa_set_contrast(struct cx25821_dev *dev, int contrast, int decoder) { int ret_val = 0; int value = 0; u32 val = 0, tmp = 0; mutex_lock(&dev->lock); if ((contrast > VIDEO_PROCAMP_MAX) || (contrast < VIDEO_PROCAMP_MIN)) { mutex_unlock(&dev->lock); return -1; } ret_val = mapM(VIDEO_PROCAMP_MIN, VIDEO_PROCAMP_MAX, contrast, UNSIGNED_BYTE_MIN, UNSIGNED_BYTE_MAX, &value); val = cx25821_i2c_read(&dev->i2c_bus[0], VDEC_A_CNTRST_CTRL + (0x200 * decoder), &tmp); val &= 0xFFFFFF00; ret_val |= cx25821_i2c_write(&dev->i2c_bus[0], VDEC_A_CNTRST_CTRL + (0x200 * decoder), val | value); mutex_unlock(&dev->lock); return ret_val; } int medusa_set_hue(struct cx25821_dev *dev, int hue, int decoder) { 
int ret_val = 0; int value = 0; u32 val = 0, tmp = 0; mutex_lock(&dev->lock); if ((hue > VIDEO_PROCAMP_MAX) || (hue < VIDEO_PROCAMP_MIN)) { mutex_unlock(&dev->lock); return -1; } ret_val = mapM(VIDEO_PROCAMP_MIN, VIDEO_PROCAMP_MAX, hue, SIGNED_BYTE_MIN, SIGNED_BYTE_MAX, &value); value = convert_to_twos(value, 8); val = cx25821_i2c_read(&dev->i2c_bus[0], VDEC_A_HUE_CTRL + (0x200 * decoder), &tmp); val &= 0xFFFFFF00; ret_val |= cx25821_i2c_write(&dev->i2c_bus[0], VDEC_A_HUE_CTRL + (0x200 * decoder), val | value); mutex_unlock(&dev->lock); return ret_val; } int medusa_set_saturation(struct cx25821_dev *dev, int saturation, int decoder) { int ret_val = 0; int value = 0; u32 val = 0, tmp = 0; mutex_lock(&dev->lock); if ((saturation > VIDEO_PROCAMP_MAX) || (saturation < VIDEO_PROCAMP_MIN)) { mutex_unlock(&dev->lock); return -1; } ret_val = mapM(VIDEO_PROCAMP_MIN, VIDEO_PROCAMP_MAX, saturation, UNSIGNED_BYTE_MIN, UNSIGNED_BYTE_MAX, &value); val = cx25821_i2c_read(&dev->i2c_bus[0], VDEC_A_USAT_CTRL + (0x200 * decoder), &tmp); val &= 0xFFFFFF00; ret_val |= cx25821_i2c_write(&dev->i2c_bus[0], VDEC_A_USAT_CTRL + (0x200 * decoder), val | value); val = cx25821_i2c_read(&dev->i2c_bus[0], VDEC_A_VSAT_CTRL + (0x200 * decoder), &tmp); val &= 0xFFFFFF00; ret_val |= cx25821_i2c_write(&dev->i2c_bus[0], VDEC_A_VSAT_CTRL + (0x200 * decoder), val | value); mutex_unlock(&dev->lock); return ret_val; } /* Program the display sequence and monitor output. 
*/ int medusa_video_init(struct cx25821_dev *dev) { u32 value = 0, tmp = 0; int ret_val = 0; int i = 0; mutex_lock(&dev->lock); _num_decoders = dev->_max_num_decoders; /* disable Auto source selection on all video decoders */ value = cx25821_i2c_read(&dev->i2c_bus[0], MON_A_CTRL, &tmp); value &= 0xFFFFF0FF; ret_val = cx25821_i2c_write(&dev->i2c_bus[0], MON_A_CTRL, value); if (ret_val < 0) goto error; /* Turn off Master source switch enable */ value = cx25821_i2c_read(&dev->i2c_bus[0], MON_A_CTRL, &tmp); value &= 0xFFFFFFDF; ret_val = cx25821_i2c_write(&dev->i2c_bus[0], MON_A_CTRL, value); if (ret_val < 0) goto error; mutex_unlock(&dev->lock); for (i = 0; i < _num_decoders; i++) medusa_set_decoderduration(dev, i, _display_field_cnt[i]); mutex_lock(&dev->lock); /* Select monitor as DENC A input, power up the DAC */ value = cx25821_i2c_read(&dev->i2c_bus[0], DENC_AB_CTRL, &tmp); value &= 0xFF70FF70; value |= 0x00090008; /* set en_active */ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], DENC_AB_CTRL, value); if (ret_val < 0) goto error; /* enable input is VIP/656 */ value = cx25821_i2c_read(&dev->i2c_bus[0], BYP_AB_CTRL, &tmp); value |= 0x00040100; /* enable VIP */ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], BYP_AB_CTRL, value); if (ret_val < 0) goto error; /* select AFE clock to output mode */ value = cx25821_i2c_read(&dev->i2c_bus[0], AFE_AB_DIAG_CTRL, &tmp); value &= 0x83FFFFFF; ret_val = cx25821_i2c_write(&dev->i2c_bus[0], AFE_AB_DIAG_CTRL, value | 0x10000000); if (ret_val < 0) goto error; /* Turn on all of the data out and control output pins. */ value = cx25821_i2c_read(&dev->i2c_bus[0], PIN_OE_CTRL, &tmp); value &= 0xFEF0FE00; if (_num_decoders == MAX_DECODERS) { /* * Note: The octal board does not support control pins(bit16-19) * These bits are ignored in the octal board. 
* * disable VDEC A-C port, default to Mobilygen Interface */ value |= 0x010001F8; } else { /* disable VDEC A-C port, default to Mobilygen Interface */ value |= 0x010F0108; } value |= 7; ret_val = cx25821_i2c_write(&dev->i2c_bus[0], PIN_OE_CTRL, value); if (ret_val < 0) goto error; mutex_unlock(&dev->lock); ret_val = medusa_set_videostandard(dev); return ret_val; error: mutex_unlock(&dev->lock); return ret_val; }
gpl-2.0
oloendithas/GT-P6800_JB_Kernel
sound/pci/hda/hda_hwdep.c
3212
20297
/* * HWDEP Interface for HD-audio codec * * Copyright (c) 2007 Takashi Iwai <tiwai@suse.de> * * This driver is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/compat.h> #include <linux/mutex.h> #include <linux/ctype.h> #include <linux/string.h> #include <linux/firmware.h> #include <sound/core.h> #include "hda_codec.h" #include "hda_local.h" #include <sound/hda_hwdep.h> #include <sound/minors.h> /* hint string pair */ struct hda_hint { const char *key; const char *val; /* contained in the same alloc as key */ }; /* * write/read an out-of-bound verb */ static int verb_write_ioctl(struct hda_codec *codec, struct hda_verb_ioctl __user *arg) { u32 verb, res; if (get_user(verb, &arg->verb)) return -EFAULT; res = snd_hda_codec_read(codec, verb >> 24, 0, (verb >> 8) & 0xffff, verb & 0xff); if (put_user(res, &arg->res)) return -EFAULT; return 0; } static int get_wcap_ioctl(struct hda_codec *codec, struct hda_verb_ioctl __user *arg) { u32 verb, res; if (get_user(verb, &arg->verb)) return -EFAULT; res = get_wcaps(codec, verb >> 24); if (put_user(res, &arg->res)) return -EFAULT; return 0; } /* */ static int hda_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigned int cmd, unsigned long arg) { struct hda_codec *codec = hw->private_data; void __user *argp = (void 
__user *)arg; switch (cmd) { case HDA_IOCTL_PVERSION: return put_user(HDA_HWDEP_VERSION, (int __user *)argp); case HDA_IOCTL_VERB_WRITE: return verb_write_ioctl(codec, argp); case HDA_IOCTL_GET_WCAP: return get_wcap_ioctl(codec, argp); } return -ENOIOCTLCMD; } #ifdef CONFIG_COMPAT static int hda_hwdep_ioctl_compat(struct snd_hwdep *hw, struct file *file, unsigned int cmd, unsigned long arg) { return hda_hwdep_ioctl(hw, file, cmd, (unsigned long)compat_ptr(arg)); } #endif static int hda_hwdep_open(struct snd_hwdep *hw, struct file *file) { #ifndef CONFIG_SND_DEBUG_VERBOSE if (!capable(CAP_SYS_RAWIO)) return -EACCES; #endif return 0; } static void clear_hwdep_elements(struct hda_codec *codec) { int i; /* clear init verbs */ snd_array_free(&codec->init_verbs); /* clear hints */ for (i = 0; i < codec->hints.used; i++) { struct hda_hint *hint = snd_array_elem(&codec->hints, i); kfree(hint->key); /* we don't need to free hint->val */ } snd_array_free(&codec->hints); snd_array_free(&codec->user_pins); } static void hwdep_free(struct snd_hwdep *hwdep) { clear_hwdep_elements(hwdep->private_data); } int /*__devinit*/ snd_hda_create_hwdep(struct hda_codec *codec) { char hwname[16]; struct snd_hwdep *hwdep; int err; sprintf(hwname, "HDA Codec %d", codec->addr); err = snd_hwdep_new(codec->bus->card, hwname, codec->addr, &hwdep); if (err < 0) return err; codec->hwdep = hwdep; sprintf(hwdep->name, "HDA Codec %d", codec->addr); hwdep->iface = SNDRV_HWDEP_IFACE_HDA; hwdep->private_data = codec; hwdep->private_free = hwdep_free; hwdep->exclusive = 1; hwdep->ops.open = hda_hwdep_open; hwdep->ops.ioctl = hda_hwdep_ioctl; #ifdef CONFIG_COMPAT hwdep->ops.ioctl_compat = hda_hwdep_ioctl_compat; #endif snd_array_init(&codec->init_verbs, sizeof(struct hda_verb), 32); snd_array_init(&codec->hints, sizeof(struct hda_hint), 32); snd_array_init(&codec->user_pins, sizeof(struct hda_pincfg), 16); return 0; } #ifdef CONFIG_SND_HDA_POWER_SAVE static ssize_t power_on_acct_show(struct device *dev, 
struct device_attribute *attr, char *buf) { struct snd_hwdep *hwdep = dev_get_drvdata(dev); struct hda_codec *codec = hwdep->private_data; snd_hda_update_power_acct(codec); return sprintf(buf, "%u\n", jiffies_to_msecs(codec->power_on_acct)); } static ssize_t power_off_acct_show(struct device *dev, struct device_attribute *attr, char *buf) { struct snd_hwdep *hwdep = dev_get_drvdata(dev); struct hda_codec *codec = hwdep->private_data; snd_hda_update_power_acct(codec); return sprintf(buf, "%u\n", jiffies_to_msecs(codec->power_off_acct)); } static struct device_attribute power_attrs[] = { __ATTR_RO(power_on_acct), __ATTR_RO(power_off_acct), }; int snd_hda_hwdep_add_power_sysfs(struct hda_codec *codec) { struct snd_hwdep *hwdep = codec->hwdep; int i; for (i = 0; i < ARRAY_SIZE(power_attrs); i++) snd_add_device_sysfs_file(SNDRV_DEVICE_TYPE_HWDEP, hwdep->card, hwdep->device, &power_attrs[i]); return 0; } #endif /* CONFIG_SND_HDA_POWER_SAVE */ #ifdef CONFIG_SND_HDA_RECONFIG /* * sysfs interface */ static int clear_codec(struct hda_codec *codec) { int err; err = snd_hda_codec_reset(codec); if (err < 0) { snd_printk(KERN_ERR "The codec is being used, can't free.\n"); return err; } clear_hwdep_elements(codec); return 0; } static int reconfig_codec(struct hda_codec *codec) { int err; snd_hda_power_up(codec); snd_printk(KERN_INFO "hda-codec: reconfiguring\n"); err = snd_hda_codec_reset(codec); if (err < 0) { snd_printk(KERN_ERR "The codec is being used, can't reconfigure.\n"); goto error; } err = snd_hda_codec_configure(codec); if (err < 0) goto error; /* rebuild PCMs */ err = snd_hda_codec_build_pcms(codec); if (err < 0) goto error; /* rebuild mixers */ err = snd_hda_codec_build_controls(codec); if (err < 0) goto error; err = snd_card_register(codec->bus->card); error: snd_hda_power_down(codec); return err; } /* * allocate a string at most len chars, and remove the trailing EOL */ static char *kstrndup_noeol(const char *src, size_t len) { char *s = kstrndup(src, len, 
GFP_KERNEL); char *p; if (!s) return NULL; p = strchr(s, '\n'); if (p) *p = 0; return s; } #define CODEC_INFO_SHOW(type) \ static ssize_t type##_show(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ struct snd_hwdep *hwdep = dev_get_drvdata(dev); \ struct hda_codec *codec = hwdep->private_data; \ return sprintf(buf, "0x%x\n", codec->type); \ } #define CODEC_INFO_STR_SHOW(type) \ static ssize_t type##_show(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ struct snd_hwdep *hwdep = dev_get_drvdata(dev); \ struct hda_codec *codec = hwdep->private_data; \ return sprintf(buf, "%s\n", \ codec->type ? codec->type : ""); \ } CODEC_INFO_SHOW(vendor_id); CODEC_INFO_SHOW(subsystem_id); CODEC_INFO_SHOW(revision_id); CODEC_INFO_SHOW(afg); CODEC_INFO_SHOW(mfg); CODEC_INFO_STR_SHOW(vendor_name); CODEC_INFO_STR_SHOW(chip_name); CODEC_INFO_STR_SHOW(modelname); #define CODEC_INFO_STORE(type) \ static ssize_t type##_store(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ struct snd_hwdep *hwdep = dev_get_drvdata(dev); \ struct hda_codec *codec = hwdep->private_data; \ unsigned long val; \ int err = strict_strtoul(buf, 0, &val); \ if (err < 0) \ return err; \ codec->type = val; \ return count; \ } #define CODEC_INFO_STR_STORE(type) \ static ssize_t type##_store(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ struct snd_hwdep *hwdep = dev_get_drvdata(dev); \ struct hda_codec *codec = hwdep->private_data; \ char *s = kstrndup_noeol(buf, 64); \ if (!s) \ return -ENOMEM; \ kfree(codec->type); \ codec->type = s; \ return count; \ } CODEC_INFO_STORE(vendor_id); CODEC_INFO_STORE(subsystem_id); CODEC_INFO_STORE(revision_id); CODEC_INFO_STR_STORE(vendor_name); CODEC_INFO_STR_STORE(chip_name); CODEC_INFO_STR_STORE(modelname); #define CODEC_ACTION_STORE(type) \ static ssize_t type##_store(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t 
count) \ { \ struct snd_hwdep *hwdep = dev_get_drvdata(dev); \ struct hda_codec *codec = hwdep->private_data; \ int err = 0; \ if (*buf) \ err = type##_codec(codec); \ return err < 0 ? err : count; \ } CODEC_ACTION_STORE(reconfig); CODEC_ACTION_STORE(clear); static ssize_t init_verbs_show(struct device *dev, struct device_attribute *attr, char *buf) { struct snd_hwdep *hwdep = dev_get_drvdata(dev); struct hda_codec *codec = hwdep->private_data; int i, len = 0; for (i = 0; i < codec->init_verbs.used; i++) { struct hda_verb *v = snd_array_elem(&codec->init_verbs, i); len += snprintf(buf + len, PAGE_SIZE - len, "0x%02x 0x%03x 0x%04x\n", v->nid, v->verb, v->param); } return len; } static int parse_init_verbs(struct hda_codec *codec, const char *buf) { struct hda_verb *v; int nid, verb, param; if (sscanf(buf, "%i %i %i", &nid, &verb, &param) != 3) return -EINVAL; if (!nid || !verb) return -EINVAL; v = snd_array_new(&codec->init_verbs); if (!v) return -ENOMEM; v->nid = nid; v->verb = verb; v->param = param; return 0; } static ssize_t init_verbs_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct snd_hwdep *hwdep = dev_get_drvdata(dev); struct hda_codec *codec = hwdep->private_data; int err = parse_init_verbs(codec, buf); if (err < 0) return err; return count; } static ssize_t hints_show(struct device *dev, struct device_attribute *attr, char *buf) { struct snd_hwdep *hwdep = dev_get_drvdata(dev); struct hda_codec *codec = hwdep->private_data; int i, len = 0; for (i = 0; i < codec->hints.used; i++) { struct hda_hint *hint = snd_array_elem(&codec->hints, i); len += snprintf(buf + len, PAGE_SIZE - len, "%s = %s\n", hint->key, hint->val); } return len; } static struct hda_hint *get_hint(struct hda_codec *codec, const char *key) { int i; for (i = 0; i < codec->hints.used; i++) { struct hda_hint *hint = snd_array_elem(&codec->hints, i); if (!strcmp(hint->key, key)) return hint; } return NULL; } static void remove_trail_spaces(char 
*str) { char *p; if (!*str) return; p = str + strlen(str) - 1; for (; isspace(*p); p--) { *p = 0; if (p == str) return; } } #define MAX_HINTS 1024 static int parse_hints(struct hda_codec *codec, const char *buf) { char *key, *val; struct hda_hint *hint; buf = skip_spaces(buf); if (!*buf || *buf == '#' || *buf == '\n') return 0; if (*buf == '=') return -EINVAL; key = kstrndup_noeol(buf, 1024); if (!key) return -ENOMEM; /* extract key and val */ val = strchr(key, '='); if (!val) { kfree(key); return -EINVAL; } *val++ = 0; val = skip_spaces(val); remove_trail_spaces(key); remove_trail_spaces(val); hint = get_hint(codec, key); if (hint) { /* replace */ kfree(hint->key); hint->key = key; hint->val = val; return 0; } /* allocate a new hint entry */ if (codec->hints.used >= MAX_HINTS) hint = NULL; else hint = snd_array_new(&codec->hints); if (!hint) { kfree(key); return -ENOMEM; } hint->key = key; hint->val = val; return 0; } static ssize_t hints_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct snd_hwdep *hwdep = dev_get_drvdata(dev); struct hda_codec *codec = hwdep->private_data; int err = parse_hints(codec, buf); if (err < 0) return err; return count; } static ssize_t pin_configs_show(struct hda_codec *codec, struct snd_array *list, char *buf) { int i, len = 0; for (i = 0; i < list->used; i++) { struct hda_pincfg *pin = snd_array_elem(list, i); len += sprintf(buf + len, "0x%02x 0x%08x\n", pin->nid, pin->cfg); } return len; } static ssize_t init_pin_configs_show(struct device *dev, struct device_attribute *attr, char *buf) { struct snd_hwdep *hwdep = dev_get_drvdata(dev); struct hda_codec *codec = hwdep->private_data; return pin_configs_show(codec, &codec->init_pins, buf); } static ssize_t user_pin_configs_show(struct device *dev, struct device_attribute *attr, char *buf) { struct snd_hwdep *hwdep = dev_get_drvdata(dev); struct hda_codec *codec = hwdep->private_data; return pin_configs_show(codec, &codec->user_pins, buf); } 
static ssize_t driver_pin_configs_show(struct device *dev, struct device_attribute *attr, char *buf) { struct snd_hwdep *hwdep = dev_get_drvdata(dev); struct hda_codec *codec = hwdep->private_data; return pin_configs_show(codec, &codec->driver_pins, buf); } #define MAX_PIN_CONFIGS 32 static int parse_user_pin_configs(struct hda_codec *codec, const char *buf) { int nid, cfg; if (sscanf(buf, "%i %i", &nid, &cfg) != 2) return -EINVAL; if (!nid) return -EINVAL; return snd_hda_add_pincfg(codec, &codec->user_pins, nid, cfg); } static ssize_t user_pin_configs_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct snd_hwdep *hwdep = dev_get_drvdata(dev); struct hda_codec *codec = hwdep->private_data; int err = parse_user_pin_configs(codec, buf); if (err < 0) return err; return count; } #define CODEC_ATTR_RW(type) \ __ATTR(type, 0644, type##_show, type##_store) #define CODEC_ATTR_RO(type) \ __ATTR_RO(type) #define CODEC_ATTR_WO(type) \ __ATTR(type, 0200, NULL, type##_store) static struct device_attribute codec_attrs[] = { CODEC_ATTR_RW(vendor_id), CODEC_ATTR_RW(subsystem_id), CODEC_ATTR_RW(revision_id), CODEC_ATTR_RO(afg), CODEC_ATTR_RO(mfg), CODEC_ATTR_RW(vendor_name), CODEC_ATTR_RW(chip_name), CODEC_ATTR_RW(modelname), CODEC_ATTR_RW(init_verbs), CODEC_ATTR_RW(hints), CODEC_ATTR_RO(init_pin_configs), CODEC_ATTR_RW(user_pin_configs), CODEC_ATTR_RO(driver_pin_configs), CODEC_ATTR_WO(reconfig), CODEC_ATTR_WO(clear), }; /* * create sysfs files on hwdep directory */ int snd_hda_hwdep_add_sysfs(struct hda_codec *codec) { struct snd_hwdep *hwdep = codec->hwdep; int i; for (i = 0; i < ARRAY_SIZE(codec_attrs); i++) snd_add_device_sysfs_file(SNDRV_DEVICE_TYPE_HWDEP, hwdep->card, hwdep->device, &codec_attrs[i]); return 0; } /* * Look for hint string */ const char *snd_hda_get_hint(struct hda_codec *codec, const char *key) { struct hda_hint *hint = get_hint(codec, key); return hint ? 
hint->val : NULL; } EXPORT_SYMBOL_HDA(snd_hda_get_hint); int snd_hda_get_bool_hint(struct hda_codec *codec, const char *key) { const char *p = snd_hda_get_hint(codec, key); if (!p || !*p) return -ENOENT; switch (toupper(*p)) { case 'T': /* true */ case 'Y': /* yes */ case '1': return 1; } return 0; } EXPORT_SYMBOL_HDA(snd_hda_get_bool_hint); #endif /* CONFIG_SND_HDA_RECONFIG */ #ifdef CONFIG_SND_HDA_PATCH_LOADER /* parser mode */ enum { LINE_MODE_NONE, LINE_MODE_CODEC, LINE_MODE_MODEL, LINE_MODE_PINCFG, LINE_MODE_VERB, LINE_MODE_HINT, LINE_MODE_VENDOR_ID, LINE_MODE_SUBSYSTEM_ID, LINE_MODE_REVISION_ID, LINE_MODE_CHIP_NAME, NUM_LINE_MODES, }; static inline int strmatch(const char *a, const char *b) { return strnicmp(a, b, strlen(b)) == 0; } /* parse the contents after the line "[codec]" * accept only the line with three numbers, and assign the current codec */ static void parse_codec_mode(char *buf, struct hda_bus *bus, struct hda_codec **codecp) { unsigned int vendorid, subid, caddr; struct hda_codec *codec; *codecp = NULL; if (sscanf(buf, "%i %i %i", &vendorid, &subid, &caddr) == 3) { list_for_each_entry(codec, &bus->codec_list, list) { if (codec->vendor_id == vendorid && codec->subsystem_id == subid && codec->addr == caddr) { *codecp = codec; break; } } } } /* parse the contents after the other command tags, [pincfg], [verb], * [vendor_id], [subsystem_id], [revision_id], [chip_name], [hint] and [model] * just pass to the sysfs helper (only when any codec was specified) */ static void parse_pincfg_mode(char *buf, struct hda_bus *bus, struct hda_codec **codecp) { parse_user_pin_configs(*codecp, buf); } static void parse_verb_mode(char *buf, struct hda_bus *bus, struct hda_codec **codecp) { parse_init_verbs(*codecp, buf); } static void parse_hint_mode(char *buf, struct hda_bus *bus, struct hda_codec **codecp) { parse_hints(*codecp, buf); } static void parse_model_mode(char *buf, struct hda_bus *bus, struct hda_codec **codecp) { kfree((*codecp)->modelname); 
(*codecp)->modelname = kstrdup(buf, GFP_KERNEL); } static void parse_chip_name_mode(char *buf, struct hda_bus *bus, struct hda_codec **codecp) { kfree((*codecp)->chip_name); (*codecp)->chip_name = kstrdup(buf, GFP_KERNEL); } #define DEFINE_PARSE_ID_MODE(name) \ static void parse_##name##_mode(char *buf, struct hda_bus *bus, \ struct hda_codec **codecp) \ { \ unsigned long val; \ if (!strict_strtoul(buf, 0, &val)) \ (*codecp)->name = val; \ } DEFINE_PARSE_ID_MODE(vendor_id); DEFINE_PARSE_ID_MODE(subsystem_id); DEFINE_PARSE_ID_MODE(revision_id); struct hda_patch_item { const char *tag; void (*parser)(char *buf, struct hda_bus *bus, struct hda_codec **retc); int need_codec; }; static struct hda_patch_item patch_items[NUM_LINE_MODES] = { [LINE_MODE_CODEC] = { "[codec]", parse_codec_mode, 0 }, [LINE_MODE_MODEL] = { "[model]", parse_model_mode, 1 }, [LINE_MODE_VERB] = { "[verb]", parse_verb_mode, 1 }, [LINE_MODE_PINCFG] = { "[pincfg]", parse_pincfg_mode, 1 }, [LINE_MODE_HINT] = { "[hint]", parse_hint_mode, 1 }, [LINE_MODE_VENDOR_ID] = { "[vendor_id]", parse_vendor_id_mode, 1 }, [LINE_MODE_SUBSYSTEM_ID] = { "[subsystem_id]", parse_subsystem_id_mode, 1 }, [LINE_MODE_REVISION_ID] = { "[revision_id]", parse_revision_id_mode, 1 }, [LINE_MODE_CHIP_NAME] = { "[chip_name]", parse_chip_name_mode, 1 }, }; /* check the line starting with '[' -- change the parser mode accodingly */ static int parse_line_mode(char *buf, struct hda_bus *bus) { int i; for (i = 0; i < ARRAY_SIZE(patch_items); i++) { if (!patch_items[i].tag) continue; if (strmatch(buf, patch_items[i].tag)) return i; } return LINE_MODE_NONE; } /* copy one line from the buffer in fw, and update the fields in fw * return zero if it reaches to the end of the buffer, or non-zero * if successfully copied a line * * the spaces at the beginning and the end of the line are stripped */ static int get_line_from_fw(char *buf, int size, struct firmware *fw) { int len; const char *p = fw->data; while (isspace(*p) && fw->size) { p++; 
fw->size--; } if (!fw->size) return 0; if (size < fw->size) size = fw->size; for (len = 0; len < fw->size; len++) { if (!*p) break; if (*p == '\n') { p++; len++; break; } if (len < size) *buf++ = *p++; } *buf = 0; fw->size -= len; fw->data = p; remove_trail_spaces(buf); return 1; } /* * load a "patch" firmware file and parse it */ int snd_hda_load_patch(struct hda_bus *bus, const char *patch) { int err; const struct firmware *fw; struct firmware tmp; char buf[128]; struct hda_codec *codec; int line_mode; struct device *dev = bus->card->dev; if (snd_BUG_ON(!dev)) return -ENODEV; err = request_firmware(&fw, patch, dev); if (err < 0) { printk(KERN_ERR "hda-codec: Cannot load the patch '%s'\n", patch); return err; } tmp = *fw; line_mode = LINE_MODE_NONE; codec = NULL; while (get_line_from_fw(buf, sizeof(buf) - 1, &tmp)) { if (!*buf || *buf == '#' || *buf == '\n') continue; if (*buf == '[') line_mode = parse_line_mode(buf, bus); else if (patch_items[line_mode].parser && (codec || !patch_items[line_mode].need_codec)) patch_items[line_mode].parser(buf, bus, &codec); } release_firmware(fw); return 0; } EXPORT_SYMBOL_HDA(snd_hda_load_patch); #endif /* CONFIG_SND_HDA_PATCH_LOADER */
gpl-2.0
balister/linux-omap-philip
arch/arm/kernel/dma.c
3468
5790
/* * linux/arch/arm/kernel/dma.c * * Copyright (C) 1995-2000 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Front-end to the DMA handling. This handles the allocation/freeing * of DMA channels, and provides a unified interface to the machines * DMA facilities. */ #include <linux/module.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/errno.h> #include <linux/scatterlist.h> #include <linux/seq_file.h> #include <linux/proc_fs.h> #include <asm/dma.h> #include <asm/mach/dma.h> DEFINE_SPINLOCK(dma_spin_lock); EXPORT_SYMBOL(dma_spin_lock); static dma_t *dma_chan[MAX_DMA_CHANNELS]; static inline dma_t *dma_channel(unsigned int chan) { if (chan >= MAX_DMA_CHANNELS) return NULL; return dma_chan[chan]; } int __init isa_dma_add(unsigned int chan, dma_t *dma) { if (!dma->d_ops) return -EINVAL; sg_init_table(&dma->buf, 1); if (dma_chan[chan]) return -EBUSY; dma_chan[chan] = dma; return 0; } /* * Request DMA channel * * On certain platforms, we have to allocate an interrupt as well... */ int request_dma(unsigned int chan, const char *device_id) { dma_t *dma = dma_channel(chan); int ret; if (!dma) goto bad_dma; if (xchg(&dma->lock, 1) != 0) goto busy; dma->device_id = device_id; dma->active = 0; dma->invalid = 1; ret = 0; if (dma->d_ops->request) ret = dma->d_ops->request(chan, dma); if (ret) xchg(&dma->lock, 0); return ret; bad_dma: printk(KERN_ERR "dma: trying to allocate DMA%d\n", chan); return -EINVAL; busy: return -EBUSY; } EXPORT_SYMBOL(request_dma); /* * Free DMA channel * * On certain platforms, we have to free interrupt as well... 
*/ void free_dma(unsigned int chan) { dma_t *dma = dma_channel(chan); if (!dma) goto bad_dma; if (dma->active) { printk(KERN_ERR "dma%d: freeing active DMA\n", chan); dma->d_ops->disable(chan, dma); dma->active = 0; } if (xchg(&dma->lock, 0) != 0) { if (dma->d_ops->free) dma->d_ops->free(chan, dma); return; } printk(KERN_ERR "dma%d: trying to free free DMA\n", chan); return; bad_dma: printk(KERN_ERR "dma: trying to free DMA%d\n", chan); } EXPORT_SYMBOL(free_dma); /* Set DMA Scatter-Gather list */ void set_dma_sg (unsigned int chan, struct scatterlist *sg, int nr_sg) { dma_t *dma = dma_channel(chan); if (dma->active) printk(KERN_ERR "dma%d: altering DMA SG while " "DMA active\n", chan); dma->sg = sg; dma->sgcount = nr_sg; dma->invalid = 1; } EXPORT_SYMBOL(set_dma_sg); /* Set DMA address * * Copy address to the structure, and set the invalid bit */ void __set_dma_addr (unsigned int chan, void *addr) { dma_t *dma = dma_channel(chan); if (dma->active) printk(KERN_ERR "dma%d: altering DMA address while " "DMA active\n", chan); dma->sg = NULL; dma->addr = addr; dma->invalid = 1; } EXPORT_SYMBOL(__set_dma_addr); /* Set DMA byte count * * Copy address to the structure, and set the invalid bit */ void set_dma_count (unsigned int chan, unsigned long count) { dma_t *dma = dma_channel(chan); if (dma->active) printk(KERN_ERR "dma%d: altering DMA count while " "DMA active\n", chan); dma->sg = NULL; dma->count = count; dma->invalid = 1; } EXPORT_SYMBOL(set_dma_count); /* Set DMA direction mode */ void set_dma_mode (unsigned int chan, unsigned int mode) { dma_t *dma = dma_channel(chan); if (dma->active) printk(KERN_ERR "dma%d: altering DMA mode while " "DMA active\n", chan); dma->dma_mode = mode; dma->invalid = 1; } EXPORT_SYMBOL(set_dma_mode); /* Enable DMA channel */ void enable_dma (unsigned int chan) { dma_t *dma = dma_channel(chan); if (!dma->lock) goto free_dma; if (dma->active == 0) { dma->active = 1; dma->d_ops->enable(chan, dma); } return; free_dma: printk(KERN_ERR 
"dma%d: trying to enable free DMA\n", chan); BUG(); } EXPORT_SYMBOL(enable_dma); /* Disable DMA channel */ void disable_dma (unsigned int chan) { dma_t *dma = dma_channel(chan); if (!dma->lock) goto free_dma; if (dma->active == 1) { dma->active = 0; dma->d_ops->disable(chan, dma); } return; free_dma: printk(KERN_ERR "dma%d: trying to disable free DMA\n", chan); BUG(); } EXPORT_SYMBOL(disable_dma); /* * Is the specified DMA channel active? */ int dma_channel_active(unsigned int chan) { dma_t *dma = dma_channel(chan); return dma->active; } EXPORT_SYMBOL(dma_channel_active); void set_dma_page(unsigned int chan, char pagenr) { printk(KERN_ERR "dma%d: trying to set_dma_page\n", chan); } EXPORT_SYMBOL(set_dma_page); void set_dma_speed(unsigned int chan, int cycle_ns) { dma_t *dma = dma_channel(chan); int ret = 0; if (dma->d_ops->setspeed) ret = dma->d_ops->setspeed(chan, dma, cycle_ns); dma->speed = ret; } EXPORT_SYMBOL(set_dma_speed); int get_dma_residue(unsigned int chan) { dma_t *dma = dma_channel(chan); int ret = 0; if (dma->d_ops->residue) ret = dma->d_ops->residue(chan, dma); return ret; } EXPORT_SYMBOL(get_dma_residue); #ifdef CONFIG_PROC_FS static int proc_dma_show(struct seq_file *m, void *v) { int i; for (i = 0 ; i < MAX_DMA_CHANNELS ; i++) { dma_t *dma = dma_channel(i); if (dma && dma->lock) seq_printf(m, "%2d: %s\n", i, dma->device_id); } return 0; } static int proc_dma_open(struct inode *inode, struct file *file) { return single_open(file, proc_dma_show, NULL); } static const struct file_operations proc_dma_operations = { .open = proc_dma_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init proc_dma_init(void) { proc_create("dma", 0, NULL, &proc_dma_operations); return 0; } __initcall(proc_dma_init); #endif
gpl-2.0
casinobrawl/exp_dt2w
drivers/media/video/mem2mem_testdev.c
4236
23928
/* * A virtual v4l2-mem2mem example device. * * This is a virtual device driver for testing mem-to-mem videobuf framework. * It simulates a device that uses memory buffers for both source and * destination, processes the data and issues an "irq" (simulated by a timer). * The device is capable of multi-instance, multi-buffer-per-transaction * operation (via the mem2mem framework). * * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. * Pawel Osciak, <pawel@osciak.com> * Marek Szyprowski, <m.szyprowski@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the * License, or (at your option) any later version */ #include <linux/module.h> #include <linux/delay.h> #include <linux/fs.h> #include <linux/timer.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <media/v4l2-mem2mem.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/videobuf2-vmalloc.h> #define MEM2MEM_TEST_MODULE_NAME "mem2mem-testdev" MODULE_DESCRIPTION("Virtual device for mem2mem framework testing"); MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>"); MODULE_LICENSE("GPL"); MODULE_VERSION("0.1.1"); #define MIN_W 32 #define MIN_H 32 #define MAX_W 640 #define MAX_H 480 #define DIM_ALIGN_MASK 0x08 /* 8-alignment for dimensions */ /* Flags that indicate a format can be used for capture/output */ #define MEM2MEM_CAPTURE (1 << 0) #define MEM2MEM_OUTPUT (1 << 1) #define MEM2MEM_NAME "m2m-testdev" /* Per queue */ #define MEM2MEM_DEF_NUM_BUFS VIDEO_MAX_FRAME /* In bytes, per queue */ #define MEM2MEM_VID_MEM_LIMIT (16 * 1024 * 1024) /* Default transaction time in msec */ #define MEM2MEM_DEF_TRANSTIME 1000 /* Default number of buffers per transaction */ #define MEM2MEM_DEF_TRANSLEN 1 #define MEM2MEM_COLOR_STEP (0xff >> 4) #define MEM2MEM_NUM_TILES 8 #define dprintk(dev, fmt, arg...) 
\ v4l2_dbg(1, 1, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg) void m2mtest_dev_release(struct device *dev) {} static struct platform_device m2mtest_pdev = { .name = MEM2MEM_NAME, .dev.release = m2mtest_dev_release, }; struct m2mtest_fmt { char *name; u32 fourcc; int depth; /* Types the format can be used for */ u32 types; }; static struct m2mtest_fmt formats[] = { { .name = "RGB565 (BE)", .fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */ .depth = 16, /* Both capture and output format */ .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT, }, { .name = "4:2:2, packed, YUYV", .fourcc = V4L2_PIX_FMT_YUYV, .depth = 16, /* Output-only format */ .types = MEM2MEM_OUTPUT, }, }; /* Per-queue, driver-specific private data */ struct m2mtest_q_data { unsigned int width; unsigned int height; unsigned int sizeimage; struct m2mtest_fmt *fmt; }; enum { V4L2_M2M_SRC = 0, V4L2_M2M_DST = 1, }; /* Source and destination queue data */ static struct m2mtest_q_data q_data[2]; static struct m2mtest_q_data *get_q_data(enum v4l2_buf_type type) { switch (type) { case V4L2_BUF_TYPE_VIDEO_OUTPUT: return &q_data[V4L2_M2M_SRC]; case V4L2_BUF_TYPE_VIDEO_CAPTURE: return &q_data[V4L2_M2M_DST]; default: BUG(); } return NULL; } #define V4L2_CID_TRANS_TIME_MSEC V4L2_CID_PRIVATE_BASE #define V4L2_CID_TRANS_NUM_BUFS (V4L2_CID_PRIVATE_BASE + 1) static struct v4l2_queryctrl m2mtest_ctrls[] = { { .id = V4L2_CID_TRANS_TIME_MSEC, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Transaction time (msec)", .minimum = 1, .maximum = 10000, .step = 100, .default_value = 1000, .flags = 0, }, { .id = V4L2_CID_TRANS_NUM_BUFS, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Buffers per transaction", .minimum = 1, .maximum = MEM2MEM_DEF_NUM_BUFS, .step = 1, .default_value = 1, .flags = 0, }, }; #define NUM_FORMATS ARRAY_SIZE(formats) static struct m2mtest_fmt *find_format(struct v4l2_format *f) { struct m2mtest_fmt *fmt; unsigned int k; for (k = 0; k < NUM_FORMATS; k++) { fmt = &formats[k]; if (fmt->fourcc == f->fmt.pix.pixelformat) 
break; } if (k == NUM_FORMATS) return NULL; return &formats[k]; } struct m2mtest_dev { struct v4l2_device v4l2_dev; struct video_device *vfd; atomic_t num_inst; struct mutex dev_mutex; spinlock_t irqlock; struct timer_list timer; struct v4l2_m2m_dev *m2m_dev; }; struct m2mtest_ctx { struct m2mtest_dev *dev; /* Processed buffers in this transaction */ u8 num_processed; /* Transaction length (i.e. how many buffers per transaction) */ u32 translen; /* Transaction time (i.e. simulated processing time) in milliseconds */ u32 transtime; /* Abort requested by m2m */ int aborting; struct v4l2_m2m_ctx *m2m_ctx; }; static struct v4l2_queryctrl *get_ctrl(int id) { int i; for (i = 0; i < ARRAY_SIZE(m2mtest_ctrls); ++i) { if (id == m2mtest_ctrls[i].id) return &m2mtest_ctrls[i]; } return NULL; } static int device_process(struct m2mtest_ctx *ctx, struct vb2_buffer *in_vb, struct vb2_buffer *out_vb) { struct m2mtest_dev *dev = ctx->dev; struct m2mtest_q_data *q_data; u8 *p_in, *p_out; int x, y, t, w; int tile_w, bytes_left; int width, height, bytesperline; q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_OUTPUT); width = q_data->width; height = q_data->height; bytesperline = (q_data->width * q_data->fmt->depth) >> 3; p_in = vb2_plane_vaddr(in_vb, 0); p_out = vb2_plane_vaddr(out_vb, 0); if (!p_in || !p_out) { v4l2_err(&dev->v4l2_dev, "Acquiring kernel pointers to buffers failed\n"); return -EFAULT; } if (vb2_plane_size(in_vb, 0) > vb2_plane_size(out_vb, 0)) { v4l2_err(&dev->v4l2_dev, "Output buffer is too small\n"); return -EINVAL; } tile_w = (width * (q_data[V4L2_M2M_DST].fmt->depth >> 3)) / MEM2MEM_NUM_TILES; bytes_left = bytesperline - tile_w * MEM2MEM_NUM_TILES; w = 0; for (y = 0; y < height; ++y) { for (t = 0; t < MEM2MEM_NUM_TILES; ++t) { if (w & 0x1) { for (x = 0; x < tile_w; ++x) *p_out++ = *p_in++ + MEM2MEM_COLOR_STEP; } else { for (x = 0; x < tile_w; ++x) *p_out++ = *p_in++ - MEM2MEM_COLOR_STEP; } ++w; } p_in += bytes_left; p_out += bytes_left; } return 0; } static void 
schedule_irq(struct m2mtest_dev *dev, int msec_timeout) { dprintk(dev, "Scheduling a simulated irq\n"); mod_timer(&dev->timer, jiffies + msecs_to_jiffies(msec_timeout)); } /* * mem2mem callbacks */ /** * job_ready() - check whether an instance is ready to be scheduled to run */ static int job_ready(void *priv) { struct m2mtest_ctx *ctx = priv; if (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) < ctx->translen || v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) < ctx->translen) { dprintk(ctx->dev, "Not enough buffers available\n"); return 0; } return 1; } static void job_abort(void *priv) { struct m2mtest_ctx *ctx = priv; /* Will cancel the transaction in the next interrupt handler */ ctx->aborting = 1; } static void m2mtest_lock(void *priv) { struct m2mtest_ctx *ctx = priv; struct m2mtest_dev *dev = ctx->dev; mutex_lock(&dev->dev_mutex); } static void m2mtest_unlock(void *priv) { struct m2mtest_ctx *ctx = priv; struct m2mtest_dev *dev = ctx->dev; mutex_unlock(&dev->dev_mutex); } /* device_run() - prepares and starts the device * * This simulates all the immediate preparations required before starting * a device. This will be called by the framework when it decides to schedule * a particular instance. 
*/ static void device_run(void *priv) { struct m2mtest_ctx *ctx = priv; struct m2mtest_dev *dev = ctx->dev; struct vb2_buffer *src_buf, *dst_buf; src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx); dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx); device_process(ctx, src_buf, dst_buf); /* Run a timer, which simulates a hardware irq */ schedule_irq(dev, ctx->transtime); } static void device_isr(unsigned long priv) { struct m2mtest_dev *m2mtest_dev = (struct m2mtest_dev *)priv; struct m2mtest_ctx *curr_ctx; struct vb2_buffer *src_vb, *dst_vb; unsigned long flags; curr_ctx = v4l2_m2m_get_curr_priv(m2mtest_dev->m2m_dev); if (NULL == curr_ctx) { printk(KERN_ERR "Instance released before the end of transaction\n"); return; } src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx); dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx); curr_ctx->num_processed++; spin_lock_irqsave(&m2mtest_dev->irqlock, flags); v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE); v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE); spin_unlock_irqrestore(&m2mtest_dev->irqlock, flags); if (curr_ctx->num_processed == curr_ctx->translen || curr_ctx->aborting) { dprintk(curr_ctx->dev, "Finishing transaction\n"); curr_ctx->num_processed = 0; v4l2_m2m_job_finish(m2mtest_dev->m2m_dev, curr_ctx->m2m_ctx); } else { device_run(curr_ctx); } } /* * video ioctls */ static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1); strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1); cap->bus_info[0] = 0; cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING; return 0; } static int enum_fmt(struct v4l2_fmtdesc *f, u32 type) { int i, num; struct m2mtest_fmt *fmt; num = 0; for (i = 0; i < NUM_FORMATS; ++i) { if (formats[i].types & type) { /* index-th format of type type found ? 
*/ if (num == f->index) break; /* Correct type but haven't reached our index yet, * just increment per-type index */ ++num; } } if (i < NUM_FORMATS) { /* Format found */ fmt = &formats[i]; strncpy(f->description, fmt->name, sizeof(f->description) - 1); f->pixelformat = fmt->fourcc; return 0; } /* Format not found */ return -EINVAL; } static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { return enum_fmt(f, MEM2MEM_CAPTURE); } static int vidioc_enum_fmt_vid_out(struct file *file, void *priv, struct v4l2_fmtdesc *f) { return enum_fmt(f, MEM2MEM_OUTPUT); } static int vidioc_g_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f) { struct vb2_queue *vq; struct m2mtest_q_data *q_data; vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); if (!vq) return -EINVAL; q_data = get_q_data(f->type); f->fmt.pix.width = q_data->width; f->fmt.pix.height = q_data->height; f->fmt.pix.field = V4L2_FIELD_NONE; f->fmt.pix.pixelformat = q_data->fmt->fourcc; f->fmt.pix.bytesperline = (q_data->width * q_data->fmt->depth) >> 3; f->fmt.pix.sizeimage = q_data->sizeimage; return 0; } static int vidioc_g_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { return vidioc_g_fmt(priv, f); } static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { return vidioc_g_fmt(priv, f); } static int vidioc_try_fmt(struct v4l2_format *f, struct m2mtest_fmt *fmt) { enum v4l2_field field; field = f->fmt.pix.field; if (field == V4L2_FIELD_ANY) field = V4L2_FIELD_NONE; else if (V4L2_FIELD_NONE != field) return -EINVAL; /* V4L2 specification suggests the driver corrects the format struct * if any of the dimensions is unsupported */ f->fmt.pix.field = field; if (f->fmt.pix.height < MIN_H) f->fmt.pix.height = MIN_H; else if (f->fmt.pix.height > MAX_H) f->fmt.pix.height = MAX_H; if (f->fmt.pix.width < MIN_W) f->fmt.pix.width = MIN_W; else if (f->fmt.pix.width > MAX_W) f->fmt.pix.width = MAX_W; f->fmt.pix.width &= ~DIM_ALIGN_MASK; 
f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3; f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline; return 0; } static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct m2mtest_fmt *fmt; struct m2mtest_ctx *ctx = priv; fmt = find_format(f); if (!fmt || !(fmt->types & MEM2MEM_CAPTURE)) { v4l2_err(&ctx->dev->v4l2_dev, "Fourcc format (0x%08x) invalid.\n", f->fmt.pix.pixelformat); return -EINVAL; } return vidioc_try_fmt(f, fmt); } static int vidioc_try_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { struct m2mtest_fmt *fmt; struct m2mtest_ctx *ctx = priv; fmt = find_format(f); if (!fmt || !(fmt->types & MEM2MEM_OUTPUT)) { v4l2_err(&ctx->dev->v4l2_dev, "Fourcc format (0x%08x) invalid.\n", f->fmt.pix.pixelformat); return -EINVAL; } return vidioc_try_fmt(f, fmt); } static int vidioc_s_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f) { struct m2mtest_q_data *q_data; struct vb2_queue *vq; vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); if (!vq) return -EINVAL; q_data = get_q_data(f->type); if (!q_data) return -EINVAL; if (vb2_is_busy(vq)) { v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__); return -EBUSY; } q_data->fmt = find_format(f); q_data->width = f->fmt.pix.width; q_data->height = f->fmt.pix.height; q_data->sizeimage = q_data->width * q_data->height * q_data->fmt->depth >> 3; dprintk(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d\n", f->type, q_data->width, q_data->height, q_data->fmt->fourcc); return 0; } static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { int ret; ret = vidioc_try_fmt_vid_cap(file, priv, f); if (ret) return ret; return vidioc_s_fmt(priv, f); } static int vidioc_s_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { int ret; ret = vidioc_try_fmt_vid_out(file, priv, f); if (ret) return ret; return vidioc_s_fmt(priv, f); } static int vidioc_reqbufs(struct file *file, void *priv, struct 
v4l2_requestbuffers *reqbufs) { struct m2mtest_ctx *ctx = priv; return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs); } static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct m2mtest_ctx *ctx = priv; return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf); } static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct m2mtest_ctx *ctx = priv; return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf); } static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct m2mtest_ctx *ctx = priv; return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf); } static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { struct m2mtest_ctx *ctx = priv; return v4l2_m2m_streamon(file, ctx->m2m_ctx, type); } static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type type) { struct m2mtest_ctx *ctx = priv; return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type); } static int vidioc_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *qc) { struct v4l2_queryctrl *c; c = get_ctrl(qc->id); if (!c) return -EINVAL; *qc = *c; return 0; } static int vidioc_g_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct m2mtest_ctx *ctx = priv; switch (ctrl->id) { case V4L2_CID_TRANS_TIME_MSEC: ctrl->value = ctx->transtime; break; case V4L2_CID_TRANS_NUM_BUFS: ctrl->value = ctx->translen; break; default: v4l2_err(&ctx->dev->v4l2_dev, "Invalid control\n"); return -EINVAL; } return 0; } static int check_ctrl_val(struct m2mtest_ctx *ctx, struct v4l2_control *ctrl) { struct v4l2_queryctrl *c; c = get_ctrl(ctrl->id); if (!c) return -EINVAL; if (ctrl->value < c->minimum || ctrl->value > c->maximum) { v4l2_err(&ctx->dev->v4l2_dev, "Value out of range\n"); return -ERANGE; } return 0; } static int vidioc_s_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct m2mtest_ctx *ctx = priv; int ret = 0; ret = check_ctrl_val(ctx, ctrl); if (ret != 0) return ret; switch 
(ctrl->id) { case V4L2_CID_TRANS_TIME_MSEC: ctx->transtime = ctrl->value; break; case V4L2_CID_TRANS_NUM_BUFS: ctx->translen = ctrl->value; break; default: v4l2_err(&ctx->dev->v4l2_dev, "Invalid control\n"); return -EINVAL; } return 0; } static const struct v4l2_ioctl_ops m2mtest_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out, .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out, .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out, .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out, .vidioc_reqbufs = vidioc_reqbufs, .vidioc_querybuf = vidioc_querybuf, .vidioc_qbuf = vidioc_qbuf, .vidioc_dqbuf = vidioc_dqbuf, .vidioc_streamon = vidioc_streamon, .vidioc_streamoff = vidioc_streamoff, .vidioc_queryctrl = vidioc_queryctrl, .vidioc_g_ctrl = vidioc_g_ctrl, .vidioc_s_ctrl = vidioc_s_ctrl, }; /* * Queue operations */ static int m2mtest_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { struct m2mtest_ctx *ctx = vb2_get_drv_priv(vq); struct m2mtest_q_data *q_data; unsigned int size, count = *nbuffers; q_data = get_q_data(vq->type); size = q_data->width * q_data->height * q_data->fmt->depth >> 3; while (size * count > MEM2MEM_VID_MEM_LIMIT) (count)--; *nplanes = 1; *nbuffers = count; sizes[0] = size; /* * videobuf2-vmalloc allocator is context-less so no need to set * alloc_ctxs array. 
*/ dprintk(ctx->dev, "get %d buffer(s) of size %d each.\n", count, size); return 0; } static int m2mtest_buf_prepare(struct vb2_buffer *vb) { struct m2mtest_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); struct m2mtest_q_data *q_data; dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type); q_data = get_q_data(vb->vb2_queue->type); if (vb2_plane_size(vb, 0) < q_data->sizeimage) { dprintk(ctx->dev, "%s data will not fit into plane (%lu < %lu)\n", __func__, vb2_plane_size(vb, 0), (long)q_data->sizeimage); return -EINVAL; } vb2_set_plane_payload(vb, 0, q_data->sizeimage); return 0; } static void m2mtest_buf_queue(struct vb2_buffer *vb) { struct m2mtest_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); v4l2_m2m_buf_queue(ctx->m2m_ctx, vb); } static void m2mtest_wait_prepare(struct vb2_queue *q) { struct m2mtest_ctx *ctx = vb2_get_drv_priv(q); m2mtest_unlock(ctx); } static void m2mtest_wait_finish(struct vb2_queue *q) { struct m2mtest_ctx *ctx = vb2_get_drv_priv(q); m2mtest_lock(ctx); } static struct vb2_ops m2mtest_qops = { .queue_setup = m2mtest_queue_setup, .buf_prepare = m2mtest_buf_prepare, .buf_queue = m2mtest_buf_queue, .wait_prepare = m2mtest_wait_prepare, .wait_finish = m2mtest_wait_finish, }; static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq) { struct m2mtest_ctx *ctx = priv; int ret; memset(src_vq, 0, sizeof(*src_vq)); src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; src_vq->io_modes = VB2_MMAP; src_vq->drv_priv = ctx; src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); src_vq->ops = &m2mtest_qops; src_vq->mem_ops = &vb2_vmalloc_memops; ret = vb2_queue_init(src_vq); if (ret) return ret; memset(dst_vq, 0, sizeof(*dst_vq)); dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; dst_vq->io_modes = VB2_MMAP; dst_vq->drv_priv = ctx; dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); dst_vq->ops = &m2mtest_qops; dst_vq->mem_ops = &vb2_vmalloc_memops; return vb2_queue_init(dst_vq); } /* * File operations */ static int m2mtest_open(struct file 
*file) { struct m2mtest_dev *dev = video_drvdata(file); struct m2mtest_ctx *ctx = NULL; ctx = kzalloc(sizeof *ctx, GFP_KERNEL); if (!ctx) return -ENOMEM; file->private_data = ctx; ctx->dev = dev; ctx->translen = MEM2MEM_DEF_TRANSLEN; ctx->transtime = MEM2MEM_DEF_TRANSTIME; ctx->num_processed = 0; ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init); if (IS_ERR(ctx->m2m_ctx)) { int ret = PTR_ERR(ctx->m2m_ctx); kfree(ctx); return ret; } atomic_inc(&dev->num_inst); dprintk(dev, "Created instance %p, m2m_ctx: %p\n", ctx, ctx->m2m_ctx); return 0; } static int m2mtest_release(struct file *file) { struct m2mtest_dev *dev = video_drvdata(file); struct m2mtest_ctx *ctx = file->private_data; dprintk(dev, "Releasing instance %p\n", ctx); v4l2_m2m_ctx_release(ctx->m2m_ctx); kfree(ctx); atomic_dec(&dev->num_inst); return 0; } static unsigned int m2mtest_poll(struct file *file, struct poll_table_struct *wait) { struct m2mtest_ctx *ctx = file->private_data; return v4l2_m2m_poll(file, ctx->m2m_ctx, wait); } static int m2mtest_mmap(struct file *file, struct vm_area_struct *vma) { struct m2mtest_ctx *ctx = file->private_data; return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma); } static const struct v4l2_file_operations m2mtest_fops = { .owner = THIS_MODULE, .open = m2mtest_open, .release = m2mtest_release, .poll = m2mtest_poll, .unlocked_ioctl = video_ioctl2, .mmap = m2mtest_mmap, }; static struct video_device m2mtest_videodev = { .name = MEM2MEM_NAME, .fops = &m2mtest_fops, .ioctl_ops = &m2mtest_ioctl_ops, .minor = -1, .release = video_device_release, }; static struct v4l2_m2m_ops m2m_ops = { .device_run = device_run, .job_ready = job_ready, .job_abort = job_abort, .lock = m2mtest_lock, .unlock = m2mtest_unlock, }; static int m2mtest_probe(struct platform_device *pdev) { struct m2mtest_dev *dev; struct video_device *vfd; int ret; dev = kzalloc(sizeof *dev, GFP_KERNEL); if (!dev) return -ENOMEM; spin_lock_init(&dev->irqlock); ret = v4l2_device_register(&pdev->dev, 
&dev->v4l2_dev); if (ret) goto free_dev; atomic_set(&dev->num_inst, 0); mutex_init(&dev->dev_mutex); vfd = video_device_alloc(); if (!vfd) { v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n"); ret = -ENOMEM; goto unreg_dev; } *vfd = m2mtest_videodev; vfd->lock = &dev->dev_mutex; ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); goto rel_vdev; } video_set_drvdata(vfd, dev); snprintf(vfd->name, sizeof(vfd->name), "%s", m2mtest_videodev.name); dev->vfd = vfd; v4l2_info(&dev->v4l2_dev, MEM2MEM_TEST_MODULE_NAME "Device registered as /dev/video%d\n", vfd->num); setup_timer(&dev->timer, device_isr, (long)dev); platform_set_drvdata(pdev, dev); dev->m2m_dev = v4l2_m2m_init(&m2m_ops); if (IS_ERR(dev->m2m_dev)) { v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n"); ret = PTR_ERR(dev->m2m_dev); goto err_m2m; } q_data[V4L2_M2M_SRC].fmt = &formats[0]; q_data[V4L2_M2M_DST].fmt = &formats[0]; return 0; v4l2_m2m_release(dev->m2m_dev); err_m2m: video_unregister_device(dev->vfd); rel_vdev: video_device_release(vfd); unreg_dev: v4l2_device_unregister(&dev->v4l2_dev); free_dev: kfree(dev); return ret; } static int m2mtest_remove(struct platform_device *pdev) { struct m2mtest_dev *dev = (struct m2mtest_dev *)platform_get_drvdata(pdev); v4l2_info(&dev->v4l2_dev, "Removing " MEM2MEM_TEST_MODULE_NAME); v4l2_m2m_release(dev->m2m_dev); del_timer_sync(&dev->timer); video_unregister_device(dev->vfd); v4l2_device_unregister(&dev->v4l2_dev); kfree(dev); return 0; } static struct platform_driver m2mtest_pdrv = { .probe = m2mtest_probe, .remove = m2mtest_remove, .driver = { .name = MEM2MEM_NAME, .owner = THIS_MODULE, }, }; static void __exit m2mtest_exit(void) { platform_driver_unregister(&m2mtest_pdrv); platform_device_unregister(&m2mtest_pdev); } static int __init m2mtest_init(void) { int ret; ret = platform_device_register(&m2mtest_pdev); if (ret) return ret; ret = 
platform_driver_register(&m2mtest_pdrv);
	if (ret) {
		/*
		 * BUGFIX: propagate the failure instead of returning 0.
		 * Returning success after unregistering the platform device
		 * would leave the module loaded but non-functional, with
		 * callers believing initialization succeeded.
		 */
		platform_device_unregister(&m2mtest_pdev);
		return ret;
	}

	return 0;
}

module_init(m2mtest_init);
module_exit(m2mtest_exit);
gpl-2.0
DJSteve/kernel_dell_streak7
drivers/isdn/hisax/bkm_a8.c
4236
11771
/* $Id: bkm_a8.c,v 1.22.2.4 2004/01/15 14:02:34 keil Exp $ * * low level stuff for Scitel Quadro (4*S0, passive) * * Author Roland Klabunde * Copyright by Roland Klabunde <R.Klabunde@Berkom.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/init.h> #include "hisax.h" #include "isac.h" #include "ipac.h" #include "hscx.h" #include "isdnl1.h" #include <linux/pci.h> #include "bkm_ax.h" #define ATTEMPT_PCI_REMAPPING /* Required for PLX rev 1 */ static const char sct_quadro_revision[] = "$Revision: 1.22.2.4 $"; static const char *sct_quadro_subtypes[] = { "", "#1", "#2", "#3", "#4" }; #define wordout(addr,val) outw(val,addr) #define wordin(addr) inw(addr) static inline u_char readreg(unsigned int ale, unsigned int adr, u_char off) { register u_char ret; wordout(ale, off); ret = wordin(adr) & 0xFF; return (ret); } static inline void readfifo(unsigned int ale, unsigned int adr, u_char off, u_char * data, int size) { int i; wordout(ale, off); for (i = 0; i < size; i++) data[i] = wordin(adr) & 0xFF; } static inline void writereg(unsigned int ale, unsigned int adr, u_char off, u_char data) { wordout(ale, off); wordout(adr, data); } static inline void writefifo(unsigned int ale, unsigned int adr, u_char off, u_char * data, int size) { int i; wordout(ale, off); for (i = 0; i < size; i++) wordout(adr, data[i]); } /* Interface functions */ static u_char ReadISAC(struct IsdnCardState *cs, u_char offset) { return (readreg(cs->hw.ax.base, cs->hw.ax.data_adr, offset | 0x80)); } static void WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value) { writereg(cs->hw.ax.base, cs->hw.ax.data_adr, offset | 0x80, value); } static void ReadISACfifo(struct IsdnCardState *cs, u_char * data, int size) { readfifo(cs->hw.ax.base, cs->hw.ax.data_adr, 0x80, data, size); } static void WriteISACfifo(struct IsdnCardState *cs, u_char * data, int size) { writefifo(cs->hw.ax.base, 
cs->hw.ax.data_adr, 0x80, data, size); } static u_char ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset) { return (readreg(cs->hw.ax.base, cs->hw.ax.data_adr, offset + (hscx ? 0x40 : 0))); } static void WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value) { writereg(cs->hw.ax.base, cs->hw.ax.data_adr, offset + (hscx ? 0x40 : 0), value); } /* Set the specific ipac to active */ static void set_ipac_active(struct IsdnCardState *cs, u_int active) { /* set irq mask */ writereg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_MASK, active ? 0xc0 : 0xff); } /* * fast interrupt HSCX stuff goes here */ #define READHSCX(cs, nr, reg) readreg(cs->hw.ax.base, \ cs->hw.ax.data_adr, reg + (nr ? 0x40 : 0)) #define WRITEHSCX(cs, nr, reg, data) writereg(cs->hw.ax.base, \ cs->hw.ax.data_adr, reg + (nr ? 0x40 : 0), data) #define READHSCXFIFO(cs, nr, ptr, cnt) readfifo(cs->hw.ax.base, \ cs->hw.ax.data_adr, (nr ? 0x40 : 0), ptr, cnt) #define WRITEHSCXFIFO(cs, nr, ptr, cnt) writefifo(cs->hw.ax.base, \ cs->hw.ax.data_adr, (nr ? 
0x40 : 0), ptr, cnt) #include "hscx_irq.c" static irqreturn_t bkm_interrupt_ipac(int intno, void *dev_id) { struct IsdnCardState *cs = dev_id; u_char ista, val, icnt = 5; u_long flags; spin_lock_irqsave(&cs->lock, flags); ista = readreg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_ISTA); if (!(ista & 0x3f)) { /* not this IPAC */ spin_unlock_irqrestore(&cs->lock, flags); return IRQ_NONE; } Start_IPAC: if (cs->debug & L1_DEB_IPAC) debugl1(cs, "IPAC ISTA %02X", ista); if (ista & 0x0f) { val = readreg(cs->hw.ax.base, cs->hw.ax.data_adr, HSCX_ISTA + 0x40); if (ista & 0x01) val |= 0x01; if (ista & 0x04) val |= 0x02; if (ista & 0x08) val |= 0x04; if (val) { hscx_int_main(cs, val); } } if (ista & 0x20) { val = 0xfe & readreg(cs->hw.ax.base, cs->hw.ax.data_adr, ISAC_ISTA | 0x80); if (val) { isac_interrupt(cs, val); } } if (ista & 0x10) { val = 0x01; isac_interrupt(cs, val); } ista = readreg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_ISTA); if ((ista & 0x3f) && icnt) { icnt--; goto Start_IPAC; } if (!icnt) printk(KERN_WARNING "HiSax: Scitel Quadro (%s) IRQ LOOP\n", sct_quadro_subtypes[cs->subtyp]); writereg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_MASK, 0xFF); writereg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_MASK, 0xC0); spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } static void release_io_sct_quadro(struct IsdnCardState *cs) { release_region(cs->hw.ax.base & 0xffffffc0, 128); if (cs->subtyp == SCT_1) release_region(cs->hw.ax.plx_adr, 64); } static void enable_bkm_int(struct IsdnCardState *cs, unsigned bEnable) { if (cs->typ == ISDN_CTYPE_SCT_QUADRO) { if (bEnable) wordout(cs->hw.ax.plx_adr + 0x4C, (wordin(cs->hw.ax.plx_adr + 0x4C) | 0x41)); else wordout(cs->hw.ax.plx_adr + 0x4C, (wordin(cs->hw.ax.plx_adr + 0x4C) & ~0x41)); } } static void reset_bkm(struct IsdnCardState *cs) { if (cs->subtyp == SCT_1) { wordout(cs->hw.ax.plx_adr + 0x50, (wordin(cs->hw.ax.plx_adr + 0x50) & ~4)); mdelay(10); /* Remove the soft reset */ wordout(cs->hw.ax.plx_adr + 0x50, 
(wordin(cs->hw.ax.plx_adr + 0x50) | 4)); mdelay(10); } } static int BKM_card_msg(struct IsdnCardState *cs, int mt, void *arg) { u_long flags; switch (mt) { case CARD_RESET: spin_lock_irqsave(&cs->lock, flags); /* Disable ints */ set_ipac_active(cs, 0); enable_bkm_int(cs, 0); reset_bkm(cs); spin_unlock_irqrestore(&cs->lock, flags); return (0); case CARD_RELEASE: /* Sanity */ spin_lock_irqsave(&cs->lock, flags); set_ipac_active(cs, 0); enable_bkm_int(cs, 0); spin_unlock_irqrestore(&cs->lock, flags); release_io_sct_quadro(cs); return (0); case CARD_INIT: spin_lock_irqsave(&cs->lock, flags); cs->debug |= L1_DEB_IPAC; set_ipac_active(cs, 1); inithscxisac(cs, 3); /* Enable ints */ enable_bkm_int(cs, 1); spin_unlock_irqrestore(&cs->lock, flags); return (0); case CARD_TEST: return (0); } return (0); } static int __devinit sct_alloc_io(u_int adr, u_int len) { if (!request_region(adr, len, "scitel")) { printk(KERN_WARNING "HiSax: Scitel port %#x-%#x already in use\n", adr, adr + len); return (1); } return(0); } static struct pci_dev *dev_a8 __devinitdata = NULL; static u16 sub_vendor_id __devinitdata = 0; static u16 sub_sys_id __devinitdata = 0; static u_char pci_bus __devinitdata = 0; static u_char pci_device_fn __devinitdata = 0; static u_char pci_irq __devinitdata = 0; int __devinit setup_sct_quadro(struct IsdnCard *card) { struct IsdnCardState *cs = card->cs; char tmp[64]; u_int found = 0; u_int pci_ioaddr1, pci_ioaddr2, pci_ioaddr3, pci_ioaddr4, pci_ioaddr5; strcpy(tmp, sct_quadro_revision); printk(KERN_INFO "HiSax: T-Berkom driver Rev. 
%s\n", HiSax_getrev(tmp)); if (cs->typ == ISDN_CTYPE_SCT_QUADRO) { cs->subtyp = SCT_1; /* Preset */ } else return (0); /* Identify subtype by para[0] */ if (card->para[0] >= SCT_1 && card->para[0] <= SCT_4) cs->subtyp = card->para[0]; else { printk(KERN_WARNING "HiSax: Scitel Quadro: Invalid " "subcontroller in configuration, default to 1\n"); return (0); } if ((cs->subtyp != SCT_1) && ((sub_sys_id != PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO) || (sub_vendor_id != PCI_VENDOR_ID_BERKOM))) return (0); if (cs->subtyp == SCT_1) { while ((dev_a8 = hisax_find_pci_device(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, dev_a8))) { sub_vendor_id = dev_a8->subsystem_vendor; sub_sys_id = dev_a8->subsystem_device; if ((sub_sys_id == PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO) && (sub_vendor_id == PCI_VENDOR_ID_BERKOM)) { if (pci_enable_device(dev_a8)) return(0); pci_ioaddr1 = pci_resource_start(dev_a8, 1); pci_irq = dev_a8->irq; pci_bus = dev_a8->bus->number; pci_device_fn = dev_a8->devfn; found = 1; break; } } if (!found) { printk(KERN_WARNING "HiSax: Scitel Quadro (%s): " "Card not found\n", sct_quadro_subtypes[cs->subtyp]); return (0); } #ifdef ATTEMPT_PCI_REMAPPING /* HACK: PLX revision 1 bug: PLX address bit 7 must not be set */ if ((pci_ioaddr1 & 0x80) && (dev_a8->revision == 1)) { printk(KERN_WARNING "HiSax: Scitel Quadro (%s): " "PLX rev 1, remapping required!\n", sct_quadro_subtypes[cs->subtyp]); /* Restart PCI negotiation */ pci_write_config_dword(dev_a8, PCI_BASE_ADDRESS_1, (u_int) - 1); /* Move up by 0x80 byte */ pci_ioaddr1 += 0x80; pci_ioaddr1 &= PCI_BASE_ADDRESS_IO_MASK; pci_write_config_dword(dev_a8, PCI_BASE_ADDRESS_1, pci_ioaddr1); dev_a8->resource[ 1].start = pci_ioaddr1; } #endif /* End HACK */ } if (!pci_irq) { /* IRQ range check ?? 
*/ printk(KERN_WARNING "HiSax: Scitel Quadro (%s): No IRQ\n", sct_quadro_subtypes[cs->subtyp]); return (0); } pci_read_config_dword(dev_a8, PCI_BASE_ADDRESS_1, &pci_ioaddr1); pci_read_config_dword(dev_a8, PCI_BASE_ADDRESS_2, &pci_ioaddr2); pci_read_config_dword(dev_a8, PCI_BASE_ADDRESS_3, &pci_ioaddr3); pci_read_config_dword(dev_a8, PCI_BASE_ADDRESS_4, &pci_ioaddr4); pci_read_config_dword(dev_a8, PCI_BASE_ADDRESS_5, &pci_ioaddr5); if (!pci_ioaddr1 || !pci_ioaddr2 || !pci_ioaddr3 || !pci_ioaddr4 || !pci_ioaddr5) { printk(KERN_WARNING "HiSax: Scitel Quadro (%s): " "No IO base address(es)\n", sct_quadro_subtypes[cs->subtyp]); return (0); } pci_ioaddr1 &= PCI_BASE_ADDRESS_IO_MASK; pci_ioaddr2 &= PCI_BASE_ADDRESS_IO_MASK; pci_ioaddr3 &= PCI_BASE_ADDRESS_IO_MASK; pci_ioaddr4 &= PCI_BASE_ADDRESS_IO_MASK; pci_ioaddr5 &= PCI_BASE_ADDRESS_IO_MASK; /* Take over */ cs->irq = pci_irq; cs->irq_flags |= IRQF_SHARED; /* pci_ioaddr1 is unique to all subdevices */ /* pci_ioaddr2 is for the fourth subdevice only */ /* pci_ioaddr3 is for the third subdevice only */ /* pci_ioaddr4 is for the second subdevice only */ /* pci_ioaddr5 is for the first subdevice only */ cs->hw.ax.plx_adr = pci_ioaddr1; /* Enter all ipac_base addresses */ switch(cs->subtyp) { case 1: cs->hw.ax.base = pci_ioaddr5 + 0x00; if (sct_alloc_io(pci_ioaddr1, 128)) return(0); if (sct_alloc_io(pci_ioaddr5, 64)) return(0); /* disable all IPAC */ writereg(pci_ioaddr5, pci_ioaddr5 + 4, IPAC_MASK, 0xFF); writereg(pci_ioaddr4 + 0x08, pci_ioaddr4 + 0x0c, IPAC_MASK, 0xFF); writereg(pci_ioaddr3 + 0x10, pci_ioaddr3 + 0x14, IPAC_MASK, 0xFF); writereg(pci_ioaddr2 + 0x20, pci_ioaddr2 + 0x24, IPAC_MASK, 0xFF); break; case 2: cs->hw.ax.base = pci_ioaddr4 + 0x08; if (sct_alloc_io(pci_ioaddr4, 64)) return(0); break; case 3: cs->hw.ax.base = pci_ioaddr3 + 0x10; if (sct_alloc_io(pci_ioaddr3, 64)) return(0); break; case 4: cs->hw.ax.base = pci_ioaddr2 + 0x20; if (sct_alloc_io(pci_ioaddr2, 64)) return(0); break; } /* For isac and hscx 
data path */ cs->hw.ax.data_adr = cs->hw.ax.base + 4; printk(KERN_INFO "HiSax: Scitel Quadro (%s) configured at " "0x%.4lX, 0x%.4lX, 0x%.4lX and IRQ %d\n", sct_quadro_subtypes[cs->subtyp], cs->hw.ax.plx_adr, cs->hw.ax.base, cs->hw.ax.data_adr, cs->irq); test_and_set_bit(HW_IPAC, &cs->HW_Flags); cs->readisac = &ReadISAC; cs->writeisac = &WriteISAC; cs->readisacfifo = &ReadISACfifo; cs->writeisacfifo = &WriteISACfifo; cs->BC_Read_Reg = &ReadHSCX; cs->BC_Write_Reg = &WriteHSCX; cs->BC_Send_Data = &hscx_fill_fifo; cs->cardmsg = &BKM_card_msg; cs->irq_func = &bkm_interrupt_ipac; printk(KERN_INFO "HiSax: Scitel Quadro (%s): IPAC Version %d\n", sct_quadro_subtypes[cs->subtyp], readreg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_ID)); return (1); }
gpl-2.0
dev-harsh1998/android_kernel_cyanogen_msm8916
arch/unicore32/kernel/sys.c
4492
1077
/* * linux/arch/unicore32/kernel/sys.c * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/sem.h> #include <linux/msg.h> #include <linux/shm.h> #include <linux/stat.h> #include <linux/syscalls.h> #include <linux/mman.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/ipc.h> #include <linux/uaccess.h> #include <asm/syscalls.h> #include <asm/cacheflush.h> /* Provide the actual syscall number to call mapping. */ #undef __SYSCALL #define __SYSCALL(nr, call) [nr] = (call), #define sys_mmap2 sys_mmap_pgoff /* Note that we don't include <linux/unistd.h> but <asm/unistd.h> */ void *sys_call_table[__NR_syscalls] = { [0 ... __NR_syscalls-1] = sys_ni_syscall, #include <asm/unistd.h> };
gpl-2.0
faux123/HTC_Amaze_Kernel
drivers/staging/octeon/cvmx-helper-npi.c
4748
3413
/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

/*
 * Functions for NPI initialization, configuration,
 * and monitoring.
 */
#include <asm/octeon/octeon.h>

#include "cvmx-config.h"
#include "cvmx-helper.h"

#include "cvmx-pip-defs.h"

/**
 * Probe a NPI interface and determine the number of ports
 * connected to it. The NPI interface should still be down
 * after this call.
 *
 * The count is decided purely by chip model (and pass revision);
 * no hardware registers are touched here.
 *
 * @interface: Interface to probe
 *
 * Returns Number of ports on the interface. Zero to disable.
 */
int __cvmx_helper_npi_probe(int interface)
{
#if CVMX_PKO_QUEUES_PER_PORT_PCI > 0
	if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
		return 4;
	else if (OCTEON_IS_MODEL(OCTEON_CN56XX)
		 && !OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
		/* The packet engines didn't exist before pass 2 */
		return 4;
	else if (OCTEON_IS_MODEL(OCTEON_CN52XX)
		 && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
		/* The packet engines didn't exist before pass 2 */
		return 4;
#if 0
	/*
	 * Technically CN30XX, CN31XX, and CN50XX contain packet
	 * engines, but nobody ever uses them. Since this is the case,
	 * we disable them here.
	 */
	else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
		 || OCTEON_IS_MODEL(OCTEON_CN50XX))
		return 2;
	else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
		return 1;
#endif
#endif
	return 0;	/* PCI PKO queues not configured: interface disabled */
}

/**
 * Bringup and enable a NPI interface. After this call packet
 * I/O should be fully functional. This is called with IPD
 * enabled but PKO disabled.
 *
 * @interface: Interface to bring up
 *
 * Returns Zero on success, negative on failure
 */
int __cvmx_helper_npi_enable(int interface)
{
	/*
	 * On CN50XX, CN52XX, and CN56XX we need to disable length
	 * checking so packet < 64 bytes and jumbo frames don't get
	 * errors.
	 */
	if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) &&
	    !OCTEON_IS_MODEL(OCTEON_CN58XX)) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		for (port = 0; port < num_ports; port++) {
			union cvmx_pip_prt_cfgx port_cfg;
			int ipd_port =
			    cvmx_helper_get_ipd_port(interface, port);
			/*
			 * Read-modify-write the per-port PIP config CSR,
			 * clearing both the min- and max-length error
			 * enables.
			 */
			port_cfg.u64 =
			    cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
			port_cfg.s.maxerr_en = 0;
			port_cfg.s.minerr_en = 0;
			cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port),
				       port_cfg.u64);
		}
	}

	/* Enables are controlled by the remote host, so nothing to do here */
	return 0;
}
gpl-2.0
OnePlusOSS/android_kernel_oneplus_one
drivers/xen/xenbus/xenbus_dev_frontend.c
5004
14687
/* * Driver giving user-space access to the kernel's xenbus connection * to xenstore. * * Copyright (c) 2005, Christian Limpach * Copyright (c) 2005, Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Changes: * 2008-10-07 Alex Zeffertt Replaced /proc/xen/xenbus with xenfs filesystem * and /proc/xen compatibility mount point. * Turned xenfs into a loadable module. 
*/ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/uio.h> #include <linux/notifier.h> #include <linux/wait.h> #include <linux/fs.h> #include <linux/poll.h> #include <linux/mutex.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/mount.h> #include <linux/pagemap.h> #include <linux/uaccess.h> #include <linux/init.h> #include <linux/namei.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/miscdevice.h> #include <linux/module.h> #include "xenbus_comms.h" #include <xen/xenbus.h> #include <xen/xen.h> #include <asm/xen/hypervisor.h> MODULE_LICENSE("GPL"); /* * An element of a list of outstanding transactions, for which we're * still waiting a reply. */ struct xenbus_transaction_holder { struct list_head list; struct xenbus_transaction handle; }; /* * A buffer of data on the queue. */ struct read_buffer { struct list_head list; unsigned int cons; unsigned int len; char msg[]; }; struct xenbus_file_priv { /* * msgbuffer_mutex is held while partial requests are built up * and complete requests are acted on. It therefore protects * the "transactions" and "watches" lists, and the partial * request length and buffer. * * reply_mutex protects the reply being built up to return to * usermode. It nests inside msgbuffer_mutex but may be held * alone during a watch callback. */ struct mutex msgbuffer_mutex; /* In-progress transactions */ struct list_head transactions; /* Active watches. */ struct list_head watches; /* Partial request. */ unsigned int len; union { struct xsd_sockmsg msg; char buffer[XENSTORE_PAYLOAD_MAX]; } u; /* Response queue. */ struct mutex reply_mutex; struct list_head read_buffers; wait_queue_head_t read_waitq; }; /* Read out any raw xenbus messages queued up. 
*/ static ssize_t xenbus_file_read(struct file *filp, char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_file_priv *u = filp->private_data; struct read_buffer *rb; unsigned i; int ret; mutex_lock(&u->reply_mutex); again: while (list_empty(&u->read_buffers)) { mutex_unlock(&u->reply_mutex); if (filp->f_flags & O_NONBLOCK) return -EAGAIN; ret = wait_event_interruptible(u->read_waitq, !list_empty(&u->read_buffers)); if (ret) return ret; mutex_lock(&u->reply_mutex); } rb = list_entry(u->read_buffers.next, struct read_buffer, list); i = 0; while (i < len) { unsigned sz = min((unsigned)len - i, rb->len - rb->cons); ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz); i += sz - ret; rb->cons += sz - ret; if (ret != 0) { if (i == 0) i = -EFAULT; goto out; } /* Clear out buffer if it has been consumed */ if (rb->cons == rb->len) { list_del(&rb->list); kfree(rb); if (list_empty(&u->read_buffers)) break; rb = list_entry(u->read_buffers.next, struct read_buffer, list); } } if (i == 0) goto again; out: mutex_unlock(&u->reply_mutex); return i; } /* * Add a buffer to the queue. Caller must hold the appropriate lock * if the queue is not local. (Commonly the caller will build up * multiple queued buffers on a temporary local list, and then add it * to the appropriate list under lock once all the buffers have een * successfully allocated.) */ static int queue_reply(struct list_head *queue, const void *data, size_t len) { struct read_buffer *rb; if (len == 0) return 0; rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL); if (rb == NULL) return -ENOMEM; rb->cons = 0; rb->len = len; memcpy(rb->msg, data, len); list_add_tail(&rb->list, queue); return 0; } /* * Free all the read_buffer s on a list. * Caller must have sole reference to list. 
*/
static void queue_cleanup(struct list_head *list)
{
	struct read_buffer *rb;

	while (!list_empty(list)) {
		rb = list_entry(list->next, struct read_buffer, list);
		list_del(list->next);
		kfree(rb);
	}
}

/* One userspace-registered watch, linked on xenbus_file_priv->watches. */
struct watch_adapter {
	struct list_head list;
	struct xenbus_watch watch;
	struct xenbus_file_priv *dev_data;	/* owning open file */
	char *token;		/* token userspace supplied with XS_WATCH */
};

static void free_watch_adapter(struct watch_adapter *watch)
{
	kfree(watch->watch.node);
	kfree(watch->token);
	kfree(watch);
}

/*
 * Allocate a watch_adapter and duplicate the path and token strings.
 * Returns NULL on any allocation failure; partially-built adapters are
 * freed (kfree of the NULL members is a no-op).
 */
static struct watch_adapter *alloc_watch_adapter(const char *path,
						 const char *token)
{
	struct watch_adapter *watch;

	watch = kzalloc(sizeof(*watch), GFP_KERNEL);
	if (watch == NULL)
		goto out_fail;

	watch->watch.node = kstrdup(path, GFP_KERNEL);
	if (watch->watch.node == NULL)
		goto out_free;

	watch->token = kstrdup(token, GFP_KERNEL);
	if (watch->token == NULL)
		goto out_free;

	return watch;

out_free:
	free_watch_adapter(watch);

out_fail:
	return NULL;
}

/*
 * Watch callback: synthesize an XS_WATCH_EVENT message (header, path,
 * token, optional extra data) and queue it on the owning file's
 * read_buffers list, waking any blocked reader.  The message is built
 * on a local staging list first, so an allocation failure part-way
 * through leaves the reader's queue untouched.
 */
static void watch_fired(struct xenbus_watch *watch,
			const char **vec,
			unsigned int len)
{
	struct watch_adapter *adap;
	struct xsd_sockmsg hdr;
	const char *path, *token;
	int path_len, tok_len, body_len, data_len = 0;
	int ret;
	LIST_HEAD(staging_q);

	adap = container_of(watch, struct watch_adapter, watch);

	path = vec[XS_WATCH_PATH];
	token = adap->token;

	/* Body is NUL-terminated path + token (+ any extra payload). */
	path_len = strlen(path) + 1;
	tok_len = strlen(token) + 1;
	if (len > 2)
		data_len = vec[len] - vec[2] + 1;
	body_len = path_len + tok_len + data_len;

	hdr.type = XS_WATCH_EVENT;
	hdr.len = body_len;

	mutex_lock(&adap->dev_data->reply_mutex);

	ret = queue_reply(&staging_q, &hdr, sizeof(hdr));
	if (!ret)
		ret = queue_reply(&staging_q, path, path_len);
	if (!ret)
		ret = queue_reply(&staging_q, token, tok_len);
	if (!ret && len > 2)
		ret = queue_reply(&staging_q, vec[2], data_len);

	if (!ret) {
		/* success: pass reply list onto watcher */
		list_splice_tail(&staging_q, &adap->dev_data->read_buffers);
		wake_up(&adap->dev_data->read_waitq);
	} else
		queue_cleanup(&staging_q);

	mutex_unlock(&adap->dev_data->reply_mutex);
}

static int xenbus_write_transaction(unsigned
msg_type, struct xenbus_file_priv *u) { int rc; void *reply; struct xenbus_transaction_holder *trans = NULL; LIST_HEAD(staging_q); if (msg_type == XS_TRANSACTION_START) { trans = kmalloc(sizeof(*trans), GFP_KERNEL); if (!trans) { rc = -ENOMEM; goto out; } } reply = xenbus_dev_request_and_reply(&u->u.msg); if (IS_ERR(reply)) { kfree(trans); rc = PTR_ERR(reply); goto out; } if (msg_type == XS_TRANSACTION_START) { trans->handle.id = simple_strtoul(reply, NULL, 0); list_add(&trans->list, &u->transactions); } else if (msg_type == XS_TRANSACTION_END) { list_for_each_entry(trans, &u->transactions, list) if (trans->handle.id == u->u.msg.tx_id) break; BUG_ON(&trans->list == &u->transactions); list_del(&trans->list); kfree(trans); } mutex_lock(&u->reply_mutex); rc = queue_reply(&staging_q, &u->u.msg, sizeof(u->u.msg)); if (!rc) rc = queue_reply(&staging_q, reply, u->u.msg.len); if (!rc) { list_splice_tail(&staging_q, &u->read_buffers); wake_up(&u->read_waitq); } else { queue_cleanup(&staging_q); } mutex_unlock(&u->reply_mutex); kfree(reply); out: return rc; } static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u) { struct watch_adapter *watch, *tmp_watch; char *path, *token; int err, rc; LIST_HEAD(staging_q); path = u->u.buffer + sizeof(u->u.msg); token = memchr(path, 0, u->u.msg.len); if (token == NULL) { rc = -EILSEQ; goto out; } token++; if (memchr(token, 0, u->u.msg.len - (token - path)) == NULL) { rc = -EILSEQ; goto out; } if (msg_type == XS_WATCH) { watch = alloc_watch_adapter(path, token); if (watch == NULL) { rc = -ENOMEM; goto out; } watch->watch.callback = watch_fired; watch->dev_data = u; err = register_xenbus_watch(&watch->watch); if (err) { free_watch_adapter(watch); rc = err; goto out; } list_add(&watch->list, &u->watches); } else { list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { if (!strcmp(watch->token, token) && !strcmp(watch->watch.node, path)) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); 
free_watch_adapter(watch); break; } } } /* Success. Synthesize a reply to say all is OK. */ { struct { struct xsd_sockmsg hdr; char body[3]; } __packed reply = { { .type = msg_type, .len = sizeof(reply.body) }, "OK" }; mutex_lock(&u->reply_mutex); rc = queue_reply(&u->read_buffers, &reply, sizeof(reply)); wake_up(&u->read_waitq); mutex_unlock(&u->reply_mutex); } out: return rc; } static ssize_t xenbus_file_write(struct file *filp, const char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_file_priv *u = filp->private_data; uint32_t msg_type; int rc = len; int ret; LIST_HEAD(staging_q); /* * We're expecting usermode to be writing properly formed * xenbus messages. If they write an incomplete message we * buffer it up. Once it is complete, we act on it. */ /* * Make sure concurrent writers can't stomp all over each * other's messages and make a mess of our partial message * buffer. We don't make any attemppt to stop multiple * writers from making a mess of each other's incomplete * messages; we're just trying to guarantee our own internal * consistency and make sure that single writes are handled * atomically. */ mutex_lock(&u->msgbuffer_mutex); /* Get this out of the way early to avoid confusion */ if (len == 0) goto out; /* Can't write a xenbus message larger we can buffer */ if ((len + u->len) > sizeof(u->u.buffer)) { /* On error, dump existing buffer */ u->len = 0; rc = -EINVAL; goto out; } ret = copy_from_user(u->u.buffer + u->len, ubuf, len); if (ret != 0) { rc = -EFAULT; goto out; } /* Deal with a partial copy. */ len -= ret; rc = len; u->len += len; /* Return if we haven't got a full message yet */ if (u->len < sizeof(u->u.msg)) goto out; /* not even the header yet */ /* If we're expecting a message that's larger than we can possibly send, dump what we have and return an error. 
*/ if ((sizeof(u->u.msg) + u->u.msg.len) > sizeof(u->u.buffer)) { rc = -E2BIG; u->len = 0; goto out; } if (u->len < (sizeof(u->u.msg) + u->u.msg.len)) goto out; /* incomplete data portion */ /* * OK, now we have a complete message. Do something with it. */ msg_type = u->u.msg.type; switch (msg_type) { case XS_WATCH: case XS_UNWATCH: /* (Un)Ask for some path to be watched for changes */ ret = xenbus_write_watch(msg_type, u); break; default: /* Send out a transaction */ ret = xenbus_write_transaction(msg_type, u); break; } if (ret != 0) rc = ret; /* Buffered message consumed */ u->len = 0; out: mutex_unlock(&u->msgbuffer_mutex); return rc; } static int xenbus_file_open(struct inode *inode, struct file *filp) { struct xenbus_file_priv *u; if (xen_store_evtchn == 0) return -ENOENT; nonseekable_open(inode, filp); u = kzalloc(sizeof(*u), GFP_KERNEL); if (u == NULL) return -ENOMEM; INIT_LIST_HEAD(&u->transactions); INIT_LIST_HEAD(&u->watches); INIT_LIST_HEAD(&u->read_buffers); init_waitqueue_head(&u->read_waitq); mutex_init(&u->reply_mutex); mutex_init(&u->msgbuffer_mutex); filp->private_data = u; return 0; } static int xenbus_file_release(struct inode *inode, struct file *filp) { struct xenbus_file_priv *u = filp->private_data; struct xenbus_transaction_holder *trans, *tmp; struct watch_adapter *watch, *tmp_watch; struct read_buffer *rb, *tmp_rb; /* * No need for locking here because there are no other users, * by definition. 
*/ list_for_each_entry_safe(trans, tmp, &u->transactions, list) { xenbus_transaction_end(trans->handle, 1); list_del(&trans->list); kfree(trans); } list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); } list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) { list_del(&rb->list); kfree(rb); } kfree(u); return 0; } static unsigned int xenbus_file_poll(struct file *file, poll_table *wait) { struct xenbus_file_priv *u = file->private_data; poll_wait(file, &u->read_waitq, wait); if (!list_empty(&u->read_buffers)) return POLLIN | POLLRDNORM; return 0; } const struct file_operations xen_xenbus_fops = { .read = xenbus_file_read, .write = xenbus_file_write, .open = xenbus_file_open, .release = xenbus_file_release, .poll = xenbus_file_poll, .llseek = no_llseek, }; EXPORT_SYMBOL_GPL(xen_xenbus_fops); static struct miscdevice xenbus_dev = { .minor = MISC_DYNAMIC_MINOR, .name = "xen/xenbus", .fops = &xen_xenbus_fops, }; static int __init xenbus_init(void) { int err; if (!xen_domain()) return -ENODEV; err = misc_register(&xenbus_dev); if (err) printk(KERN_ERR "Could not register xenbus frontend device\n"); return err; } static void __exit xenbus_exit(void) { misc_deregister(&xenbus_dev); } module_init(xenbus_init); module_exit(xenbus_exit);
gpl-2.0
Red--Code/Code-Red-honami
arch/arm/mach-pxa/capc7117.c
5004
4075
/*
 * linux/arch/arm/mach-pxa/capc7117.c
 *
 * Support for the Embedian CAPC-7117 Evaluation Kit
 * based on the Embedian MXM-8x10 Computer on Module
 *
 * Copyright (C) 2009 Embedian Inc.
 * Copyright (C) 2009 TMT Services & Supplies (Pty) Ltd.
 *
 * 2007-09-04: eric miao <eric.y.miao@gmail.com>
 *             rewrite to align with latest kernel
 *
 * 2010-01-09: Edwin Peer <epeer@tmtservices.co.za>
 *             Hennie van der Merwe <hvdmerwe@tmtservices.co.za>
 *             rework for upstream merge
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/serial_8250.h>
#include <linux/gpio.h>

#include <asm/mach-types.h>
#include <asm/mach/arch.h>

#include <mach/pxa320.h>
#include <mach/mxm8x10.h>

#include "generic.h"

/* IDE (PATA) Support */
static struct pata_platform_info pata_platform_data = {
	/* registers sit on 16-bit boundaries: address lines shifted by one */
	.ioport_shift = 1
};

static struct resource capc7117_ide_resources[] = {
	[0] = {		/* command/data register block */
	       .start = 0x11000020,
	       .end = 0x1100003f,
	       .flags = IORESOURCE_MEM
	       },
	[1] = {		/* alternate status / device control register */
	       .start = 0x1100001c,
	       .end = 0x1100001c,
	       .flags = IORESOURCE_MEM
	       },
	[2] = {		/* IDE interrupt on GPIO76, rising edge */
	       .start = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO76)),
	       .end = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO76)),
	       .flags = IORESOURCE_IRQ | IRQF_TRIGGER_RISING
	       }
};

static struct platform_device capc7117_ide_device = {
	.name = "pata_platform",
	.num_resources = ARRAY_SIZE(capc7117_ide_resources),
	.resource = capc7117_ide_resources,
	.dev = {
		.platform_data = &pata_platform_data,
		.coherent_dma_mask = ~0		/* grumble */
		}
};

static void __init capc7117_ide_init(void)
{
	platform_device_register(&capc7117_ide_device);
}

/* TI16C752 UART support */
#define	TI16C752_FLAGS		(UPF_BOOT_AUTOCONF | \
					UPF_IOREMAP | \
					UPF_BUGGY_UART | \
					UPF_SKIP_TEST)
#define	TI16C752_UARTCLK	(22118400)
static struct plat_serial8250_port ti16c752_platform_data[] = {
	[0] = {		/* UART channel A; each channel 0x40 apart, GPIO IRQs */
	       .mapbase = 0x14000000,
	       .irq = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO78)),
	       .irqflags = IRQF_TRIGGER_RISING,
	       .flags = TI16C752_FLAGS,
	       .iotype = UPIO_MEM,
	       .regshift = 1,
	       .uartclk = TI16C752_UARTCLK
	       },
	[1] = {
	       .mapbase = 0x14000040,
	       .irq = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO79)),
	       .irqflags = IRQF_TRIGGER_RISING,
	       .flags = TI16C752_FLAGS,
	       .iotype = UPIO_MEM,
	       .regshift = 1,
	       .uartclk = TI16C752_UARTCLK
	       },
	[2] = {
	       .mapbase = 0x14000080,
	       .irq = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO80)),
	       .irqflags = IRQF_TRIGGER_RISING,
	       .flags = TI16C752_FLAGS,
	       .iotype = UPIO_MEM,
	       .regshift = 1,
	       .uartclk = TI16C752_UARTCLK
	       },
	[3] = {
	       .mapbase = 0x140000c0,
	       .irq = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO81)),
	       .irqflags = IRQF_TRIGGER_RISING,
	       .flags = TI16C752_FLAGS,
	       .iotype = UPIO_MEM,
	       .regshift = 1,
	       .uartclk = TI16C752_UARTCLK
	       },
	[4] = {
	       /* end of array */
	       }
};

static struct platform_device ti16c752_device = {
	.name = "serial8250",
	.id = PLAT8250_DEV_PLATFORM,
	.dev = {
		.platform_data = ti16c752_platform_data
		}
};

static void __init capc7117_uarts_init(void)
{
	platform_device_register(&ti16c752_device);
}

/* Board init: bring up the CoM core first, then the eval-kit peripherals. */
static void __init capc7117_init(void)
{
	/* Init CoM */
	mxm_8x10_barebones_init();

	/* Init evaluation board peripherals */
	mxm_8x10_ac97_init();
	mxm_8x10_usb_host_init();
	mxm_8x10_mmc_init();

	capc7117_uarts_init();
	capc7117_ide_init();
}

MACHINE_START(CAPC7117,
	      "Embedian CAPC-7117 evaluation kit based on the MXM-8x10 CoM")
	.atag_offset = 0x100,
	.map_io = pxa3xx_map_io,
	.nr_irqs = PXA_NR_IRQS,
	.init_irq = pxa3xx_init_irq,
	.handle_irq = pxa3xx_handle_irq,
	.timer = &pxa_timer,
	.init_machine = capc7117_init,
	.restart = pxa_restart,
MACHINE_END
gpl-2.0
crpalmer/samsung_kernels
fs/ceph/ioctl.c
5004
8010
#include <linux/in.h> #include "super.h" #include "mds_client.h" #include <linux/ceph/ceph_debug.h> #include "ioctl.h" /* * ioctls */ /* * get and set the file layout */ static long ceph_ioctl_get_layout(struct file *file, void __user *arg) { struct ceph_inode_info *ci = ceph_inode(file->f_dentry->d_inode); struct ceph_ioctl_layout l; int err; err = ceph_do_getattr(file->f_dentry->d_inode, CEPH_STAT_CAP_LAYOUT); if (!err) { l.stripe_unit = ceph_file_layout_su(ci->i_layout); l.stripe_count = ceph_file_layout_stripe_count(ci->i_layout); l.object_size = ceph_file_layout_object_size(ci->i_layout); l.data_pool = le32_to_cpu(ci->i_layout.fl_pg_pool); l.preferred_osd = (s32)le32_to_cpu(ci->i_layout.fl_pg_preferred); if (copy_to_user(arg, &l, sizeof(l))) return -EFAULT; } return err; } static long ceph_ioctl_set_layout(struct file *file, void __user *arg) { struct inode *inode = file->f_dentry->d_inode; struct inode *parent_inode; struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; struct ceph_mds_request *req; struct ceph_ioctl_layout l; struct ceph_inode_info *ci = ceph_inode(file->f_dentry->d_inode); struct ceph_ioctl_layout nl; int err, i; if (copy_from_user(&l, arg, sizeof(l))) return -EFAULT; /* validate changed params against current layout */ err = ceph_do_getattr(file->f_dentry->d_inode, CEPH_STAT_CAP_LAYOUT); if (!err) { nl.stripe_unit = ceph_file_layout_su(ci->i_layout); nl.stripe_count = ceph_file_layout_stripe_count(ci->i_layout); nl.object_size = ceph_file_layout_object_size(ci->i_layout); nl.data_pool = le32_to_cpu(ci->i_layout.fl_pg_pool); nl.preferred_osd = (s32)le32_to_cpu(ci->i_layout.fl_pg_preferred); } else return err; if (l.stripe_count) nl.stripe_count = l.stripe_count; if (l.stripe_unit) nl.stripe_unit = l.stripe_unit; if (l.object_size) nl.object_size = l.object_size; if (l.data_pool) nl.data_pool = l.data_pool; if (l.preferred_osd) nl.preferred_osd = l.preferred_osd; if ((nl.object_size & ~PAGE_MASK) || (nl.stripe_unit & 
~PAGE_MASK) || ((unsigned)nl.object_size % (unsigned)nl.stripe_unit)) return -EINVAL; /* make sure it's a valid data pool */ if (l.data_pool > 0) { mutex_lock(&mdsc->mutex); err = -EINVAL; for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++) if (mdsc->mdsmap->m_data_pg_pools[i] == l.data_pool) { err = 0; break; } mutex_unlock(&mdsc->mutex); if (err) return err; } req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETLAYOUT, USE_AUTH_MDS); if (IS_ERR(req)) return PTR_ERR(req); req->r_inode = inode; ihold(inode); req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL; req->r_args.setlayout.layout.fl_stripe_unit = cpu_to_le32(l.stripe_unit); req->r_args.setlayout.layout.fl_stripe_count = cpu_to_le32(l.stripe_count); req->r_args.setlayout.layout.fl_object_size = cpu_to_le32(l.object_size); req->r_args.setlayout.layout.fl_pg_pool = cpu_to_le32(l.data_pool); req->r_args.setlayout.layout.fl_pg_preferred = cpu_to_le32(l.preferred_osd); parent_inode = ceph_get_dentry_parent_inode(file->f_dentry); err = ceph_mdsc_do_request(mdsc, parent_inode, req); iput(parent_inode); ceph_mdsc_put_request(req); return err; } /* * Set a layout policy on a directory inode. All items in the tree * rooted at this inode will inherit this layout on creation, * (It doesn't apply retroactively ) * unless a subdirectory has its own layout policy. 
*/ static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg) { struct inode *inode = file->f_dentry->d_inode; struct ceph_mds_request *req; struct ceph_ioctl_layout l; int err, i; struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; /* copy and validate */ if (copy_from_user(&l, arg, sizeof(l))) return -EFAULT; if ((l.object_size & ~PAGE_MASK) || (l.stripe_unit & ~PAGE_MASK) || !l.stripe_unit || (l.object_size && (unsigned)l.object_size % (unsigned)l.stripe_unit)) return -EINVAL; /* make sure it's a valid data pool */ if (l.data_pool > 0) { mutex_lock(&mdsc->mutex); err = -EINVAL; for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++) if (mdsc->mdsmap->m_data_pg_pools[i] == l.data_pool) { err = 0; break; } mutex_unlock(&mdsc->mutex); if (err) return err; } req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETDIRLAYOUT, USE_AUTH_MDS); if (IS_ERR(req)) return PTR_ERR(req); req->r_inode = inode; ihold(inode); req->r_args.setlayout.layout.fl_stripe_unit = cpu_to_le32(l.stripe_unit); req->r_args.setlayout.layout.fl_stripe_count = cpu_to_le32(l.stripe_count); req->r_args.setlayout.layout.fl_object_size = cpu_to_le32(l.object_size); req->r_args.setlayout.layout.fl_pg_pool = cpu_to_le32(l.data_pool); req->r_args.setlayout.layout.fl_pg_preferred = cpu_to_le32(l.preferred_osd); err = ceph_mdsc_do_request(mdsc, inode, req); ceph_mdsc_put_request(req); return err; } /* * Return object name, size/offset information, and location (OSD * number, network address) for a given file offset. 
*/ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg) { struct ceph_ioctl_dataloc dl; struct inode *inode = file->f_dentry->d_inode; struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_osd_client *osdc = &ceph_sb_to_client(inode->i_sb)->client->osdc; u64 len = 1, olen; u64 tmp; struct ceph_object_layout ol; struct ceph_pg pgid; /* copy and validate */ if (copy_from_user(&dl, arg, sizeof(dl))) return -EFAULT; down_read(&osdc->map_sem); ceph_calc_file_object_mapping(&ci->i_layout, dl.file_offset, &len, &dl.object_no, &dl.object_offset, &olen); dl.file_offset -= dl.object_offset; dl.object_size = ceph_file_layout_object_size(ci->i_layout); dl.block_size = ceph_file_layout_su(ci->i_layout); /* block_offset = object_offset % block_size */ tmp = dl.object_offset; dl.block_offset = do_div(tmp, dl.block_size); snprintf(dl.object_name, sizeof(dl.object_name), "%llx.%08llx", ceph_ino(inode), dl.object_no); ceph_calc_object_layout(&ol, dl.object_name, &ci->i_layout, osdc->osdmap); pgid = ol.ol_pgid; dl.osd = ceph_calc_pg_primary(osdc->osdmap, pgid); if (dl.osd >= 0) { struct ceph_entity_addr *a = ceph_osd_addr(osdc->osdmap, dl.osd); if (a) memcpy(&dl.osd_addr, &a->in_addr, sizeof(dl.osd_addr)); } else { memset(&dl.osd_addr, 0, sizeof(dl.osd_addr)); } up_read(&osdc->map_sem); /* send result back to user */ if (copy_to_user(arg, &dl, sizeof(dl))) return -EFAULT; return 0; } static long ceph_ioctl_lazyio(struct file *file) { struct ceph_file_info *fi = file->private_data; struct inode *inode = file->f_dentry->d_inode; struct ceph_inode_info *ci = ceph_inode(inode); if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) { spin_lock(&ci->i_ceph_lock); ci->i_nr_by_mode[fi->fmode]--; fi->fmode |= CEPH_FILE_MODE_LAZY; ci->i_nr_by_mode[fi->fmode]++; spin_unlock(&ci->i_ceph_lock); dout("ioctl_layzio: file %p marked lazy\n", file); ceph_check_caps(ci, 0, NULL); } else { dout("ioctl_layzio: file %p already lazy\n", file); } return 0; } static long 
ceph_ioctl_syncio(struct file *file) { struct ceph_file_info *fi = file->private_data; fi->flags |= CEPH_F_SYNC; return 0; } long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { dout("ioctl file %p cmd %u arg %lu\n", file, cmd, arg); switch (cmd) { case CEPH_IOC_GET_LAYOUT: return ceph_ioctl_get_layout(file, (void __user *)arg); case CEPH_IOC_SET_LAYOUT: return ceph_ioctl_set_layout(file, (void __user *)arg); case CEPH_IOC_SET_LAYOUT_POLICY: return ceph_ioctl_set_layout_policy(file, (void __user *)arg); case CEPH_IOC_GET_DATALOC: return ceph_ioctl_get_dataloc(file, (void __user *)arg); case CEPH_IOC_LAZYIO: return ceph_ioctl_lazyio(file); case CEPH_IOC_SYNCIO: return ceph_ioctl_syncio(file); } return -ENOTTY; }
gpl-2.0
nobooya/e975-kk-kernel
net/netfilter/ipset/ip_set_getport.c
7308
3479
/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/* Get Layer-4 data from the packets */

#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/sctp.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include <linux/netfilter/ipset/ip_set_getport.h>
#include <linux/export.h>

/* We must handle non-linear skbs */

/*
 * Extract the layer-4 "port" at @protooff for @protocol into *@port
 * (source or destination selected by @src) and record the protocol in
 * *@proto.  For ICMP/ICMPv6 the type/code pair is packed into the port
 * slot instead.  Uses skb_header_pointer() throughout so fragmented /
 * non-linear skbs are handled.  Returns false only when the header
 * bytes cannot be fetched; unknown protocols fall through and still
 * return true with *port untouched.
 */
static bool
get_port(const struct sk_buff *skb, int protocol, unsigned int protooff,
	 bool src, __be16 *port, u8 *proto)
{
	switch (protocol) {
	case IPPROTO_TCP: {
		struct tcphdr _tcph;
		const struct tcphdr *th;

		th = skb_header_pointer(skb, protooff, sizeof(_tcph), &_tcph);
		if (th == NULL)
			/* No choice either */
			return false;

		*port = src ? th->source : th->dest;
		break;
	}
	case IPPROTO_SCTP: {
		sctp_sctphdr_t _sh;
		const sctp_sctphdr_t *sh;

		sh = skb_header_pointer(skb, protooff, sizeof(_sh), &_sh);
		if (sh == NULL)
			/* No choice either */
			return false;

		*port = src ? sh->source : sh->dest;
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE: {
		struct udphdr _udph;
		const struct udphdr *uh;

		uh = skb_header_pointer(skb, protooff, sizeof(_udph), &_udph);
		if (uh == NULL)
			/* No choice either */
			return false;

		*port = src ? uh->source : uh->dest;
		break;
	}
	case IPPROTO_ICMP: {
		struct icmphdr _ich;
		const struct icmphdr *ic;

		ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich);
		if (ic == NULL)
			return false;

		/* pack type/code into the 16-bit port slot */
		*port = (__force __be16)htons((ic->type << 8) | ic->code);
		break;
	}
	case IPPROTO_ICMPV6: {
		struct icmp6hdr _ich;
		const struct icmp6hdr *ic;

		ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich);
		if (ic == NULL)
			return false;

		/* pack type/code into the 16-bit port slot */
		*port = (__force __be16)
			htons((ic->icmp6_type << 8) | ic->icmp6_code);
		break;
	}
	default:
		break;
	}
	*proto = protocol;

	return true;
}

/*
 * IPv4 wrapper: skip fragments that are not the first one (no L4
 * header there) and dispatch to get_port() past the IP header.
 */
bool
ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
		    __be16 *port, u8 *proto)
{
	const struct iphdr *iph = ip_hdr(skb);
	unsigned int protooff = ip_hdrlen(skb);
	int protocol = iph->protocol;

	/* See comments at tcp_match in ip_tables.c */
	if (protocol <= 0 || (ntohs(iph->frag_off) & IP_OFFSET))
		return false;

	return get_port(skb, protocol, protooff, src, port, proto);
}
EXPORT_SYMBOL_GPL(ip_set_get_ip4_port);

#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
/*
 * IPv6 wrapper: walk the extension-header chain to find the upper-layer
 * protocol and its offset, then dispatch to get_port().
 */
bool
ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
		    __be16 *port, u8 *proto)
{
	int protoff;
	u8 nexthdr;
	__be16 frag_off;

	nexthdr = ipv6_hdr(skb)->nexthdr;
	protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
				   &frag_off);
	if (protoff < 0)
		return false;

	return get_port(skb, nexthdr, protoff, src, port, proto);
}
EXPORT_SYMBOL_GPL(ip_set_get_ip6_port);
#endif

/*
 * Family-dispatching helper: fetch the port for IPv4 or IPv6 (@pf),
 * then accept only TCP and UDP — all other protocols return false.
 */
bool
ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src, __be16 *port)
{
	bool ret;
	u8 proto;

	switch (pf) {
	case NFPROTO_IPV4:
		ret = ip_set_get_ip4_port(skb, src, port, &proto);
		break;
	case NFPROTO_IPV6:
		ret = ip_set_get_ip6_port(skb, src, port, &proto);
		break;
	default:
		return false;
	}
	if (!ret)
		return ret;
	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		return true;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(ip_set_get_ip_port);
gpl-2.0
TeamSPR/kernel
block/partitions/osf.c
12940
1925
/* * fs/partitions/osf.c * * Code extracted from drivers/block/genhd.c * * Copyright (C) 1991-1998 Linus Torvalds * Re-organised Feb 1998 Russell King */ #include "check.h" #include "osf.h" #define MAX_OSF_PARTITIONS 18 int osf_partition(struct parsed_partitions *state) { int i; int slot = 1; unsigned int npartitions; Sector sect; unsigned char *data; struct disklabel { __le32 d_magic; __le16 d_type,d_subtype; u8 d_typename[16]; u8 d_packname[16]; __le32 d_secsize; __le32 d_nsectors; __le32 d_ntracks; __le32 d_ncylinders; __le32 d_secpercyl; __le32 d_secprtunit; __le16 d_sparespertrack; __le16 d_sparespercyl; __le32 d_acylinders; __le16 d_rpm, d_interleave, d_trackskew, d_cylskew; __le32 d_headswitch, d_trkseek, d_flags; __le32 d_drivedata[5]; __le32 d_spare[5]; __le32 d_magic2; __le16 d_checksum; __le16 d_npartitions; __le32 d_bbsize, d_sbsize; struct d_partition { __le32 p_size; __le32 p_offset; __le32 p_fsize; u8 p_fstype; u8 p_frag; __le16 p_cpg; } d_partitions[MAX_OSF_PARTITIONS]; } * label; struct d_partition * partition; data = read_part_sector(state, 0, &sect); if (!data) return -1; label = (struct disklabel *) (data+64); partition = label->d_partitions; if (le32_to_cpu(label->d_magic) != DISKLABELMAGIC) { put_dev_sector(sect); return 0; } if (le32_to_cpu(label->d_magic2) != DISKLABELMAGIC) { put_dev_sector(sect); return 0; } npartitions = le16_to_cpu(label->d_npartitions); if (npartitions > MAX_OSF_PARTITIONS) { put_dev_sector(sect); return 0; } for (i = 0 ; i < npartitions; i++, partition++) { if (slot == state->limit) break; if (le32_to_cpu(partition->p_size)) put_partition(state, slot, le32_to_cpu(partition->p_offset), le32_to_cpu(partition->p_size)); slot++; } strlcat(state->pp_buf, "\n", PAGE_SIZE); put_dev_sector(sect); return 1; }
gpl-2.0
not404/xbox-linux
drivers/net/myri_sbus.c
141
30526
/* myri_sbus.c: MyriCOM MyriNET SBUS card driver. * * Copyright (C) 1996, 1999, 2006, 2008 David S. Miller (davem@davemloft.net) */ static char version[] = "myri_sbus.c:v2.0 June 23, 2006 David S. Miller (davem@davemloft.net)\n"; #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/bitops.h> #include <linux/dma-mapping.h> #include <linux/of.h> #include <linux/of_device.h> #include <net/dst.h> #include <net/arp.h> #include <net/sock.h> #include <net/ipv6.h> #include <asm/system.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/byteorder.h> #include <asm/idprom.h> #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/auxio.h> #include <asm/pgtable.h> #include <asm/irq.h> #include "myri_sbus.h" #include "myri_code.h" /* #define DEBUG_DETECT */ /* #define DEBUG_IRQ */ /* #define DEBUG_TRANSMIT */ /* #define DEBUG_RECEIVE */ /* #define DEBUG_HEADER */ #ifdef DEBUG_DETECT #define DET(x) printk x #else #define DET(x) #endif #ifdef DEBUG_IRQ #define DIRQ(x) printk x #else #define DIRQ(x) #endif #ifdef DEBUG_TRANSMIT #define DTX(x) printk x #else #define DTX(x) #endif #ifdef DEBUG_RECEIVE #define DRX(x) printk x #else #define DRX(x) #endif #ifdef DEBUG_HEADER #define DHDR(x) printk x #else #define DHDR(x) #endif static void myri_reset_off(void __iomem *lp, void __iomem *cregs) { /* Clear IRQ mask. */ sbus_writel(0, lp + LANAI_EIMASK); /* Turn RESET function off. */ sbus_writel(CONTROL_ROFF, cregs + MYRICTRL_CTRL); } static void myri_reset_on(void __iomem *cregs) { /* Enable RESET function. */ sbus_writel(CONTROL_RON, cregs + MYRICTRL_CTRL); /* Disable IRQ's. 
*/ sbus_writel(CONTROL_DIRQ, cregs + MYRICTRL_CTRL); } static void myri_disable_irq(void __iomem *lp, void __iomem *cregs) { sbus_writel(CONTROL_DIRQ, cregs + MYRICTRL_CTRL); sbus_writel(0, lp + LANAI_EIMASK); sbus_writel(ISTAT_HOST, lp + LANAI_ISTAT); } static void myri_enable_irq(void __iomem *lp, void __iomem *cregs) { sbus_writel(CONTROL_EIRQ, cregs + MYRICTRL_CTRL); sbus_writel(ISTAT_HOST, lp + LANAI_EIMASK); } static inline void bang_the_chip(struct myri_eth *mp) { struct myri_shmem __iomem *shmem = mp->shmem; void __iomem *cregs = mp->cregs; sbus_writel(1, &shmem->send); sbus_writel(CONTROL_WON, cregs + MYRICTRL_CTRL); } static int myri_do_handshake(struct myri_eth *mp) { struct myri_shmem __iomem *shmem = mp->shmem; void __iomem *cregs = mp->cregs; struct myri_channel __iomem *chan = &shmem->channel; int tick = 0; DET(("myri_do_handshake: ")); if (sbus_readl(&chan->state) == STATE_READY) { DET(("Already STATE_READY, failed.\n")); return -1; /* We're hosed... */ } myri_disable_irq(mp->lregs, cregs); while (tick++ < 25) { u32 softstate; /* Wake it up. 
*/ DET(("shakedown, CONTROL_WON, ")); sbus_writel(1, &shmem->shakedown); sbus_writel(CONTROL_WON, cregs + MYRICTRL_CTRL); softstate = sbus_readl(&chan->state); DET(("chanstate[%08x] ", softstate)); if (softstate == STATE_READY) { DET(("wakeup successful, ")); break; } if (softstate != STATE_WFN) { DET(("not WFN setting that, ")); sbus_writel(STATE_WFN, &chan->state); } udelay(20); } myri_enable_irq(mp->lregs, cregs); if (tick > 25) { DET(("25 ticks we lose, failure.\n")); return -1; } DET(("success\n")); return 0; } static int __devinit myri_load_lanai(struct myri_eth *mp) { struct net_device *dev = mp->dev; struct myri_shmem __iomem *shmem = mp->shmem; void __iomem *rptr; int i; myri_disable_irq(mp->lregs, mp->cregs); myri_reset_on(mp->cregs); rptr = mp->lanai; for (i = 0; i < mp->eeprom.ramsz; i++) sbus_writeb(0, rptr + i); if (mp->eeprom.cpuvers >= CPUVERS_3_0) sbus_writel(mp->eeprom.cval, mp->lregs + LANAI_CVAL); /* Load executable code. */ for (i = 0; i < sizeof(lanai4_code); i++) sbus_writeb(lanai4_code[i], rptr + (lanai4_code_off * 2) + i); /* Load data segment. */ for (i = 0; i < sizeof(lanai4_data); i++) sbus_writeb(lanai4_data[i], rptr + (lanai4_data_off * 2) + i); /* Set device address. */ sbus_writeb(0, &shmem->addr[0]); sbus_writeb(0, &shmem->addr[1]); for (i = 0; i < 6; i++) sbus_writeb(dev->dev_addr[i], &shmem->addr[i + 2]); /* Set SBUS bursts and interrupt mask. */ sbus_writel(((mp->myri_bursts & 0xf8) >> 3), &shmem->burst); sbus_writel(SHMEM_IMASK_RX, &shmem->imask); /* Release the LANAI. */ myri_disable_irq(mp->lregs, mp->cregs); myri_reset_off(mp->lregs, mp->cregs); myri_disable_irq(mp->lregs, mp->cregs); /* Wait for the reset to complete. 
*/ for (i = 0; i < 5000; i++) { if (sbus_readl(&shmem->channel.state) != STATE_READY) break; else udelay(10); } if (i == 5000) printk(KERN_ERR "myricom: Chip would not reset after firmware load.\n"); i = myri_do_handshake(mp); if (i) printk(KERN_ERR "myricom: Handshake with LANAI failed.\n"); if (mp->eeprom.cpuvers == CPUVERS_4_0) sbus_writel(0, mp->lregs + LANAI_VERS); return i; } static void myri_clean_rings(struct myri_eth *mp) { struct sendq __iomem *sq = mp->sq; struct recvq __iomem *rq = mp->rq; int i; sbus_writel(0, &rq->tail); sbus_writel(0, &rq->head); for (i = 0; i < (RX_RING_SIZE+1); i++) { if (mp->rx_skbs[i] != NULL) { struct myri_rxd __iomem *rxd = &rq->myri_rxd[i]; u32 dma_addr; dma_addr = sbus_readl(&rxd->myri_scatters[0].addr); dma_unmap_single(&mp->myri_op->dev, dma_addr, RX_ALLOC_SIZE, DMA_FROM_DEVICE); dev_kfree_skb(mp->rx_skbs[i]); mp->rx_skbs[i] = NULL; } } mp->tx_old = 0; sbus_writel(0, &sq->tail); sbus_writel(0, &sq->head); for (i = 0; i < TX_RING_SIZE; i++) { if (mp->tx_skbs[i] != NULL) { struct sk_buff *skb = mp->tx_skbs[i]; struct myri_txd __iomem *txd = &sq->myri_txd[i]; u32 dma_addr; dma_addr = sbus_readl(&txd->myri_gathers[0].addr); dma_unmap_single(&mp->myri_op->dev, dma_addr, (skb->len + 3) & ~3, DMA_TO_DEVICE); dev_kfree_skb(mp->tx_skbs[i]); mp->tx_skbs[i] = NULL; } } } static void myri_init_rings(struct myri_eth *mp, int from_irq) { struct recvq __iomem *rq = mp->rq; struct myri_rxd __iomem *rxd = &rq->myri_rxd[0]; struct net_device *dev = mp->dev; gfp_t gfp_flags = GFP_KERNEL; int i; if (from_irq || in_interrupt()) gfp_flags = GFP_ATOMIC; myri_clean_rings(mp); for (i = 0; i < RX_RING_SIZE; i++) { struct sk_buff *skb = myri_alloc_skb(RX_ALLOC_SIZE, gfp_flags); u32 dma_addr; if (!skb) continue; mp->rx_skbs[i] = skb; skb->dev = dev; skb_put(skb, RX_ALLOC_SIZE); dma_addr = dma_map_single(&mp->myri_op->dev, skb->data, RX_ALLOC_SIZE, DMA_FROM_DEVICE); sbus_writel(dma_addr, &rxd[i].myri_scatters[0].addr); sbus_writel(RX_ALLOC_SIZE, 
&rxd[i].myri_scatters[0].len); sbus_writel(i, &rxd[i].ctx); sbus_writel(1, &rxd[i].num_sg); } sbus_writel(0, &rq->head); sbus_writel(RX_RING_SIZE, &rq->tail); } static int myri_init(struct myri_eth *mp, int from_irq) { myri_init_rings(mp, from_irq); return 0; } static void myri_is_not_so_happy(struct myri_eth *mp) { } #ifdef DEBUG_HEADER static void dump_ehdr(struct ethhdr *ehdr) { printk("ehdr[h_dst(%pM)" "h_source(%pM)" "h_proto(%04x)]\n", ehdr->h_dest, ehdr->h_source, ehdr->h_proto); } static void dump_ehdr_and_myripad(unsigned char *stuff) { struct ethhdr *ehdr = (struct ethhdr *) (stuff + 2); printk("pad[%02x:%02x]", stuff[0], stuff[1]); dump_ehdr(ehdr); } #endif static void myri_tx(struct myri_eth *mp, struct net_device *dev) { struct sendq __iomem *sq= mp->sq; int entry = mp->tx_old; int limit = sbus_readl(&sq->head); DTX(("entry[%d] limit[%d] ", entry, limit)); if (entry == limit) return; while (entry != limit) { struct sk_buff *skb = mp->tx_skbs[entry]; u32 dma_addr; DTX(("SKB[%d] ", entry)); dma_addr = sbus_readl(&sq->myri_txd[entry].myri_gathers[0].addr); dma_unmap_single(&mp->myri_op->dev, dma_addr, skb->len, DMA_TO_DEVICE); dev_kfree_skb(skb); mp->tx_skbs[entry] = NULL; dev->stats.tx_packets++; entry = NEXT_TX(entry); } mp->tx_old = entry; } /* Determine the packet's protocol ID. The rule here is that we * assume 802.3 if the type field is short enough to be a length. * This is normal practice and works for any 'now in use' protocol. 
*/ static __be16 myri_type_trans(struct sk_buff *skb, struct net_device *dev) { struct ethhdr *eth; unsigned char *rawp; skb_set_mac_header(skb, MYRI_PAD_LEN); skb_pull(skb, dev->hard_header_len); eth = eth_hdr(skb); #ifdef DEBUG_HEADER DHDR(("myri_type_trans: ")); dump_ehdr(eth); #endif if (*eth->h_dest & 1) { if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN)==0) skb->pkt_type = PACKET_BROADCAST; else skb->pkt_type = PACKET_MULTICAST; } else if (dev->flags & (IFF_PROMISC|IFF_ALLMULTI)) { if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN)) skb->pkt_type = PACKET_OTHERHOST; } if (ntohs(eth->h_proto) >= 1536) return eth->h_proto; rawp = skb->data; /* This is a magic hack to spot IPX packets. Older Novell breaks * the protocol design and runs IPX over 802.3 without an 802.2 LLC * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This * won't work for fault tolerant netware but does for the rest. */ if (*(unsigned short *)rawp == 0xFFFF) return htons(ETH_P_802_3); /* Real 802.2 LLC */ return htons(ETH_P_802_2); } static void myri_rx(struct myri_eth *mp, struct net_device *dev) { struct recvq __iomem *rq = mp->rq; struct recvq __iomem *rqa = mp->rqack; int entry = sbus_readl(&rqa->head); int limit = sbus_readl(&rqa->tail); int drops; DRX(("entry[%d] limit[%d] ", entry, limit)); if (entry == limit) return; drops = 0; DRX(("\n")); while (entry != limit) { struct myri_rxd __iomem *rxdack = &rqa->myri_rxd[entry]; u32 csum = sbus_readl(&rxdack->csum); int len = sbus_readl(&rxdack->myri_scatters[0].len); int index = sbus_readl(&rxdack->ctx); struct myri_rxd __iomem *rxd = &rq->myri_rxd[sbus_readl(&rq->tail)]; struct sk_buff *skb = mp->rx_skbs[index]; /* Ack it. */ sbus_writel(NEXT_RX(entry), &rqa->head); /* Check for errors. 
*/ DRX(("rxd[%d]: %p len[%d] csum[%08x] ", entry, rxd, len, csum)); dma_sync_single_for_cpu(&mp->myri_op->dev, sbus_readl(&rxd->myri_scatters[0].addr), RX_ALLOC_SIZE, DMA_FROM_DEVICE); if (len < (ETH_HLEN + MYRI_PAD_LEN) || (skb->data[0] != MYRI_PAD_LEN)) { DRX(("ERROR[")); dev->stats.rx_errors++; if (len < (ETH_HLEN + MYRI_PAD_LEN)) { DRX(("BAD_LENGTH] ")); dev->stats.rx_length_errors++; } else { DRX(("NO_PADDING] ")); dev->stats.rx_frame_errors++; } /* Return it to the LANAI. */ drop_it: drops++; DRX(("DROP ")); dev->stats.rx_dropped++; dma_sync_single_for_device(&mp->myri_op->dev, sbus_readl(&rxd->myri_scatters[0].addr), RX_ALLOC_SIZE, DMA_FROM_DEVICE); sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len); sbus_writel(index, &rxd->ctx); sbus_writel(1, &rxd->num_sg); sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail); goto next; } DRX(("len[%d] ", len)); if (len > RX_COPY_THRESHOLD) { struct sk_buff *new_skb; u32 dma_addr; DRX(("BIGBUFF ")); new_skb = myri_alloc_skb(RX_ALLOC_SIZE, GFP_ATOMIC); if (new_skb == NULL) { DRX(("skb_alloc(FAILED) ")); goto drop_it; } dma_unmap_single(&mp->myri_op->dev, sbus_readl(&rxd->myri_scatters[0].addr), RX_ALLOC_SIZE, DMA_FROM_DEVICE); mp->rx_skbs[index] = new_skb; new_skb->dev = dev; skb_put(new_skb, RX_ALLOC_SIZE); dma_addr = dma_map_single(&mp->myri_op->dev, new_skb->data, RX_ALLOC_SIZE, DMA_FROM_DEVICE); sbus_writel(dma_addr, &rxd->myri_scatters[0].addr); sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len); sbus_writel(index, &rxd->ctx); sbus_writel(1, &rxd->num_sg); sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail); /* Trim the original skb for the netif. */ DRX(("trim(%d) ", len)); skb_trim(skb, len); } else { struct sk_buff *copy_skb = dev_alloc_skb(len); DRX(("SMALLBUFF ")); if (copy_skb == NULL) { DRX(("dev_alloc_skb(FAILED) ")); goto drop_it; } /* DMA sync already done above. 
*/ copy_skb->dev = dev; DRX(("resv_and_put ")); skb_put(copy_skb, len); skb_copy_from_linear_data(skb, copy_skb->data, len); /* Reuse original ring buffer. */ DRX(("reuse ")); dma_sync_single_for_device(&mp->myri_op->dev, sbus_readl(&rxd->myri_scatters[0].addr), RX_ALLOC_SIZE, DMA_FROM_DEVICE); sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len); sbus_writel(index, &rxd->ctx); sbus_writel(1, &rxd->num_sg); sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail); skb = copy_skb; } /* Just like the happy meal we get checksums from this card. */ skb->csum = csum; skb->ip_summed = CHECKSUM_UNNECESSARY; /* XXX */ skb->protocol = myri_type_trans(skb, dev); DRX(("prot[%04x] netif_rx ", skb->protocol)); netif_rx(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += len; next: DRX(("NEXT\n")); entry = NEXT_RX(entry); } } static irqreturn_t myri_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *) dev_id; struct myri_eth *mp = netdev_priv(dev); void __iomem *lregs = mp->lregs; struct myri_channel __iomem *chan = &mp->shmem->channel; unsigned long flags; u32 status; int handled = 0; spin_lock_irqsave(&mp->irq_lock, flags); status = sbus_readl(lregs + LANAI_ISTAT); DIRQ(("myri_interrupt: status[%08x] ", status)); if (status & ISTAT_HOST) { u32 softstate; handled = 1; DIRQ(("IRQ_DISAB ")); myri_disable_irq(lregs, mp->cregs); softstate = sbus_readl(&chan->state); DIRQ(("state[%08x] ", softstate)); if (softstate != STATE_READY) { DIRQ(("myri_not_so_happy ")); myri_is_not_so_happy(mp); } DIRQ(("\nmyri_rx: ")); myri_rx(mp, dev); DIRQ(("\nistat=ISTAT_HOST ")); sbus_writel(ISTAT_HOST, lregs + LANAI_ISTAT); DIRQ(("IRQ_ENAB ")); myri_enable_irq(lregs, mp->cregs); } DIRQ(("\n")); spin_unlock_irqrestore(&mp->irq_lock, flags); return IRQ_RETVAL(handled); } static int myri_open(struct net_device *dev) { struct myri_eth *mp = netdev_priv(dev); return myri_init(mp, in_interrupt()); } static int myri_close(struct net_device *dev) { struct myri_eth *mp = 
netdev_priv(dev); myri_clean_rings(mp); return 0; } static void myri_tx_timeout(struct net_device *dev) { struct myri_eth *mp = netdev_priv(dev); printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); dev->stats.tx_errors++; myri_init(mp, 0); netif_wake_queue(dev); } static int myri_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct myri_eth *mp = netdev_priv(dev); struct sendq __iomem *sq = mp->sq; struct myri_txd __iomem *txd; unsigned long flags; unsigned int head, tail; int len, entry; u32 dma_addr; DTX(("myri_start_xmit: ")); myri_tx(mp, dev); netif_stop_queue(dev); /* This is just to prevent multiple PIO reads for TX_BUFFS_AVAIL. */ head = sbus_readl(&sq->head); tail = sbus_readl(&sq->tail); if (!TX_BUFFS_AVAIL(head, tail)) { DTX(("no buffs available, returning 1\n")); return 1; } spin_lock_irqsave(&mp->irq_lock, flags); DHDR(("xmit[skbdata(%p)]\n", skb->data)); #ifdef DEBUG_HEADER dump_ehdr_and_myripad(((unsigned char *) skb->data)); #endif /* XXX Maybe this can go as well. */ len = skb->len; if (len & 3) { DTX(("len&3 ")); len = (len + 4) & (~3); } entry = sbus_readl(&sq->tail); txd = &sq->myri_txd[entry]; mp->tx_skbs[entry] = skb; /* Must do this before we sbus map it. 
*/ if (skb->data[MYRI_PAD_LEN] & 0x1) { sbus_writew(0xffff, &txd->addr[0]); sbus_writew(0xffff, &txd->addr[1]); sbus_writew(0xffff, &txd->addr[2]); sbus_writew(0xffff, &txd->addr[3]); } else { sbus_writew(0xffff, &txd->addr[0]); sbus_writew((skb->data[0] << 8) | skb->data[1], &txd->addr[1]); sbus_writew((skb->data[2] << 8) | skb->data[3], &txd->addr[2]); sbus_writew((skb->data[4] << 8) | skb->data[5], &txd->addr[3]); } dma_addr = dma_map_single(&mp->myri_op->dev, skb->data, len, DMA_TO_DEVICE); sbus_writel(dma_addr, &txd->myri_gathers[0].addr); sbus_writel(len, &txd->myri_gathers[0].len); sbus_writel(1, &txd->num_sg); sbus_writel(KERNEL_CHANNEL, &txd->chan); sbus_writel(len, &txd->len); sbus_writel((u32)-1, &txd->csum_off); sbus_writel(0, &txd->csum_field); sbus_writel(NEXT_TX(entry), &sq->tail); DTX(("BangTheChip ")); bang_the_chip(mp); DTX(("tbusy=0, returning 0\n")); netif_start_queue(dev); spin_unlock_irqrestore(&mp->irq_lock, flags); return 0; } /* Create the MyriNet MAC header for an arbitrary protocol layer * * saddr=NULL means use device source address * daddr=NULL means leave destination address (eg unresolved arp) */ static int myri_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len) { struct ethhdr *eth = (struct ethhdr *) skb_push(skb, ETH_HLEN); unsigned char *pad = (unsigned char *) skb_push(skb, MYRI_PAD_LEN); #ifdef DEBUG_HEADER DHDR(("myri_header: pad[%02x,%02x] ", pad[0], pad[1])); dump_ehdr(eth); #endif /* Set the MyriNET padding identifier. */ pad[0] = MYRI_PAD_LEN; pad[1] = 0xab; /* Set the protocol type. For a packet of type ETH_P_802_3 we put the length * in here instead. It is up to the 802.2 layer to carry protocol information. */ if (type != ETH_P_802_3) eth->h_proto = htons(type); else eth->h_proto = htons(len); /* Set the source hardware address. 
*/ if (saddr) memcpy(eth->h_source, saddr, dev->addr_len); else memcpy(eth->h_source, dev->dev_addr, dev->addr_len); /* Anyway, the loopback-device should never use this function... */ if (dev->flags & IFF_LOOPBACK) { int i; for (i = 0; i < dev->addr_len; i++) eth->h_dest[i] = 0; return(dev->hard_header_len); } if (daddr) { memcpy(eth->h_dest, daddr, dev->addr_len); return dev->hard_header_len; } return -dev->hard_header_len; } /* Rebuild the MyriNet MAC header. This is called after an ARP * (or in future other address resolution) has completed on this * sk_buff. We now let ARP fill in the other fields. */ static int myri_rebuild_header(struct sk_buff *skb) { unsigned char *pad = (unsigned char *) skb->data; struct ethhdr *eth = (struct ethhdr *) (pad + MYRI_PAD_LEN); struct net_device *dev = skb->dev; #ifdef DEBUG_HEADER DHDR(("myri_rebuild_header: pad[%02x,%02x] ", pad[0], pad[1])); dump_ehdr(eth); #endif /* Refill MyriNet padding identifiers, this is just being anal. */ pad[0] = MYRI_PAD_LEN; pad[1] = 0xab; switch (eth->h_proto) { #ifdef CONFIG_INET case __constant_htons(ETH_P_IP): return arp_find(eth->h_dest, skb); #endif default: printk(KERN_DEBUG "%s: unable to resolve type %X addresses.\n", dev->name, (int)eth->h_proto); memcpy(eth->h_source, dev->dev_addr, dev->addr_len); return 0; break; } return 0; } static int myri_header_cache(const struct neighbour *neigh, struct hh_cache *hh) { unsigned short type = hh->hh_type; unsigned char *pad; struct ethhdr *eth; const struct net_device *dev = neigh->dev; pad = ((unsigned char *) hh->hh_data) + HH_DATA_OFF(sizeof(*eth) + MYRI_PAD_LEN); eth = (struct ethhdr *) (pad + MYRI_PAD_LEN); if (type == htons(ETH_P_802_3)) return -1; /* Refill MyriNet padding identifiers, this is just being anal. 
*/ pad[0] = MYRI_PAD_LEN; pad[1] = 0xab; eth->h_proto = type; memcpy(eth->h_source, dev->dev_addr, dev->addr_len); memcpy(eth->h_dest, neigh->ha, dev->addr_len); hh->hh_len = 16; return 0; } /* Called by Address Resolution module to notify changes in address. */ void myri_header_cache_update(struct hh_cache *hh, const struct net_device *dev, const unsigned char * haddr) { memcpy(((u8*)hh->hh_data) + HH_DATA_OFF(sizeof(struct ethhdr)), haddr, dev->addr_len); } static int myri_change_mtu(struct net_device *dev, int new_mtu) { if ((new_mtu < (ETH_HLEN + MYRI_PAD_LEN)) || (new_mtu > MYRINET_MTU)) return -EINVAL; dev->mtu = new_mtu; return 0; } static void myri_set_multicast(struct net_device *dev) { /* Do nothing, all MyriCOM nodes transmit multicast frames * as broadcast packets... */ } static inline void set_boardid_from_idprom(struct myri_eth *mp, int num) { mp->eeprom.id[0] = 0; mp->eeprom.id[1] = idprom->id_machtype; mp->eeprom.id[2] = (idprom->id_sernum >> 16) & 0xff; mp->eeprom.id[3] = (idprom->id_sernum >> 8) & 0xff; mp->eeprom.id[4] = (idprom->id_sernum >> 0) & 0xff; mp->eeprom.id[5] = num; } static inline void determine_reg_space_size(struct myri_eth *mp) { switch(mp->eeprom.cpuvers) { case CPUVERS_2_3: case CPUVERS_3_0: case CPUVERS_3_1: case CPUVERS_3_2: mp->reg_size = (3 * 128 * 1024) + 4096; break; case CPUVERS_4_0: case CPUVERS_4_1: mp->reg_size = ((4096<<1) + mp->eeprom.ramsz); break; case CPUVERS_4_2: case CPUVERS_5_0: default: printk("myricom: AIEEE weird cpu version %04x assuming pre4.0\n", mp->eeprom.cpuvers); mp->reg_size = (3 * 128 * 1024) + 4096; }; } #ifdef DEBUG_DETECT static void dump_eeprom(struct myri_eth *mp) { printk("EEPROM: clockval[%08x] cpuvers[%04x] " "id[%02x,%02x,%02x,%02x,%02x,%02x]\n", mp->eeprom.cval, mp->eeprom.cpuvers, mp->eeprom.id[0], mp->eeprom.id[1], mp->eeprom.id[2], mp->eeprom.id[3], mp->eeprom.id[4], mp->eeprom.id[5]); printk("EEPROM: ramsz[%08x]\n", mp->eeprom.ramsz); printk("EEPROM: 
fvers[%02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n", mp->eeprom.fvers[0], mp->eeprom.fvers[1], mp->eeprom.fvers[2], mp->eeprom.fvers[3], mp->eeprom.fvers[4], mp->eeprom.fvers[5], mp->eeprom.fvers[6], mp->eeprom.fvers[7]); printk("EEPROM: %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n", mp->eeprom.fvers[8], mp->eeprom.fvers[9], mp->eeprom.fvers[10], mp->eeprom.fvers[11], mp->eeprom.fvers[12], mp->eeprom.fvers[13], mp->eeprom.fvers[14], mp->eeprom.fvers[15]); printk("EEPROM: %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n", mp->eeprom.fvers[16], mp->eeprom.fvers[17], mp->eeprom.fvers[18], mp->eeprom.fvers[19], mp->eeprom.fvers[20], mp->eeprom.fvers[21], mp->eeprom.fvers[22], mp->eeprom.fvers[23]); printk("EEPROM: %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x]\n", mp->eeprom.fvers[24], mp->eeprom.fvers[25], mp->eeprom.fvers[26], mp->eeprom.fvers[27], mp->eeprom.fvers[28], mp->eeprom.fvers[29], mp->eeprom.fvers[30], mp->eeprom.fvers[31]); printk("EEPROM: mvers[%02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n", mp->eeprom.mvers[0], mp->eeprom.mvers[1], mp->eeprom.mvers[2], mp->eeprom.mvers[3], mp->eeprom.mvers[4], mp->eeprom.mvers[5], mp->eeprom.mvers[6], mp->eeprom.mvers[7]); printk("EEPROM: %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x]\n", mp->eeprom.mvers[8], mp->eeprom.mvers[9], mp->eeprom.mvers[10], mp->eeprom.mvers[11], mp->eeprom.mvers[12], mp->eeprom.mvers[13], mp->eeprom.mvers[14], mp->eeprom.mvers[15]); printk("EEPROM: dlval[%04x] brd_type[%04x] bus_type[%04x] prod_code[%04x]\n", mp->eeprom.dlval, mp->eeprom.brd_type, mp->eeprom.bus_type, mp->eeprom.prod_code); printk("EEPROM: serial_num[%08x]\n", mp->eeprom.serial_num); } #endif static const struct header_ops myri_header_ops = { .create = myri_header, .rebuild = myri_rebuild_header, .cache = myri_header_cache, .cache_update = myri_header_cache_update, }; static int __devinit myri_sbus_probe(struct of_device *op, const struct of_device_id *match) { struct device_node *dp = op->node; static unsigned version_printed; struct net_device *dev; 
struct myri_eth *mp; const void *prop; static int num; int i, len; DET(("myri_ether_init(%p,%d):\n", op, num)); dev = alloc_etherdev(sizeof(struct myri_eth)); if (!dev) return -ENOMEM; if (version_printed++ == 0) printk(version); SET_NETDEV_DEV(dev, &op->dev); mp = netdev_priv(dev); spin_lock_init(&mp->irq_lock); mp->myri_op = op; /* Clean out skb arrays. */ for (i = 0; i < (RX_RING_SIZE + 1); i++) mp->rx_skbs[i] = NULL; for (i = 0; i < TX_RING_SIZE; i++) mp->tx_skbs[i] = NULL; /* First check for EEPROM information. */ prop = of_get_property(dp, "myrinet-eeprom-info", &len); if (prop) memcpy(&mp->eeprom, prop, sizeof(struct myri_eeprom)); if (!prop) { /* No eeprom property, must cook up the values ourselves. */ DET(("No EEPROM: ")); mp->eeprom.bus_type = BUS_TYPE_SBUS; mp->eeprom.cpuvers = of_getintprop_default(dp, "cpu_version", 0); mp->eeprom.cval = of_getintprop_default(dp, "clock_value", 0); mp->eeprom.ramsz = of_getintprop_default(dp, "sram_size", 0); if (!mp->eeprom.cpuvers) mp->eeprom.cpuvers = CPUVERS_2_3; if (mp->eeprom.cpuvers < CPUVERS_3_0) mp->eeprom.cval = 0; if (!mp->eeprom.ramsz) mp->eeprom.ramsz = (128 * 1024); prop = of_get_property(dp, "myrinet-board-id", &len); if (prop) memcpy(&mp->eeprom.id[0], prop, 6); else set_boardid_from_idprom(mp, num); prop = of_get_property(dp, "fpga_version", &len); if (prop) memcpy(&mp->eeprom.fvers[0], prop, 32); else memset(&mp->eeprom.fvers[0], 0, 32); if (mp->eeprom.cpuvers == CPUVERS_4_1) { if (mp->eeprom.ramsz == (128 * 1024)) mp->eeprom.ramsz = (256 * 1024); if ((mp->eeprom.cval == 0x40414041) || (mp->eeprom.cval == 0x90449044)) mp->eeprom.cval = 0x50e450e4; } } #ifdef DEBUG_DETECT dump_eeprom(mp); #endif for (i = 0; i < 6; i++) dev->dev_addr[i] = mp->eeprom.id[i]; determine_reg_space_size(mp); /* Map in the MyriCOM register/localram set. */ if (mp->eeprom.cpuvers < CPUVERS_4_0) { /* XXX Makes no sense, if control reg is non-existant this * XXX driver cannot function at all... 
maybe pre-4.0 is * XXX only a valid version for PCI cards? Ask feldy... */ DET(("Mapping regs for cpuvers < CPUVERS_4_0\n")); mp->regs = of_ioremap(&op->resource[0], 0, mp->reg_size, "MyriCOM Regs"); if (!mp->regs) { printk("MyriCOM: Cannot map MyriCOM registers.\n"); goto err; } mp->lanai = mp->regs + (256 * 1024); mp->lregs = mp->lanai + (0x10000 * 2); } else { DET(("Mapping regs for cpuvers >= CPUVERS_4_0\n")); mp->cregs = of_ioremap(&op->resource[0], 0, PAGE_SIZE, "MyriCOM Control Regs"); mp->lregs = of_ioremap(&op->resource[0], (256 * 1024), PAGE_SIZE, "MyriCOM LANAI Regs"); mp->lanai = of_ioremap(&op->resource[0], (512 * 1024), mp->eeprom.ramsz, "MyriCOM SRAM"); } DET(("Registers mapped: cregs[%p] lregs[%p] lanai[%p]\n", mp->cregs, mp->lregs, mp->lanai)); if (mp->eeprom.cpuvers >= CPUVERS_4_0) mp->shmem_base = 0xf000; else mp->shmem_base = 0x8000; DET(("Shared memory base is %04x, ", mp->shmem_base)); mp->shmem = (struct myri_shmem __iomem *) (mp->lanai + (mp->shmem_base * 2)); DET(("shmem mapped at %p\n", mp->shmem)); mp->rqack = &mp->shmem->channel.recvqa; mp->rq = &mp->shmem->channel.recvq; mp->sq = &mp->shmem->channel.sendq; /* Reset the board. */ DET(("Resetting LANAI\n")); myri_reset_off(mp->lregs, mp->cregs); myri_reset_on(mp->cregs); /* Turn IRQ's off. */ myri_disable_irq(mp->lregs, mp->cregs); /* Reset once more. */ myri_reset_on(mp->cregs); /* Get the supported DVMA burst sizes from our SBUS. */ mp->myri_bursts = of_getintprop_default(dp->parent, "burst-sizes", 0x00); if (!sbus_can_burst64()) mp->myri_bursts &= ~(DMA_BURST64); DET(("MYRI bursts %02x\n", mp->myri_bursts)); /* Encode SBUS interrupt level in second control register. 
*/ i = of_getintprop_default(dp, "interrupts", 0); if (i == 0) i = 4; DET(("prom_getint(interrupts)==%d, irqlvl set to %04x\n", i, (1 << i))); sbus_writel((1 << i), mp->cregs + MYRICTRL_IRQLVL); mp->dev = dev; dev->open = &myri_open; dev->stop = &myri_close; dev->hard_start_xmit = &myri_start_xmit; dev->tx_timeout = &myri_tx_timeout; dev->watchdog_timeo = 5*HZ; dev->set_multicast_list = &myri_set_multicast; dev->irq = op->irqs[0]; /* Register interrupt handler now. */ DET(("Requesting MYRIcom IRQ line.\n")); if (request_irq(dev->irq, &myri_interrupt, IRQF_SHARED, "MyriCOM Ethernet", (void *) dev)) { printk("MyriCOM: Cannot register interrupt handler.\n"); goto err; } dev->mtu = MYRINET_MTU; dev->change_mtu = myri_change_mtu; dev->header_ops = &myri_header_ops; dev->hard_header_len = (ETH_HLEN + MYRI_PAD_LEN); /* Load code onto the LANai. */ DET(("Loading LANAI firmware\n")); myri_load_lanai(mp); if (register_netdev(dev)) { printk("MyriCOM: Cannot register device.\n"); goto err_free_irq; } dev_set_drvdata(&op->dev, mp); num++; printk("%s: MyriCOM MyriNET Ethernet %pM\n", dev->name, dev->dev_addr); return 0; err_free_irq: free_irq(dev->irq, dev); err: /* This will also free the co-allocated private data*/ free_netdev(dev); return -ENODEV; } static int __devexit myri_sbus_remove(struct of_device *op) { struct myri_eth *mp = dev_get_drvdata(&op->dev); struct net_device *net_dev = mp->dev; unregister_netdev(net_dev); free_irq(net_dev->irq, net_dev); if (mp->eeprom.cpuvers < CPUVERS_4_0) { of_iounmap(&op->resource[0], mp->regs, mp->reg_size); } else { of_iounmap(&op->resource[0], mp->cregs, PAGE_SIZE); of_iounmap(&op->resource[0], mp->lregs, (256 * 1024)); of_iounmap(&op->resource[0], mp->lanai, (512 * 1024)); } free_netdev(net_dev); dev_set_drvdata(&op->dev, NULL); return 0; } static const struct of_device_id myri_sbus_match[] = { { .name = "MYRICOM,mlanai", }, { .name = "myri", }, {}, }; MODULE_DEVICE_TABLE(of, myri_sbus_match); static struct of_platform_driver 
myri_sbus_driver = {
	/* NOTE(review): continuation of the of_platform_driver declaration
	 * begun on the previous line; only the match table and the driver
	 * callbacks are filled in here.
	 */
	.name		= "myri",
	.match_table	= myri_sbus_match,
	.probe		= myri_sbus_probe,
	.remove		= __devexit_p(myri_sbus_remove),
};

/* Module init: register the driver on the OF/SBUS bus. */
static int __init myri_sbus_init(void)
{
	return of_register_driver(&myri_sbus_driver, &of_bus_type);
}

/* Module exit: unregister the driver. */
static void __exit myri_sbus_exit(void)
{
	of_unregister_driver(&myri_sbus_driver);
}

module_init(myri_sbus_init);
module_exit(myri_sbus_exit);

MODULE_LICENSE("GPL");
gpl-2.0
MeiDahua/htc-kernel-tattoo
drivers/block/aoe/aoecmd.c
141
23106
/* Copyright (c) 2007 Coraid, Inc.  See COPYING for GPL terms. */
/*
 * aoecmd.c
 * Filesystem request handling methods
 */

#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/genhd.h>
#include <linux/moduleparam.h>
#include <net/net_namespace.h>
#include <asm/unaligned.h>
#include "aoe.h"

/* Seconds a frame may wait unanswered before the device is failed
 * (see the rexmit timer's use of this value).
 */
static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");

/* Upper bound on outstanding packets per target MAC. */
static int aoe_maxout = 16;
module_param(aoe_maxout, int, 0644);
MODULE_PARM_DESC(aoe_maxout,
	"Only aoe_maxout outstanding packets for every MAC on eX.Y.");

/*
 * Allocate a @len byte sk_buff set up for AoE use: MAC/network header
 * offsets reset, protocol set to ETH_P_AOE, checksum offload disabled.
 * Returns NULL when alloc_skb() fails (allocation is GFP_ATOMIC).
 */
static struct sk_buff *
new_skb(ulong len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb->protocol = __constant_htons(ETH_P_AOE);
		skb->priority = 0;
		skb->next = skb->prev = NULL;

		/* tell the network layer not to perform IP checksums
		 * or to get the NIC to do it
		 */
		skb->ip_summed = CHECKSUM_NONE;
	}
	return skb;
}

/* Linear scan of the target's frame table for the outstanding frame
 * carrying @tag; NULL if no frame matches.
 */
static struct frame *
getframe(struct aoetgt *t, int tag)
{
	struct frame *f, *e;

	f = t->frames;
	e = f + t->nframes;
	for (; f<e; f++)
		if (f->tag == tag)
			return f;
	return NULL;
}

/*
 * Leave the top bit clear so we have tagspace for userland.
 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
 * This driver reserves tag -1 to mean "unused frame."
*/
/* Compose a new tag: the low 16 bits are the current jiffies tick
 * (consumed later by tsince()/rtt averaging), the next 15 bits a
 * per-target sequence counter; the top bit stays clear for userland
 * (see the comment closed above).
 */
static int
newtag(struct aoetgt *t)
{
	register ulong n;

	n = jiffies & 0xffff;
	return n |= (++t->lasttag & 0x7fff) << 16;
}

/* Fill in the common AoE header of an ATA command destined for target
 * @t of device @d, and return the freshly minted host tag.
 */
static int
aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h)
{
	u32 host_tag = newtag(t);

	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
	memcpy(h->dst, t->addr, sizeof h->dst);
	h->type = __constant_cpu_to_be16(ETH_P_AOE);
	h->verfl = AOE_HVER;
	h->major = cpu_to_be16(d->aoemajor);
	h->minor = d->aoeminor;
	h->cmd = AOECMD_ATA;
	h->tag = cpu_to_be32(host_tag);

	return host_tag;
}

/* Scatter @lba into the six byte-wide lba fields of the ATA header,
 * least-significant byte first (48-bit LBA layout).
 */
static inline void
put_lba(struct aoe_atahdr *ah, sector_t lba)
{
	ah->lba0 = lba;
	ah->lba1 = lba >>= 8;
	ah->lba2 = lba >>= 8;
	ah->lba3 = lba >>= 8;
	ah->lba4 = lba >>= 8;
	ah->lba5 = lba >>= 8;
}

/* Advance the target's round-robin interface pointer, wrapping at the
 * end of the table or at the first empty slot.  A target with no usable
 * interface at all violates a driver invariant, hence the BUG().
 */
static void
ifrotate(struct aoetgt *t)
{
	t->ifp++;
	if (t->ifp >= &t->ifs[NAOEIFS] || t->ifp->nd == NULL)
		t->ifp = t->ifs;
	if (t->ifp->nd == NULL) {
		printk(KERN_INFO "aoe: no interface to rotate to\n");
		BUG();
	}
}

/* Return @skb to the device's private skb pool.
 * NOTE(review): presumably called under d->lock like the _get side —
 * confirm against callers outside this chunk.
 */
static void
skb_pool_put(struct aoedev *d, struct sk_buff *skb)
{
	__skb_queue_tail(&d->skbpool, skb);
}

/* Take an skb from the pool if the head one is no longer shared with
 * the network layer (dataref == 1); otherwise allocate a fresh
 * minimum-size skb while the pool is still below NSKBPOOLMAX.
 * Returns NULL when neither is possible.
 */
static struct sk_buff *
skb_pool_get(struct aoedev *d)
{
	struct sk_buff *skb = skb_peek(&d->skbpool);

	if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) {
		__skb_unlink(skb, &d->skbpool);
		return skb;
	}
	if (skb_queue_len(&d->skbpool) < NSKBPOOLMAX &&
	    (skb = new_skb(ETH_ZLEN)))
		return skb;

	return NULL;
}

/* freeframe is where we do our load balancing so it's a little hairy.
*/ static struct frame * freeframe(struct aoedev *d) { struct frame *f, *e, *rf; struct aoetgt **t; struct sk_buff *skb; if (d->targets[0] == NULL) { /* shouldn't happen, but I'm paranoid */ printk(KERN_ERR "aoe: NULL TARGETS!\n"); return NULL; } t = d->tgt; t++; if (t >= &d->targets[NTARGETS] || !*t) t = d->targets; for (;;) { if ((*t)->nout < (*t)->maxout && t != d->htgt && (*t)->ifp->nd) { rf = NULL; f = (*t)->frames; e = f + (*t)->nframes; for (; f < e; f++) { if (f->tag != FREETAG) continue; skb = f->skb; if (!skb && !(f->skb = skb = new_skb(ETH_ZLEN))) continue; if (atomic_read(&skb_shinfo(skb)->dataref) != 1) { if (!rf) rf = f; continue; } gotone: skb_shinfo(skb)->nr_frags = skb->data_len = 0; skb_trim(skb, 0); d->tgt = t; ifrotate(*t); return f; } /* Work can be done, but the network layer is holding our precious packets. Try to grab one from the pool. */ f = rf; if (f == NULL) { /* more paranoia */ printk(KERN_ERR "aoe: freeframe: %s.\n", "unexpected null rf"); d->flags |= DEVFL_KICKME; return NULL; } skb = skb_pool_get(d); if (skb) { skb_pool_put(d, f->skb); f->skb = skb; goto gotone; } (*t)->dataref++; if ((*t)->nout == 0) d->flags |= DEVFL_KICKME; } if (t == d->tgt) /* we've looped and found nada */ break; t++; if (t >= &d->targets[NTARGETS] || !*t) t = d->targets; } return NULL; } static int aoecmd_ata_rw(struct aoedev *d) { struct frame *f; struct aoe_hdr *h; struct aoe_atahdr *ah; struct buf *buf; struct bio_vec *bv; struct aoetgt *t; struct sk_buff *skb; ulong bcnt; char writebit, extbit; writebit = 0x10; extbit = 0x4; f = freeframe(d); if (f == NULL) return 0; t = *d->tgt; buf = d->inprocess; bv = buf->bv; bcnt = t->ifp->maxbcnt; if (bcnt == 0) bcnt = DEFAULTBCNT; if (bcnt > buf->bv_resid) bcnt = buf->bv_resid; /* initialize the headers & frame */ skb = f->skb; h = (struct aoe_hdr *) skb_mac_header(skb); ah = (struct aoe_atahdr *) (h+1); skb_put(skb, sizeof *h + sizeof *ah); memset(h, 0, skb->len); f->tag = aoehdr_atainit(d, t, h); t->nout++; 
f->waited = 0; f->buf = buf; f->bufaddr = page_address(bv->bv_page) + buf->bv_off; f->bcnt = bcnt; f->lba = buf->sector; /* set up ata header */ ah->scnt = bcnt >> 9; put_lba(ah, buf->sector); if (d->flags & DEVFL_EXT) { ah->aflags |= AOEAFL_EXT; } else { extbit = 0; ah->lba3 &= 0x0f; ah->lba3 |= 0xe0; /* LBA bit + obsolete 0xa0 */ } if (bio_data_dir(buf->bio) == WRITE) { skb_fill_page_desc(skb, 0, bv->bv_page, buf->bv_off, bcnt); ah->aflags |= AOEAFL_WRITE; skb->len += bcnt; skb->data_len = bcnt; t->wpkts++; } else { t->rpkts++; writebit = 0; } ah->cmdstat = WIN_READ | writebit | extbit; /* mark all tracking fields and load out */ buf->nframesout += 1; buf->bv_off += bcnt; buf->bv_resid -= bcnt; buf->resid -= bcnt; buf->sector += bcnt >> 9; if (buf->resid == 0) { d->inprocess = NULL; } else if (buf->bv_resid == 0) { buf->bv = ++bv; buf->bv_resid = bv->bv_len; WARN_ON(buf->bv_resid == 0); buf->bv_off = bv->bv_offset; } skb->dev = t->ifp->nd; skb = skb_clone(skb, GFP_ATOMIC); if (skb) __skb_queue_tail(&d->sendq, skb); return 1; } /* some callers cannot sleep, and they can call this function, * transmitting the packets later, when interrupts are on */ static void aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue) { struct aoe_hdr *h; struct aoe_cfghdr *ch; struct sk_buff *skb; struct net_device *ifp; read_lock(&dev_base_lock); for_each_netdev(&init_net, ifp) { dev_hold(ifp); if (!is_aoe_netif(ifp)) goto cont; skb = new_skb(sizeof *h + sizeof *ch); if (skb == NULL) { printk(KERN_INFO "aoe: skb alloc failure\n"); goto cont; } skb_put(skb, sizeof *h + sizeof *ch); skb->dev = ifp; __skb_queue_tail(queue, skb); h = (struct aoe_hdr *) skb_mac_header(skb); memset(h, 0, sizeof *h + sizeof *ch); memset(h->dst, 0xff, sizeof h->dst); memcpy(h->src, ifp->dev_addr, sizeof h->src); h->type = __constant_cpu_to_be16(ETH_P_AOE); h->verfl = AOE_HVER; h->major = cpu_to_be16(aoemajor); h->minor = aoeminor; h->cmd = AOECMD_CFG; cont: dev_put(ifp); } 
read_unlock(&dev_base_lock); } static void resend(struct aoedev *d, struct aoetgt *t, struct frame *f) { struct sk_buff *skb; struct aoe_hdr *h; struct aoe_atahdr *ah; char buf[128]; u32 n; ifrotate(t); n = newtag(t); skb = f->skb; h = (struct aoe_hdr *) skb_mac_header(skb); ah = (struct aoe_atahdr *) (h+1); snprintf(buf, sizeof buf, "%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x s=%pm d=%pm nout=%d\n", "retransmit", d->aoemajor, d->aoeminor, f->tag, jiffies, n, h->src, h->dst, t->nout); aoechr_error(buf); f->tag = n; h->tag = cpu_to_be32(n); memcpy(h->dst, t->addr, sizeof h->dst); memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src); switch (ah->cmdstat) { default: break; case WIN_READ: case WIN_READ_EXT: case WIN_WRITE: case WIN_WRITE_EXT: put_lba(ah, f->lba); n = f->bcnt; if (n > DEFAULTBCNT) n = DEFAULTBCNT; ah->scnt = n >> 9; if (ah->aflags & AOEAFL_WRITE) { skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr), offset_in_page(f->bufaddr), n); skb->len = sizeof *h + sizeof *ah + n; skb->data_len = n; } } skb->dev = t->ifp->nd; skb = skb_clone(skb, GFP_ATOMIC); if (skb == NULL) return; __skb_queue_tail(&d->sendq, skb); } static int tsince(int tag) { int n; n = jiffies & 0xffff; n -= tag & 0xffff; if (n < 0) n += 1<<16; return n; } static struct aoeif * getif(struct aoetgt *t, struct net_device *nd) { struct aoeif *p, *e; p = t->ifs; e = p + NAOEIFS; for (; p < e; p++) if (p->nd == nd) return p; return NULL; } static struct aoeif * addif(struct aoetgt *t, struct net_device *nd) { struct aoeif *p; p = getif(t, NULL); if (!p) return NULL; p->nd = nd; p->maxbcnt = DEFAULTBCNT; p->lost = 0; p->lostjumbo = 0; return p; } static void ejectif(struct aoetgt *t, struct aoeif *ifp) { struct aoeif *e; ulong n; e = t->ifs + NAOEIFS - 1; n = (e - ifp) * sizeof *ifp; memmove(ifp, ifp+1, n); e->nd = NULL; } static int sthtith(struct aoedev *d) { struct frame *f, *e, *nf; struct sk_buff *skb; struct aoetgt *ht = *d->htgt; f = ht->frames; e = f + ht->nframes; for (; f < e; f++) { if 
(f->tag == FREETAG) continue; nf = freeframe(d); if (!nf) return 0; skb = nf->skb; *nf = *f; f->skb = skb; f->tag = FREETAG; nf->waited = 0; ht->nout--; (*d->tgt)->nout++; resend(d, *d->tgt, nf); } /* he's clean, he's useless. take away his interfaces */ memset(ht->ifs, 0, sizeof ht->ifs); d->htgt = NULL; return 1; } static inline unsigned char ata_scnt(unsigned char *packet) { struct aoe_hdr *h; struct aoe_atahdr *ah; h = (struct aoe_hdr *) packet; ah = (struct aoe_atahdr *) (h+1); return ah->scnt; } static void rexmit_timer(ulong vp) { struct sk_buff_head queue; struct aoedev *d; struct aoetgt *t, **tt, **te; struct aoeif *ifp; struct frame *f, *e; register long timeout; ulong flags, n; d = (struct aoedev *) vp; /* timeout is always ~150% of the moving average */ timeout = d->rttavg; timeout += timeout >> 1; spin_lock_irqsave(&d->lock, flags); if (d->flags & DEVFL_TKILL) { spin_unlock_irqrestore(&d->lock, flags); return; } tt = d->targets; te = tt + NTARGETS; for (; tt < te && *tt; tt++) { t = *tt; f = t->frames; e = f + t->nframes; for (; f < e; f++) { if (f->tag == FREETAG || tsince(f->tag) < timeout) continue; n = f->waited += timeout; n /= HZ; if (n > aoe_deadsecs) { /* waited too long. device failure. 
*/ aoedev_downdev(d); break; } if (n > HELPWAIT /* see if another target can help */ && (tt != d->targets || d->targets[1])) d->htgt = tt; if (t->nout == t->maxout) { if (t->maxout > 1) t->maxout--; t->lastwadj = jiffies; } ifp = getif(t, f->skb->dev); if (ifp && ++ifp->lost > (t->nframes << 1) && (ifp != t->ifs || t->ifs[1].nd)) { ejectif(t, ifp); ifp = NULL; } if (ata_scnt(skb_mac_header(f->skb)) > DEFAULTBCNT / 512 && ifp && ++ifp->lostjumbo > (t->nframes << 1) && ifp->maxbcnt != DEFAULTBCNT) { printk(KERN_INFO "aoe: e%ld.%d: " "too many lost jumbo on " "%s:%pm - " "falling back to %d frames.\n", d->aoemajor, d->aoeminor, ifp->nd->name, t->addr, DEFAULTBCNT); ifp->maxbcnt = 0; } resend(d, t, f); } /* window check */ if (t->nout == t->maxout && t->maxout < t->nframes && (jiffies - t->lastwadj)/HZ > 10) { t->maxout++; t->lastwadj = jiffies; } } if (!skb_queue_empty(&d->sendq)) { n = d->rttavg <<= 1; if (n > MAXTIMER) d->rttavg = MAXTIMER; } if (d->flags & DEVFL_KICKME || d->htgt) { d->flags &= ~DEVFL_KICKME; aoecmd_work(d); } __skb_queue_head_init(&queue); skb_queue_splice_init(&d->sendq, &queue); d->timer.expires = jiffies + TIMERTICK; add_timer(&d->timer); spin_unlock_irqrestore(&d->lock, flags); aoenet_xmit(&queue); } /* enters with d->lock held */ void aoecmd_work(struct aoedev *d) { struct buf *buf; loop: if (d->htgt && !sthtith(d)) return; if (d->inprocess == NULL) { if (list_empty(&d->bufq)) return; buf = container_of(d->bufq.next, struct buf, bufs); list_del(d->bufq.next); d->inprocess = buf; } if (aoecmd_ata_rw(d)) goto loop; } /* this function performs work that has been deferred until sleeping is OK */ void aoecmd_sleepwork(struct work_struct *work) { struct aoedev *d = container_of(work, struct aoedev, work); if (d->flags & DEVFL_GDALLOC) aoeblk_gdalloc(d); if (d->flags & DEVFL_NEWSIZE) { struct block_device *bd; unsigned long flags; u64 ssize; ssize = get_capacity(d->gd); bd = bdget_disk(d->gd, 0); if (bd) { mutex_lock(&bd->bd_inode->i_mutex); 
i_size_write(bd->bd_inode, (loff_t)ssize<<9); mutex_unlock(&bd->bd_inode->i_mutex); bdput(bd); } spin_lock_irqsave(&d->lock, flags); d->flags |= DEVFL_UP; d->flags &= ~DEVFL_NEWSIZE; spin_unlock_irqrestore(&d->lock, flags); } } static void ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id) { u64 ssize; u16 n; /* word 83: command set supported */ n = get_unaligned_le16(&id[83 << 1]); /* word 86: command set/feature enabled */ n |= get_unaligned_le16(&id[86 << 1]); if (n & (1<<10)) { /* bit 10: LBA 48 */ d->flags |= DEVFL_EXT; /* word 100: number lba48 sectors */ ssize = get_unaligned_le64(&id[100 << 1]); /* set as in ide-disk.c:init_idedisk_capacity */ d->geo.cylinders = ssize; d->geo.cylinders /= (255 * 63); d->geo.heads = 255; d->geo.sectors = 63; } else { d->flags &= ~DEVFL_EXT; /* number lba28 sectors */ ssize = get_unaligned_le32(&id[60 << 1]); /* NOTE: obsolete in ATA 6 */ d->geo.cylinders = get_unaligned_le16(&id[54 << 1]); d->geo.heads = get_unaligned_le16(&id[55 << 1]); d->geo.sectors = get_unaligned_le16(&id[56 << 1]); } if (d->ssize != ssize) printk(KERN_INFO "aoe: %pm e%ld.%d v%04x has %llu sectors\n", t->addr, d->aoemajor, d->aoeminor, d->fw_ver, (long long)ssize); d->ssize = ssize; d->geo.start = 0; if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE)) return; if (d->gd != NULL) { set_capacity(d->gd, ssize); d->flags |= DEVFL_NEWSIZE; } else d->flags |= DEVFL_GDALLOC; schedule_work(&d->work); } static void calc_rttavg(struct aoedev *d, int rtt) { register long n; n = rtt; if (n < 0) { n = -rtt; if (n < MINTIMER) n = MINTIMER; else if (n > MAXTIMER) n = MAXTIMER; d->mintimer += (n - d->mintimer) >> 1; } else if (n < d->mintimer) n = d->mintimer; else if (n > MAXTIMER) n = MAXTIMER; /* g == .25; cf. 
Congestion Avoidance and Control, Jacobson & Karels; 1988 */ n -= d->rttavg; d->rttavg += n >> 2; } static struct aoetgt * gettgt(struct aoedev *d, char *addr) { struct aoetgt **t, **e; t = d->targets; e = t + NTARGETS; for (; t < e && *t; t++) if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0) return *t; return NULL; } static inline void diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector) { unsigned long n_sect = bio->bi_size >> 9; const int rw = bio_data_dir(bio); struct hd_struct *part; int cpu; cpu = part_stat_lock(); part = disk_map_sector_rcu(disk, sector); part_stat_inc(cpu, part, ios[rw]); part_stat_add(cpu, part, ticks[rw], duration); part_stat_add(cpu, part, sectors[rw], n_sect); part_stat_add(cpu, part, io_ticks, duration); part_stat_unlock(); } void aoecmd_ata_rsp(struct sk_buff *skb) { struct sk_buff_head queue; struct aoedev *d; struct aoe_hdr *hin, *hout; struct aoe_atahdr *ahin, *ahout; struct frame *f; struct buf *buf; struct aoetgt *t; struct aoeif *ifp; register long n; ulong flags; char ebuf[128]; u16 aoemajor; hin = (struct aoe_hdr *) skb_mac_header(skb); aoemajor = get_unaligned_be16(&hin->major); d = aoedev_by_aoeaddr(aoemajor, hin->minor); if (d == NULL) { snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response " "for unknown device %d.%d\n", aoemajor, hin->minor); aoechr_error(ebuf); return; } spin_lock_irqsave(&d->lock, flags); n = get_unaligned_be32(&hin->tag); t = gettgt(d, hin->src); if (t == NULL) { printk(KERN_INFO "aoe: can't find target e%ld.%d:%pm\n", d->aoemajor, d->aoeminor, hin->src); spin_unlock_irqrestore(&d->lock, flags); return; } f = getframe(t, n); if (f == NULL) { calc_rttavg(d, -tsince(n)); spin_unlock_irqrestore(&d->lock, flags); snprintf(ebuf, sizeof ebuf, "%15s e%d.%d tag=%08x@%08lx\n", "unexpected rsp", get_unaligned_be16(&hin->major), hin->minor, get_unaligned_be32(&hin->tag), jiffies); aoechr_error(ebuf); return; } calc_rttavg(d, tsince(f->tag)); ahin = (struct aoe_atahdr *) 
(hin+1); hout = (struct aoe_hdr *) skb_mac_header(f->skb); ahout = (struct aoe_atahdr *) (hout+1); buf = f->buf; if (ahin->cmdstat & 0xa9) { /* these bits cleared on success */ printk(KERN_ERR "aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%d\n", ahout->cmdstat, ahin->cmdstat, d->aoemajor, d->aoeminor); if (buf) buf->flags |= BUFFL_FAIL; } else { if (d->htgt && t == *d->htgt) /* I'll help myself, thank you. */ d->htgt = NULL; n = ahout->scnt << 9; switch (ahout->cmdstat) { case WIN_READ: case WIN_READ_EXT: if (skb->len - sizeof *hin - sizeof *ahin < n) { printk(KERN_ERR "aoe: %s. skb->len=%d need=%ld\n", "runt data size in read", skb->len, n); /* fail frame f? just returning will rexmit. */ spin_unlock_irqrestore(&d->lock, flags); return; } memcpy(f->bufaddr, ahin+1, n); case WIN_WRITE: case WIN_WRITE_EXT: ifp = getif(t, skb->dev); if (ifp) { ifp->lost = 0; if (n > DEFAULTBCNT) ifp->lostjumbo = 0; } if (f->bcnt -= n) { f->lba += n >> 9; f->bufaddr += n; resend(d, t, f); goto xmit; } break; case WIN_IDENTIFY: if (skb->len - sizeof *hin - sizeof *ahin < 512) { printk(KERN_INFO "aoe: runt data size in ataid. skb->len=%d\n", skb->len); spin_unlock_irqrestore(&d->lock, flags); return; } ataid_complete(d, t, (char *) (ahin+1)); break; default: printk(KERN_INFO "aoe: unrecognized ata command %2.2Xh for %d.%d\n", ahout->cmdstat, get_unaligned_be16(&hin->major), hin->minor); } } if (buf && --buf->nframesout == 0 && buf->resid == 0) { diskstats(d->gd, buf->bio, jiffies - buf->stime, buf->sector); n = (buf->flags & BUFFL_FAIL) ? 
-EIO : 0; bio_endio(buf->bio, n); mempool_free(buf, d->bufpool); } f->buf = NULL; f->tag = FREETAG; t->nout--; aoecmd_work(d); xmit: __skb_queue_head_init(&queue); skb_queue_splice_init(&d->sendq, &queue); spin_unlock_irqrestore(&d->lock, flags); aoenet_xmit(&queue); } void aoecmd_cfg(ushort aoemajor, unsigned char aoeminor) { struct sk_buff_head queue; __skb_queue_head_init(&queue); aoecmd_cfg_pkts(aoemajor, aoeminor, &queue); aoenet_xmit(&queue); } struct sk_buff * aoecmd_ata_id(struct aoedev *d) { struct aoe_hdr *h; struct aoe_atahdr *ah; struct frame *f; struct sk_buff *skb; struct aoetgt *t; f = freeframe(d); if (f == NULL) return NULL; t = *d->tgt; /* initialize the headers & frame */ skb = f->skb; h = (struct aoe_hdr *) skb_mac_header(skb); ah = (struct aoe_atahdr *) (h+1); skb_put(skb, sizeof *h + sizeof *ah); memset(h, 0, skb->len); f->tag = aoehdr_atainit(d, t, h); t->nout++; f->waited = 0; /* set up ata header */ ah->scnt = 1; ah->cmdstat = WIN_IDENTIFY; ah->lba3 = 0xa0; skb->dev = t->ifp->nd; d->rttavg = MAXTIMER; d->timer.function = rexmit_timer; return skb_clone(skb, GFP_ATOMIC); } static struct aoetgt * addtgt(struct aoedev *d, char *addr, ulong nframes) { struct aoetgt *t, **tt, **te; struct frame *f, *e; tt = d->targets; te = tt + NTARGETS; for (; tt < te && *tt; tt++) ; if (tt == te) { printk(KERN_INFO "aoe: device addtgt failure; too many targets\n"); return NULL; } t = kcalloc(1, sizeof *t, GFP_ATOMIC); f = kcalloc(nframes, sizeof *f, GFP_ATOMIC); if (!t || !f) { kfree(f); kfree(t); printk(KERN_INFO "aoe: cannot allocate memory to add target\n"); return NULL; } t->nframes = nframes; t->frames = f; e = f + nframes; for (; f < e; f++) f->tag = FREETAG; memcpy(t->addr, addr, sizeof t->addr); t->ifp = t->ifs; t->maxout = t->nframes; return *tt = t; } void aoecmd_cfg_rsp(struct sk_buff *skb) { struct aoedev *d; struct aoe_hdr *h; struct aoe_cfghdr *ch; struct aoetgt *t; struct aoeif *ifp; ulong flags, sysminor, aoemajor; struct sk_buff *sl; u16 n; h 
= (struct aoe_hdr *) skb_mac_header(skb); ch = (struct aoe_cfghdr *) (h+1); /* * Enough people have their dip switches set backwards to * warrant a loud message for this special case. */ aoemajor = get_unaligned_be16(&h->major); if (aoemajor == 0xfff) { printk(KERN_ERR "aoe: Warning: shelf address is all ones. " "Check shelf dip switches.\n"); return; } sysminor = SYSMINOR(aoemajor, h->minor); if (sysminor * AOE_PARTITIONS + AOE_PARTITIONS > MINORMASK) { printk(KERN_INFO "aoe: e%ld.%d: minor number too large\n", aoemajor, (int) h->minor); return; } n = be16_to_cpu(ch->bufcnt); if (n > aoe_maxout) /* keep it reasonable */ n = aoe_maxout; d = aoedev_by_sysminor_m(sysminor); if (d == NULL) { printk(KERN_INFO "aoe: device sysminor_m failure\n"); return; } spin_lock_irqsave(&d->lock, flags); t = gettgt(d, h->src); if (!t) { t = addtgt(d, h->src, n); if (!t) { spin_unlock_irqrestore(&d->lock, flags); return; } } ifp = getif(t, skb->dev); if (!ifp) { ifp = addif(t, skb->dev); if (!ifp) { printk(KERN_INFO "aoe: device addif failure; " "too many interfaces?\n"); spin_unlock_irqrestore(&d->lock, flags); return; } } if (ifp->maxbcnt) { n = ifp->nd->mtu; n -= sizeof (struct aoe_hdr) + sizeof (struct aoe_atahdr); n /= 512; if (n > ch->scnt) n = ch->scnt; n = n ? 
n * 512 : DEFAULTBCNT; /* NOTE(review): completes the ternary begun on the previous line */
		if (n != ifp->maxbcnt) {
			printk(KERN_INFO
				"aoe: e%ld.%d: setting %d%s%s:%pm\n",
				d->aoemajor, d->aoeminor, n,
				" byte data frames on ", ifp->nd->name,
				t->addr);
			ifp->maxbcnt = n;
		}
	}

	/* don't change users' perspective */
	if (d->nopen) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	d->fw_ver = be16_to_cpu(ch->fwver);

	/* Build an ATA identify frame while still under the lock; the
	 * clone is transmitted below, after the lock is dropped.
	 */
	sl = aoecmd_ata_id(d);

	spin_unlock_irqrestore(&d->lock, flags);

	if (sl) {
		struct sk_buff_head queue;
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, sl);
		aoenet_xmit(&queue);
	}
}

/* Reset per-target runtime tuning across the device: restore the full
 * transmit window (maxout = nframes) and, on every interface slot,
 * clear the loss counters and return maxbcnt to DEFAULTBCNT.
 */
void
aoecmd_cleanslate(struct aoedev *d)
{
	struct aoetgt **t, **te;
	struct aoeif *p, *e;

	d->mintimer = MINTIMER;

	t = d->targets;
	te = t + NTARGETS;
	for (; t < te && *t; t++) {
		(*t)->maxout = (*t)->nframes;
		p = (*t)->ifs;
		e = p + NAOEIFS;
		for (; p < e; p++) {
			p->lostjumbo = 0;
			p->lost = 0;
			p->maxbcnt = DEFAULTBCNT;
		}
	}
}
gpl-2.0
greendrm/linux-2.6.29-s5pc100
drivers/isdn/mISDN/l1oip_codec.c
141
11136
/* * l1oip_codec.c generic codec using lookup table * -> conversion from a-Law to u-Law * -> conversion from u-Law to a-Law * -> compression by reducing the number of sample resolution to 4 * * NOTE: It is not compatible with any standard codec like ADPCM. * * Author Andreas Eversberg (jolly@eversberg.eu) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* How the codec works: -------------------- The volume is increased to increase the dynamic range of the audio signal. Each sample is converted to a-LAW with only 16 steps of level resolution. A pair of two samples are stored in one byte. The first byte is stored in the upper bits, the second byte is stored in the lower bits. To speed up compression and decompression, two lookup tables are formed: - 16 bits index for two samples (law encoded) with 8 bit compressed result. - 8 bits index for one compressed data with 16 bits decompressed result. NOTE: The bytes are handled as they are law-encoded. */ #include <linux/vmalloc.h> #include <linux/mISDNif.h> #include "core.h" #include "l1oip.h" /* definitions of codec. don't use calculations, code may run slower. 
*/ static u8 *table_com; static u16 *table_dec; /* alaw -> ulaw */ static u8 alaw_to_ulaw[256] = { 0xab, 0x2b, 0xe3, 0x63, 0x8b, 0x0b, 0xc9, 0x49, 0xba, 0x3a, 0xf6, 0x76, 0x9b, 0x1b, 0xd7, 0x57, 0xa3, 0x23, 0xdd, 0x5d, 0x83, 0x03, 0xc1, 0x41, 0xb2, 0x32, 0xeb, 0x6b, 0x93, 0x13, 0xcf, 0x4f, 0xaf, 0x2f, 0xe7, 0x67, 0x8f, 0x0f, 0xcd, 0x4d, 0xbe, 0x3e, 0xfe, 0x7e, 0x9f, 0x1f, 0xdb, 0x5b, 0xa7, 0x27, 0xdf, 0x5f, 0x87, 0x07, 0xc5, 0x45, 0xb6, 0x36, 0xef, 0x6f, 0x97, 0x17, 0xd3, 0x53, 0xa9, 0x29, 0xe1, 0x61, 0x89, 0x09, 0xc7, 0x47, 0xb8, 0x38, 0xf2, 0x72, 0x99, 0x19, 0xd5, 0x55, 0xa1, 0x21, 0xdc, 0x5c, 0x81, 0x01, 0xbf, 0x3f, 0xb0, 0x30, 0xe9, 0x69, 0x91, 0x11, 0xce, 0x4e, 0xad, 0x2d, 0xe5, 0x65, 0x8d, 0x0d, 0xcb, 0x4b, 0xbc, 0x3c, 0xfa, 0x7a, 0x9d, 0x1d, 0xd9, 0x59, 0xa5, 0x25, 0xde, 0x5e, 0x85, 0x05, 0xc3, 0x43, 0xb4, 0x34, 0xed, 0x6d, 0x95, 0x15, 0xd1, 0x51, 0xac, 0x2c, 0xe4, 0x64, 0x8c, 0x0c, 0xca, 0x4a, 0xbb, 0x3b, 0xf8, 0x78, 0x9c, 0x1c, 0xd8, 0x58, 0xa4, 0x24, 0xde, 0x5e, 0x84, 0x04, 0xc2, 0x42, 0xb3, 0x33, 0xec, 0x6c, 0x94, 0x14, 0xd0, 0x50, 0xb0, 0x30, 0xe8, 0x68, 0x90, 0x10, 0xce, 0x4e, 0xbf, 0x3f, 0xfe, 0x7e, 0xa0, 0x20, 0xdc, 0x5c, 0xa8, 0x28, 0xe0, 0x60, 0x88, 0x08, 0xc6, 0x46, 0xb7, 0x37, 0xf0, 0x70, 0x98, 0x18, 0xd4, 0x54, 0xaa, 0x2a, 0xe2, 0x62, 0x8a, 0x0a, 0xc8, 0x48, 0xb9, 0x39, 0xf4, 0x74, 0x9a, 0x1a, 0xd6, 0x56, 0xa2, 0x22, 0xdd, 0x5d, 0x82, 0x02, 0xc0, 0x40, 0xb1, 0x31, 0xea, 0x6a, 0x92, 0x12, 0xcf, 0x4f, 0xae, 0x2e, 0xe6, 0x66, 0x8e, 0x0e, 0xcc, 0x4c, 0xbd, 0x3d, 0xfc, 0x7c, 0x9e, 0x1e, 0xda, 0x5a, 0xa6, 0x26, 0xdf, 0x5f, 0x86, 0x06, 0xc4, 0x44, 0xb5, 0x35, 0xee, 0x6e, 0x96, 0x16, 0xd2, 0x52 }; /* ulaw -> alaw */ static u8 ulaw_to_alaw[256] = { 0xab, 0x55, 0xd5, 0x15, 0x95, 0x75, 0xf5, 0x35, 0xb5, 0x45, 0xc5, 0x05, 0x85, 0x65, 0xe5, 0x25, 0xa5, 0x5d, 0xdd, 0x1d, 0x9d, 0x7d, 0xfd, 0x3d, 0xbd, 0x4d, 0xcd, 0x0d, 0x8d, 0x6d, 0xed, 0x2d, 0xad, 0x51, 0xd1, 0x11, 0x91, 0x71, 0xf1, 0x31, 0xb1, 0x41, 0xc1, 0x01, 0x81, 0x61, 0xe1, 0x21, 0x59, 0xd9, 0x19, 0x99, 
0x79, 0xf9, 0x39, 0xb9, 0x49, 0xc9, 0x09, 0x89, 0x69, 0xe9, 0x29, 0xa9, 0xd7, 0x17, 0x97, 0x77, 0xf7, 0x37, 0xb7, 0x47, 0xc7, 0x07, 0x87, 0x67, 0xe7, 0x27, 0xa7, 0xdf, 0x9f, 0x7f, 0xff, 0x3f, 0xbf, 0x4f, 0xcf, 0x0f, 0x8f, 0x6f, 0xef, 0x2f, 0x53, 0x13, 0x73, 0x33, 0xb3, 0x43, 0xc3, 0x03, 0x83, 0x63, 0xe3, 0x23, 0xa3, 0x5b, 0xdb, 0x1b, 0x9b, 0x7b, 0xfb, 0x3b, 0xbb, 0xbb, 0x4b, 0x4b, 0xcb, 0xcb, 0x0b, 0x0b, 0x8b, 0x8b, 0x6b, 0x6b, 0xeb, 0xeb, 0x2b, 0x2b, 0xab, 0x54, 0xd4, 0x14, 0x94, 0x74, 0xf4, 0x34, 0xb4, 0x44, 0xc4, 0x04, 0x84, 0x64, 0xe4, 0x24, 0xa4, 0x5c, 0xdc, 0x1c, 0x9c, 0x7c, 0xfc, 0x3c, 0xbc, 0x4c, 0xcc, 0x0c, 0x8c, 0x6c, 0xec, 0x2c, 0xac, 0x50, 0xd0, 0x10, 0x90, 0x70, 0xf0, 0x30, 0xb0, 0x40, 0xc0, 0x00, 0x80, 0x60, 0xe0, 0x20, 0x58, 0xd8, 0x18, 0x98, 0x78, 0xf8, 0x38, 0xb8, 0x48, 0xc8, 0x08, 0x88, 0x68, 0xe8, 0x28, 0xa8, 0xd6, 0x16, 0x96, 0x76, 0xf6, 0x36, 0xb6, 0x46, 0xc6, 0x06, 0x86, 0x66, 0xe6, 0x26, 0xa6, 0xde, 0x9e, 0x7e, 0xfe, 0x3e, 0xbe, 0x4e, 0xce, 0x0e, 0x8e, 0x6e, 0xee, 0x2e, 0x52, 0x12, 0x72, 0x32, 0xb2, 0x42, 0xc2, 0x02, 0x82, 0x62, 0xe2, 0x22, 0xa2, 0x5a, 0xda, 0x1a, 0x9a, 0x7a, 0xfa, 0x3a, 0xba, 0xba, 0x4a, 0x4a, 0xca, 0xca, 0x0a, 0x0a, 0x8a, 0x8a, 0x6a, 0x6a, 0xea, 0xea, 0x2a, 0x2a }; /* alaw -> 4bit compression */ static u8 alaw_to_4bit[256] = { 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03, 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03, 0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03, 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03, 0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03, 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0d, 0x02, 0x0e, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03, 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 
0x0c, 0x03, 0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03, 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03, 0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04, 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x01, 0x0a, 0x05, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03, 0x0d, 0x02, 0x09, 0x07, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03, 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03, 0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03, 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03, 0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04, }; /* 4bit -> alaw decompression */ static u8 _4bit_to_alaw[16] = { 0x5d, 0x51, 0xd9, 0xd7, 0x5f, 0x53, 0xa3, 0x4b, 0x2a, 0x3a, 0x22, 0x2e, 0x26, 0x56, 0x20, 0x2c, }; /* ulaw -> 4bit compression */ static u8 ulaw_to_4bit[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x08, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 
0x0f, 0x0f, 0x0f, 0x0f, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, }; /* 4bit -> ulaw decompression */ static u8 _4bit_to_ulaw[16] = { 0x11, 0x21, 0x31, 0x40, 0x4e, 0x5c, 0x68, 0x71, 0xfe, 0xef, 0xe7, 0xdb, 0xcd, 0xbf, 0xaf, 0x9f, }; /* * Compresses data to the result buffer * The result size must be at least half of the input buffer. * The number of samples also must be even! */ int l1oip_law_to_4bit(u8 *data, int len, u8 *result, u32 *state) { int ii, i = 0, o = 0; if (!len) return 0; /* send saved byte and first input byte */ if (*state) { *result++ = table_com[(((*state)<<8)&0xff00) | (*data++)]; len--; o++; } ii = len >> 1; while (i < ii) { *result++ = table_com[(data[0]<<8) | (data[1])]; data += 2; i++; o++; } /* if len has an odd number, we save byte for next call */ if (len & 1) *state = 0x100 + *data; else *state = 0; return o; } /* Decompress data to the result buffer * The result size must be the number of sample in packet. (2 * input data) * The number of samples in the result are even! 
*/ int l1oip_4bit_to_law(u8 *data, int len, u8 *result) { int i = 0; u16 r; while (i < len) { r = table_dec[*data++]; *result++ = r>>8; *result++ = r; i++; } return len << 1; } /* * law conversion */ int l1oip_alaw_to_ulaw(u8 *data, int len, u8 *result) { int i = 0; while (i < len) { *result++ = alaw_to_ulaw[*data++]; i++; } return len; } int l1oip_ulaw_to_alaw(u8 *data, int len, u8 *result) { int i = 0; while (i < len) { *result++ = ulaw_to_alaw[*data++]; i++; } return len; } /* * generate/free compression and decompression table */ void l1oip_4bit_free(void) { if (table_dec) vfree(table_dec); if (table_com) vfree(table_com); table_com = NULL; table_dec = NULL; } int l1oip_4bit_alloc(int ulaw) { int i1, i2, c, sample; /* in case, it is called again */ if (table_dec) return 0; /* alloc conversion tables */ table_com = vmalloc(65536); table_dec = vmalloc(512); if (!table_com | !table_dec) { l1oip_4bit_free(); return -ENOMEM; } memset(table_com, 0, 65536); memset(table_dec, 0, 512); /* generate compression table */ i1 = 0; while (i1 < 256) { if (ulaw) c = ulaw_to_4bit[i1]; else c = alaw_to_4bit[i1]; i2 = 0; while (i2 < 256) { table_com[(i1<<8) | i2] |= (c<<4); table_com[(i2<<8) | i1] |= c; i2++; } i1++; } /* generate decompression table */ i1 = 0; while (i1 < 16) { if (ulaw) sample = _4bit_to_ulaw[i1]; else sample = _4bit_to_alaw[i1]; i2 = 0; while (i2 < 16) { table_dec[(i1<<4) | i2] |= (sample<<8); table_dec[(i2<<4) | i1] |= sample; i2++; } i1++; } return 0; }
gpl-2.0
rictec/huawei_s7_kernel
drivers/misc/enclosure.c
141
14611
/*
 * Enclosure Services
 *
 * Copyright (C) 2008 James Bottomley <James.Bottomley@HansenPartnership.com>
 *
**-----------------------------------------------------------------------------
**
**  This program is free software; you can redistribute it and/or
**  modify it under the terms of the GNU General Public License
**  version 2 as published by the Free Software Foundation.
**
**  This program is distributed in the hope that it will be useful,
**  but WITHOUT ANY WARRANTY; without even the implied warranty of
**  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
**  GNU General Public License for more details.
**
**  You should have received a copy of the GNU General Public License
**  along with this program; if not, write to the Free Software
**  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
**
**-----------------------------------------------------------------------------
*/
#include <linux/device.h>
#include <linux/enclosure.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>

static LIST_HEAD(container_list);
static DEFINE_MUTEX(container_list_lock);
static struct class enclosure_class;

/**
 * enclosure_find - find an enclosure given a device
 * @dev: the device to find for
 *
 * Looks through the list of registered enclosures to see
 * if it can find a match for a device.  Returns NULL if no
 * enclosure is found.  Obtains a reference to the enclosure class
 * device which must be released with put_device().
 */
struct enclosure_device *enclosure_find(struct device *dev)
{
	struct enclosure_device *edev;

	mutex_lock(&container_list_lock);
	list_for_each_entry(edev, &container_list, node) {
		if (edev->edev.parent == dev) {
			/* take a reference before dropping the list
			 * lock so the enclosure cannot vanish under
			 * the caller */
			get_device(&edev->edev);
			mutex_unlock(&container_list_lock);
			return edev;
		}
	}
	mutex_unlock(&container_list_lock);

	return NULL;
}
EXPORT_SYMBOL_GPL(enclosure_find);

/**
 * enclosure_for_each_device - calls a function for each enclosure
 * @fn: the function to call
 * @data: the data to pass to each call
 *
 * Loops over all the enclosures calling the function.
 *
 * Note, this function uses a mutex which will be held across calls to
 * @fn, so it must have non atomic context, and @fn may (although it
 * should not) sleep or otherwise cause the mutex to be held for
 * indefinite periods
 */
int enclosure_for_each_device(int (*fn)(struct enclosure_device *, void *),
			      void *data)
{
	int error = 0;
	struct enclosure_device *edev;

	mutex_lock(&container_list_lock);
	list_for_each_entry(edev, &container_list, node) {
		/* stop at the first non-zero return from @fn */
		error = fn(edev, data);
		if (error)
			break;
	}
	mutex_unlock(&container_list_lock);

	return error;
}
EXPORT_SYMBOL_GPL(enclosure_for_each_device);

/**
 * enclosure_register - register device as an enclosure
 *
 * @dev: device containing the enclosure
 * @components: number of components in the enclosure
 *
 * This sets up the device for being an enclosure.  Note that @dev does
 * not have to be a dedicated enclosure device.
It may be some other type * of device that additionally responds to enclosure services */ struct enclosure_device * enclosure_register(struct device *dev, const char *name, int components, struct enclosure_component_callbacks *cb) { struct enclosure_device *edev = kzalloc(sizeof(struct enclosure_device) + sizeof(struct enclosure_component)*components, GFP_KERNEL); int err, i; BUG_ON(!cb); if (!edev) return ERR_PTR(-ENOMEM); edev->components = components; edev->edev.class = &enclosure_class; edev->edev.parent = get_device(dev); edev->cb = cb; dev_set_name(&edev->edev, name); err = device_register(&edev->edev); if (err) goto err; for (i = 0; i < components; i++) edev->component[i].number = -1; mutex_lock(&container_list_lock); list_add_tail(&edev->node, &container_list); mutex_unlock(&container_list_lock); return edev; err: put_device(edev->edev.parent); kfree(edev); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(enclosure_register); static struct enclosure_component_callbacks enclosure_null_callbacks; /** * enclosure_unregister - remove an enclosure * * @edev: the registered enclosure to remove; */ void enclosure_unregister(struct enclosure_device *edev) { int i; mutex_lock(&container_list_lock); list_del(&edev->node); mutex_unlock(&container_list_lock); for (i = 0; i < edev->components; i++) if (edev->component[i].number != -1) device_unregister(&edev->component[i].cdev); /* prevent any callbacks into service user */ edev->cb = &enclosure_null_callbacks; device_unregister(&edev->edev); } EXPORT_SYMBOL_GPL(enclosure_unregister); #define ENCLOSURE_NAME_SIZE 64 static void enclosure_link_name(struct enclosure_component *cdev, char *name) { strcpy(name, "enclosure_device:"); strcat(name, dev_name(&cdev->cdev)); } static void enclosure_remove_links(struct enclosure_component *cdev) { char name[ENCLOSURE_NAME_SIZE]; enclosure_link_name(cdev, name); sysfs_remove_link(&cdev->dev->kobj, name); sysfs_remove_link(&cdev->cdev.kobj, "device"); } static int enclosure_add_links(struct 
			       enclosure_component *cdev)
{
	int error;
	char name[ENCLOSURE_NAME_SIZE];

	/* component -> backing device link, named "device" */
	error = sysfs_create_link(&cdev->cdev.kobj, &cdev->dev->kobj,
				  "device");
	if (error)
		return error;

	/* reverse link from the backing device back to the component */
	enclosure_link_name(cdev, name);
	error = sysfs_create_link(&cdev->dev->kobj, &cdev->cdev.kobj, name);
	if (error)
		/* roll back the first link so failure leaves no
		 * half-created state */
		sysfs_remove_link(&cdev->cdev.kobj, "device");

	return error;
}

static void enclosure_release(struct device *cdev)
{
	struct enclosure_device *edev = to_enclosure_device(cdev);

	/* drop the parent reference taken in enclosure_register() */
	put_device(cdev->parent);
	kfree(edev);
}

static void enclosure_component_release(struct device *dev)
{
	struct enclosure_component *cdev = to_enclosure_component(dev);

	if (cdev->dev) {
		enclosure_remove_links(cdev);
		put_device(cdev->dev);
	}
	put_device(dev->parent);
}

static struct attribute_group *enclosure_groups[];

/**
 * enclosure_component_register - add a particular component to an enclosure
 * @edev: the enclosure to add the component
 * @num: the device number
 * @type: the type of component being added
 * @name: an optional name to appear in sysfs (leave NULL if none)
 *
 * Registers the component.  The name is optional for enclosures that
 * give their components a unique name.  If not, leave the field NULL
 * and a name will be assigned.
 *
 * Returns a pointer to the enclosure component or an error.
*/ struct enclosure_component * enclosure_component_register(struct enclosure_device *edev, unsigned int number, enum enclosure_component_type type, const char *name) { struct enclosure_component *ecomp; struct device *cdev; int err; if (number >= edev->components) return ERR_PTR(-EINVAL); ecomp = &edev->component[number]; if (ecomp->number != -1) return ERR_PTR(-EINVAL); ecomp->type = type; ecomp->number = number; cdev = &ecomp->cdev; cdev->parent = get_device(&edev->edev); if (name) dev_set_name(cdev, name); else dev_set_name(cdev, "%u", number); cdev->release = enclosure_component_release; cdev->groups = enclosure_groups; err = device_register(cdev); if (err) ERR_PTR(err); return ecomp; } EXPORT_SYMBOL_GPL(enclosure_component_register); /** * enclosure_add_device - add a device as being part of an enclosure * @edev: the enclosure device being added to. * @num: the number of the component * @dev: the device being added * * Declares a real device to reside in slot (or identifier) @num of an * enclosure. This will cause the relevant sysfs links to appear. * This function may also be used to change a device associated with * an enclosure without having to call enclosure_remove_device() in * between. * * Returns zero on success or an error. */ int enclosure_add_device(struct enclosure_device *edev, int component, struct device *dev) { struct enclosure_component *cdev; if (!edev || component >= edev->components) return -EINVAL; cdev = &edev->component[component]; if (cdev->dev) enclosure_remove_links(cdev); put_device(cdev->dev); cdev->dev = get_device(dev); return enclosure_add_links(cdev); } EXPORT_SYMBOL_GPL(enclosure_add_device); /** * enclosure_remove_device - remove a device from an enclosure * @edev: the enclosure device * @num: the number of the component to remove * * Returns zero on success or an error. 
 */
int enclosure_remove_device(struct enclosure_device *edev, int component)
{
	struct enclosure_component *cdev;

	if (!edev || component >= edev->components)
		return -EINVAL;

	cdev = &edev->component[component];

	/* delete and re-add the component device so its sysfs entry
	 * survives, now with no backing device attached */
	device_del(&cdev->cdev);
	put_device(cdev->dev);
	cdev->dev = NULL;
	return device_add(&cdev->cdev);
}
EXPORT_SYMBOL_GPL(enclosure_remove_device);

/*
 * sysfs pieces below
 */

static ssize_t enclosure_show_components(struct device *cdev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct enclosure_device *edev = to_enclosure_device(cdev);

	return snprintf(buf, 40, "%d\n", edev->components);
}

static struct device_attribute enclosure_attrs[] = {
	__ATTR(components, S_IRUGO, enclosure_show_components, NULL),
	__ATTR_NULL
};

static struct class enclosure_class = {
	.name			= "enclosure",
	.owner			= THIS_MODULE,
	.dev_release		= enclosure_release,
	.dev_attrs		= enclosure_attrs,
};

/* human-readable names indexed by enum enclosure_status; every slot
 * is populated */
static const char *const enclosure_status [] = {
	[ENCLOSURE_STATUS_UNSUPPORTED] = "unsupported",
	[ENCLOSURE_STATUS_OK] = "OK",
	[ENCLOSURE_STATUS_CRITICAL] = "critical",
	[ENCLOSURE_STATUS_NON_CRITICAL] = "non-critical",
	[ENCLOSURE_STATUS_UNRECOVERABLE] = "unrecoverable",
	[ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed",
	[ENCLOSURE_STATUS_UNKNOWN] = "unknown",
	[ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable",
};

static const char *const enclosure_type [] = {
	[ENCLOSURE_COMPONENT_DEVICE] = "device",
	[ENCLOSURE_COMPONENT_ARRAY_DEVICE] = "array device",
};

static ssize_t get_component_fault(struct device *cdev,
				   struct device_attribute *attr, char *buf)
{
	struct enclosure_device *edev = to_enclosure_device(cdev->parent);
	struct enclosure_component *ecomp = to_enclosure_component(cdev);

	/* let the backing service refresh the cached value first */
	if (edev->cb->get_fault)
		edev->cb->get_fault(edev, ecomp);
	return snprintf(buf, 40, "%d\n", ecomp->fault);
}

static ssize_t set_component_fault(struct device *cdev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct enclosure_device *edev = to_enclosure_device(cdev->parent);
	struct enclosure_component
*ecomp = to_enclosure_component(cdev); int val = simple_strtoul(buf, NULL, 0); if (edev->cb->set_fault) edev->cb->set_fault(edev, ecomp, val); return count; } static ssize_t get_component_status(struct device *cdev, struct device_attribute *attr,char *buf) { struct enclosure_device *edev = to_enclosure_device(cdev->parent); struct enclosure_component *ecomp = to_enclosure_component(cdev); if (edev->cb->get_status) edev->cb->get_status(edev, ecomp); return snprintf(buf, 40, "%s\n", enclosure_status[ecomp->status]); } static ssize_t set_component_status(struct device *cdev, struct device_attribute *attr, const char *buf, size_t count) { struct enclosure_device *edev = to_enclosure_device(cdev->parent); struct enclosure_component *ecomp = to_enclosure_component(cdev); int i; for (i = 0; enclosure_status[i]; i++) { if (strncmp(buf, enclosure_status[i], strlen(enclosure_status[i])) == 0 && (buf[strlen(enclosure_status[i])] == '\n' || buf[strlen(enclosure_status[i])] == '\0')) break; } if (enclosure_status[i] && edev->cb->set_status) { edev->cb->set_status(edev, ecomp, i); return count; } else return -EINVAL; } static ssize_t get_component_active(struct device *cdev, struct device_attribute *attr, char *buf) { struct enclosure_device *edev = to_enclosure_device(cdev->parent); struct enclosure_component *ecomp = to_enclosure_component(cdev); if (edev->cb->get_active) edev->cb->get_active(edev, ecomp); return snprintf(buf, 40, "%d\n", ecomp->active); } static ssize_t set_component_active(struct device *cdev, struct device_attribute *attr, const char *buf, size_t count) { struct enclosure_device *edev = to_enclosure_device(cdev->parent); struct enclosure_component *ecomp = to_enclosure_component(cdev); int val = simple_strtoul(buf, NULL, 0); if (edev->cb->set_active) edev->cb->set_active(edev, ecomp, val); return count; } static ssize_t get_component_locate(struct device *cdev, struct device_attribute *attr, char *buf) { struct enclosure_device *edev = 
		to_enclosure_device(cdev->parent);
	struct enclosure_component *ecomp = to_enclosure_component(cdev);

	/* let the backing service refresh the cached value first */
	if (edev->cb->get_locate)
		edev->cb->get_locate(edev, ecomp);
	return snprintf(buf, 40, "%d\n", ecomp->locate);
}

static ssize_t set_component_locate(struct device *cdev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct enclosure_device *edev = to_enclosure_device(cdev->parent);
	struct enclosure_component *ecomp = to_enclosure_component(cdev);
	int val = simple_strtoul(buf, NULL, 0);

	if (edev->cb->set_locate)
		edev->cb->set_locate(edev, ecomp, val);
	return count;
}

static ssize_t get_component_type(struct device *cdev,
				  struct device_attribute *attr, char *buf)
{
	struct enclosure_component *ecomp = to_enclosure_component(cdev);

	return snprintf(buf, 40, "%s\n", enclosure_type[ecomp->type]);
}

static DEVICE_ATTR(fault, S_IRUGO | S_IWUSR, get_component_fault,
		   set_component_fault);
static DEVICE_ATTR(status, S_IRUGO | S_IWUSR, get_component_status,
		   set_component_status);
static DEVICE_ATTR(active, S_IRUGO | S_IWUSR, get_component_active,
		   set_component_active);
static DEVICE_ATTR(locate, S_IRUGO | S_IWUSR, get_component_locate,
		   set_component_locate);
static DEVICE_ATTR(type, S_IRUGO, get_component_type, NULL);

static struct attribute *enclosure_component_attrs[] = {
	&dev_attr_fault.attr,
	&dev_attr_status.attr,
	&dev_attr_active.attr,
	&dev_attr_locate.attr,
	&dev_attr_type.attr,
	NULL
};

static struct attribute_group enclosure_group = {
	.attrs = enclosure_component_attrs,
};

static struct attribute_group *enclosure_groups[] = {
	&enclosure_group,
	NULL
};

static int __init enclosure_init(void)
{
	int err;

	err = class_register(&enclosure_class);
	if (err)
		return err;

	return 0;
}

static void __exit enclosure_exit(void)
{
	class_unregister(&enclosure_class);
}

module_init(enclosure_init);
module_exit(enclosure_exit);

MODULE_AUTHOR("James Bottomley");
MODULE_DESCRIPTION("Enclosure Services");
MODULE_LICENSE("GPL v2");
gpl-2.0
130265/Galaxy-S4-Value-Edition-I9515L-Kernel
drivers/gpu/msm/adreno_a3xx.c
397
117429
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/delay.h> #include <linux/sched.h> #include <mach/socinfo.h> #include "kgsl.h" #include "adreno.h" #include "kgsl_sharedmem.h" #include "kgsl_cffdump.h" #include "a3xx_reg.h" #include "adreno_a3xx_trace.h" /* * Set of registers to dump for A3XX on postmortem and snapshot. * Registers in pairs - first value is the start offset, second * is the stop offset (inclusive) */ const unsigned int a3xx_registers[] = { 0x0000, 0x0002, 0x0010, 0x0012, 0x0018, 0x0018, 0x0020, 0x0027, 0x0029, 0x002b, 0x002e, 0x0033, 0x0040, 0x0042, 0x0050, 0x005c, 0x0060, 0x006c, 0x0080, 0x0082, 0x0084, 0x0088, 0x0090, 0x00e5, 0x00ea, 0x00ed, 0x0100, 0x0100, 0x0110, 0x0123, 0x01c0, 0x01c1, 0x01c3, 0x01c5, 0x01c7, 0x01c7, 0x01d5, 0x01d9, 0x01dc, 0x01dd, 0x01ea, 0x01ea, 0x01ee, 0x01f1, 0x01f5, 0x01f5, 0x01fc, 0x01ff, 0x0440, 0x0440, 0x0443, 0x0443, 0x0445, 0x0445, 0x044d, 0x044f, 0x0452, 0x0452, 0x0454, 0x046f, 0x047c, 0x047c, 0x047f, 0x047f, 0x0578, 0x057f, 0x0600, 0x0602, 0x0605, 0x0607, 0x060a, 0x060e, 0x0612, 0x0614, 0x0c01, 0x0c02, 0x0c06, 0x0c1d, 0x0c3d, 0x0c3f, 0x0c48, 0x0c4b, 0x0c80, 0x0c80, 0x0c88, 0x0c8b, 0x0ca0, 0x0cb7, 0x0cc0, 0x0cc1, 0x0cc6, 0x0cc7, 0x0ce4, 0x0ce5, 0x0e41, 0x0e45, 0x0e64, 0x0e65, 0x0e80, 0x0e82, 0x0e84, 0x0e89, 0x0ea0, 0x0ea1, 0x0ea4, 0x0ea7, 0x0ec4, 0x0ecb, 0x0ee0, 0x0ee0, 0x0f00, 0x0f01, 0x0f03, 0x0f09, 0x2040, 0x2040, 0x2044, 0x2044, 0x2048, 0x204d, 0x2068, 0x2069, 0x206c, 0x206d, 0x2070, 0x2070, 0x2072, 0x2072, 0x2074, 0x2075, 0x2079, 
0x207a, 0x20c0, 0x20d3, 0x20e4, 0x20ef, 0x2100, 0x2109, 0x210c, 0x210c, 0x210e, 0x210e, 0x2110, 0x2111, 0x2114, 0x2115, 0x21e4, 0x21e4, 0x21ea, 0x21ea, 0x21ec, 0x21ed, 0x21f0, 0x21f0, 0x2240, 0x227e, 0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8, 0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7, 0x22ff, 0x22ff, 0x2340, 0x2343, 0x2348, 0x2349, 0x2350, 0x2356, 0x2360, 0x2360, 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d, 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 0x2472, 0x2472, 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef, 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511, 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea, 0x25ec, 0x25ed, 0x25f0, 0x25f0, 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce, 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec, 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 0x2748, 0x2749, 0x2750, 0x2756, 0x2760, 0x2760, 0x300C, 0x300E, 0x301C, 0x301D, 0x302A, 0x302A, 0x302C, 0x302D, 0x3030, 0x3031, 0x3034, 0x3036, 0x303C, 0x303C, 0x305E, 0x305F, }; const unsigned int a3xx_registers_count = ARRAY_SIZE(a3xx_registers) / 2; /* Removed the following HLSQ register ranges from being read during * fault tolerance since reading the registers may cause the device to hang: */ const unsigned int a3xx_hlsq_registers[] = { 0x0e00, 0x0e05, 0x0e0c, 0x0e0c, 0x0e22, 0x0e23, 0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a, 0x2600, 0x2612, 0x2614, 0x2617, 0x261a, 0x261a, }; const unsigned int a3xx_hlsq_registers_count = ARRAY_SIZE(a3xx_hlsq_registers) / 2; /* The set of additional registers to be dumped for A330 */ const unsigned int a330_registers[] = { 0x1d0, 0x1d0, 0x1d4, 0x1d4, 0x453, 0x453, }; const unsigned int a330_registers_count = ARRAY_SIZE(a330_registers) / 2; /* Simple macro to facilitate bit setting in the gmem2sys and sys2gmem * functions. 
*/ #define _SET(_shift, _val) ((_val) << (_shift)) /* **************************************************************************** * * Context state shadow structure: * * +---------------------+------------+-------------+---------------------+---+ * | ALU Constant Shadow | Reg Shadow | C&V Buffers | Shader Instr Shadow |Tex| * +---------------------+------------+-------------+---------------------+---+ * * 8K - ALU Constant Shadow (8K aligned) * 4K - H/W Register Shadow (8K aligned) * 5K - Command and Vertex Buffers * 8K - Shader Instruction Shadow * ~6K - Texture Constant Shadow * * *************************************************************************** */ /* Sizes of all sections in state shadow memory */ #define ALU_SHADOW_SIZE (8*1024) /* 8KB */ #define REG_SHADOW_SIZE (4*1024) /* 4KB */ #define CMD_BUFFER_SIZE (5*1024) /* 5KB */ #define TEX_SIZE_MEM_OBJECTS 896 /* bytes */ #define TEX_SIZE_MIPMAP 1936 /* bytes */ #define TEX_SIZE_SAMPLER_OBJ 256 /* bytes */ #define TEX_SHADOW_SIZE \ ((TEX_SIZE_MEM_OBJECTS + TEX_SIZE_MIPMAP + \ TEX_SIZE_SAMPLER_OBJ)*2) /* ~6KB */ #define SHADER_SHADOW_SIZE (8*1024) /* 8KB */ /* Total context size, excluding GMEM shadow */ #define CONTEXT_SIZE \ (ALU_SHADOW_SIZE+REG_SHADOW_SIZE + \ CMD_BUFFER_SIZE+SHADER_SHADOW_SIZE + \ TEX_SHADOW_SIZE) /* Offsets to different sections in context shadow memory */ #define REG_OFFSET ALU_SHADOW_SIZE #define CMD_OFFSET (REG_OFFSET+REG_SHADOW_SIZE) #define SHADER_OFFSET (CMD_OFFSET+CMD_BUFFER_SIZE) #define TEX_OFFSET (SHADER_OFFSET+SHADER_SHADOW_SIZE) #define VS_TEX_OFFSET_MEM_OBJECTS TEX_OFFSET #define VS_TEX_OFFSET_MIPMAP (VS_TEX_OFFSET_MEM_OBJECTS+TEX_SIZE_MEM_OBJECTS) #define VS_TEX_OFFSET_SAMPLER_OBJ (VS_TEX_OFFSET_MIPMAP+TEX_SIZE_MIPMAP) #define FS_TEX_OFFSET_MEM_OBJECTS \ (VS_TEX_OFFSET_SAMPLER_OBJ+TEX_SIZE_SAMPLER_OBJ) #define FS_TEX_OFFSET_MIPMAP (FS_TEX_OFFSET_MEM_OBJECTS+TEX_SIZE_MEM_OBJECTS) #define FS_TEX_OFFSET_SAMPLER_OBJ (FS_TEX_OFFSET_MIPMAP+TEX_SIZE_MIPMAP) /* The offset for 
fragment shader data in HLSQ context */ #define SSIZE (16*1024) #define HLSQ_SAMPLER_OFFSET 0x000 #define HLSQ_MEMOBJ_OFFSET 0x400 #define HLSQ_MIPMAP_OFFSET 0x800 /* Use shadow RAM */ #define HLSQ_SHADOW_BASE (0x10000+SSIZE*2) #define REG_TO_MEM_LOOP_COUNT_SHIFT 18 #define BUILD_PC_DRAW_INITIATOR(prim_type, source_select, index_size, \ vis_cull_mode) \ (((prim_type) << PC_DRAW_INITIATOR_PRIM_TYPE) | \ ((source_select) << PC_DRAW_INITIATOR_SOURCE_SELECT) | \ ((index_size & 1) << PC_DRAW_INITIATOR_INDEX_SIZE) | \ ((index_size >> 1) << PC_DRAW_INITIATOR_SMALL_INDEX) | \ ((vis_cull_mode) << PC_DRAW_INITIATOR_VISIBILITY_CULLING_MODE) | \ (1 << PC_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE)) /* * List of context registers (starting from dword offset 0x2000). * Each line contains start and end of a range of registers. */ static const unsigned int context_register_ranges[] = { A3XX_GRAS_CL_CLIP_CNTL, A3XX_GRAS_CL_CLIP_CNTL, A3XX_GRAS_CL_GB_CLIP_ADJ, A3XX_GRAS_CL_GB_CLIP_ADJ, A3XX_GRAS_CL_VPORT_XOFFSET, A3XX_GRAS_CL_VPORT_ZSCALE, A3XX_GRAS_SU_POINT_MINMAX, A3XX_GRAS_SU_POINT_SIZE, A3XX_GRAS_SU_POLY_OFFSET_SCALE, A3XX_GRAS_SU_POLY_OFFSET_OFFSET, A3XX_GRAS_SU_MODE_CONTROL, A3XX_GRAS_SU_MODE_CONTROL, A3XX_GRAS_SC_CONTROL, A3XX_GRAS_SC_CONTROL, A3XX_GRAS_SC_SCREEN_SCISSOR_TL, A3XX_GRAS_SC_SCREEN_SCISSOR_BR, A3XX_GRAS_SC_WINDOW_SCISSOR_TL, A3XX_GRAS_SC_WINDOW_SCISSOR_BR, A3XX_RB_MODE_CONTROL, A3XX_RB_MRT_BLEND_CONTROL3, A3XX_RB_BLEND_RED, A3XX_RB_COPY_DEST_INFO, A3XX_RB_DEPTH_CONTROL, A3XX_RB_DEPTH_CONTROL, A3XX_PC_VSTREAM_CONTROL, A3XX_PC_VSTREAM_CONTROL, A3XX_PC_VERTEX_REUSE_BLOCK_CNTL, A3XX_PC_VERTEX_REUSE_BLOCK_CNTL, A3XX_PC_PRIM_VTX_CNTL, A3XX_PC_RESTART_INDEX, A3XX_HLSQ_CONTROL_0_REG, A3XX_HLSQ_CONST_FSPRESV_RANGE_REG, A3XX_HLSQ_CL_NDRANGE_0_REG, A3XX_HLSQ_CL_NDRANGE_0_REG, A3XX_HLSQ_CL_NDRANGE_2_REG, A3XX_HLSQ_CL_CONTROL_1_REG, A3XX_HLSQ_CL_KERNEL_CONST_REG, A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG, A3XX_HLSQ_CL_WG_OFFSET_REG, A3XX_HLSQ_CL_WG_OFFSET_REG, A3XX_VFD_CONTROL_0, 
A3XX_VFD_VS_THREADING_THRESHOLD, A3XX_SP_SP_CTRL_REG, A3XX_SP_SP_CTRL_REG, A3XX_SP_VS_CTRL_REG0, A3XX_SP_VS_OUT_REG_7, A3XX_SP_VS_VPC_DST_REG_0, A3XX_SP_VS_PVT_MEM_SIZE_REG, A3XX_SP_VS_LENGTH_REG, A3XX_SP_FS_PVT_MEM_SIZE_REG, A3XX_SP_FS_FLAT_SHAD_MODE_REG_0, A3XX_SP_FS_FLAT_SHAD_MODE_REG_1, A3XX_SP_FS_OUTPUT_REG, A3XX_SP_FS_OUTPUT_REG, A3XX_SP_FS_MRT_REG_0, A3XX_SP_FS_IMAGE_OUTPUT_REG_3, A3XX_SP_FS_LENGTH_REG, A3XX_SP_FS_LENGTH_REG, A3XX_TPL1_TP_VS_TEX_OFFSET, A3XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR, A3XX_VPC_ATTR, A3XX_VPC_VARY_CYLWRAP_ENABLE_1, }; /* Global registers that need to be saved separately */ static const unsigned int global_registers[] = { A3XX_GRAS_CL_USER_PLANE_X0, A3XX_GRAS_CL_USER_PLANE_Y0, A3XX_GRAS_CL_USER_PLANE_Z0, A3XX_GRAS_CL_USER_PLANE_W0, A3XX_GRAS_CL_USER_PLANE_X1, A3XX_GRAS_CL_USER_PLANE_Y1, A3XX_GRAS_CL_USER_PLANE_Z1, A3XX_GRAS_CL_USER_PLANE_W1, A3XX_GRAS_CL_USER_PLANE_X2, A3XX_GRAS_CL_USER_PLANE_Y2, A3XX_GRAS_CL_USER_PLANE_Z2, A3XX_GRAS_CL_USER_PLANE_W2, A3XX_GRAS_CL_USER_PLANE_X3, A3XX_GRAS_CL_USER_PLANE_Y3, A3XX_GRAS_CL_USER_PLANE_Z3, A3XX_GRAS_CL_USER_PLANE_W3, A3XX_GRAS_CL_USER_PLANE_X4, A3XX_GRAS_CL_USER_PLANE_Y4, A3XX_GRAS_CL_USER_PLANE_Z4, A3XX_GRAS_CL_USER_PLANE_W4, A3XX_GRAS_CL_USER_PLANE_X5, A3XX_GRAS_CL_USER_PLANE_Y5, A3XX_GRAS_CL_USER_PLANE_Z5, A3XX_GRAS_CL_USER_PLANE_W5, A3XX_VSC_BIN_SIZE, A3XX_VSC_PIPE_CONFIG_0, A3XX_VSC_PIPE_CONFIG_1, A3XX_VSC_PIPE_CONFIG_2, A3XX_VSC_PIPE_CONFIG_3, A3XX_VSC_PIPE_CONFIG_4, A3XX_VSC_PIPE_CONFIG_5, A3XX_VSC_PIPE_CONFIG_6, A3XX_VSC_PIPE_CONFIG_7, A3XX_VSC_PIPE_DATA_ADDRESS_0, A3XX_VSC_PIPE_DATA_ADDRESS_1, A3XX_VSC_PIPE_DATA_ADDRESS_2, A3XX_VSC_PIPE_DATA_ADDRESS_3, A3XX_VSC_PIPE_DATA_ADDRESS_4, A3XX_VSC_PIPE_DATA_ADDRESS_5, A3XX_VSC_PIPE_DATA_ADDRESS_6, A3XX_VSC_PIPE_DATA_ADDRESS_7, A3XX_VSC_PIPE_DATA_LENGTH_0, A3XX_VSC_PIPE_DATA_LENGTH_1, A3XX_VSC_PIPE_DATA_LENGTH_2, A3XX_VSC_PIPE_DATA_LENGTH_3, A3XX_VSC_PIPE_DATA_LENGTH_4, A3XX_VSC_PIPE_DATA_LENGTH_5, A3XX_VSC_PIPE_DATA_LENGTH_6, 
A3XX_VSC_PIPE_DATA_LENGTH_7, A3XX_VSC_SIZE_ADDRESS }; #define GLOBAL_REGISTER_COUNT ARRAY_SIZE(global_registers) /* A scratchpad used to build commands during context create */ static struct tmp_ctx { unsigned int *cmd; /* Next available dword in C&V buffer */ /* Addresses in comamnd buffer where registers are saved */ uint32_t reg_values[GLOBAL_REGISTER_COUNT]; uint32_t gmem_base; /* Base GPU address of GMEM */ } tmp_ctx; #ifndef GSL_CONTEXT_SWITCH_CPU_SYNC /* * Function for executing dest = ( (reg & and) ROL rol ) | or */ static unsigned int *rmw_regtomem(unsigned int *cmd, unsigned int reg, unsigned int and, unsigned int rol, unsigned int or, unsigned int dest) { /* CP_SCRATCH_REG2 = (CP_SCRATCH_REG2 & 0x00000000) | reg */ *cmd++ = cp_type3_packet(CP_REG_RMW, 3); *cmd++ = (1 << 30) | A3XX_CP_SCRATCH_REG2; *cmd++ = 0x00000000; /* AND value */ *cmd++ = reg; /* OR address */ /* CP_SCRATCH_REG2 = ( (CP_SCRATCH_REG2 & and) ROL rol ) | or */ *cmd++ = cp_type3_packet(CP_REG_RMW, 3); *cmd++ = (rol << 24) | A3XX_CP_SCRATCH_REG2; *cmd++ = and; /* AND value */ *cmd++ = or; /* OR value */ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2); *cmd++ = A3XX_CP_SCRATCH_REG2; *cmd++ = dest; return cmd; } #endif static void build_regconstantsave_cmds(struct adreno_device *adreno_dev, struct adreno_context *drawctxt) { unsigned int *cmd = tmp_ctx.cmd; unsigned int *start; unsigned int i; drawctxt->constant_save_commands[0].hostptr = cmd; drawctxt->constant_save_commands[0].gpuaddr = virt2gpu(cmd, &drawctxt->gpustate); cmd++; start = cmd; *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1); *cmd++ = 0; #ifndef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES /* * Context registers are already shadowed; just need to * disable shadowing to prevent corruption. 
*/ *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3); *cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000; *cmd++ = 4 << 16; /* regs, start=0 */ *cmd++ = 0x0; /* count = 0 */ #else /* * Make sure the HW context has the correct register values before * reading them. */ /* Write context registers into shadow */ for (i = 0; i < ARRAY_SIZE(context_register_ranges) / 2; i++) { unsigned int start = context_register_ranges[i * 2]; unsigned int end = context_register_ranges[i * 2 + 1]; *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2); *cmd++ = ((end - start + 1) << REG_TO_MEM_LOOP_COUNT_SHIFT) | start; *cmd++ = ((drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000) + (start - 0x2000) * 4; } #endif /* Need to handle some of the global registers separately */ for (i = 0; i < ARRAY_SIZE(global_registers); i++) { *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2); *cmd++ = global_registers[i]; *cmd++ = tmp_ctx.reg_values[i]; } /* Save vertex shader constants */ *cmd++ = cp_type3_packet(CP_COND_EXEC, 4); *cmd++ = drawctxt->cond_execs[2].gpuaddr >> 2; *cmd++ = drawctxt->cond_execs[2].gpuaddr >> 2; *cmd++ = 0x0000FFFF; *cmd++ = 3; /* EXEC_COUNT */ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2); drawctxt->constant_save_commands[1].hostptr = cmd; drawctxt->constant_save_commands[1].gpuaddr = virt2gpu(cmd, &drawctxt->gpustate); /* From fixup: dwords = SP_VS_CTRL_REG1.VSCONSTLENGTH / 4 src = (HLSQ_SHADOW_BASE + 0x2000) / 4 From register spec: SP_VS_CTRL_REG1.VSCONSTLENGTH [09:00]: 0-512, unit = 128bits. 
*/ *cmd++ = 0; /* (dwords << REG_TO_MEM_LOOP_COUNT_SHIFT) | src */ /* ALU constant shadow base */ *cmd++ = drawctxt->gpustate.gpuaddr & 0xfffffffc; /* Save fragment shader constants */ *cmd++ = cp_type3_packet(CP_COND_EXEC, 4); *cmd++ = drawctxt->cond_execs[3].gpuaddr >> 2; *cmd++ = drawctxt->cond_execs[3].gpuaddr >> 2; *cmd++ = 0x0000FFFF; *cmd++ = 3; /* EXEC_COUNT */ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2); drawctxt->constant_save_commands[2].hostptr = cmd; drawctxt->constant_save_commands[2].gpuaddr = virt2gpu(cmd, &drawctxt->gpustate); /* From fixup: dwords = SP_FS_CTRL_REG1.FSCONSTLENGTH / 4 src = (HLSQ_SHADOW_BASE + 0x2000 + SSIZE) / 4 From register spec: SP_FS_CTRL_REG1.FSCONSTLENGTH [09:00]: 0-512, unit = 128bits. */ *cmd++ = 0; /* (dwords << REG_TO_MEM_LOOP_COUNT_SHIFT) | src */ /* From fixup: base = drawctxt->gpustate.gpuaddr (ALU constant shadow base) offset = SP_FS_OBJ_OFFSET_REG.CONSTOBJECTSTARTOFFSET From register spec: SP_FS_OBJ_OFFSET_REG.CONSTOBJECTSTARTOFFSET [16:24]: Constant object start offset in on chip RAM, 128bit aligned dst = base + offset Because of the base alignment we can use dst = base | offset */ *cmd++ = 0; /* dst */ /* Save VS texture memory objects */ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2); *cmd++ = ((TEX_SIZE_MEM_OBJECTS / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) | ((HLSQ_SHADOW_BASE + HLSQ_MEMOBJ_OFFSET) / 4); *cmd++ = (drawctxt->gpustate.gpuaddr + VS_TEX_OFFSET_MEM_OBJECTS) & 0xfffffffc; /* Save VS texture mipmap pointers */ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2); *cmd++ = ((TEX_SIZE_MIPMAP / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) | ((HLSQ_SHADOW_BASE + HLSQ_MIPMAP_OFFSET) / 4); *cmd++ = (drawctxt->gpustate.gpuaddr + VS_TEX_OFFSET_MIPMAP) & 0xfffffffc; /* Save VS texture sampler objects */ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2); *cmd++ = ((TEX_SIZE_SAMPLER_OBJ / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) | ((HLSQ_SHADOW_BASE + HLSQ_SAMPLER_OFFSET) / 4); *cmd++ = (drawctxt->gpustate.gpuaddr + VS_TEX_OFFSET_SAMPLER_OBJ) & 0xfffffffc; 
/* Save FS texture memory objects */ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2); *cmd++ = ((TEX_SIZE_MEM_OBJECTS / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) | ((HLSQ_SHADOW_BASE + HLSQ_MEMOBJ_OFFSET + SSIZE) / 4); *cmd++ = (drawctxt->gpustate.gpuaddr + FS_TEX_OFFSET_MEM_OBJECTS) & 0xfffffffc; /* Save FS texture mipmap pointers */ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2); *cmd++ = ((TEX_SIZE_MIPMAP / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) | ((HLSQ_SHADOW_BASE + HLSQ_MIPMAP_OFFSET + SSIZE) / 4); *cmd++ = (drawctxt->gpustate.gpuaddr + FS_TEX_OFFSET_MIPMAP) & 0xfffffffc; /* Save FS texture sampler objects */ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2); *cmd++ = ((TEX_SIZE_SAMPLER_OBJ / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) | ((HLSQ_SHADOW_BASE + HLSQ_SAMPLER_OFFSET + SSIZE) / 4); *cmd++ = (drawctxt->gpustate.gpuaddr + FS_TEX_OFFSET_SAMPLER_OBJ) & 0xfffffffc; /* Create indirect buffer command for above command sequence */ create_ib1(drawctxt, drawctxt->regconstant_save, start, cmd); tmp_ctx.cmd = cmd; } unsigned int adreno_a3xx_rbbm_clock_ctl_default(struct adreno_device *adreno_dev) { if (adreno_is_a305(adreno_dev)) return A305_RBBM_CLOCK_CTL_DEFAULT; else if (adreno_is_a320(adreno_dev)) return A320_RBBM_CLOCK_CTL_DEFAULT; else if (adreno_is_a330v2(adreno_dev)) return A330v2_RBBM_CLOCK_CTL_DEFAULT; else if (adreno_is_a330(adreno_dev)) return A330_RBBM_CLOCK_CTL_DEFAULT; BUG_ON(1); } /* Copy GMEM contents to system memory shadow. 
 */
/*
 * build_gmem2sys_cmds() - build the IB that resolves GMEM contents into
 * the system-memory shadow described by 'shadow'.  Programs a minimal
 * resolve-pass pipeline (trivial VS/FS, depth/stencil off, color pipe
 * disabled) and issues two triangle draws over the copy quad; the dummy
 * HLSQ_CL_CONTROL_0_REG writes between the draws are a HW workaround
 * (see the comment at the draws).  Returns the next free command dword.
 */
static unsigned int *build_gmem2sys_cmds(struct adreno_device *adreno_dev,
					 struct adreno_context *drawctxt,
					 struct gmem_shadow_t *shadow)
{
	unsigned int *cmds = tmp_ctx.cmd;
	unsigned int *start = cmds;

	*cmds++ = cp_type0_packet(A3XX_RBBM_CLOCK_CTL, 1);
	*cmds++ = adreno_a3xx_rbbm_clock_ctl_default(adreno_dev);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
	*cmds++ = CP_REG(A3XX_RB_MODE_CONTROL);
	/* RB_MODE_CONTROL */
	*cmds++ = _SET(RB_MODECONTROL_RENDER_MODE, RB_RESOLVE_PASS) |
		_SET(RB_MODECONTROL_MARB_CACHE_SPLIT_MODE, 1) |
		_SET(RB_MODECONTROL_PACKER_TIMER_ENABLE, 1);
	/* RB_RENDER_CONTROL */
	*cmds++ = _SET(RB_RENDERCONTROL_BIN_WIDTH, shadow->width >> 5) |
		_SET(RB_RENDERCONTROL_DISABLE_COLOR_PIPE, 1);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
	*cmds++ = CP_REG(A3XX_RB_COPY_CONTROL);
	/* RB_COPY_CONTROL */
	*cmds++ = _SET(RB_COPYCONTROL_RESOLVE_CLEAR_MODE,
		RB_CLEAR_MODE_RESOLVE) |
		_SET(RB_COPYCONTROL_COPY_GMEM_BASE,
		tmp_ctx.gmem_base >> 14);
	/* RB_COPY_DEST_BASE */
	*cmds++ = _SET(RB_COPYDESTBASE_COPY_DEST_BASE,
		shadow->gmemshadow.gpuaddr >> 5);
	/* RB_COPY_DEST_PITCH */
	*cmds++ = _SET(RB_COPYDESTPITCH_COPY_DEST_PITCH,
		(shadow->pitch * 4) / 32);
	/* RB_COPY_DEST_INFO */
	*cmds++ = _SET(RB_COPYDESTINFO_COPY_DEST_TILE,
		RB_TILINGMODE_LINEAR) |
		_SET(RB_COPYDESTINFO_COPY_DEST_FORMAT, RB_R8G8B8A8_UNORM) |
		_SET(RB_COPYDESTINFO_COPY_COMPONENT_ENABLE, 0X0F) |
		_SET(RB_COPYDESTINFO_COPY_DEST_ENDIAN, RB_ENDIAN_NONE);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_GRAS_SC_CONTROL);
	/* GRAS_SC_CONTROL */
	*cmds++ = _SET(GRAS_SC_CONTROL_RENDER_MODE, 2);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
	*cmds++ = CP_REG(A3XX_VFD_CONTROL_0);
	/* VFD_CONTROL_0 */
	*cmds++ = _SET(VFD_CTRLREG0_TOTALATTRTOVS, 4) |
		_SET(VFD_CTRLREG0_PACKETSIZE, 2) |
		_SET(VFD_CTRLREG0_STRMDECINSTRCNT, 1) |
		_SET(VFD_CTRLREG0_STRMFETCHINSTRCNT, 1);
	/* VFD_CONTROL_1 */
	*cmds++ = _SET(VFD_CTRLREG1_MAXSTORAGE, 1) |
		_SET(VFD_CTRLREG1_REGID4VTX, 252) |
		_SET(VFD_CTRLREG1_REGID4INST, 252);

	/* Fetch the resolve quad vertices */
	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
	*cmds++ = CP_REG(A3XX_VFD_FETCH_INSTR_0_0);
	/* VFD_FETCH_INSTR_0_0 */
	*cmds++ = _SET(VFD_FETCHINSTRUCTIONS_FETCHSIZE, 11) |
		_SET(VFD_FETCHINSTRUCTIONS_BUFSTRIDE, 12) |
		_SET(VFD_FETCHINSTRUCTIONS_STEPRATE, 1);
	/* VFD_FETCH_INSTR_1_0 */
	*cmds++ = _SET(VFD_BASEADDR_BASEADDR,
		shadow->quad_vertices.gpuaddr);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_VFD_DECODE_INSTR_0);
	/* VFD_DECODE_INSTR_0 */
	*cmds++ = _SET(VFD_DECODEINSTRUCTIONS_WRITEMASK, 0x0F) |
		_SET(VFD_DECODEINSTRUCTIONS_CONSTFILL, 1) |
		_SET(VFD_DECODEINSTRUCTIONS_FORMAT, 2) |
		_SET(VFD_DECODEINSTRUCTIONS_SHIFTCNT, 12) |
		_SET(VFD_DECODEINSTRUCTIONS_LASTCOMPVALID, 1);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
	*cmds++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG);
	/* HLSQ_CONTROL_0_REG */
	*cmds++ = _SET(HLSQ_CTRL0REG_FSTHREADSIZE, HLSQ_FOUR_PIX_QUADS) |
		_SET(HLSQ_CTRL0REG_FSSUPERTHREADENABLE, 1) |
		_SET(HLSQ_CTRL0REG_RESERVED2, 1) |
		_SET(HLSQ_CTRL0REG_SPCONSTFULLUPDATE, 1);
	/* HLSQ_CONTROL_1_REG */
	*cmds++ = _SET(HLSQ_CTRL1REG_VSTHREADSIZE, HLSQ_TWO_VTX_QUADS) |
		_SET(HLSQ_CTRL1REG_VSSUPERTHREADENABLE, 1);
	/* HLSQ_CONTROL_2_REG */
	*cmds++ = _SET(HLSQ_CTRL2REG_PRIMALLOCTHRESHOLD, 31);
	/* HLSQ_CONTROL_3_REG */
	*cmds++ = 0x00000000;

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
	*cmds++ = CP_REG(A3XX_HLSQ_VS_CONTROL_REG);
	/* HLSQ_VS_CONTROL_REG */
	*cmds++ = _SET(HLSQ_VSCTRLREG_VSINSTRLENGTH, 1);
	/* HLSQ_FS_CONTROL_REG */
	*cmds++ = _SET(HLSQ_FSCTRLREG_FSCONSTLENGTH, 1) |
		_SET(HLSQ_FSCTRLREG_FSCONSTSTARTOFFSET, 128) |
		_SET(HLSQ_FSCTRLREG_FSINSTRLENGTH, 1);
	/* HLSQ_CONST_VSPRESV_RANGE_REG */
	*cmds++ = 0x00000000;
	/* HLSQ_CONST_FSPRESV_RANGE_REQ */
	*cmds++ = _SET(HLSQ_CONSTFSPRESERVEDRANGEREG_STARTENTRY, 32) |
		_SET(HLSQ_CONSTFSPRESERVEDRANGEREG_ENDENTRY, 32);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_SP_FS_LENGTH_REG);
	/* SP_FS_LENGTH_REG */
	*cmds++ = _SET(SP_SHADERLENGTH_LEN, 1);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_SP_SP_CTRL_REG);
	/* SP_SP_CTRL_REG */
	*cmds++ = _SET(SP_SPCTRLREG_SLEEPMODE, 1) |
		_SET(SP_SPCTRLREG_LOMODE, 1);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 12);
	*cmds++ = CP_REG(A3XX_SP_VS_CTRL_REG0);
	/* SP_VS_CTRL_REG0 */
	*cmds++ = _SET(SP_VSCTRLREG0_VSTHREADMODE, SP_MULTI) |
		_SET(SP_VSCTRLREG0_VSINSTRBUFFERMODE, SP_BUFFER_MODE) |
		_SET(SP_VSCTRLREG0_VSICACHEINVALID, 1) |
		_SET(SP_VSCTRLREG0_VSFULLREGFOOTPRINT, 1) |
		_SET(SP_VSCTRLREG0_VSTHREADSIZE, SP_TWO_VTX_QUADS) |
		_SET(SP_VSCTRLREG0_VSSUPERTHREADMODE, 1) |
		_SET(SP_VSCTRLREG0_VSLENGTH, 1);
	/* SP_VS_CTRL_REG1 */
	*cmds++ = _SET(SP_VSCTRLREG1_VSINITIALOUTSTANDING, 4);
	/* SP_VS_PARAM_REG */
	*cmds++ = _SET(SP_VSPARAMREG_PSIZEREGID, 252);
	/* SP_VS_OUT_REG_0 */
	*cmds++ = 0x00000000;
	/* SP_VS_OUT_REG_1 */
	*cmds++ = 0x00000000;
	/* SP_VS_OUT_REG_2 */
	*cmds++ = 0x00000000;
	/* SP_VS_OUT_REG_3 */
	*cmds++ = 0x00000000;
	/* SP_VS_OUT_REG_4 */
	*cmds++ = 0x00000000;
	/* SP_VS_OUT_REG_5 */
	*cmds++ = 0x00000000;
	/* SP_VS_OUT_REG_6 */
	*cmds++ = 0x00000000;
	/* SP_VS_OUT_REG_7 */
	*cmds++ = 0x00000000;

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 7);
	*cmds++ = CP_REG(A3XX_SP_VS_VPC_DST_REG_0);
	/* SP_VS_VPC_DST_REG_0 */
	*cmds++ = 0x00000000;
	/* SP_VS_VPC_DST_REG_1 */
	*cmds++ = 0x00000000;
	/* SP_VS_VPC_DST_REG_2 */
	*cmds++ = 0x00000000;
	/* SP_VS_VPC_DST_REG_3 */
	*cmds++ = 0x00000000;
	/* SP_VS_OBJ_OFFSET_REG */
	*cmds++ = 0x00000000;
	/* SP_VS_OBJ_START_REG */
	*cmds++ = 0x00000000;

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 6);
	*cmds++ = CP_REG(A3XX_SP_VS_LENGTH_REG);
	/* SP_VS_LENGTH_REG */
	*cmds++ = _SET(SP_SHADERLENGTH_LEN, 1);
	/* SP_FS_CTRL_REG0 */
	*cmds++ = _SET(SP_FSCTRLREG0_FSTHREADMODE, SP_MULTI) |
		_SET(SP_FSCTRLREG0_FSINSTRBUFFERMODE, SP_BUFFER_MODE) |
		_SET(SP_FSCTRLREG0_FSICACHEINVALID, 1) |
		_SET(SP_FSCTRLREG0_FSHALFREGFOOTPRINT, 1) |
		_SET(SP_FSCTRLREG0_FSINOUTREGOVERLAP, 1) |
		_SET(SP_FSCTRLREG0_FSTHREADSIZE, SP_FOUR_PIX_QUADS) |
		_SET(SP_FSCTRLREG0_FSSUPERTHREADMODE, 1) |
		_SET(SP_FSCTRLREG0_FSLENGTH, 1);
	/* SP_FS_CTRL_REG1 */
	*cmds++ = _SET(SP_FSCTRLREG1_FSCONSTLENGTH, 1) |
		_SET(SP_FSCTRLREG1_HALFPRECVAROFFSET, 63);
	/* SP_FS_OBJ_OFFSET_REG */
	*cmds++ = _SET(SP_OBJOFFSETREG_CONSTOBJECTSTARTOFFSET, 128) |
		_SET(SP_OBJOFFSETREG_SHADEROBJOFFSETINIC, 127);
	/* SP_FS_OBJ_START_REG */
	*cmds++ = 0x00000000;

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
	*cmds++ = CP_REG(A3XX_SP_FS_FLAT_SHAD_MODE_REG_0);
	/* SP_FS_FLAT_SHAD_MODE_REG_0 */
	*cmds++ = 0x00000000;
	/* SP_FS_FLAT_SHAD_MODE_REG_1 */
	*cmds++ = 0x00000000;

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_SP_FS_OUTPUT_REG);
	/* SP_FS_OUTPUT_REG */
	*cmds++ = _SET(SP_IMAGEOUTPUTREG_DEPTHOUTMODE, SP_PIXEL_BASED);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
	*cmds++ = CP_REG(A3XX_SP_FS_MRT_REG_0);
	/* SP_FS_MRT_REG_0 */
	*cmds++ = _SET(SP_FSMRTREG_PRECISION, 1);
	/* SP_FS_MRT_REG_1 */
	*cmds++ = 0x00000000;
	/* SP_FS_MRT_REG_2 */
	*cmds++ = 0x00000000;
	/* SP_FS_MRT_REG_3 */
	*cmds++ = 0x00000000;

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 11);
	*cmds++ = CP_REG(A3XX_VPC_ATTR);
	/* VPC_ATTR */
	*cmds++ = _SET(VPC_VPCATTR_THRHDASSIGN, 1) |
		_SET(VPC_VPCATTR_LMSIZE, 1);
	/* VPC_PACK */
	*cmds++ = 0x00000000;
	/* VPC_VARRYING_INTERUPT_MODE_0 */
	*cmds++ = 0x00000000;
	/* VPC_VARRYING_INTERUPT_MODE_1 */
	*cmds++ = 0x00000000;
	/* VPC_VARRYING_INTERUPT_MODE_2 */
	*cmds++ = 0x00000000;
	/* VPC_VARRYING_INTERUPT_MODE_3 */
	*cmds++ = 0x00000000;
	/* VPC_VARYING_PS_REPL_MODE_0 */
	*cmds++ = 0x00000000;
	/* VPC_VARYING_PS_REPL_MODE_1 */
	*cmds++ = 0x00000000;
	/* VPC_VARYING_PS_REPL_MODE_2 */
	*cmds++ = 0x00000000;
	/* VPC_VARYING_PS_REPL_MODE_3 */
	*cmds++ = 0x00000000;

	/* Load the trivial resolve-pass vertex shader program */
	*cmds++ = cp_type3_packet(CP_LOAD_STATE, 10);
	*cmds++ = (0 << CP_LOADSTATE_DSTOFFSET_SHIFT)
		| (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
		| (HLSQ_BLOCK_ID_SP_VS << CP_LOADSTATE_STATEBLOCKID_SHIFT)
		| (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
	*cmds++ = (HLSQ_SP_VS_INSTR << CP_LOADSTATE_STATETYPE_SHIFT)
		| (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
	/* (sy)(rpt3)mov.f32f32 r0.y, (r)r1.y; */
	*cmds++ = 0x00000000; *cmds++ = 0x13001000;
	/* end; */
	*cmds++ = 0x00000000; *cmds++ = 0x00000000;
	/* nop; */
	*cmds++ = 0x00000000; *cmds++ = 0x00000000;
	/* nop; */
	*cmds++ = 0x00000000; *cmds++ = 0x00000000;

	*cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
	*cmds++ = 0x00000000;

	*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmds++ = 0x00000000;

	/* Load the trivial resolve-pass fragment shader program */
	*cmds++ = cp_type3_packet(CP_LOAD_STATE, 10);
	*cmds++ = (0 << CP_LOADSTATE_DSTOFFSET_SHIFT)
		| (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
		| (HLSQ_BLOCK_ID_SP_FS << CP_LOADSTATE_STATEBLOCKID_SHIFT)
		| (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
	*cmds++ = (HLSQ_SP_FS_INSTR << CP_LOADSTATE_STATETYPE_SHIFT)
		| (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
	/* (sy)(rpt3)mov.f32f32 r0.y, (r)c0.x; */
	*cmds++ = 0x00000000; *cmds++ = 0x30201b00;
	/* end; */
	*cmds++ = 0x00000000; *cmds++ = 0x03000000;
	/* nop; */
	*cmds++ = 0x00000000; *cmds++ = 0x00000000;
	/* nop; */
	*cmds++ = 0x00000000; *cmds++ = 0x00000000;

	*cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
	*cmds++ = 0x00000000;

	*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmds++ = 0x00000000;

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_RB_MSAA_CONTROL);
	/* RB_MSAA_CONTROL */
	*cmds++ = _SET(RB_MSAACONTROL_MSAA_DISABLE, 1) |
		_SET(RB_MSAACONTROL_SAMPLE_MASK, 0xFFFF);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_RB_DEPTH_CONTROL);
	/* RB_DEPTH_CONTROL */
	*cmds++ = _SET(RB_DEPTHCONTROL_Z_TEST_FUNC, RB_FRAG_NEVER);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_RB_STENCIL_CONTROL);
	/* RB_STENCIL_CONTROL */
	*cmds++ = _SET(RB_STENCILCONTROL_STENCIL_FUNC, RB_REF_NEVER) |
		_SET(RB_STENCILCONTROL_STENCIL_FAIL, RB_STENCIL_KEEP) |
		_SET(RB_STENCILCONTROL_STENCIL_ZPASS, RB_STENCIL_KEEP) |
		_SET(RB_STENCILCONTROL_STENCIL_ZFAIL, RB_STENCIL_KEEP) |
		_SET(RB_STENCILCONTROL_STENCIL_FUNC_BF, RB_REF_NEVER) |
		_SET(RB_STENCILCONTROL_STENCIL_FAIL_BF, RB_STENCIL_KEEP) |
		_SET(RB_STENCILCONTROL_STENCIL_ZPASS_BF, RB_STENCIL_KEEP) |
		_SET(RB_STENCILCONTROL_STENCIL_ZFAIL_BF, RB_STENCIL_KEEP);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_GRAS_SU_MODE_CONTROL);
	/* GRAS_SU_MODE_CONTROL */
	*cmds++ = 0x00000000;

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_RB_MRT_CONTROL0);
	/* RB_MRT_CONTROL0 */
	*cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
		_SET(RB_MRTCONTROL_ROP_CODE, 12) |
		_SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_ALWAYS) |
		_SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
	*cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL0);
	/* RB_MRT_BLEND_CONTROL0 */
	*cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
		_SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
		_SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
		_SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
		_SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
		_SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
	/* RB_MRT_CONTROL1 */
	*cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
		_SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_DISABLE) |
		_SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
	*cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL1);
	/* RB_MRT_BLEND_CONTROL1 */
	*cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
		_SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
		_SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
		_SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
		_SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
		_SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
	/* RB_MRT_CONTROL2 */
	*cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
		_SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_DISABLE) |
		_SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
	*cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL2);
	/* RB_MRT_BLEND_CONTROL2 */
	*cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
		_SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
		_SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
		_SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
		_SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
		_SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
	/* RB_MRT_CONTROL3 */
	*cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
		_SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_DISABLE) |
		_SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL3);
	/* RB_MRT_BLEND_CONTROL3 */
	*cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
		_SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
		_SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
		_SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
		_SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
		_SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
	*cmds++ = CP_REG(A3XX_VFD_INDEX_MIN);
	/* VFD_INDEX_MIN */
	*cmds++ = 0x00000000;
	/* VFD_INDEX_MAX */
	*cmds++ = 0x155;
	/* VFD_INSTANCEID_OFFSET */
	*cmds++ = 0x00000000;
	/* VFD_INDEX_OFFSET */
	*cmds++ = 0x00000000;

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_VFD_VS_THREADING_THRESHOLD);
	/* VFD_VS_THREADING_THRESHOLD */
	*cmds++ = _SET(VFD_THREADINGTHRESHOLD_REGID_THRESHOLD, 15) |
		_SET(VFD_THREADINGTHRESHOLD_REGID_VTXCNT, 252);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_TPL1_TP_VS_TEX_OFFSET);
	/* TPL1_TP_VS_TEX_OFFSET */
	*cmds++ = 0;

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_TPL1_TP_FS_TEX_OFFSET);
	/* TPL1_TP_FS_TEX_OFFSET */
	*cmds++ = _SET(TPL1_TPTEXOFFSETREG_SAMPLEROFFSET, 16) |
		_SET(TPL1_TPTEXOFFSETREG_MEMOBJOFFSET, 16) |
		_SET(TPL1_TPTEXOFFSETREG_BASETABLEPTR, 224);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_PC_PRIM_VTX_CNTL);
	/* PC_PRIM_VTX_CNTL */
	*cmds++ = _SET(PC_PRIM_VTX_CONTROL_POLYMODE_FRONT_PTYPE,
		PC_DRAW_TRIANGLES) |
		_SET(PC_PRIM_VTX_CONTROL_POLYMODE_BACK_PTYPE,
		PC_DRAW_TRIANGLES) |
		_SET(PC_PRIM_VTX_CONTROL_PROVOKING_VTX_LAST, 1);

	/* Scissor the full shadow surface */
	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
	*cmds++ = CP_REG(A3XX_GRAS_SC_WINDOW_SCISSOR_TL);
	/* GRAS_SC_WINDOW_SCISSOR_TL */
	*cmds++ = 0x00000000;
	/* GRAS_SC_WINDOW_SCISSOR_BR */
	*cmds++ = _SET(GRAS_SC_WINDOW_SCISSOR_BR_BR_X, shadow->width - 1) |
		_SET(GRAS_SC_WINDOW_SCISSOR_BR_BR_Y, shadow->height - 1);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
	*cmds++ = CP_REG(A3XX_GRAS_SC_SCREEN_SCISSOR_TL);
	/* GRAS_SC_SCREEN_SCISSOR_TL */
	*cmds++ = 0x00000000;
	/* GRAS_SC_SCREEN_SCISSOR_BR */
	*cmds++ = _SET(GRAS_SC_SCREEN_SCISSOR_BR_BR_X, shadow->width - 1) |
		_SET(GRAS_SC_SCREEN_SCISSOR_BR_BR_Y, shadow->height - 1);

	/* Identity viewport (scale 1.0f, offset 0) */
	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
	*cmds++ = CP_REG(A3XX_GRAS_CL_VPORT_XOFFSET);
	/* GRAS_CL_VPORT_XOFFSET */
	*cmds++ = 0x00000000;
	/* GRAS_CL_VPORT_XSCALE */
	*cmds++ = _SET(GRAS_CL_VPORT_XSCALE_VPORT_XSCALE, 0x3f800000);
	/* GRAS_CL_VPORT_YOFFSET */
	*cmds++ = 0x00000000;
	/* GRAS_CL_VPORT_YSCALE */
	*cmds++ = _SET(GRAS_CL_VPORT_YSCALE_VPORT_YSCALE, 0x3f800000);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
	*cmds++ = CP_REG(A3XX_GRAS_CL_VPORT_ZOFFSET);
	/* GRAS_CL_VPORT_ZOFFSET */
	*cmds++ = 0x00000000;
	/* GRAS_CL_VPORT_ZSCALE */
	*cmds++ = _SET(GRAS_CL_VPORT_ZSCALE_VPORT_ZSCALE, 0x3f800000);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_GRAS_CL_CLIP_CNTL);
	/* GRAS_CL_CLIP_CNTL */
	*cmds++ = _SET(GRAS_CL_CLIP_CNTL_CLIP_DISABLE, 1) |
		_SET(GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE, 1) |
		_SET(GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE, 1) |
		_SET(GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE, 1) |
		_SET(GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE, 1);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_GRAS_CL_GB_CLIP_ADJ);
	/* GRAS_CL_GB_CLIP_ADJ */
	*cmds++ = 0x00000000;

	*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmds++ = 0x00000000;

	/* oxili_generate_context_roll_packets */
	*cmds++ = cp_type0_packet(A3XX_SP_VS_CTRL_REG0, 1);
	*cmds++ = 0x00000400;

	*cmds++ = cp_type0_packet(A3XX_SP_FS_CTRL_REG0, 1);
	*cmds++ = 0x00000400;

	*cmds++ = cp_type0_packet(A3XX_SP_VS_PVT_MEM_SIZE_REG, 1);
	*cmds++ = 0x00008000;	/* SP_VS_MEM_SIZE_REG */

	*cmds++ = cp_type0_packet(A3XX_SP_FS_PVT_MEM_SIZE_REG, 1);
	*cmds++ = 0x00008000;	/* SP_FS_MEM_SIZE_REG */

	/* Clear cache invalidate bit when re-loading the shader control regs */
	*cmds++ = cp_type0_packet(A3XX_SP_VS_CTRL_REG0, 1);
	*cmds++ = _SET(SP_VSCTRLREG0_VSTHREADMODE, SP_MULTI) |
		_SET(SP_VSCTRLREG0_VSINSTRBUFFERMODE, SP_BUFFER_MODE) |
		_SET(SP_VSCTRLREG0_VSFULLREGFOOTPRINT, 1) |
		_SET(SP_VSCTRLREG0_VSTHREADSIZE, SP_TWO_VTX_QUADS) |
		_SET(SP_VSCTRLREG0_VSSUPERTHREADMODE, 1) |
		_SET(SP_VSCTRLREG0_VSLENGTH, 1);

	*cmds++ = cp_type0_packet(A3XX_SP_FS_CTRL_REG0, 1);
	*cmds++ = _SET(SP_FSCTRLREG0_FSTHREADMODE, SP_MULTI) |
		_SET(SP_FSCTRLREG0_FSINSTRBUFFERMODE, SP_BUFFER_MODE) |
		_SET(SP_FSCTRLREG0_FSHALFREGFOOTPRINT, 1) |
		_SET(SP_FSCTRLREG0_FSINOUTREGOVERLAP, 1) |
		_SET(SP_FSCTRLREG0_FSTHREADSIZE, SP_FOUR_PIX_QUADS) |
		_SET(SP_FSCTRLREG0_FSSUPERTHREADMODE, 1) |
		_SET(SP_FSCTRLREG0_FSLENGTH, 1);

	*cmds++ = cp_type0_packet(A3XX_SP_VS_PVT_MEM_SIZE_REG, 1);
	*cmds++ = 0x00000000;	/* SP_VS_MEM_SIZE_REG */

	*cmds++ = cp_type0_packet(A3XX_SP_FS_PVT_MEM_SIZE_REG, 1);
	*cmds++ = 0x00000000;	/* SP_FS_MEM_SIZE_REG */

	/* end oxili_generate_context_roll_packets */

	/*
	 * Resolve using two draw calls with a dummy register
	 * write in between. This is a HLM workaround
	 * that should be removed later.
	 */
	*cmds++ = cp_type3_packet(CP_DRAW_INDX_2, 6);
	*cmds++ = 0x00000000;	/* Viz query info */
	*cmds++ = BUILD_PC_DRAW_INITIATOR(PC_DI_PT_TRILIST,
					  PC_DI_SRC_SEL_IMMEDIATE,
					  PC_DI_INDEX_SIZE_32_BIT,
					  PC_DI_IGNORE_VISIBILITY);
	*cmds++ = 0x00000003;	/* Num indices */
	*cmds++ = 0x00000000;	/* Index 0 */
	*cmds++ = 0x00000001;	/* Index 1 */
	*cmds++ = 0x00000002;	/* Index 2 */

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_HLSQ_CL_CONTROL_0_REG);
	*cmds++ = 0x00000000;

	*cmds++ = cp_type3_packet(CP_DRAW_INDX_2, 6);
	*cmds++ = 0x00000000;	/* Viz query info */
	*cmds++ = BUILD_PC_DRAW_INITIATOR(PC_DI_PT_TRILIST,
					  PC_DI_SRC_SEL_IMMEDIATE,
					  PC_DI_INDEX_SIZE_32_BIT,
					  PC_DI_IGNORE_VISIBILITY);
	*cmds++ = 0x00000003;	/* Num indices */
	*cmds++ = 0x00000002;	/* Index 0 */
	*cmds++ = 0x00000001;	/* Index 1 */
	*cmds++ = 0x00000003;	/* Index 2 */

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_HLSQ_CL_CONTROL_0_REG);
	*cmds++ = 0x00000000;

	*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmds++ = 0x00000000;

	/* Create indirect buffer command for above command sequence */
	create_ib1(drawctxt, shadow->gmem_save, start, cmds);

	return cmds;
}

/*
 * build_shader_save_cmds() - build the IB that saves VS/FS shader
 * instruction memory into the shadow.  The CP_REG_TO_MEM loop-count/
 * source dwords (shader_save_commands[2]/[3]) are emitted as 0 and
 * patched at save time by the fixup IB; each save runs under
 * CP_COND_EXEC gated by cond_execs[0]/[1].
 */
static void build_shader_save_cmds(struct adreno_device *adreno_dev,
				   struct adreno_context *drawctxt)
{
	unsigned int *cmd = tmp_ctx.cmd;
	unsigned int *start;

	/* Reserve space for boolean values used for COND_EXEC packet */
	drawctxt->cond_execs[0].hostptr = cmd;
	drawctxt->cond_execs[0].gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
	*cmd++ = 0;
	drawctxt->cond_execs[1].hostptr = cmd;
	drawctxt->cond_execs[1].gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
	*cmd++ = 0;

	drawctxt->shader_save_commands[0].hostptr = cmd;
	drawctxt->shader_save_commands[0].gpuaddr =
		virt2gpu(cmd, &drawctxt->gpustate);
	*cmd++ = 0;
	drawctxt->shader_save_commands[1].hostptr = cmd;
	drawctxt->shader_save_commands[1].gpuaddr =
		virt2gpu(cmd, &drawctxt->gpustate);
	*cmd++ = 0;

	start = cmd;

	/* Save vertex shader */
	*cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
	*cmd++ = drawctxt->cond_execs[0].gpuaddr >> 2;
	*cmd++ = drawctxt->cond_execs[0].gpuaddr >> 2;
	*cmd++ = 0x0000FFFF;
	*cmd++ = 3;		/* EXEC_COUNT */

	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
	drawctxt->shader_save_commands[2].hostptr = cmd;
	drawctxt->shader_save_commands[2].gpuaddr =
		virt2gpu(cmd, &drawctxt->gpustate);
	/*
	   From fixup:

	   dwords = SP_VS_CTRL_REG0.VS_LENGTH * 8

	   From regspec:
	   SP_VS_CTRL_REG0.VS_LENGTH [31:24]: VS length, unit = 256bits.
	   If bit31 is 1, it means overflow
	   or any long shader.

	   src = (HLSQ_SHADOW_BASE + 0x1000)/4
	 */
	*cmd++ = 0;	/*(dwords << REG_TO_MEM_LOOP_COUNT_SHIFT) | src */
	*cmd++ = (drawctxt->gpustate.gpuaddr + SHADER_OFFSET) & 0xfffffffc;

	/* Save fragment shader */
	*cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
	*cmd++ = drawctxt->cond_execs[1].gpuaddr >> 2;
	*cmd++ = drawctxt->cond_execs[1].gpuaddr >> 2;
	*cmd++ = 0x0000FFFF;
	*cmd++ = 3;		/* EXEC_COUNT */

	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
	drawctxt->shader_save_commands[3].hostptr = cmd;
	drawctxt->shader_save_commands[3].gpuaddr =
		virt2gpu(cmd, &drawctxt->gpustate);
	/*
	   From fixup:

	   dwords = SP_FS_CTRL_REG0.FS_LENGTH * 8

	   From regspec:
	   SP_FS_CTRL_REG0.FS_LENGTH [31:24]: FS length, unit = 256bits.
	   If bit31 is 1, it means overflow
	   or any long shader.

	   fs_offset = SP_FS_OBJ_OFFSET_REG.SHADEROBJOFFSETINIC * 32

	   From regspec:
	   SP_FS_OBJ_OFFSET_REG.SHADEROBJOFFSETINIC [31:25]:
	   First instruction of the whole shader will be stored from
	   the offset in instruction cache, unit = 256bits, a cache line.
	   It can start from 0 if no VS available.

	   src = (HLSQ_SHADOW_BASE + 0x1000 + SSIZE + fs_offset)/4
	 */
	*cmd++ = 0;	/*(dwords << REG_TO_MEM_LOOP_COUNT_SHIFT) | src */
	*cmd++ = (drawctxt->gpustate.gpuaddr + SHADER_OFFSET
		  + (SHADER_SHADOW_SIZE / 2)) & 0xfffffffc;

	/* Create indirect buffer command for above command sequence */
	create_ib1(drawctxt, drawctxt->shader_save, start, cmd);

	tmp_ctx.cmd = cmd;
}

/*
 * Make an IB to modify context save IBs with the correct shader instruction
 * and constant sizes and offsets.
 */
static void build_save_fixup_cmds(struct adreno_device *adreno_dev,
				  struct adreno_context *drawctxt)
{
	unsigned int *cmd = tmp_ctx.cmd;
	unsigned int *start = cmd;

	/* Flush HLSQ lazy updates */
	*cmd++ = cp_type3_packet(CP_EVENT_WRITE, 1);
	*cmd++ = 0x7;		/* HLSQ_FLUSH */
	*cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmd++ = 0;

	/* Invalidate the entire UCHE (no address range given) */
	*cmd++ = cp_type0_packet(A3XX_UCHE_CACHE_INVALIDATE0_REG, 2);
	*cmd++ = 0x00000000;	/* No start addr for full invalidate */
	*cmd++ = (unsigned int)
		UCHE_ENTIRE_CACHE << UCHE_INVALIDATE1REG_ALLORPORTION |
		UCHE_OP_INVALIDATE << UCHE_INVALIDATE1REG_OPCODE |
		0;		/* No end addr for full invalidate */

	/* Make sure registers are flushed */
	*cmd++ = cp_type3_packet(CP_CONTEXT_UPDATE, 1);
	*cmd++ = 0;

#ifdef GSL_CONTEXT_SWITCH_CPU_SYNC
	/*
	 * CPU-sync variant: dump the raw registers to memory; the host
	 * computes the patched save-command dwords.
	 */

	/* Save shader sizes */
	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
	*cmd++ = A3XX_SP_VS_CTRL_REG0;
	*cmd++ = drawctxt->shader_save_commands[2].gpuaddr;

	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
	*cmd++ = A3XX_SP_FS_CTRL_REG0;
	*cmd++ = drawctxt->shader_save_commands[3].gpuaddr;

	/* Save shader offsets */
	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
	*cmd++ = A3XX_SP_FS_OBJ_OFFSET_REG;
	*cmd++ = drawctxt->shader_save_commands[1].gpuaddr;

	/* Save constant sizes */
	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
	*cmd++ = A3XX_SP_VS_CTRL_REG1;
	*cmd++ = drawctxt->constant_save_commands[1].gpuaddr;
	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
	*cmd++ = A3XX_SP_FS_CTRL_REG1;
	*cmd++ = drawctxt->constant_save_commands[2].gpuaddr;

	/* Save FS constant offset */
	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
	*cmd++ = A3XX_SP_FS_OBJ_OFFSET_REG;
	*cmd++ = drawctxt->constant_save_commands[0].gpuaddr;

	/* Save VS instruction store mode */
	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
	*cmd++ = A3XX_SP_VS_CTRL_REG0;
	*cmd++ = drawctxt->cond_execs[0].gpuaddr;

	/* Save FS instruction store mode */
	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
	*cmd++ = A3XX_SP_FS_CTRL_REG0;
	*cmd++ = drawctxt->cond_execs[1].gpuaddr;
#else
	/*
	 * GPU-sync variant: patch the save-command dwords in place using
	 * CP register read-modify-writes (see rmw_regtomem()).
	 */

	/* Shader save */
	cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG0, 0x7f000000,
			   11+REG_TO_MEM_LOOP_COUNT_SHIFT,
			   (HLSQ_SHADOW_BASE + 0x1000) / 4,
			   drawctxt->shader_save_commands[2].gpuaddr);

	/*
	   CP_SCRATCH_REG2 = (CP_SCRATCH_REG2 & 0x00000000) | SP_FS_CTRL_REG0
	 */
	*cmd++ = cp_type3_packet(CP_REG_RMW, 3);
	*cmd++ = (1 << 30) | A3XX_CP_SCRATCH_REG2;
	*cmd++ = 0x00000000;	/* AND value */
	*cmd++ = A3XX_SP_FS_CTRL_REG0;	/* OR address */
	/*
	   CP_SCRATCH_REG2 = ( (CP_SCRATCH_REG2 & 0x7f000000) >> 21 )
	   | ((HLSQ_SHADOW_BASE+0x1000+SSIZE)/4)
	 */
	*cmd++ = cp_type3_packet(CP_REG_RMW, 3);
	*cmd++ = ((11 + REG_TO_MEM_LOOP_COUNT_SHIFT) << 24) |
		A3XX_CP_SCRATCH_REG2;
	*cmd++ = 0x7f000000;	/* AND value */
	*cmd++ = (HLSQ_SHADOW_BASE + 0x1000 + SSIZE) / 4;	/* OR value */

	/*
	 * CP_SCRATCH_REG3 = (CP_SCRATCH_REG3 & 0x00000000) |
	 * SP_FS_OBJ_OFFSET_REG
	 */
	*cmd++ = cp_type3_packet(CP_REG_RMW, 3);
	*cmd++ = (1 << 30) | A3XX_CP_SCRATCH_REG3;
	*cmd++ = 0x00000000;	/* AND value */
	*cmd++ = A3XX_SP_FS_OBJ_OFFSET_REG;	/* OR address */
	/*
	 * CP_SCRATCH_REG3 = ( (CP_SCRATCH_REG3 & 0xfe000000) >> 25 ) |
	 * 0x00000000
	 */
	*cmd++ = cp_type3_packet(CP_REG_RMW, 3);
	*cmd++ = A3XX_CP_SCRATCH_REG3;
	*cmd++ = 0xfe000000;	/* AND value */
	*cmd++ = 0x00000000;	/* OR value */
	/*
	 * CP_SCRATCH_REG2 = (CP_SCRATCH_REG2 & 0xffffffff) | CP_SCRATCH_REG3
	 */
	*cmd++ = cp_type3_packet(CP_REG_RMW, 3);
	*cmd++ = (1 << 30) | A3XX_CP_SCRATCH_REG2;
	*cmd++ = 0xffffffff;	/* AND value */
	*cmd++ = A3XX_CP_SCRATCH_REG3;	/* OR address */

	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
	*cmd++ = A3XX_CP_SCRATCH_REG2;
	*cmd++ = drawctxt->shader_save_commands[3].gpuaddr;

	/* Constant save */
	cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG1, 0x000003ff,
			   2 + REG_TO_MEM_LOOP_COUNT_SHIFT,
			   (HLSQ_SHADOW_BASE + 0x2000) / 4,
			   drawctxt->constant_save_commands[1].gpuaddr);

	cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG1, 0x000003ff,
			   2 + REG_TO_MEM_LOOP_COUNT_SHIFT,
			   (HLSQ_SHADOW_BASE + 0x2000 + SSIZE) / 4,
			   drawctxt->constant_save_commands[2].gpuaddr);

	cmd = rmw_regtomem(cmd, A3XX_SP_FS_OBJ_OFFSET_REG, 0x00ff0000, 18,
drawctxt->gpustate.gpuaddr & 0xfffffe00,
			   drawctxt->constant_save_commands[2].gpuaddr
			   + sizeof(unsigned int));

	/* Modify constant save conditionals */
	cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG1, 0x000003ff,
			   0, 0, drawctxt->cond_execs[2].gpuaddr);

	cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG1, 0x000003ff,
			   0, 0, drawctxt->cond_execs[3].gpuaddr);

	/* Save VS instruction store mode */
	cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG0, 0x00000002,
			   31, 0, drawctxt->cond_execs[0].gpuaddr);

	/* Save FS instruction store mode */
	cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG0, 0x00000002,
			   31, 0, drawctxt->cond_execs[1].gpuaddr);
#endif

	create_ib1(drawctxt, drawctxt->save_fixup, start, cmd);

	tmp_ctx.cmd = cmd;
}

/****************************************************************************/
/* Functions to build context restore IBs */
/****************************************************************************/

/*
 * Build the sys2gmem ("GMEM restore") command stream for a context: program
 * a complete HLSQ/SP/RB/VFD/GRAS pipeline with small built-in blit shaders
 * that sample the context's system-memory GMEM shadow (shadow->gmemshadow)
 * as a texture and render it back into GMEM at tmp_ctx.gmem_base.
 * Commands are emitted into tmp_ctx.cmd; returns the advanced write pointer.
 */
static unsigned int *build_sys2gmem_cmds(struct adreno_device *adreno_dev,
					 struct adreno_context *drawctxt,
					 struct gmem_shadow_t *shadow)
{
	unsigned int *cmds = tmp_ctx.cmd;
	unsigned int *start = cmds;

	*cmds++ = cp_type0_packet(A3XX_RBBM_CLOCK_CTL, 1);
	*cmds++ = adreno_a3xx_rbbm_clock_ctl_default(adreno_dev);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
	*cmds++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG);
	/* HLSQ_CONTROL_0_REG */
	*cmds++ = _SET(HLSQ_CTRL0REG_FSTHREADSIZE, HLSQ_FOUR_PIX_QUADS) |
		_SET(HLSQ_CTRL0REG_FSSUPERTHREADENABLE, 1) |
		_SET(HLSQ_CTRL0REG_SPSHADERRESTART, 1) |
		_SET(HLSQ_CTRL0REG_CHUNKDISABLE, 1) |
		_SET(HLSQ_CTRL0REG_SPCONSTFULLUPDATE, 1);
	/* HLSQ_CONTROL_1_REG */
	*cmds++ = _SET(HLSQ_CTRL1REG_VSTHREADSIZE, HLSQ_TWO_VTX_QUADS) |
		_SET(HLSQ_CTRL1REG_VSSUPERTHREADENABLE, 1);
	/* HLSQ_CONTROL_2_REG */
	*cmds++ = _SET(HLSQ_CTRL2REG_PRIMALLOCTHRESHOLD, 31);
	/* HLSQ_CONTROL3_REG */
	*cmds++ = 0x00000000;

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
	*cmds++ = CP_REG(A3XX_RB_MRT_BUF_INFO0);
	/* RB_MRT_BUF_INFO0 */
	*cmds++ = _SET(RB_MRTBUFINFO_COLOR_FORMAT, RB_R8G8B8A8_UNORM) |
		_SET(RB_MRTBUFINFO_COLOR_TILE_MODE, RB_TILINGMODE_32X32) |
		_SET(RB_MRTBUFINFO_COLOR_BUF_PITCH,
		     (shadow->gmem_pitch * 4 * 8) / 256);
	/* RB_MRT_BUF_BASE0 */
	*cmds++ = _SET(RB_MRTBUFBASE_COLOR_BUF_BASE, tmp_ctx.gmem_base >> 5);

	/* Texture samplers */
	*cmds++ = cp_type3_packet(CP_LOAD_STATE, 4);
	*cmds++ = (16 << CP_LOADSTATE_DSTOFFSET_SHIFT)
		| (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
		| (HLSQ_BLOCK_ID_TP_TEX << CP_LOADSTATE_STATEBLOCKID_SHIFT)
		| (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
	*cmds++ = (HLSQ_TP_TEX_SAMPLERS << CP_LOADSTATE_STATETYPE_SHIFT)
		| (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
	*cmds++ = 0x00000240;
	*cmds++ = 0x00000000;

	*cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
	*cmds++ = 0x00000000;

	/* Texture memobjs: describe the GMEM shadow surface to the TP */
	*cmds++ = cp_type3_packet(CP_LOAD_STATE, 6);
	*cmds++ = (16 << CP_LOADSTATE_DSTOFFSET_SHIFT)
		| (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
		| (HLSQ_BLOCK_ID_TP_TEX << CP_LOADSTATE_STATEBLOCKID_SHIFT)
		| (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
	*cmds++ = (HLSQ_TP_TEX_MEMOBJ << CP_LOADSTATE_STATETYPE_SHIFT)
		| (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
	*cmds++ = 0x4cc06880;
	*cmds++ = shadow->height | (shadow->width << 14);
	*cmds++ = (shadow->pitch*4*8) << 9;
	*cmds++ = 0x00000000;

	*cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
	*cmds++ = 0x00000000;

	/* Mipmap bases: only level 0 points at the shadow buffer */
	*cmds++ = cp_type3_packet(CP_LOAD_STATE, 16);
	*cmds++ = (224 << CP_LOADSTATE_DSTOFFSET_SHIFT)
		| (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
		| (HLSQ_BLOCK_ID_TP_MIPMAP << CP_LOADSTATE_STATEBLOCKID_SHIFT)
		| (14 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
	*cmds++ = (HLSQ_TP_MIPMAP_BASE << CP_LOADSTATE_STATETYPE_SHIFT)
		| (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
	*cmds++ = shadow->gmemshadow.gpuaddr;
	*cmds++ = 0x00000000;
	*cmds++ = 0x00000000;
	*cmds++ = 0x00000000;
	*cmds++ = 0x00000000;
	*cmds++ = 0x00000000;
	*cmds++ = 0x00000000;
	*cmds++ = 0x00000000;
	*cmds++ = 0x00000000;
	*cmds++ = 0x00000000;
	*cmds++ = 0x00000000;
	*cmds++ = 0x00000000;
	*cmds++ = 0x00000000;
	*cmds++ = 0x00000000;

	*cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
	*cmds++ = 0x00000000;

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
	*cmds++ = CP_REG(A3XX_HLSQ_VS_CONTROL_REG);
	/* HLSQ_VS_CONTROL_REG */
	*cmds++ = _SET(HLSQ_VSCTRLREG_VSINSTRLENGTH, 1);
	/* HLSQ_FS_CONTROL_REG */
	*cmds++ = _SET(HLSQ_FSCTRLREG_FSCONSTLENGTH, 1) |
		_SET(HLSQ_FSCTRLREG_FSCONSTSTARTOFFSET, 128) |
		_SET(HLSQ_FSCTRLREG_FSINSTRLENGTH, 2);
	/* HLSQ_CONST_VSPRESV_RANGE_REG */
	*cmds++ = 0x00000000;
	/* HLSQ_CONST_FSPRESV_RANGE_REG */
	*cmds++ = 0x00000000;

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_SP_FS_LENGTH_REG);
	/* SP_FS_LENGTH_REG */
	*cmds++ = _SET(SP_SHADERLENGTH_LEN, 2);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 12);
	*cmds++ = CP_REG(A3XX_SP_VS_CTRL_REG0);
	/* SP_VS_CTRL_REG0 */
	*cmds++ = _SET(SP_VSCTRLREG0_VSTHREADMODE, SP_MULTI) |
		_SET(SP_VSCTRLREG0_VSINSTRBUFFERMODE, SP_BUFFER_MODE) |
		_SET(SP_VSCTRLREG0_VSICACHEINVALID, 1) |
		_SET(SP_VSCTRLREG0_VSFULLREGFOOTPRINT, 2) |
		_SET(SP_VSCTRLREG0_VSTHREADSIZE, SP_TWO_VTX_QUADS) |
		_SET(SP_VSCTRLREG0_VSLENGTH, 1);
	/* SP_VS_CTRL_REG1 */
	*cmds++ = _SET(SP_VSCTRLREG1_VSINITIALOUTSTANDING, 8);
	/* SP_VS_PARAM_REG */
	*cmds++ = _SET(SP_VSPARAMREG_POSREGID, 4) |
		_SET(SP_VSPARAMREG_PSIZEREGID, 252) |
		_SET(SP_VSPARAMREG_TOTALVSOUTVAR, 1);
	/* SP_VS_OUT_REG0 */
	*cmds++ = _SET(SP_VSOUTREG_COMPMASK0, 3);
	/* SP_VS_OUT_REG1 */
	*cmds++ = 0x00000000;
	/* SP_VS_OUT_REG2 */
	*cmds++ = 0x00000000;
	/* SP_VS_OUT_REG3 */
	*cmds++ = 0x00000000;
	/* SP_VS_OUT_REG4 */
	*cmds++ = 0x00000000;
	/* SP_VS_OUT_REG5 */
	*cmds++ = 0x00000000;
	/* SP_VS_OUT_REG6 */
	*cmds++ = 0x00000000;
	/* SP_VS_OUT_REG7 */
	*cmds++ = 0x00000000;

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 7);
	*cmds++ = CP_REG(A3XX_SP_VS_VPC_DST_REG_0);
	/* SP_VS_VPC_DST_REG0 */
	*cmds++ = _SET(SP_VSVPCDSTREG_OUTLOC0, 8);
	/* SP_VS_VPC_DST_REG1 */
	*cmds++ = 0x00000000;
	/* SP_VS_VPC_DST_REG2 */
	*cmds++ = 0x00000000;
	/* SP_VS_VPC_DST_REG3 */
	*cmds++ = 0x00000000;
	/* SP_VS_OBJ_OFFSET_REG */
	*cmds++ = 0x00000000;
	/* SP_VS_OBJ_START_REG */
	*cmds++ = 0x00000000;

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 6);
	*cmds++ = CP_REG(A3XX_SP_VS_LENGTH_REG);
	/* SP_VS_LENGTH_REG */
	*cmds++ = _SET(SP_SHADERLENGTH_LEN, 1);
	/* SP_FS_CTRL_REG0 */
	*cmds++ = _SET(SP_FSCTRLREG0_FSTHREADMODE, SP_MULTI) |
		_SET(SP_FSCTRLREG0_FSINSTRBUFFERMODE, SP_BUFFER_MODE) |
		_SET(SP_FSCTRLREG0_FSICACHEINVALID, 1) |
		_SET(SP_FSCTRLREG0_FSHALFREGFOOTPRINT, 1) |
		_SET(SP_FSCTRLREG0_FSFULLREGFOOTPRINT, 1) |
		_SET(SP_FSCTRLREG0_FSINOUTREGOVERLAP, 1) |
		_SET(SP_FSCTRLREG0_FSTHREADSIZE, SP_FOUR_PIX_QUADS) |
		_SET(SP_FSCTRLREG0_FSSUPERTHREADMODE, 1) |
		_SET(SP_FSCTRLREG0_PIXLODENABLE, 1) |
		_SET(SP_FSCTRLREG0_FSLENGTH, 2);
	/* SP_FS_CTRL_REG1 */
	*cmds++ = _SET(SP_FSCTRLREG1_FSCONSTLENGTH, 1) |
		_SET(SP_FSCTRLREG1_FSINITIALOUTSTANDING, 2) |
		_SET(SP_FSCTRLREG1_HALFPRECVAROFFSET, 63);
	/* SP_FS_OBJ_OFFSET_REG */
	*cmds++ = _SET(SP_OBJOFFSETREG_CONSTOBJECTSTARTOFFSET, 128) |
		_SET(SP_OBJOFFSETREG_SHADEROBJOFFSETINIC, 126);
	/* SP_FS_OBJ_START_REG */
	*cmds++ = 0x00000000;

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
	*cmds++ = CP_REG(A3XX_SP_FS_FLAT_SHAD_MODE_REG_0);
	/* SP_FS_FLAT_SHAD_MODE_REG0 */
	*cmds++ = 0x00000000;
	/* SP_FS_FLAT_SHAD_MODE_REG1 */
	*cmds++ = 0x00000000;

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_SP_FS_OUTPUT_REG);
	/* SP_FS_OUT_REG */
	*cmds++ = _SET(SP_FSOUTREG_PAD0, SP_PIXEL_BASED);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
	*cmds++ = CP_REG(A3XX_SP_FS_MRT_REG_0);
	/* SP_FS_MRT_REG0 */
	*cmds++ = _SET(SP_FSMRTREG_PRECISION, 1);
	/* SP_FS_MRT_REG1 */
	*cmds++ = 0;
	/* SP_FS_MRT_REG2 */
	*cmds++ = 0;
	/* SP_FS_MRT_REG3 */
	*cmds++ = 0;

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 11);
	*cmds++ = CP_REG(A3XX_VPC_ATTR);
	/* VPC_ATTR */
	*cmds++ = _SET(VPC_VPCATTR_TOTALATTR, 2) |
		_SET(VPC_VPCATTR_THRHDASSIGN, 1) |
		_SET(VPC_VPCATTR_LMSIZE, 1);
	/* VPC_PACK */
	*cmds++ = _SET(VPC_VPCPACK_NUMFPNONPOSVAR, 2) |
		_SET(VPC_VPCPACK_NUMNONPOSVSVAR, 2);
	/* VPC_VARYING_INTERP_MODE_0 */
	*cmds++ = 0x00000000;
	/* VPC_VARYING_INTERP_MODE1 */
	*cmds++ = 0x00000000;
	/* VPC_VARYING_INTERP_MODE2 */
	*cmds++ = 0x00000000;
	/* VPC_VARYING_IINTERP_MODE3 */
	*cmds++ = 0x00000000;
	/* VPC_VARRYING_PS_REPL_MODE_0 */
	*cmds++ = _SET(VPC_VPCVARPSREPLMODE_COMPONENT08, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT09, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0A, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0B, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0C, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0D, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0E, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0F, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT10, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT11, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT12, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT13, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT14, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT15, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT16, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT17, 2);
	/* VPC_VARRYING_PS_REPL_MODE_1 */
	*cmds++ = _SET(VPC_VPCVARPSREPLMODE_COMPONENT08, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT09, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0A, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0B, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0C, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0D, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0E, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0F, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT10, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT11, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT12, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT13, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT14, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT15, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT16, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT17, 2);
	/* VPC_VARRYING_PS_REPL_MODE_2 */
	*cmds++ = _SET(VPC_VPCVARPSREPLMODE_COMPONENT08, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT09, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0A, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0B, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0C, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0D, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0E, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0F, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT10, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT11, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT12, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT13, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT14, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT15, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT16, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT17, 2);
	/* VPC_VARRYING_PS_REPL_MODE_3 */
	*cmds++ = _SET(VPC_VPCVARPSREPLMODE_COMPONENT08, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT09, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0A, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0B, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0C, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0D, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0E, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0F, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT10, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT11, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT12, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT13, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT14, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT15, 2) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT16, 1) |
		_SET(VPC_VPCVARPSREPLMODE_COMPONENT17, 2);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_SP_SP_CTRL_REG);
	/* SP_SP_CTRL_REG */
	*cmds++ = _SET(SP_SPCTRLREG_SLEEPMODE, 1) |
		_SET(SP_SPCTRLREG_LOMODE, 1);

	/* Load vertex shader */
	*cmds++ = cp_type3_packet(CP_LOAD_STATE, 10);
	*cmds++ = (0 << CP_LOADSTATE_DSTOFFSET_SHIFT)
		| (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
		| (HLSQ_BLOCK_ID_SP_VS << CP_LOADSTATE_STATEBLOCKID_SHIFT)
		| (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
	*cmds++ = (HLSQ_SP_VS_INSTR << CP_LOADSTATE_STATETYPE_SHIFT)
		| (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
	/* (sy)end; */
	*cmds++ = 0x00000000; *cmds++ = 0x13001000;
	/* nop; */
	*cmds++ = 0x00000000; *cmds++ = 0x00000000;
	/* nop; */
	*cmds++ = 0x00000000; *cmds++ = 0x00000000;
	/* nop; */
	*cmds++ = 0x00000000; *cmds++ = 0x00000000;

	*cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
	*cmds++ = 0x00000000;

	*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmds++ = 0x00000000;

	/* Load fragment shader */
	*cmds++ = cp_type3_packet(CP_LOAD_STATE, 18);
	*cmds++ = (0 << CP_LOADSTATE_DSTOFFSET_SHIFT)
		| (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
		| (HLSQ_BLOCK_ID_SP_FS << CP_LOADSTATE_STATEBLOCKID_SHIFT)
		| (2 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
	*cmds++ = (HLSQ_SP_FS_INSTR << CP_LOADSTATE_STATETYPE_SHIFT)
		| (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
	/* (sy)(rpt1)bary.f (ei)r0.z, (r)0, r0.x; */
	*cmds++ = 0x00002000; *cmds++ = 0x57309902;
	/* (rpt5)nop; */
	*cmds++ = 0x00000000; *cmds++ = 0x00000500;
	/* sam (f32)r0.xyzw, r0.z, s#0, t#0; */
	*cmds++ = 0x00000005; *cmds++ = 0xa0c01f00;
	/* (sy)mov.f32f32 r1.x, r0.x; */
	*cmds++ = 0x00000000; *cmds++ = 0x30040b00;
	/* mov.f32f32 r1.y, r0.y; */
	*cmds++ = 0x00000000; *cmds++ = 0x03000000;
	/* mov.f32f32 r1.z, r0.z; */
	*cmds++ = 0x00000000; *cmds++ = 0x00000000;
	/* mov.f32f32 r1.w, r0.w; */
	*cmds++ = 0x00000000; *cmds++ = 0x00000000;
	/* end; */
	*cmds++ = 0x00000000; *cmds++ = 0x00000000;

	*cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
	*cmds++ = 0x00000000;

	*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmds++ = 0x00000000;

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
	*cmds++ = CP_REG(A3XX_VFD_CONTROL_0);
	/* VFD_CONTROL_0 */
	*cmds++ = _SET(VFD_CTRLREG0_TOTALATTRTOVS, 8) |
		_SET(VFD_CTRLREG0_PACKETSIZE, 2) |
		_SET(VFD_CTRLREG0_STRMDECINSTRCNT, 2) |
		_SET(VFD_CTRLREG0_STRMFETCHINSTRCNT, 2);
	/* VFD_CONTROL_1 */
	*cmds++ = _SET(VFD_CTRLREG1_MAXSTORAGE, 2) |
		_SET(VFD_CTRLREG1_REGID4VTX, 252) |
		_SET(VFD_CTRLREG1_REGID4INST, 252);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
	*cmds++ = CP_REG(A3XX_VFD_FETCH_INSTR_0_0);
	/* VFD_FETCH_INSTR_0_0 */
	*cmds++ = _SET(VFD_FETCHINSTRUCTIONS_FETCHSIZE, 7) |
		_SET(VFD_FETCHINSTRUCTIONS_BUFSTRIDE, 8) |
		_SET(VFD_FETCHINSTRUCTIONS_SWITCHNEXT, 1) |
		_SET(VFD_FETCHINSTRUCTIONS_STEPRATE, 1);
	/* VFD_FETCH_INSTR_1_0 */
	*cmds++ = _SET(VFD_BASEADDR_BASEADDR,
		       shadow->quad_vertices_restore.gpuaddr);
	/* VFD_FETCH_INSTR_0_1 */
	*cmds++ = _SET(VFD_FETCHINSTRUCTIONS_FETCHSIZE, 11) |
		_SET(VFD_FETCHINSTRUCTIONS_BUFSTRIDE, 12) |
		_SET(VFD_FETCHINSTRUCTIONS_INDEXDECODE, 1) |
		_SET(VFD_FETCHINSTRUCTIONS_STEPRATE, 1);
	/* VFD_FETCH_INSTR_1_1 */
	*cmds++ = _SET(VFD_BASEADDR_BASEADDR,
		       shadow->quad_vertices_restore.gpuaddr + 16);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
	*cmds++ = CP_REG(A3XX_VFD_DECODE_INSTR_0);
	/* VFD_DECODE_INSTR_0 */
	*cmds++ = _SET(VFD_DECODEINSTRUCTIONS_WRITEMASK, 0x0F) |
		_SET(VFD_DECODEINSTRUCTIONS_CONSTFILL, 1) |
		_SET(VFD_DECODEINSTRUCTIONS_FORMAT, 1) |
		_SET(VFD_DECODEINSTRUCTIONS_SHIFTCNT, 8) |
		_SET(VFD_DECODEINSTRUCTIONS_LASTCOMPVALID, 1) |
		_SET(VFD_DECODEINSTRUCTIONS_SWITCHNEXT, 1);
	/* VFD_DECODE_INSTR_1 */
	*cmds++ = _SET(VFD_DECODEINSTRUCTIONS_WRITEMASK, 0x0F) |
		_SET(VFD_DECODEINSTRUCTIONS_CONSTFILL, 1) |
		_SET(VFD_DECODEINSTRUCTIONS_FORMAT, 2) |
		_SET(VFD_DECODEINSTRUCTIONS_REGID, 4) |
		_SET(VFD_DECODEINSTRUCTIONS_SHIFTCNT, 12) |
		_SET(VFD_DECODEINSTRUCTIONS_LASTCOMPVALID, 1);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_RB_DEPTH_CONTROL);
	/* RB_DEPTH_CONTROL */
	*cmds++ = _SET(RB_DEPTHCONTROL_Z_TEST_FUNC, RB_FRAG_LESS);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_RB_STENCIL_CONTROL);
	/* RB_STENCIL_CONTROL */
	*cmds++ = _SET(RB_STENCILCONTROL_STENCIL_FUNC, RB_REF_ALWAYS) |
		_SET(RB_STENCILCONTROL_STENCIL_FAIL, RB_STENCIL_KEEP) |
		_SET(RB_STENCILCONTROL_STENCIL_ZPASS, RB_STENCIL_KEEP) |
		_SET(RB_STENCILCONTROL_STENCIL_ZFAIL, RB_STENCIL_KEEP) |
		_SET(RB_STENCILCONTROL_STENCIL_FUNC_BF, RB_REF_ALWAYS) |
		_SET(RB_STENCILCONTROL_STENCIL_FAIL_BF, RB_STENCIL_KEEP) |
		_SET(RB_STENCILCONTROL_STENCIL_ZPASS_BF, RB_STENCIL_KEEP) |
		_SET(RB_STENCILCONTROL_STENCIL_ZFAIL_BF, RB_STENCIL_KEEP);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_RB_MODE_CONTROL);
	/* RB_MODE_CONTROL */
	*cmds++ = _SET(RB_MODECONTROL_RENDER_MODE, RB_RENDERING_PASS) |
		_SET(RB_MODECONTROL_MARB_CACHE_SPLIT_MODE, 1);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_RB_RENDER_CONTROL);
	/* RB_RENDER_CONTROL */
	*cmds++ = _SET(RB_RENDERCONTROL_BIN_WIDTH, shadow->width >> 5) |
		_SET(RB_RENDERCONTROL_ALPHA_TEST_FUNC, 7);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_RB_MSAA_CONTROL);
	/* RB_MSAA_CONTROL */
	*cmds++ = _SET(RB_MSAACONTROL_MSAA_DISABLE, 1) |
		_SET(RB_MSAACONTROL_SAMPLE_MASK, 0xFFFF);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_RB_MRT_CONTROL0);
	/* RB_MRT_CONTROL0 */
	*cmds++ = _SET(RB_MRTCONTROL_ROP_CODE, 12) |
		_SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_DISABLE) |
		_SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
	*cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL0);
	/* RB_MRT_BLENDCONTROL0 */
	*cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
		_SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
		_SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
		_SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
		_SET(RB_MRTBLENDCONTROL_ALPHA_BLEND_OPCODE, RB_BLEND_OP_ADD) |
		_SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
		_SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
	/* RB_MRT_CONTROL1 */
	*cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
		_SET(RB_MRTCONTROL_ROP_CODE, 12) |
		_SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_ALWAYS) |
		_SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
	*cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL1);
	/* RB_MRT_BLENDCONTROL1 */
	*cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
		_SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
		_SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
		_SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
		_SET(RB_MRTBLENDCONTROL_ALPHA_BLEND_OPCODE, RB_BLEND_OP_ADD) |
		_SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
		_SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
	/* RB_MRT_CONTROL2 */
	*cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
		_SET(RB_MRTCONTROL_ROP_CODE, 12) |
		_SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_ALWAYS) |
		_SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
	*cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL2);
	/* RB_MRT_BLENDCONTROL2 */
	*cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
		_SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
		_SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
		_SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
		_SET(RB_MRTBLENDCONTROL_ALPHA_BLEND_OPCODE, RB_BLEND_OP_ADD) |
		_SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
		_SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
	/* RB_MRT_CONTROL3 */
	*cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
		_SET(RB_MRTCONTROL_ROP_CODE, 12) |
		_SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_ALWAYS) |
		_SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL3);
	/* RB_MRT_BLENDCONTROL3 */
	*cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
		_SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
		_SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
		_SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
		_SET(RB_MRTBLENDCONTROL_ALPHA_BLEND_OPCODE, RB_BLEND_OP_ADD) |
		_SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
		_SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
	*cmds++ = CP_REG(A3XX_VFD_INDEX_MIN);
	/* VFD_INDEX_MIN */
	*cmds++ = 0x00000000;
	/* VFD_INDEX_MAX */
	*cmds++ = 340;
	/* VFD_INDEX_OFFSET */
	*cmds++ = 0x00000000;
	/* TPL1_TP_VS_TEX_OFFSET */
	*cmds++ = 0x00000000;

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_VFD_VS_THREADING_THRESHOLD);
	/* VFD_VS_THREADING_THRESHOLD */
	*cmds++ = _SET(VFD_THREADINGTHRESHOLD_REGID_THRESHOLD, 15) |
		_SET(VFD_THREADINGTHRESHOLD_REGID_VTXCNT, 252);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_TPL1_TP_VS_TEX_OFFSET);
	/* TPL1_TP_VS_TEX_OFFSET */
	*cmds++ = 0x00000000;

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_TPL1_TP_FS_TEX_OFFSET);
	/* TPL1_TP_FS_TEX_OFFSET */
	*cmds++ = _SET(TPL1_TPTEXOFFSETREG_SAMPLEROFFSET, 16) |
		_SET(TPL1_TPTEXOFFSETREG_MEMOBJOFFSET, 16) |
		_SET(TPL1_TPTEXOFFSETREG_BASETABLEPTR, 224);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_GRAS_SC_CONTROL);
	/* GRAS_SC_CONTROL */
	/*cmds++ = _SET(GRAS_SC_CONTROL_RASTER_MODE, 1);
	  *cmds++ = _SET(GRAS_SC_CONTROL_RASTER_MODE, 1) |*/
	*cmds++ = 0x04001000;

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_GRAS_SU_MODE_CONTROL);
	/* GRAS_SU_MODE_CONTROL */
	*cmds++ = _SET(GRAS_SU_CTRLMODE_LINEHALFWIDTH, 2);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
	*cmds++ = CP_REG(A3XX_GRAS_SC_WINDOW_SCISSOR_TL);
	/* GRAS_SC_WINDOW_SCISSOR_TL */
	*cmds++ = 0x00000000;
	/* GRAS_SC_WINDOW_SCISSOR_BR */
	*cmds++ = _SET(GRAS_SC_WINDOW_SCISSOR_BR_BR_X, shadow->width - 1) |
		_SET(GRAS_SC_WINDOW_SCISSOR_BR_BR_Y, shadow->height - 1);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
	*cmds++ = CP_REG(A3XX_GRAS_SC_SCREEN_SCISSOR_TL);
	/* GRAS_SC_SCREEN_SCISSOR_TL */
	*cmds++ = 0x00000000;
	/* GRAS_SC_SCREEN_SCISSOR_BR */
	*cmds++ = _SET(GRAS_SC_SCREEN_SCISSOR_BR_BR_X, shadow->width - 1) |
		_SET(GRAS_SC_SCREEN_SCISSOR_BR_BR_Y, shadow->height - 1);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
	*cmds++ = CP_REG(A3XX_GRAS_CL_VPORT_XOFFSET);
	/* GRAS_CL_VPORT_XOFFSET */
	*cmds++ = 0x00000000;
	/* GRAS_CL_VPORT_XSCALE */
	*cmds++ = _SET(GRAS_CL_VPORT_XSCALE_VPORT_XSCALE, 0x3F800000);
	/* GRAS_CL_VPORT_YOFFSET */
	*cmds++ = 0x00000000;
	/* GRAS_CL_VPORT_YSCALE */
	*cmds++ = _SET(GRAS_CL_VPORT_YSCALE_VPORT_YSCALE, 0x3F800000);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
	*cmds++ = CP_REG(A3XX_GRAS_CL_VPORT_ZOFFSET);
	/* GRAS_CL_VPORT_ZOFFSET */
	*cmds++ = 0x00000000;
	/* GRAS_CL_VPORT_ZSCALE */
	*cmds++ = _SET(GRAS_CL_VPORT_ZSCALE_VPORT_ZSCALE, 0x3F800000);

	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ =
cp_type0_packet(A3XX_SP_FS_PVT_MEM_SIZE_REG, 1);
	*cmds++ = 0x00000000;	/* SP_FS_MEM_SIZE_REG */
	/* end oxili_generate_context_roll_packets */

	/* Draw the full-screen rectlist that performs the restore blit */
	*cmds++ = cp_type3_packet(CP_DRAW_INDX, 3);
	*cmds++ = 0x00000000;	/* Viz query info */
	*cmds++ = BUILD_PC_DRAW_INITIATOR(PC_DI_PT_RECTLIST,
					  PC_DI_SRC_SEL_AUTO_INDEX,
					  PC_DI_INDEX_SIZE_16_BIT,
					  PC_DI_IGNORE_VISIBILITY);
	*cmds++ = 0x00000002;	/* Num indices */

	/* Create indirect buffer command for above command sequence */
	create_ib1(drawctxt, shadow->gmem_restore, start, cmds);

	return cmds;
}

/*
 * Build the register-restore IB: flush HLSQ, invalidate the UCHE, then
 * reload every context register range from the register shadow via a
 * CP_LOAD_CONSTANT_CONTEXT packet whose header is patched in after the
 * ranges are emitted, and finally reload the global registers from the
 * per-context reg_values shadow slots.
 */
static void build_regrestore_cmds(struct adreno_device *adreno_dev,
				  struct adreno_context *drawctxt)
{
	unsigned int *start = tmp_ctx.cmd;
	unsigned int *cmd = start;
	unsigned int *lcc_start;
	int i;

	/* Flush HLSQ lazy updates */
	*cmd++ = cp_type3_packet(CP_EVENT_WRITE, 1);
	*cmd++ = 0x7;		/* HLSQ_FLUSH */
	*cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmd++ = 0;

	*cmd++ = cp_type0_packet(A3XX_UCHE_CACHE_INVALIDATE0_REG, 2);
	*cmd++ = 0x00000000;	/* No start addr for full invalidate */
	*cmd++ = (unsigned int)
		UCHE_ENTIRE_CACHE << UCHE_INVALIDATE1REG_ALLORPORTION |
		UCHE_OP_INVALIDATE << UCHE_INVALIDATE1REG_OPCODE |
		0;		/* No end addr for full invalidate */

	lcc_start = cmd;

	/* deferred cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, ???); */
	/* The dword count is unknown until the ranges below are emitted */
	cmd++;

#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
	/* Force mismatch */
	*cmd++ = ((drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000) | 1;
#else
	*cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
#endif

	for (i = 0; i < ARRAY_SIZE(context_register_ranges) / 2; i++) {
		cmd = reg_range(cmd, context_register_ranges[i * 2],
				context_register_ranges[i * 2 + 1]);
	}

	/* Patch the deferred packet header with the now-known length */
	lcc_start[0] =
		cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT,
				(cmd - lcc_start) - 1);
#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
	lcc_start[2] |= (0 << 24) | (4 << 16);	/* Disable shadowing. */
#else
	lcc_start[2] |= (1 << 24) | (4 << 16);
#endif

	for (i = 0; i < ARRAY_SIZE(global_registers); i++) {
		*cmd++ = cp_type0_packet(global_registers[i], 1);
		tmp_ctx.reg_values[i] = virt2gpu(cmd, &drawctxt->gpustate);
		*cmd++ = 0x00000000;
	}

	create_ib1(drawctxt, drawctxt->reg_restore, start, cmd);

	tmp_ctx.cmd = cmd;
}

/*
 * Build the constant-restore IB: conditionally reload VS/FS ALU constants
 * and VS/FS texture state (samplers, memobjs, mipmap bases) from the
 * context's gpustate shadow.  The CP_LOAD_STATE headers for the constant
 * loads are left as 0 placeholders; their gpuaddrs are recorded in
 * constant_load_commands[] so the restore-fixup IB can patch the real
 * sizes/offsets at context-switch time.
 */
static void build_constantrestore_cmds(struct adreno_device *adreno_dev,
				       struct adreno_context *drawctxt)
{
	unsigned int *cmd = tmp_ctx.cmd;
	unsigned int *start = cmd;
	unsigned int mode = 4;	/* Indirect mode */
	unsigned int stateblock;
	unsigned int numunits;
	unsigned int statetype;

	/* Conditional-execution flags (patched by the fixup IB) */
	drawctxt->cond_execs[2].hostptr = cmd;
	drawctxt->cond_execs[2].gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
	*cmd++ = 0;
	drawctxt->cond_execs[3].hostptr = cmd;
	drawctxt->cond_execs[3].gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
	*cmd++ = 0;

#ifndef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
	*cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
	*cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
	*cmd++ = 4 << 16;
	*cmd++ = 0x0;
#endif
	/* HLSQ full update */
	*cmd++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmd++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG);
	*cmd++ = 0x68000240;	/* A3XX_HLSQ_CONTROL_0_REG */

#ifndef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
	/* Re-enable shadowing */
	*cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
	*cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
	*cmd++ = (4 << 16) | (1 << 24);
	*cmd++ = 0x0;
#endif

	/* Load vertex shader constants */
	*cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
	*cmd++ = drawctxt->cond_execs[2].gpuaddr >> 2;
	*cmd++ = drawctxt->cond_execs[2].gpuaddr >> 2;
	*cmd++ = 0x0000ffff;
	*cmd++ = 3;		/* EXEC_COUNT */
	*cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
	drawctxt->constant_load_commands[0].hostptr = cmd;
	drawctxt->constant_load_commands[0].gpuaddr =
		virt2gpu(cmd, &drawctxt->gpustate);

	/*
	   From fixup:

	   mode = 4 (indirect)
	   stateblock = 4 (Vertex constants)
	   numunits = SP_VS_CTRL_REG1.VSCONSTLENGTH * 2; (256bit units)

	   From register
spec:

	   SP_VS_CTRL_REG1.VSCONSTLENGTH [09:00]: 0-512, unit = 128bits.

	   ord1 = (numunits<<22) | (stateblock<<19) | (mode<<16);
	 */

	*cmd++ = 0;		/* ord1 (patched by restore fixup) */
	*cmd++ = ((drawctxt->gpustate.gpuaddr) & 0xfffffffc) | 1;

	/* Load fragment shader constants */
	*cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
	*cmd++ = drawctxt->cond_execs[3].gpuaddr >> 2;
	*cmd++ = drawctxt->cond_execs[3].gpuaddr >> 2;
	*cmd++ = 0x0000ffff;
	*cmd++ = 3;		/* EXEC_COUNT */
	*cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
	drawctxt->constant_load_commands[1].hostptr = cmd;
	drawctxt->constant_load_commands[1].gpuaddr =
		virt2gpu(cmd, &drawctxt->gpustate);

	/*
	   From fixup:

	   mode = 4 (indirect)
	   stateblock = 6 (Fragment constants)
	   numunits = SP_FS_CTRL_REG1.FSCONSTLENGTH * 2; (256bit units)

	   From register spec:
	   SP_FS_CTRL_REG1.FSCONSTLENGTH [09:00]: 0-512, unit = 128bits.

	   ord1 = (numunits<<22) | (stateblock<<19) | (mode<<16);
	 */

	*cmd++ = 0;		/* ord1 (patched by restore fixup) */

	drawctxt->constant_load_commands[2].hostptr = cmd;
	drawctxt->constant_load_commands[2].gpuaddr =
		virt2gpu(cmd, &drawctxt->gpustate);

	/*
	   From fixup:

	   base = drawctxt->gpustate.gpuaddr (ALU constant shadow base)
	   offset = SP_FS_OBJ_OFFSET_REG.CONSTOBJECTSTARTOFFSET

	   From register spec:
	   SP_FS_OBJ_OFFSET_REG.CONSTOBJECTSTARTOFFSET [16:24]: Constant
	   object start offset in on chip RAM, 128bit aligned

	   ord2 = base + offset | 1
	   Because of the base alignment we can use
	   ord2 = base | offset | 1
	 */

	*cmd++ = 0;		/* ord2 (patched by restore fixup) */

	/* Restore VS texture memory objects */
	stateblock = 0;
	statetype = 1;
	numunits = (TEX_SIZE_MEM_OBJECTS / 7) / 4;

	*cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
	*cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
	*cmd++ = ((drawctxt->gpustate.gpuaddr + VS_TEX_OFFSET_MEM_OBJECTS)
		  & 0xfffffffc) | statetype;

	/* Restore VS texture mipmap addresses */
	stateblock = 1;
	statetype = 1;
	numunits = TEX_SIZE_MIPMAP / 4;

	*cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
	*cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
	*cmd++ = ((drawctxt->gpustate.gpuaddr + VS_TEX_OFFSET_MIPMAP)
		  & 0xfffffffc) | statetype;

	/* Restore VS texture sampler objects */
	stateblock = 0;
	statetype = 0;
	numunits = (TEX_SIZE_SAMPLER_OBJ / 2) / 4;

	*cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
	*cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
	*cmd++ = ((drawctxt->gpustate.gpuaddr + VS_TEX_OFFSET_SAMPLER_OBJ)
		  & 0xfffffffc) | statetype;

	/* Restore FS texture memory objects */
	stateblock = 2;
	statetype = 1;
	numunits = (TEX_SIZE_MEM_OBJECTS / 7) / 4;

	*cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
	*cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
	*cmd++ = ((drawctxt->gpustate.gpuaddr + FS_TEX_OFFSET_MEM_OBJECTS)
		  & 0xfffffffc) | statetype;

	/* Restore FS texture mipmap addresses */
	stateblock = 3;
	statetype = 1;
	numunits = TEX_SIZE_MIPMAP / 4;

	*cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
	*cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
	*cmd++ = ((drawctxt->gpustate.gpuaddr + FS_TEX_OFFSET_MIPMAP)
		  & 0xfffffffc) | statetype;

	/* Restore FS texture sampler objects */
	stateblock = 2;
	statetype = 0;
	numunits = (TEX_SIZE_SAMPLER_OBJ / 2) / 4;

	*cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
	*cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
	*cmd++ = ((drawctxt->gpustate.gpuaddr + FS_TEX_OFFSET_SAMPLER_OBJ)
		  & 0xfffffffc) | statetype;

	create_ib1(drawctxt, drawctxt->constant_restore, start, cmd);

	tmp_ctx.cmd = cmd;
}

/*
 * Build the shader-restore IB: conditionally reload the VS and FS
 * instruction stores from the context's shader shadow.  The CP_LOAD_STATE
 * headers are left as 0 placeholders and recorded in
 * shader_load_commands[] for patching by the restore-fixup IB.
 */
static void build_shader_restore_cmds(struct adreno_device *adreno_dev,
				      struct adreno_context *drawctxt)
{
	unsigned int *cmd = tmp_ctx.cmd;
	unsigned int *start = cmd;

	/* Vertex shader */
	*cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
	*cmd++ = drawctxt->cond_execs[0].gpuaddr >> 2;
	*cmd++ = drawctxt->cond_execs[0].gpuaddr >> 2;
	*cmd++ = 1;
	*cmd++ = 3;		/* EXEC_COUNT */

	*cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
	drawctxt->shader_load_commands[0].hostptr = cmd;
	drawctxt->shader_load_commands[0].gpuaddr =
		virt2gpu(cmd, &drawctxt->gpustate);

	/*
	   From fixup:

	   mode = 4 (indirect)
	   stateblock = 4 (Vertex shader)
	   numunits =
SP_VS_CTRL_REG0.VS_LENGTH

	   From regspec:
	   SP_VS_CTRL_REG0.VS_LENGTH [31:24]: VS length, unit = 256bits.
	   If bit31 is 1, it means overflow or any long shader.

	   ord1 = (numunits<<22) | (stateblock<<19) | (mode<<11)
	 */
	*cmd++ = 0;		/*ord1 */
	*cmd++ = (drawctxt->gpustate.gpuaddr + SHADER_OFFSET) & 0xfffffffc;

	/* Fragment shader */
	*cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
	*cmd++ = drawctxt->cond_execs[1].gpuaddr >> 2;
	*cmd++ = drawctxt->cond_execs[1].gpuaddr >> 2;
	*cmd++ = 1;
	*cmd++ = 3;		/* EXEC_COUNT */

	*cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
	drawctxt->shader_load_commands[1].hostptr = cmd;
	drawctxt->shader_load_commands[1].gpuaddr =
		virt2gpu(cmd, &drawctxt->gpustate);

	/*
	   From fixup:

	   mode = 4 (indirect)
	   stateblock = 6 (Fragment shader)
	   numunits = SP_FS_CTRL_REG0.FS_LENGTH

	   From regspec:
	   SP_FS_CTRL_REG0.FS_LENGTH [31:24]: FS length, unit = 256bits.
	   If bit31 is 1, it means overflow or any long shader.

	   ord1 = (numunits<<22) | (stateblock<<19) | (mode<<11)
	 */
	*cmd++ = 0;		/*ord1 */
	*cmd++ = (drawctxt->gpustate.gpuaddr + SHADER_OFFSET
		  + (SHADER_SHADOW_SIZE / 2)) & 0xfffffffc;

	create_ib1(drawctxt, drawctxt->shader_restore, start, cmd);

	tmp_ctx.cmd = cmd;
}

/*
 * Build the HLSQ-control restore IB: a single CP_SET_CONSTANT write of
 * A3XX_HLSQ_CONTROL_0_REG whose value dword is a placeholder recorded in
 * hlsqcontrol_restore_commands[0] for later patching.
 */
static void build_hlsqcontrol_restore_cmds(struct adreno_device *adreno_dev,
					   struct adreno_context *drawctxt)
{
	unsigned int *cmd = tmp_ctx.cmd;
	unsigned int *start = cmd;

	*cmd++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmd++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG);
	drawctxt->hlsqcontrol_restore_commands[0].hostptr = cmd;
	drawctxt->hlsqcontrol_restore_commands[0].gpuaddr
		= virt2gpu(cmd, &drawctxt->gpustate);
	*cmd++ = 0;

	/* Create indirect buffer command for above command sequence */
	create_ib1(drawctxt, drawctxt->hlsqcontrol_restore, start, cmd);

	tmp_ctx.cmd = cmd;
}

/* IB that modifies the shader and constant sizes and offsets in restore IBs.
 */
static void build_restore_fixup_cmds(struct adreno_device *adreno_dev,
				     struct adreno_context *drawctxt)
{
	unsigned int *cmd = tmp_ctx.cmd;
	unsigned int *start = cmd;

#ifdef GSL_CONTEXT_SWITCH_CPU_SYNC
	/* Save shader sizes */
	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
	*cmd++ = A3XX_SP_VS_CTRL_REG0;
	*cmd++ = drawctxt->shader_load_commands[0].gpuaddr;

	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
	*cmd++ = A3XX_SP_FS_CTRL_REG0;
	*cmd++ = drawctxt->shader_load_commands[1].gpuaddr;

	/* Save constant sizes */
	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
	*cmd++ = A3XX_SP_VS_CTRL_REG1;
	*cmd++ = drawctxt->constant_load_commands[0].gpuaddr;

	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
	*cmd++ = A3XX_SP_FS_CTRL_REG1;
	*cmd++ = drawctxt->constant_load_commands[1].gpuaddr;

	/* Save constant offsets */
	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
	*cmd++ = A3XX_SP_FS_OBJ_OFFSET_REG;
	*cmd++ = drawctxt->constant_load_commands[2].gpuaddr;
#else
	/*
	 * Read-modify-write each live SP register into the placeholder
	 * dwords of the restore IBs, so the restores replay the sizes
	 * and offsets that were current at save time.
	 */

	/* Save shader sizes */
	cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG0, 0x7f000000, 30,
			   (4 << 19) | (4 << 16),
			   drawctxt->shader_load_commands[0].gpuaddr);

	cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG0, 0x7f000000, 30,
			   (6 << 19) | (4 << 16),
			   drawctxt->shader_load_commands[1].gpuaddr);

	/* Save constant sizes */
	cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG1, 0x000003ff, 23,
			   (4 << 19) | (4 << 16),
			   drawctxt->constant_load_commands[0].gpuaddr);

	cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG1, 0x000003ff, 23,
			   (6 << 19) | (4 << 16),
			   drawctxt->constant_load_commands[1].gpuaddr);

	/* Modify constant restore conditionals */
	cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG1, 0x000003ff,
			   0, 0, drawctxt->cond_execs[2].gpuaddr);

	cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG1, 0x000003ff,
			   0, 0, drawctxt->cond_execs[3].gpuaddr);

	/* Save fragment constant shadow offset */
	cmd = rmw_regtomem(cmd, A3XX_SP_FS_OBJ_OFFSET_REG, 0x00ff0000, 18,
			   (drawctxt->gpustate.gpuaddr & 0xfffffe00) | 1,
			   drawctxt->constant_load_commands[2].gpuaddr);
#endif

	/* Use mask value to avoid flushing HLSQ which
	   would cause the HW to discard all the shader data */
	cmd = rmw_regtomem(cmd, A3XX_HLSQ_CONTROL_0_REG, 0x9ffffdff,
			   0, 0,
			   drawctxt->hlsqcontrol_restore_commands[0].gpuaddr);

	create_ib1(drawctxt, drawctxt->restore_fixup, start, cmd);

	tmp_ctx.cmd = cmd;
}

/*
 * Build every save/restore IB for the software register/constant/shader
 * shadow and mark the context as shadowed.  Always returns 0; the actual
 * command space was allocated by the caller (a3xx_drawctxt_create).
 */
static int a3xx_create_gpustate_shadow(struct adreno_device *adreno_dev,
				       struct adreno_context *drawctxt)
{
	drawctxt->flags |= CTXT_FLAGS_STATE_SHADOW;

	build_regrestore_cmds(adreno_dev, drawctxt);
	build_constantrestore_cmds(adreno_dev, drawctxt);
	build_hlsqcontrol_restore_cmds(adreno_dev, drawctxt);
	build_regconstantsave_cmds(adreno_dev, drawctxt);
	build_shader_save_cmds(adreno_dev, drawctxt);
	build_shader_restore_cmds(adreno_dev, drawctxt);
	build_restore_fixup_cmds(adreno_dev, drawctxt);
	build_save_fixup_cmds(adreno_dev, drawctxt);

	return 0;
}

/* create buffers for saving/restoring registers, constants, & GMEM */
static int a3xx_create_gmem_shadow(struct adreno_device *adreno_dev,
				   struct adreno_context *drawctxt)
{
	int result;

	calc_gmemsize(&drawctxt->context_gmem_shadow, adreno_dev->gmem_size);
	tmp_ctx.gmem_base = adreno_dev->gmem_base;

	result = kgsl_allocate(&drawctxt->context_gmem_shadow.gmemshadow,
			       drawctxt->pagetable,
			       drawctxt->context_gmem_shadow.size);

	if (result)
		return result;

	build_quad_vtxbuff(drawctxt, &drawctxt->context_gmem_shadow,
			   &tmp_ctx.cmd);

	/* Build the GMEM<->system-memory copy IBs into the shadow */
	tmp_ctx.cmd = build_gmem2sys_cmds(adreno_dev, drawctxt,
					  &drawctxt->context_gmem_shadow);
	tmp_ctx.cmd = build_sys2gmem_cmds(adreno_dev, drawctxt,
					  &drawctxt->context_gmem_shadow);

	/* Flush so the GPU sees the CPU-written commands */
	kgsl_cache_range_op(&drawctxt->context_gmem_shadow.gmemshadow,
			    KGSL_CACHE_OP_FLUSH);

	drawctxt->flags |= CTXT_FLAGS_GMEM_SHADOW;

	return 0;
}

static int a3xx_drawctxt_create(struct adreno_device *adreno_dev,
				struct adreno_context *drawctxt)
{
	int ret;

	/*
	 * Allocate memory for the GPU state and the context commands.
	 * Despite the name, this is much more than just storage for
	 * the gpustate.
This contains command space for gmem save
	 * and texture and vertex buffer storage too
	 */
	ret = kgsl_allocate(&drawctxt->gpustate,
			    drawctxt->pagetable, CONTEXT_SIZE);
	if (ret)
		return ret;

	kgsl_sharedmem_set(&drawctxt->gpustate, 0, 0, CONTEXT_SIZE);
	tmp_ctx.cmd = drawctxt->gpustate.hostptr + CMD_OFFSET;

	if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE)) {
		/* No preamble: full save/restore shadow is required */
		ret = a3xx_create_gpustate_shadow(adreno_dev, drawctxt);
		if (ret)
			goto done;

		drawctxt->flags |= CTXT_FLAGS_SHADER_SAVE;
	}

	if (!(drawctxt->flags & CTXT_FLAGS_NOGMEMALLOC))
		ret = a3xx_create_gmem_shadow(adreno_dev, drawctxt);

done:
	/* On any failure release the whole gpustate allocation */
	if (ret)
		kgsl_sharedmem_free(&drawctxt->gpustate);

	return ret;
}

/*
 * Flush the outgoing context's state into its shadow buffers by queueing
 * the pre-built save IBs on the ringbuffer.  Order matters: fixups first
 * (they patch the save IBs), then registers/constants, then shaders, then
 * GMEM (which itself clobbers shader state, so shaders must be saved
 * before it runs).
 */
static void a3xx_drawctxt_save(struct adreno_device *adreno_dev,
			       struct adreno_context *context)
{
	struct kgsl_device *device = &adreno_dev->dev;

	if (context == NULL || (context->flags & CTXT_FLAGS_BEING_DESTROYED))
		return;

	if (context->flags & CTXT_FLAGS_GPU_HANG)
		KGSL_CTXT_WARN(device,
			"Current active context has caused gpu hang\n");

	if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
		/* Fixup self modifying IBs for save operations */
		adreno_ringbuffer_issuecmds(device, context,
			KGSL_CMD_FLAGS_NONE, context->save_fixup, 3);

		/* save registers and constants. */
		adreno_ringbuffer_issuecmds(device, context,
			KGSL_CMD_FLAGS_NONE, context->regconstant_save, 3);

		if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
			/* Save shader instructions */
			adreno_ringbuffer_issuecmds(device, context,
				KGSL_CMD_FLAGS_PMODE, context->shader_save, 3);

			/* Mark that shaders need restoring on switch-in */
			context->flags |= CTXT_FLAGS_SHADER_RESTORE;
		}
	}

	if ((context->flags & CTXT_FLAGS_GMEM_SAVE) &&
	    (context->flags & CTXT_FLAGS_GMEM_SHADOW)) {
		/*
		 * Save GMEM (note: changes shader. shader must
		 * already be saved.)
		 */

		kgsl_cffdump_syncmem(NULL, &context->gpustate,
			context->context_gmem_shadow.gmem_save[1],
			context->context_gmem_shadow.gmem_save[2] << 2, true);

		adreno_ringbuffer_issuecmds(device, context,
					    KGSL_CMD_FLAGS_PMODE,
					    context->context_gmem_shadow.
					    gmem_save, 3);
		context->flags |= CTXT_FLAGS_GMEM_RESTORE;
	}
}

/*
 * Switch the GPU to @context: tag the memstore with the incoming context
 * id, switch the MMU pagetable, then replay the context's shadow state.
 * GMEM is restored first because the sys2gmem IB clobbers shader state;
 * shaders and registers are replayed afterwards.
 */
static void a3xx_drawctxt_restore(struct adreno_device *adreno_dev,
				  struct adreno_context *context)
{
	struct kgsl_device *device = &adreno_dev->dev;
	unsigned int cmds[5];

	if (context == NULL) {
		/* No context - set the default pagetable and thats it */
		kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
				  adreno_dev->drawctxt_active->id);
		return;
	}

	KGSL_CTXT_INFO(device, "context flags %08x\n", context->flags);

	/* Write the incoming context id into the memstore for debug/trace */
	cmds[0] = cp_nop_packet(1);
	cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
	cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2);
	cmds[3] = device->memstore.gpuaddr +
		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
	cmds[4] = context->id;
	adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
				    cmds, 5);
	kgsl_mmu_setstate(&device->mmu, context->pagetable, context->id);

	/*
	 * Restore GMEM.  (note: changes shader.
	 * Shader must not already be restored.)
	 */
	if (context->flags & CTXT_FLAGS_GMEM_RESTORE) {
		kgsl_cffdump_syncmem(NULL, &context->gpustate,
			context->context_gmem_shadow.gmem_restore[1],
			context->context_gmem_shadow.gmem_restore[2] << 2,
			true);

		adreno_ringbuffer_issuecmds(device, context,
					    KGSL_CMD_FLAGS_PMODE,
					    context->context_gmem_shadow.
					    gmem_restore, 3);
		context->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
	}

	if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
		adreno_ringbuffer_issuecmds(device, context,
			KGSL_CMD_FLAGS_NONE, context->reg_restore, 3);

		/* Fixup self modifying IBs for restore operations */
		adreno_ringbuffer_issuecmds(device, context,
			KGSL_CMD_FLAGS_NONE, context->restore_fixup, 3);

		adreno_ringbuffer_issuecmds(device, context,
			KGSL_CMD_FLAGS_NONE, context->constant_restore, 3);

		if (context->flags & CTXT_FLAGS_SHADER_RESTORE)
			adreno_ringbuffer_issuecmds(device, context,
				KGSL_CMD_FLAGS_NONE,
				context->shader_restore, 3);

		/* Restore HLSQ_CONTROL_0 register */
		adreno_ringbuffer_issuecmds(device, context,
			KGSL_CMD_FLAGS_NONE,
			context->hlsqcontrol_restore, 3);
	}
}

/*
 * Emit the CP_ME_INIT packet that initializes the microengine after the
 * microcode is loaded.  The 17 payload dwords are the values required by
 * the A3XX CP; the 0x20000000 dword enables protected-mode registers.
 */
static int a3xx_rb_init(struct adreno_device *adreno_dev,
			struct adreno_ringbuffer *rb)
{
	unsigned int *cmds, cmds_gpu;

	cmds = adreno_ringbuffer_allocspace(rb, NULL, 18);
	if (cmds == NULL)
		return -ENOMEM;

	cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint) * (rb->wptr - 18);

	GSL_RB_WRITE(cmds, cmds_gpu, cp_type3_packet(CP_ME_INIT, 17));
	GSL_RB_WRITE(cmds, cmds_gpu, 0x000003f7);
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000080);
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000100);
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000180);
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00006600);
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000150);
	GSL_RB_WRITE(cmds, cmds_gpu, 0x0000014e);
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000154);
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000001);
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);

	/* Enable protected mode registers for A3XX */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x20000000);

	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);

	adreno_ringbuffer_submit(rb);

	return 0;
}

/*
 * Decode one asserted error interrupt bit, log a diagnostic and, for the
 * fatal non-recoverable cases, turn off the GPU IRQ.
 */
static void a3xx_err_callback(struct adreno_device *adreno_dev, int bit)
{
	struct kgsl_device *device = &adreno_dev->dev;
	const char *err = "";

	switch (bit) {
	case A3XX_INT_RBBM_AHB_ERROR: {
		unsigned int reg;

		adreno_regread(device, A3XX_RBBM_AHB_ERROR_STATUS, &reg);

		/*
		 * Return the word address of the erroring register so that it
		 * matches the register specification
		 */
		KGSL_DRV_CRIT(device,
			"RBBM | AHB bus error | %s | addr=%x | ports=%x:%x\n",
			reg & (1 << 28) ? "WRITE" : "READ",
			(reg & 0xFFFFF) >> 2, (reg >> 20) & 0x3,
			(reg >> 24) & 0x3);

		/* Clear the error */
		adreno_regwrite(device, A3XX_RBBM_AHB_CMD, (1 << 3));
		return;
	}
	case A3XX_INT_RBBM_REG_TIMEOUT:
		err = "RBBM: AHB register timeout";
		break;
	case A3XX_INT_RBBM_ME_MS_TIMEOUT:
		err = "RBBM: ME master split timeout";
		break;
	case A3XX_INT_RBBM_PFP_MS_TIMEOUT:
		err = "RBBM: PFP master split timeout";
		break;
	case A3XX_INT_RBBM_ATB_BUS_OVERFLOW:
		err = "RBBM: ATB bus oveflow";
		break;
	case A3XX_INT_VFD_ERROR:
		err = "VFD: Out of bounds access";
		break;
	case A3XX_INT_CP_T0_PACKET_IN_IB:
		err = "ringbuffer TO packet in IB interrupt";
		break;
	case A3XX_INT_CP_OPCODE_ERROR:
		err = "ringbuffer opcode error interrupt";
		break;
	case A3XX_INT_CP_RESERVED_BIT_ERROR:
		err = "ringbuffer reserved bit error interrupt";
		break;
	case A3XX_INT_CP_HW_FAULT:
		err = "ringbuffer hardware fault";
		break;
	case A3XX_INT_CP_REG_PROTECT_FAULT: {
		unsigned int reg;

		kgsl_regread(device, A3XX_CP_PROTECT_STATUS, &reg);

		KGSL_DRV_CRIT(device,
			"CP | Protected mode error| %s | addr=%x\n",
			reg & (1 << 24) ? "WRITE" : "READ",
			(reg & 0x1FFFF) >> 2);
		return;
	}
	case A3XX_INT_CP_AHB_ERROR_HALT:
		err = "ringbuffer AHB error interrupt";
		break;
	case A3XX_INT_MISC_HANG_DETECT:
		err = "MISC: GPU hang detected";
		break;
	case A3XX_INT_UCHE_OOB_ACCESS:
		err = "UCHE: Out of bounds access";
		break;
	}

	KGSL_DRV_CRIT(device, "%s\n", err);
	/* Mask off further GPU interrupts after a fatal error */
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
}

/*
 * Ringbuffer progress interrupt (IB1/IB2/RB retired): wake waiters and
 * kick the timestamp-expired worker to retire memory and issue queued IBs.
 */
static void a3xx_cp_callback(struct adreno_device *adreno_dev, int irq)
{
	struct kgsl_device *device = &adreno_dev->dev;

	/* Wake up everybody waiting for the interrupt */
	wake_up_interruptible_all(&device->wait_queue);

	/* Schedule work to free mem and issue ibs */
	queue_work(device->work_queue, &device->ts_expired_ws);
}

/**
 * struct a3xx_perfcounter_register - Define a performance counter register
 * @load_bit: the bit to set in RBBM_LOAD_CMD0/RBBM_LOAD_CMD1 to force the RBBM
 * to load the reset value into the appropriate counter
 * @select: The dword offset of the register to write the selected
 * countable into
 */

struct a3xx_perfcounter_register {
	unsigned int load_bit;
	unsigned int select;
};

static struct a3xx_perfcounter_register a3xx_perfcounter_reg_cp[] = {
	{ 0, A3XX_CP_PERFCOUNTER_SELECT },
};

static struct a3xx_perfcounter_register a3xx_perfcounter_reg_rbbm[] = {
	{ 1, A3XX_RBBM_PERFCOUNTER0_SELECT },
	{ 2, A3XX_RBBM_PERFCOUNTER1_SELECT },
};

static struct a3xx_perfcounter_register a3xx_perfcounter_reg_pc[] = {
	{ 3, A3XX_PC_PERFCOUNTER0_SELECT },
	{ 4, A3XX_PC_PERFCOUNTER1_SELECT },
	{ 5, A3XX_PC_PERFCOUNTER2_SELECT },
	{ 6, A3XX_PC_PERFCOUNTER3_SELECT },
};

static struct a3xx_perfcounter_register a3xx_perfcounter_reg_vfd[] = {
	{ 7, A3XX_VFD_PERFCOUNTER0_SELECT },
	{ 8, A3XX_VFD_PERFCOUNTER1_SELECT },
};

static struct a3xx_perfcounter_register a3xx_perfcounter_reg_hlsq[] = {
	{ 9, A3XX_HLSQ_PERFCOUNTER0_SELECT },
	{ 10, A3XX_HLSQ_PERFCOUNTER1_SELECT },
	{ 11, A3XX_HLSQ_PERFCOUNTER2_SELECT },
	{ 12, A3XX_HLSQ_PERFCOUNTER3_SELECT },
	{ 13, A3XX_HLSQ_PERFCOUNTER4_SELECT },
	{ 14, A3XX_HLSQ_PERFCOUNTER5_SELECT },
};

static struct a3xx_perfcounter_register a3xx_perfcounter_reg_vpc[] = {
	{ 15, A3XX_VPC_PERFCOUNTER0_SELECT },
	{ 16, A3XX_VPC_PERFCOUNTER1_SELECT },
};

static struct a3xx_perfcounter_register a3xx_perfcounter_reg_tse[] = {
	{ 17, A3XX_GRAS_PERFCOUNTER0_SELECT },
	{ 18, A3XX_GRAS_PERFCOUNTER1_SELECT },
};

static struct a3xx_perfcounter_register a3xx_perfcounter_reg_ras[] = {
	{ 19, A3XX_GRAS_PERFCOUNTER2_SELECT },
	{ 20, A3XX_GRAS_PERFCOUNTER3_SELECT },
};

static struct a3xx_perfcounter_register a3xx_perfcounter_reg_uche[] = {
	{ 21, A3XX_UCHE_PERFCOUNTER0_SELECT },
	{ 22, A3XX_UCHE_PERFCOUNTER1_SELECT },
	{ 23, A3XX_UCHE_PERFCOUNTER2_SELECT },
	{ 24, A3XX_UCHE_PERFCOUNTER3_SELECT },
	{ 25, A3XX_UCHE_PERFCOUNTER4_SELECT },
	{ 26, A3XX_UCHE_PERFCOUNTER5_SELECT },
};

static struct a3xx_perfcounter_register a3xx_perfcounter_reg_tp[] = {
	{ 27, A3XX_TP_PERFCOUNTER0_SELECT },
	{ 28, A3XX_TP_PERFCOUNTER1_SELECT },
	{ 29, A3XX_TP_PERFCOUNTER2_SELECT },
	{ 30, A3XX_TP_PERFCOUNTER3_SELECT },
	{ 31, A3XX_TP_PERFCOUNTER4_SELECT },
	{ 32, A3XX_TP_PERFCOUNTER5_SELECT },
};

static struct a3xx_perfcounter_register a3xx_perfcounter_reg_sp[] = {
	{ 33, A3XX_SP_PERFCOUNTER0_SELECT },
	{ 34, A3XX_SP_PERFCOUNTER1_SELECT },
	{ 35, A3XX_SP_PERFCOUNTER2_SELECT },
	{ 36, A3XX_SP_PERFCOUNTER3_SELECT },
	{ 37, A3XX_SP_PERFCOUNTER4_SELECT },
	{ 38, A3XX_SP_PERFCOUNTER5_SELECT },
	{ 39, A3XX_SP_PERFCOUNTER6_SELECT },
	{ 40, A3XX_SP_PERFCOUNTER7_SELECT },
};

static struct a3xx_perfcounter_register a3xx_perfcounter_reg_rb[] = {
	{ 41, A3XX_RB_PERFCOUNTER0_SELECT },
	{ 42, A3XX_RB_PERFCOUNTER1_SELECT },
};

#define REGCOUNTER_GROUP(_x) { (_x), ARRAY_SIZE((_x)) }

/*
 * Table of RBBM-loaded counter groups, indexed by group id.  Note that
 * the PWR, VBIF and VBIF_PWR groups are NOT in this table; they are
 * handled by the dedicated enable helpers below.
 */
static struct {
	struct a3xx_perfcounter_register *regs;
	int count;
} a3xx_perfcounter_reglist[] = {
	REGCOUNTER_GROUP(a3xx_perfcounter_reg_cp),
	REGCOUNTER_GROUP(a3xx_perfcounter_reg_rbbm),
	REGCOUNTER_GROUP(a3xx_perfcounter_reg_pc),
	REGCOUNTER_GROUP(a3xx_perfcounter_reg_vfd),
	REGCOUNTER_GROUP(a3xx_perfcounter_reg_hlsq),
	REGCOUNTER_GROUP(a3xx_perfcounter_reg_vpc),
	REGCOUNTER_GROUP(a3xx_perfcounter_reg_tse),
	REGCOUNTER_GROUP(a3xx_perfcounter_reg_ras),
	REGCOUNTER_GROUP(a3xx_perfcounter_reg_uche),
	REGCOUNTER_GROUP(a3xx_perfcounter_reg_tp),
	REGCOUNTER_GROUP(a3xx_perfcounter_reg_sp),
	REGCOUNTER_GROUP(a3xx_perfcounter_reg_rb),
};

/* Reset and enable one of the two RBBM power counters */
static void a3xx_perfcounter_enable_pwr(struct kgsl_device *device,
					unsigned int countable)
{
	unsigned int in, out;

	adreno_regread(device, A3XX_RBBM_RBBM_CTL, &in);

	if (countable == 0)
		out = in | RBBM_RBBM_CTL_RESET_PWR_CTR0;
	else
		out = in | RBBM_RBBM_CTL_RESET_PWR_CTR1;

	adreno_regwrite(device, A3XX_RBBM_RBBM_CTL, out);

	if (countable == 0)
		out = in | RBBM_RBBM_CTL_ENABLE_PWR_CTR0;
	else
		out = in | RBBM_RBBM_CTL_ENABLE_PWR_CTR1;

	adreno_regwrite(device, A3XX_RBBM_RBBM_CTL, out);

	return;
}

/* Select a countable on VBIF counter 0 or 1, clear it and enable it */
static void a3xx_perfcounter_enable_vbif(struct kgsl_device *device,
					 unsigned int counter,
					 unsigned int countable)
{
	unsigned int in, out, bit, sel;

	if (counter > 1 || countable > 0x7f)
		return;

	adreno_regread(device, A3XX_VBIF_PERF_CNT_EN, &in);
	adreno_regread(device, A3XX_VBIF_PERF_CNT_SEL, &sel);

	if (counter == 0) {
		bit = VBIF_PERF_CNT_0;
		sel = (sel & ~VBIF_PERF_CNT_0_SEL_MASK) | countable;
	} else {
		bit = VBIF_PERF_CNT_1;
		sel = (sel & ~VBIF_PERF_CNT_1_SEL_MASK) |
			(countable << VBIF_PERF_CNT_1_SEL);
	}

	out = in | bit;

	adreno_regwrite(device, A3XX_VBIF_PERF_CNT_SEL, sel);

	/* Pulse the clear bit, then enable the counter */
	adreno_regwrite(device, A3XX_VBIF_PERF_CNT_CLR, bit);
	adreno_regwrite(device, A3XX_VBIF_PERF_CNT_CLR, 0);

	adreno_regwrite(device, A3XX_VBIF_PERF_CNT_EN, out);
}

/* Clear and enable one of the three VBIF power counters */
static void a3xx_perfcounter_enable_vbif_pwr(struct kgsl_device *device,
					     unsigned int countable)
{
	unsigned int in, out, bit;

	adreno_regread(device, A3XX_VBIF_PERF_CNT_EN, &in);
	if (countable == 0)
		bit = VBIF_PERF_PWR_CNT_0;
	else if (countable == 1)
		bit = VBIF_PERF_PWR_CNT_1;
	else
		bit = VBIF_PERF_PWR_CNT_2;

	out = in | bit;

	/* Pulse the clear bit, then enable the counter */
	adreno_regwrite(device, A3XX_VBIF_PERF_CNT_CLR, bit);
	adreno_regwrite(device, A3XX_VBIF_PERF_CNT_CLR, 0);

	adreno_regwrite(device, A3XX_VBIF_PERF_CNT_EN, out);
}

/*
 *
a3xx_perfcounter_enable - Configure a performance counter for a countable * @adreno_dev - Adreno device to configure * @group - Desired performance counter group * @counter - Desired performance counter in the group * @countable - Desired countable * * Physically set up a counter within a group with the desired countable */ static void a3xx_perfcounter_enable(struct adreno_device *adreno_dev, unsigned int group, unsigned int counter, unsigned int countable) { struct kgsl_device *device = &adreno_dev->dev; unsigned int val = 0; struct a3xx_perfcounter_register *reg; if (group >= ARRAY_SIZE(a3xx_perfcounter_reglist)) return; if (counter >= a3xx_perfcounter_reglist[group].count) return; /* Special cases */ if (group == KGSL_PERFCOUNTER_GROUP_PWR) return a3xx_perfcounter_enable_pwr(device, countable); else if (group == KGSL_PERFCOUNTER_GROUP_VBIF) return a3xx_perfcounter_enable_vbif(device, counter, countable); else if (group == KGSL_PERFCOUNTER_GROUP_VBIF_PWR) return a3xx_perfcounter_enable_vbif_pwr(device, countable); reg = &(a3xx_perfcounter_reglist[group].regs[counter]); /* Select the desired perfcounter */ adreno_regwrite(device, reg->select, countable); if (reg->load_bit < 32) { val = 1 << reg->load_bit; adreno_regwrite(device, A3XX_RBBM_PERFCTR_LOAD_CMD0, val); } else { val = 1 << (reg->load_bit - 32); adreno_regwrite(device, A3XX_RBBM_PERFCTR_LOAD_CMD1, val); } } static uint64_t a3xx_perfcounter_read(struct adreno_device *adreno_dev, unsigned int group, unsigned int counter, unsigned int offset) { struct kgsl_device *device = &adreno_dev->dev; struct a3xx_perfcounter_register *reg = NULL; unsigned int lo = 0, hi = 0; unsigned int val; if (group >= ARRAY_SIZE(a3xx_perfcounter_reglist)) return 0; if (counter >= a3xx_perfcounter_reglist[group].count) return 0; reg = &(a3xx_perfcounter_reglist[group].regs[counter]); /* Freeze the counter */ adreno_regread(device, A3XX_RBBM_PERFCTR_CTL, &val); val &= ~reg->load_bit; adreno_regwrite(device, A3XX_RBBM_PERFCTR_CTL, 
			val);

	/* Read the values */
	adreno_regread(device, offset, &lo);
	adreno_regread(device, offset + 1, &hi);

	/* Re-Enable the counter */
	val |= reg->load_bit;
	adreno_regwrite(device, A3XX_RBBM_PERFCTR_CTL, val);

	/* Combine the LO/HI halves into one 64-bit value */
	return (((uint64_t) hi) << 32) | lo;
}

#define A3XX_IRQ_CALLBACK(_c) { .func = _c }

/* Interrupt bits the driver actually services; everything else is masked */
#define A3XX_INT_MASK \
	((1 << A3XX_INT_RBBM_AHB_ERROR) |        \
	 (1 << A3XX_INT_RBBM_ATB_BUS_OVERFLOW) | \
	 (1 << A3XX_INT_CP_T0_PACKET_IN_IB) |    \
	 (1 << A3XX_INT_CP_OPCODE_ERROR) |       \
	 (1 << A3XX_INT_CP_RESERVED_BIT_ERROR) | \
	 (1 << A3XX_INT_CP_HW_FAULT) |           \
	 (1 << A3XX_INT_CP_IB1_INT) |            \
	 (1 << A3XX_INT_CP_IB2_INT) |            \
	 (1 << A3XX_INT_CP_RB_INT) |             \
	 (1 << A3XX_INT_CP_REG_PROTECT_FAULT) |  \
	 (1 << A3XX_INT_CP_AHB_ERROR_HALT) |     \
	 (1 << A3XX_INT_UCHE_OOB_ACCESS))

/* Per-bit dispatch table for RBBM_INT_0_STATUS; index == interrupt bit */
static struct {
	void (*func)(struct adreno_device *, int);
} a3xx_irq_funcs[] = {
	A3XX_IRQ_CALLBACK(NULL),	/* 0 - RBBM_GPU_IDLE */
	A3XX_IRQ_CALLBACK(a3xx_err_callback),	/* 1 - RBBM_AHB_ERROR */
	A3XX_IRQ_CALLBACK(a3xx_err_callback),	/* 2 - RBBM_REG_TIMEOUT */
	A3XX_IRQ_CALLBACK(a3xx_err_callback),	/* 3 - RBBM_ME_MS_TIMEOUT */
	A3XX_IRQ_CALLBACK(a3xx_err_callback),	/* 4 - RBBM_PFP_MS_TIMEOUT */
	A3XX_IRQ_CALLBACK(a3xx_err_callback),	/* 5 - RBBM_ATB_BUS_OVERFLOW */
	A3XX_IRQ_CALLBACK(a3xx_err_callback),	/* 6 - RBBM_VFD_ERROR */
	A3XX_IRQ_CALLBACK(NULL),	/* 7 - CP_SW */
	A3XX_IRQ_CALLBACK(a3xx_err_callback),	/* 8 - CP_T0_PACKET_IN_IB */
	A3XX_IRQ_CALLBACK(a3xx_err_callback),	/* 9 - CP_OPCODE_ERROR */
	A3XX_IRQ_CALLBACK(a3xx_err_callback),	/* 10 - CP_RESERVED_BIT_ERROR */
	A3XX_IRQ_CALLBACK(a3xx_err_callback),	/* 11 - CP_HW_FAULT */
	A3XX_IRQ_CALLBACK(NULL),	/* 12 - CP_DMA */
	A3XX_IRQ_CALLBACK(a3xx_cp_callback),	/* 13 - CP_IB2_INT */
	A3XX_IRQ_CALLBACK(a3xx_cp_callback),	/* 14 - CP_IB1_INT */
	A3XX_IRQ_CALLBACK(a3xx_cp_callback),	/* 15 - CP_RB_INT */
	A3XX_IRQ_CALLBACK(a3xx_err_callback),	/* 16 - CP_REG_PROTECT_FAULT */
	A3XX_IRQ_CALLBACK(NULL),	/* 17 - CP_RB_DONE_TS */
	A3XX_IRQ_CALLBACK(NULL),	/* 18 - CP_VS_DONE_TS */
	A3XX_IRQ_CALLBACK(NULL),	/* 19 - CP_PS_DONE_TS */
	A3XX_IRQ_CALLBACK(NULL),	/* 20 - CP_CACHE_FLUSH_TS */
	A3XX_IRQ_CALLBACK(a3xx_err_callback),	/* 21 - CP_AHB_ERROR_FAULT */
	A3XX_IRQ_CALLBACK(NULL),	/* 22 - Unused */
	A3XX_IRQ_CALLBACK(NULL),	/* 23 - Unused */
	A3XX_IRQ_CALLBACK(NULL),	/* 24 - MISC_HANG_DETECT */
	A3XX_IRQ_CALLBACK(a3xx_err_callback),	/* 25 - UCHE_OOB_ACCESS */
	/* 26 to 31 - Unused */
};

/*
 * Walk RBBM_INT_0_STATUS, dispatch each asserted bit through
 * a3xx_irq_funcs, then write the status back to clear all serviced bits.
 */
static irqreturn_t a3xx_irq_handler(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = &adreno_dev->dev;
	irqreturn_t ret = IRQ_NONE;
	unsigned int status, tmp;
	int i;

	adreno_regread(&adreno_dev->dev, A3XX_RBBM_INT_0_STATUS, &status);

	for (tmp = status, i = 0; tmp && i < ARRAY_SIZE(a3xx_irq_funcs); i++) {
		if (tmp & 1) {
			if (a3xx_irq_funcs[i].func != NULL) {
				a3xx_irq_funcs[i].func(adreno_dev, i);
				ret = IRQ_HANDLED;
			} else {
				KGSL_DRV_CRIT(device,
					"Unhandled interrupt bit %x\n", i);
			}
		}
		tmp >>= 1;
	}

	trace_kgsl_a3xx_irq_status(device, status);

	/* Ack every bit that was asserted */
	if (status)
		adreno_regwrite(&adreno_dev->dev, A3XX_RBBM_INT_CLEAR_CMD,
				status);
	return ret;
}

/* Enable or disable the serviced interrupt set in one mask write */
static void a3xx_irq_control(struct adreno_device *adreno_dev, int state)
{
	struct kgsl_device *device = &adreno_dev->dev;

	if (state)
		adreno_regwrite(device, A3XX_RBBM_INT_0_MASK, A3XX_INT_MASK);
	else
		adreno_regwrite(device, A3XX_RBBM_INT_0_MASK, 0);
}

/* Return nonzero if any serviced interrupt is currently pending */
static unsigned int a3xx_irq_pending(struct adreno_device *adreno_dev)
{
	unsigned int status;

	adreno_regread(&adreno_dev->dev, A3XX_RBBM_INT_0_STATUS, &status);

	return (status & A3XX_INT_MASK) ?
		1 : 0;
}

/*
 * Return GPU busy cycles elapsed since the previous call, read from the
 * free-running PWR_1 counter.  Handles a single 32-bit wrap between
 * calls; the first call after reset returns 0 to establish a baseline.
 */
static unsigned int a3xx_busy_cycles(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = &adreno_dev->dev;
	unsigned int val;
	unsigned int ret = 0;

	/* Read the value */
	adreno_regread(device, A3XX_RBBM_PERFCTR_PWR_1_LO, &val);

	/* Return 0 for the first read */
	if (adreno_dev->gpu_cycles != 0) {
		if (val < adreno_dev->gpu_cycles)
			ret = (0xFFFFFFFF - adreno_dev->gpu_cycles) + val;
		else
			ret = val - adreno_dev->gpu_cycles;
	}

	adreno_dev->gpu_cycles = val;
	return ret;
}

/* One register/value pair of a per-chip VBIF bring-up sequence */
struct a3xx_vbif_data {
	unsigned int reg;
	unsigned int val;
};

/* VBIF registers start after 0x3000 so use 0x0 as end of list marker */
static struct a3xx_vbif_data a305_vbif[] = {
	/* Set up 16 deep read/write request queues */
	{ A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010 },
	{ A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010 },
	{ A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010 },
	{ A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010 },
	{ A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303 },
	{ A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010 },
	{ A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010 },
	/* Enable WR-REQ */
	{ A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000FF },
	/* Set up round robin arbitration between both AXI ports */
	{ A3XX_VBIF_ARB_CTL, 0x00000030 },
	/* Set up AOOO */
	{ A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003C },
	{ A3XX_VBIF_OUT_AXI_AOOO, 0x003C003C },
	{0, 0},
};

static struct a3xx_vbif_data a320_vbif[] = {
	/* Set up 16 deep read/write request queues */
	{ A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010 },
	{ A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010 },
	{ A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010 },
	{ A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010 },
	{ A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303 },
	{ A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010 },
	{ A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010 },
	/* Enable WR-REQ */
	{ A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000FF },
	/* Set up round robin arbitration between both AXI ports */
	{ A3XX_VBIF_ARB_CTL, 0x00000030 },
	/* Set up AOOO */
	{ A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003C },
	{ A3XX_VBIF_OUT_AXI_AOOO, 0x003C003C },
	/* Enable 1K sort */
	{ A3XX_VBIF_ABIT_SORT, 0x000000FF },
	{ A3XX_VBIF_ABIT_SORT_CONF, 0x000000A4 },
	{0, 0},
};

static struct a3xx_vbif_data a330_vbif[] = {
	/* Set up 16 deep read/write request queues */
	{ A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818 },
	{ A3XX_VBIF_IN_RD_LIM_CONF1, 0x00001818 },
	{ A3XX_VBIF_OUT_RD_LIM_CONF0, 0x00001818 },
	{ A3XX_VBIF_OUT_WR_LIM_CONF0, 0x00001818 },
	{ A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303 },
	{ A3XX_VBIF_IN_WR_LIM_CONF0, 0x18181818 },
	{ A3XX_VBIF_IN_WR_LIM_CONF1, 0x00001818 },
	/* Enable WR-REQ */
	{ A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003F },
	/* Set up round robin arbitration between both AXI ports */
	{ A3XX_VBIF_ARB_CTL, 0x00000030 },
	/* Set up VBIF_ROUND_ROBIN_QOS_ARB */
	{ A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001 },
	/* Set up AOOO */
	{ A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003F },
	{ A3XX_VBIF_OUT_AXI_AOOO, 0x003F003F },
	/* Enable 1K sort */
	{ A3XX_VBIF_ABIT_SORT, 0x0001003F },
	{ A3XX_VBIF_ABIT_SORT_CONF, 0x000000A4 },
	/* Disable VBIF clock gating.  This is to enable AXI running
	 * higher frequency than GPU.
	 */
	{ A3XX_VBIF_CLKON, 1 },
	{0, 0},
};

/*
 * Most of the VBIF registers on 8974v2 have the correct values at power on, so
 * we won't modify those if we don't need to
 */
static struct a3xx_vbif_data a330v2_vbif[] = {
	/* Enable 1k sort */
	{ A3XX_VBIF_ABIT_SORT, 0x0001003F },
	{ A3XX_VBIF_ABIT_SORT_CONF, 0x000000A4 },
	/* Enable WR-REQ */
	{ A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003F },
	{ A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303 },
	/* Set up VBIF_ROUND_ROBIN_QOS_ARB */
	{ A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003 },
	/* Disable VBIF clock gating.  This is to enable AXI running
	 * higher frequency than GPU.
	 */
	{ A3XX_VBIF_CLKON, 1 },
	{0, 0},
};

/* Map each chip-detect predicate to its VBIF sequence; first match wins */
static struct {
	int (*devfunc)(struct adreno_device *);
	struct a3xx_vbif_data *vbif;
} a3xx_vbif_platforms[] = {
	{ adreno_is_a305, a305_vbif },
	{ adreno_is_a320, a320_vbif },
	/* A330v2 needs to be ahead of A330 so the right device matches */
	{ adreno_is_a330v2, a330v2_vbif },
	{ adreno_is_a330, a330_vbif },
};

static void a3xx_perfcounter_init(struct adreno_device *adreno_dev)
{
	/*
	 * Reserve SP counters used to augment hang detection:
	 * SP_ALU_ACTIVE_CYCLES counts all ALU instruction execution
	 * regardless of precision or shader ID, SP0_ICL1_MISSES counts
	 * USP L1 instruction miss requests, and SP_FS_CFLOW_INSTRUCTIONS
	 * counts USP flow control instruction execution.  Each _LO
	 * register is paired with its _HI at the next dword offset.
	 */
	if (adreno_dev->fast_hang_detect) {
		adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
			SP_ALU_ACTIVE_CYCLES, &ft_detect_regs[6],
			PERFCOUNTER_FLAG_KERNEL);
		ft_detect_regs[7] = ft_detect_regs[6] + 1;
		adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
			SP0_ICL1_MISSES, &ft_detect_regs[8],
			PERFCOUNTER_FLAG_KERNEL);
		ft_detect_regs[9] = ft_detect_regs[8] + 1;
		adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
			SP_FS_CFLOW_INSTRUCTIONS, &ft_detect_regs[10],
			PERFCOUNTER_FLAG_KERNEL);
		ft_detect_regs[11] = ft_detect_regs[10] + 1;
	}

	/* Reserve SP_FS_FULL_ALU_INSTRUCTIONS for the kernel as well */
	adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
		SP_FS_FULL_ALU_INSTRUCTIONS, NULL, PERFCOUNTER_FLAG_KERNEL);

	/* Reserve and start countable 1 in the PWR perfcounter group */
	adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_PWR, 1,
		NULL, PERFCOUNTER_FLAG_KERNEL);
}

/**
 * a3xx_protect_init() - Initializes register protection on a3xx
 * @device: Pointer to the device structure
 * Performs register writes to enable protected access to sensitive
 * registers
 */
static void a3xx_protect_init(struct kgsl_device *device)
{
	int index = 0;

	/* enable access protection to privileged registers */
	kgsl_regwrite(device, A3XX_CP_PROTECT_CTRL, 0x00000007);
	/* RBBM registers */
	adreno_set_protected_registers(device, &index, 0x18, 0);
	adreno_set_protected_registers(device, &index, 0x20, 2);
	adreno_set_protected_registers(device, &index, 0x33, 0);
	adreno_set_protected_registers(device, &index, 0x42, 0);
	adreno_set_protected_registers(device, &index, 0x50, 4);
	adreno_set_protected_registers(device, &index, 0x63, 0);
	adreno_set_protected_registers(device, &index, 0x100, 4);

	/* CP registers */
	adreno_set_protected_registers(device, &index, 0x1C0, 5);
	adreno_set_protected_registers(device, &index, 0x1EC, 1);
	adreno_set_protected_registers(device, &index, 0x1F6, 1);
	adreno_set_protected_registers(device, &index, 0x1F8, 2);
	adreno_set_protected_registers(device, &index, 0x45E, 2);
	adreno_set_protected_registers(device, &index, 0x460, 4);

	/* RB registers */
	adreno_set_protected_registers(device, &index, 0xCC0, 0);

	/* VBIF registers */
	adreno_set_protected_registers(device, &index, 0x3000, 6);

	/* SMMU registers */
	adreno_set_protected_registers(device, &index, 0x4000, 14);
}

/*
 * One-time hardware bring-up: program the per-chip VBIF sequence, then
 * configure busy/hang counters, error reporting, clock gating, OCMEM and
 * register protection.
 */
static void a3xx_start(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = &adreno_dev->dev;
	struct a3xx_vbif_data *vbif = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(a3xx_vbif_platforms); i++) {
		if (a3xx_vbif_platforms[i].devfunc(adreno_dev)) {
			vbif = a3xx_vbif_platforms[i].vbif;
			break;
		}
	}

	/* Every supported chip must have a VBIF sequence */
	BUG_ON(vbif == NULL);

	while (vbif->reg != 0) {
		adreno_regwrite(device, vbif->reg, vbif->val);
		vbif++;
	}

	/* Make all blocks contribute to the GPU BUSY perf counter */
	adreno_regwrite(device, A3XX_RBBM_GPU_BUSY_MASKED, 0xFFFFFFFF);

	/* Tune the hystersis counters for SP and CP idle detection */
	adreno_regwrite(device, A3XX_RBBM_SP_HYST_CNT, 0x10);
	adreno_regwrite(device, A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);

	/* Enable the RBBM error reporting bits.  This lets us get
	   useful information on failure */

	adreno_regwrite(device, A3XX_RBBM_AHB_CTL0, 0x00000001);

	/* Enable AHB error reporting */
	adreno_regwrite(device, A3XX_RBBM_AHB_CTL1, 0xA6FFFFFF);

	/* Turn on the power counters */
	adreno_regwrite(device, A3XX_RBBM_RBBM_CTL, 0x00030000);

	/* Turn on hang detection - this spews a lot of useful information
	 * into the RBBM registers on a hang */
	adreno_regwrite(device, A3XX_RBBM_INTERFACE_HANG_INT_CTL,
			(1 << 16) | 0xFFF);

	/* Enable 64-byte cacheline size. HW Default is 32-byte (0x000000E0). */
	adreno_regwrite(device, A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);

	/* Enable Clock gating */
	adreno_regwrite(device, A3XX_RBBM_CLOCK_CTL,
			adreno_a3xx_rbbm_clock_ctl_default(adreno_dev));

	if (adreno_is_a330v2(adreno_dev))
		adreno_regwrite(device, A3XX_RBBM_GPR0_CTL,
				A330v2_RBBM_GPR0_CTL_DEFAULT);
	else if (adreno_is_a330(adreno_dev))
		adreno_regwrite(device, A3XX_RBBM_GPR0_CTL,
				A330_RBBM_GPR0_CTL_DEFAULT);

	/* Set the OCMEM base address for A330 */
	if (adreno_is_a330(adreno_dev)) {
		adreno_regwrite(device, A3XX_RB_GMEM_BASE_ADDR,
				(unsigned int)(adreno_dev->ocmem_base >> 14));
	}

	/* Turn on protection */
	a3xx_protect_init(device);

	/* Turn on performance counters */
	adreno_regwrite(device, A3XX_RBBM_PERFCTR_CTL, 0x01);

	/* Turn on the GPU busy counter and let it run free */

	adreno_dev->gpu_cycles = 0;
}

/*
 * Define the available perfcounter groups - these get used by
 * adreno_perfcounter_get and adreno_perfcounter_put
 */

static struct adreno_perfcount_register a3xx_perfcounters_cp[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_CP_0_LO, 0 },
};

static struct adreno_perfcount_register a3xx_perfcounters_rbbm[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RBBM_0_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RBBM_1_LO, 0 },
};

static struct adreno_perfcount_register a3xx_perfcounters_pc[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PC_0_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PC_1_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PC_2_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PC_3_LO, 0 },
};

static struct adreno_perfcount_register a3xx_perfcounters_vfd[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_VFD_0_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_VFD_1_LO, 0 },
};

static struct adreno_perfcount_register a3xx_perfcounters_hlsq[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_0_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_1_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_2_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_3_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_4_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_5_LO, 0 },
};

static struct adreno_perfcount_register a3xx_perfcounters_vpc[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_VPC_0_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_VPC_1_LO, 0 },
};

static struct adreno_perfcount_register a3xx_perfcounters_tse[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TSE_0_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TSE_1_LO, 0 },
};

static struct adreno_perfcount_register a3xx_perfcounters_ras[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RAS_0_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RAS_1_LO, 0 },
};

static struct adreno_perfcount_register a3xx_perfcounters_uche[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_0_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_1_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_2_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_3_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_4_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_5_LO, 0 },
};

static struct adreno_perfcount_register a3xx_perfcounters_tp[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_0_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_1_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_2_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_3_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_4_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_5_LO, 0 },
};

static struct adreno_perfcount_register a3xx_perfcounters_sp[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_0_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_1_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_2_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_3_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_4_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_5_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_6_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_7_LO, 0 },
};

static struct adreno_perfcount_register a3xx_perfcounters_rb[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RB_0_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RB_1_LO, 0 },
};

static struct adreno_perfcount_register a3xx_perfcounters_pwr[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PWR_0_LO, 0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PWR_1_LO, 0 },
};

static struct adreno_perfcount_register a3xx_perfcounters_vbif[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_VBIF_PERF_CNT0_LO },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_VBIF_PERF_CNT1_LO },
};

static struct adreno_perfcount_register a3xx_perfcounters_vbif_pwr[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_VBIF_PERF_PWR_CNT0_LO },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_VBIF_PERF_PWR_CNT1_LO },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_VBIF_PERF_PWR_CNT2_LO },
};

/* Group order defines the KGSL_PERFCOUNTER_GROUP_* indices */
static struct adreno_perfcount_group a3xx_perfcounter_groups[] = {
	{ a3xx_perfcounters_cp, ARRAY_SIZE(a3xx_perfcounters_cp) },
	{ a3xx_perfcounters_rbbm, ARRAY_SIZE(a3xx_perfcounters_rbbm) },
	{ a3xx_perfcounters_pc, ARRAY_SIZE(a3xx_perfcounters_pc) },
	{ a3xx_perfcounters_vfd, ARRAY_SIZE(a3xx_perfcounters_vfd) },
	{ a3xx_perfcounters_hlsq, ARRAY_SIZE(a3xx_perfcounters_hlsq) },
	{ a3xx_perfcounters_vpc, ARRAY_SIZE(a3xx_perfcounters_vpc) },
	{ a3xx_perfcounters_tse, ARRAY_SIZE(a3xx_perfcounters_tse) },
	{ a3xx_perfcounters_ras, ARRAY_SIZE(a3xx_perfcounters_ras) },
	{ a3xx_perfcounters_uche, ARRAY_SIZE(a3xx_perfcounters_uche) },
	{ a3xx_perfcounters_tp, ARRAY_SIZE(a3xx_perfcounters_tp) },
	{ a3xx_perfcounters_sp, ARRAY_SIZE(a3xx_perfcounters_sp) },
	{ a3xx_perfcounters_rb, ARRAY_SIZE(a3xx_perfcounters_rb) },
	{ a3xx_perfcounters_pwr, ARRAY_SIZE(a3xx_perfcounters_pwr) },
	{ a3xx_perfcounters_vbif, ARRAY_SIZE(a3xx_perfcounters_vbif) },
	{ a3xx_perfcounters_vbif_pwr, ARRAY_SIZE(a3xx_perfcounters_vbif_pwr) },
};

static struct adreno_perfcounters a3xx_perfcounters = {
	a3xx_perfcounter_groups,
	ARRAY_SIZE(a3xx_perfcounter_groups),
};

/* Defined in adreno_a3xx_snapshot.c */
void *a3xx_snapshot(struct adreno_device *adreno_dev, void *snapshot,
		    int *remain, int hang);

/* A3XX implementation of the per-generation adreno ops vtable */
struct adreno_gpudev adreno_a3xx_gpudev = {
	.reg_rbbm_status = A3XX_RBBM_STATUS,
	.reg_cp_pfp_ucode_addr = A3XX_CP_PFP_UCODE_ADDR,
	.reg_cp_pfp_ucode_data = A3XX_CP_PFP_UCODE_DATA,
	.perfcounters = &a3xx_perfcounters,

	.ctxt_create = a3xx_drawctxt_create,
	.ctxt_save = a3xx_drawctxt_save,
	.ctxt_restore = a3xx_drawctxt_restore,
	.ctxt_draw_workaround = NULL,
	.rb_init = a3xx_rb_init,
	.perfcounter_init = a3xx_perfcounter_init,
	.irq_control = a3xx_irq_control,
	.irq_handler = a3xx_irq_handler,
	.irq_pending = a3xx_irq_pending,
	.busy_cycles = a3xx_busy_cycles,
	.start = a3xx_start,
	.snapshot = a3xx_snapshot,
	.perfcounter_enable = a3xx_perfcounter_enable,
	.perfcounter_read = a3xx_perfcounter_read,
};
gpl-2.0
tiagovignatti/drm-intel
drivers/firmware/efi/runtime-map.c
909
4643
/* * linux/drivers/efi/runtime-map.c * Copyright (C) 2013 Red Hat, Inc., Dave Young <dyoung@redhat.com> * * This file is released under the GPLv2. */ #include <linux/string.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/efi.h> #include <linux/slab.h> #include <asm/setup.h> static void *efi_runtime_map; static int nr_efi_runtime_map; static u32 efi_memdesc_size; struct efi_runtime_map_entry { efi_memory_desc_t md; struct kobject kobj; /* kobject for each entry */ }; static struct efi_runtime_map_entry **map_entries; struct map_attribute { struct attribute attr; ssize_t (*show)(struct efi_runtime_map_entry *entry, char *buf); }; static inline struct map_attribute *to_map_attr(struct attribute *attr) { return container_of(attr, struct map_attribute, attr); } static ssize_t type_show(struct efi_runtime_map_entry *entry, char *buf) { return snprintf(buf, PAGE_SIZE, "0x%x\n", entry->md.type); } #define EFI_RUNTIME_FIELD(var) entry->md.var #define EFI_RUNTIME_U64_ATTR_SHOW(name) \ static ssize_t name##_show(struct efi_runtime_map_entry *entry, char *buf) \ { \ return snprintf(buf, PAGE_SIZE, "0x%llx\n", EFI_RUNTIME_FIELD(name)); \ } EFI_RUNTIME_U64_ATTR_SHOW(phys_addr); EFI_RUNTIME_U64_ATTR_SHOW(virt_addr); EFI_RUNTIME_U64_ATTR_SHOW(num_pages); EFI_RUNTIME_U64_ATTR_SHOW(attribute); static inline struct efi_runtime_map_entry *to_map_entry(struct kobject *kobj) { return container_of(kobj, struct efi_runtime_map_entry, kobj); } static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct efi_runtime_map_entry *entry = to_map_entry(kobj); struct map_attribute *map_attr = to_map_attr(attr); return map_attr->show(entry, buf); } static struct map_attribute map_type_attr = __ATTR_RO(type); static struct map_attribute map_phys_addr_attr = __ATTR_RO(phys_addr); static struct map_attribute map_virt_addr_attr = __ATTR_RO(virt_addr); static struct map_attribute map_num_pages_attr = __ATTR_RO(num_pages); 
static struct map_attribute map_attribute_attr = __ATTR_RO(attribute); /* * These are default attributes that are added for every memmap entry. */ static struct attribute *def_attrs[] = { &map_type_attr.attr, &map_phys_addr_attr.attr, &map_virt_addr_attr.attr, &map_num_pages_attr.attr, &map_attribute_attr.attr, NULL }; static const struct sysfs_ops map_attr_ops = { .show = map_attr_show, }; static void map_release(struct kobject *kobj) { struct efi_runtime_map_entry *entry; entry = to_map_entry(kobj); kfree(entry); } static struct kobj_type __refdata map_ktype = { .sysfs_ops = &map_attr_ops, .default_attrs = def_attrs, .release = map_release, }; static struct kset *map_kset; static struct efi_runtime_map_entry * add_sysfs_runtime_map_entry(struct kobject *kobj, int nr) { int ret; struct efi_runtime_map_entry *entry; if (!map_kset) { map_kset = kset_create_and_add("runtime-map", NULL, kobj); if (!map_kset) return ERR_PTR(-ENOMEM); } entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) { kset_unregister(map_kset); map_kset = NULL; return ERR_PTR(-ENOMEM); } memcpy(&entry->md, efi_runtime_map + nr * efi_memdesc_size, sizeof(efi_memory_desc_t)); kobject_init(&entry->kobj, &map_ktype); entry->kobj.kset = map_kset; ret = kobject_add(&entry->kobj, NULL, "%d", nr); if (ret) { kobject_put(&entry->kobj); kset_unregister(map_kset); map_kset = NULL; return ERR_PTR(ret); } return entry; } int efi_get_runtime_map_size(void) { return nr_efi_runtime_map * efi_memdesc_size; } int efi_get_runtime_map_desc_size(void) { return efi_memdesc_size; } int efi_runtime_map_copy(void *buf, size_t bufsz) { size_t sz = efi_get_runtime_map_size(); if (sz > bufsz) sz = bufsz; memcpy(buf, efi_runtime_map, sz); return 0; } void efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size) { efi_runtime_map = map; nr_efi_runtime_map = nr_entries; efi_memdesc_size = desc_size; } int __init efi_runtime_map_init(struct kobject *efi_kobj) { int i, j, ret = 0; struct efi_runtime_map_entry *entry; 
if (!efi_runtime_map) return 0; map_entries = kzalloc(nr_efi_runtime_map * sizeof(entry), GFP_KERNEL); if (!map_entries) { ret = -ENOMEM; goto out; } for (i = 0; i < nr_efi_runtime_map; i++) { entry = add_sysfs_runtime_map_entry(efi_kobj, i); if (IS_ERR(entry)) { ret = PTR_ERR(entry); goto out_add_entry; } *(map_entries + i) = entry; } return 0; out_add_entry: for (j = i - 1; j >= 0; j--) { entry = *(map_entries + j); kobject_put(&entry->kobj); } out: return ret; }
gpl-2.0
KDGDev/meizu-mx-kernel
drivers/staging/iio/trigger/iio-trig-sysfs.c
2701
4309
/* * Copyright 2011 Analog Devices Inc. * * Licensed under the GPL-2. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/list.h> #include "../iio.h" #include "../trigger.h" struct iio_sysfs_trig { struct iio_trigger *trig; int id; struct list_head l; }; static LIST_HEAD(iio_sysfs_trig_list); static DEFINE_MUTEX(iio_syfs_trig_list_mut); static int iio_sysfs_trigger_probe(int id); static ssize_t iio_sysfs_trig_add(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { int ret; unsigned long input; ret = strict_strtoul(buf, 10, &input); if (ret) return ret; ret = iio_sysfs_trigger_probe(input); if (ret) return ret; return len; } static DEVICE_ATTR(add_trigger, S_IWUSR, NULL, &iio_sysfs_trig_add); static int iio_sysfs_trigger_remove(int id); static ssize_t iio_sysfs_trig_remove(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { int ret; unsigned long input; ret = strict_strtoul(buf, 10, &input); if (ret) return ret; ret = iio_sysfs_trigger_remove(input); if (ret) return ret; return len; } static DEVICE_ATTR(remove_trigger, S_IWUSR, NULL, &iio_sysfs_trig_remove); static struct attribute *iio_sysfs_trig_attrs[] = { &dev_attr_add_trigger.attr, &dev_attr_remove_trigger.attr, NULL, }; static const struct attribute_group iio_sysfs_trig_group = { .attrs = iio_sysfs_trig_attrs, }; static const struct attribute_group *iio_sysfs_trig_groups[] = { &iio_sysfs_trig_group, NULL }; static struct device iio_sysfs_trig_dev = { .bus = &iio_bus_type, .groups = iio_sysfs_trig_groups, }; static ssize_t iio_sysfs_trigger_poll(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct iio_trigger *trig = dev_get_drvdata(dev); iio_trigger_poll_chained(trig, 0); return count; } static DEVICE_ATTR(trigger_now, S_IWUSR, NULL, iio_sysfs_trigger_poll); static struct attribute *iio_sysfs_trigger_attrs[] = { 
&dev_attr_trigger_now.attr, NULL, }; static const struct attribute_group iio_sysfs_trigger_attr_group = { .attrs = iio_sysfs_trigger_attrs, }; static const struct attribute_group *iio_sysfs_trigger_attr_groups[] = { &iio_sysfs_trigger_attr_group, NULL }; static int iio_sysfs_trigger_probe(int id) { struct iio_sysfs_trig *t; int ret; bool foundit = false; mutex_lock(&iio_syfs_trig_list_mut); list_for_each_entry(t, &iio_sysfs_trig_list, l) if (id == t->id) { foundit = true; break; } if (foundit) { ret = -EINVAL; goto out1; } t = kmalloc(sizeof(*t), GFP_KERNEL); if (t == NULL) { ret = -ENOMEM; goto out1; } t->id = id; t->trig = iio_allocate_trigger("sysfstrig%d", id); if (!t->trig) { ret = -ENOMEM; goto free_t; } t->trig->dev.groups = iio_sysfs_trigger_attr_groups; t->trig->owner = THIS_MODULE; t->trig->dev.parent = &iio_sysfs_trig_dev; ret = iio_trigger_register(t->trig); if (ret) goto out2; list_add(&t->l, &iio_sysfs_trig_list); __module_get(THIS_MODULE); mutex_unlock(&iio_syfs_trig_list_mut); return 0; out2: iio_put_trigger(t->trig); free_t: kfree(t); out1: mutex_unlock(&iio_syfs_trig_list_mut); return ret; } static int iio_sysfs_trigger_remove(int id) { bool foundit = false; struct iio_sysfs_trig *t; mutex_lock(&iio_syfs_trig_list_mut); list_for_each_entry(t, &iio_sysfs_trig_list, l) if (id == t->id) { foundit = true; break; } if (!foundit) { mutex_unlock(&iio_syfs_trig_list_mut); return -EINVAL; } iio_trigger_unregister(t->trig); iio_free_trigger(t->trig); list_del(&t->l); kfree(t); module_put(THIS_MODULE); mutex_unlock(&iio_syfs_trig_list_mut); return 0; } static int __init iio_sysfs_trig_init(void) { device_initialize(&iio_sysfs_trig_dev); dev_set_name(&iio_sysfs_trig_dev, "iio_sysfs_trigger"); return device_add(&iio_sysfs_trig_dev); } module_init(iio_sysfs_trig_init); static void __exit iio_sysfs_trig_exit(void) { device_unregister(&iio_sysfs_trig_dev); } module_exit(iio_sysfs_trig_exit); MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); 
MODULE_DESCRIPTION("Sysfs based trigger for the iio subsystem"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:iio-trig-sysfs");
gpl-2.0
Frontier314/frontkernel_kitkat
fs/ocfs2/dlm/dlmlock.c
2957
20105
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * dlmlock.c * * underlying calls for lock creation * * Copyright (C) 2004 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. * */ #include <linux/module.h> #include <linux/fs.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> #include <linux/init.h> #include <linux/sysctl.h> #include <linux/random.h> #include <linux/blkdev.h> #include <linux/socket.h> #include <linux/inet.h> #include <linux/spinlock.h> #include <linux/delay.h> #include "cluster/heartbeat.h" #include "cluster/nodemanager.h" #include "cluster/tcp.h" #include "dlmapi.h" #include "dlmcommon.h" #include "dlmconvert.h" #define MLOG_MASK_PREFIX ML_DLM #include "cluster/masklog.h" static struct kmem_cache *dlm_lock_cache = NULL; static DEFINE_SPINLOCK(dlm_cookie_lock); static u64 dlm_next_cookie = 1; static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags); static void dlm_init_lock(struct dlm_lock *newlock, int type, u8 node, u64 cookie); static void dlm_lock_release(struct kref *kref); static void dlm_lock_detach_lockres(struct dlm_lock *lock); int dlm_init_lock_cache(void) { dlm_lock_cache = kmem_cache_create("o2dlm_lock", sizeof(struct dlm_lock), 0, 
SLAB_HWCACHE_ALIGN, NULL); if (dlm_lock_cache == NULL) return -ENOMEM; return 0; } void dlm_destroy_lock_cache(void) { if (dlm_lock_cache) kmem_cache_destroy(dlm_lock_cache); } /* Tell us whether we can grant a new lock request. * locking: * caller needs: res->spinlock * taken: none * held on exit: none * returns: 1 if the lock can be granted, 0 otherwise. */ static int dlm_can_grant_new_lock(struct dlm_lock_resource *res, struct dlm_lock *lock) { struct list_head *iter; struct dlm_lock *tmplock; list_for_each(iter, &res->granted) { tmplock = list_entry(iter, struct dlm_lock, list); if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type)) return 0; } list_for_each(iter, &res->converting) { tmplock = list_entry(iter, struct dlm_lock, list); if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type)) return 0; if (!dlm_lock_compatible(tmplock->ml.convert_type, lock->ml.type)) return 0; } return 1; } /* performs lock creation at the lockres master site * locking: * caller needs: none * taken: takes and drops res->spinlock * held on exit: none * returns: DLM_NORMAL, DLM_NOTQUEUED */ static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags) { int call_ast = 0, kick_thread = 0; enum dlm_status status = DLM_NORMAL; mlog(0, "type=%d\n", lock->ml.type); spin_lock(&res->spinlock); /* if called from dlm_create_lock_handler, need to * ensure it will not sleep in dlm_wait_on_lockres */ status = __dlm_lockres_state_to_status(res); if (status != DLM_NORMAL && lock->ml.node != dlm->node_num) { /* erf. state changed after lock was dropped. 
*/ spin_unlock(&res->spinlock); dlm_error(status); return status; } __dlm_wait_on_lockres(res); __dlm_lockres_reserve_ast(res); if (dlm_can_grant_new_lock(res, lock)) { mlog(0, "I can grant this lock right away\n"); /* got it right away */ lock->lksb->status = DLM_NORMAL; status = DLM_NORMAL; dlm_lock_get(lock); list_add_tail(&lock->list, &res->granted); /* for the recovery lock, we can't allow the ast * to be queued since the dlmthread is already * frozen. but the recovery lock is always locked * with LKM_NOQUEUE so we do not need the ast in * this special case */ if (!dlm_is_recovery_lock(res->lockname.name, res->lockname.len)) { kick_thread = 1; call_ast = 1; } else { mlog(0, "%s: returning DLM_NORMAL to " "node %u for reco lock\n", dlm->name, lock->ml.node); } } else { /* for NOQUEUE request, unless we get the * lock right away, return DLM_NOTQUEUED */ if (flags & LKM_NOQUEUE) { status = DLM_NOTQUEUED; if (dlm_is_recovery_lock(res->lockname.name, res->lockname.len)) { mlog(0, "%s: returning NOTQUEUED to " "node %u for reco lock\n", dlm->name, lock->ml.node); } } else { dlm_lock_get(lock); list_add_tail(&lock->list, &res->blocked); kick_thread = 1; } } /* reduce the inflight count, this may result in the lockres * being purged below during calc_usage */ if (lock->ml.node == dlm->node_num) dlm_lockres_drop_inflight_ref(dlm, res); spin_unlock(&res->spinlock); wake_up(&res->wq); /* either queue the ast or release it */ if (call_ast) dlm_queue_ast(dlm, lock); else dlm_lockres_release_ast(dlm, res); dlm_lockres_calc_usage(dlm, res); if (kick_thread) dlm_kick_thread(dlm, res); return status; } void dlm_revert_pending_lock(struct dlm_lock_resource *res, struct dlm_lock *lock) { /* remove from local queue if it failed */ list_del_init(&lock->list); lock->lksb->flags &= ~DLM_LKSB_GET_LVB; } /* * locking: * caller needs: none * taken: takes and drops res->spinlock * held on exit: none * returns: DLM_DENIED, DLM_RECOVERING, or net status */ static enum dlm_status 
dlmlock_remote(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags) { enum dlm_status status = DLM_DENIED; int lockres_changed = 1; mlog(0, "type=%d, lockres %.*s, flags = 0x%x\n", lock->ml.type, res->lockname.len, res->lockname.name, flags); spin_lock(&res->spinlock); /* will exit this call with spinlock held */ __dlm_wait_on_lockres(res); res->state |= DLM_LOCK_RES_IN_PROGRESS; /* add lock to local (secondary) queue */ dlm_lock_get(lock); list_add_tail(&lock->list, &res->blocked); lock->lock_pending = 1; spin_unlock(&res->spinlock); /* spec seems to say that you will get DLM_NORMAL when the lock * has been queued, meaning we need to wait for a reply here. */ status = dlm_send_remote_lock_request(dlm, res, lock, flags); spin_lock(&res->spinlock); res->state &= ~DLM_LOCK_RES_IN_PROGRESS; lock->lock_pending = 0; if (status != DLM_NORMAL) { if (status == DLM_RECOVERING && dlm_is_recovery_lock(res->lockname.name, res->lockname.len)) { /* recovery lock was mastered by dead node. * we need to have calc_usage shoot down this * lockres and completely remaster it. */ mlog(0, "%s: recovery lock was owned by " "dead node %u, remaster it now.\n", dlm->name, res->owner); } else if (status != DLM_NOTQUEUED) { /* * DO NOT call calc_usage, as this would unhash * the remote lockres before we ever get to use * it. treat as if we never made any change to * the lockres. */ lockres_changed = 0; dlm_error(status); } dlm_revert_pending_lock(res, lock); dlm_lock_put(lock); } else if (dlm_is_recovery_lock(res->lockname.name, res->lockname.len)) { /* special case for the $RECOVERY lock. * there will never be an AST delivered to put * this lock on the proper secondary queue * (granted), so do it manually. 
*/ mlog(0, "%s: $RECOVERY lock for this node (%u) is " "mastered by %u; got lock, manually granting (no ast)\n", dlm->name, dlm->node_num, res->owner); list_move_tail(&lock->list, &res->granted); } spin_unlock(&res->spinlock); if (lockres_changed) dlm_lockres_calc_usage(dlm, res); wake_up(&res->wq); return status; } /* for remote lock creation. * locking: * caller needs: none, but need res->state & DLM_LOCK_RES_IN_PROGRESS * taken: none * held on exit: none * returns: DLM_NOLOCKMGR, or net status */ static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags) { struct dlm_create_lock create; int tmpret, status = 0; enum dlm_status ret; memset(&create, 0, sizeof(create)); create.node_idx = dlm->node_num; create.requested_type = lock->ml.type; create.cookie = lock->ml.cookie; create.namelen = res->lockname.len; create.flags = cpu_to_be32(flags); memcpy(create.name, res->lockname.name, create.namelen); tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create, sizeof(create), res->owner, &status); if (tmpret >= 0) { // successfully sent and received ret = status; // this is already a dlm_status if (ret == DLM_REJECTED) { mlog(ML_ERROR, "%s:%.*s: BUG. this is a stale lockres " "no longer owned by %u. 
that node is coming back " "up currently.\n", dlm->name, create.namelen, create.name, res->owner); dlm_print_one_lock_resource(res); BUG(); } } else { mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to " "node %u\n", tmpret, DLM_CREATE_LOCK_MSG, dlm->key, res->owner); if (dlm_is_host_down(tmpret)) { ret = DLM_RECOVERING; mlog(0, "node %u died so returning DLM_RECOVERING " "from lock message!\n", res->owner); } else { ret = dlm_err_to_dlm_status(tmpret); } } return ret; } void dlm_lock_get(struct dlm_lock *lock) { kref_get(&lock->lock_refs); } void dlm_lock_put(struct dlm_lock *lock) { kref_put(&lock->lock_refs, dlm_lock_release); } static void dlm_lock_release(struct kref *kref) { struct dlm_lock *lock; lock = container_of(kref, struct dlm_lock, lock_refs); BUG_ON(!list_empty(&lock->list)); BUG_ON(!list_empty(&lock->ast_list)); BUG_ON(!list_empty(&lock->bast_list)); BUG_ON(lock->ast_pending); BUG_ON(lock->bast_pending); dlm_lock_detach_lockres(lock); if (lock->lksb_kernel_allocated) { mlog(0, "freeing kernel-allocated lksb\n"); kfree(lock->lksb); } kmem_cache_free(dlm_lock_cache, lock); } /* associate a lock with it's lockres, getting a ref on the lockres */ void dlm_lock_attach_lockres(struct dlm_lock *lock, struct dlm_lock_resource *res) { dlm_lockres_get(res); lock->lockres = res; } /* drop ref on lockres, if there is still one associated with lock */ static void dlm_lock_detach_lockres(struct dlm_lock *lock) { struct dlm_lock_resource *res; res = lock->lockres; if (res) { lock->lockres = NULL; mlog(0, "removing lock's lockres reference\n"); dlm_lockres_put(res); } } static void dlm_init_lock(struct dlm_lock *newlock, int type, u8 node, u64 cookie) { INIT_LIST_HEAD(&newlock->list); INIT_LIST_HEAD(&newlock->ast_list); INIT_LIST_HEAD(&newlock->bast_list); spin_lock_init(&newlock->spinlock); newlock->ml.type = type; newlock->ml.convert_type = LKM_IVMODE; newlock->ml.highest_blocked = LKM_IVMODE; newlock->ml.node = node; newlock->ml.pad1 = 0; 
newlock->ml.list = 0; newlock->ml.flags = 0; newlock->ast = NULL; newlock->bast = NULL; newlock->astdata = NULL; newlock->ml.cookie = cpu_to_be64(cookie); newlock->ast_pending = 0; newlock->bast_pending = 0; newlock->convert_pending = 0; newlock->lock_pending = 0; newlock->unlock_pending = 0; newlock->cancel_pending = 0; newlock->lksb_kernel_allocated = 0; kref_init(&newlock->lock_refs); } struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie, struct dlm_lockstatus *lksb) { struct dlm_lock *lock; int kernel_allocated = 0; lock = kmem_cache_zalloc(dlm_lock_cache, GFP_NOFS); if (!lock) return NULL; if (!lksb) { /* zero memory only if kernel-allocated */ lksb = kzalloc(sizeof(*lksb), GFP_NOFS); if (!lksb) { kfree(lock); return NULL; } kernel_allocated = 1; } dlm_init_lock(lock, type, node, cookie); if (kernel_allocated) lock->lksb_kernel_allocated = 1; lock->lksb = lksb; lksb->lockid = lock; return lock; } /* handler for lock creation net message * locking: * caller needs: none * taken: takes and drops res->spinlock * held on exit: none * returns: DLM_NORMAL, DLM_SYSERR, DLM_IVLOCKID, DLM_NOTQUEUED */ int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data) { struct dlm_ctxt *dlm = data; struct dlm_create_lock *create = (struct dlm_create_lock *)msg->buf; struct dlm_lock_resource *res = NULL; struct dlm_lock *newlock = NULL; struct dlm_lockstatus *lksb = NULL; enum dlm_status status = DLM_NORMAL; char *name; unsigned int namelen; BUG_ON(!dlm); if (!dlm_grab(dlm)) return DLM_REJECTED; name = create->name; namelen = create->namelen; status = DLM_REJECTED; if (!dlm_domain_fully_joined(dlm)) { mlog(ML_ERROR, "Domain %s not fully joined, but node %u is " "sending a create_lock message for lock %.*s!\n", dlm->name, create->node_idx, namelen, name); dlm_error(status); goto leave; } status = DLM_IVBUFLEN; if (namelen > DLM_LOCKID_NAME_MAX) { dlm_error(status); goto leave; } status = DLM_SYSERR; newlock = 
dlm_new_lock(create->requested_type, create->node_idx, be64_to_cpu(create->cookie), NULL); if (!newlock) { dlm_error(status); goto leave; } lksb = newlock->lksb; if (be32_to_cpu(create->flags) & LKM_GET_LVB) { lksb->flags |= DLM_LKSB_GET_LVB; mlog(0, "set DLM_LKSB_GET_LVB flag\n"); } status = DLM_IVLOCKID; res = dlm_lookup_lockres(dlm, name, namelen); if (!res) { dlm_error(status); goto leave; } spin_lock(&res->spinlock); status = __dlm_lockres_state_to_status(res); spin_unlock(&res->spinlock); if (status != DLM_NORMAL) { mlog(0, "lockres recovering/migrating/in-progress\n"); goto leave; } dlm_lock_attach_lockres(newlock, res); status = dlmlock_master(dlm, res, newlock, be32_to_cpu(create->flags)); leave: if (status != DLM_NORMAL) if (newlock) dlm_lock_put(newlock); if (res) dlm_lockres_put(res); dlm_put(dlm); return status; } /* fetch next node-local (u8 nodenum + u56 cookie) into u64 */ static inline void dlm_get_next_cookie(u8 node_num, u64 *cookie) { u64 tmpnode = node_num; /* shift single byte of node num into top 8 bits */ tmpnode <<= 56; spin_lock(&dlm_cookie_lock); *cookie = (dlm_next_cookie | tmpnode); if (++dlm_next_cookie & 0xff00000000000000ull) { mlog(0, "This node's cookie will now wrap!\n"); dlm_next_cookie = 1; } spin_unlock(&dlm_cookie_lock); } enum dlm_status dlmlock(struct dlm_ctxt *dlm, int mode, struct dlm_lockstatus *lksb, int flags, const char *name, int namelen, dlm_astlockfunc_t *ast, void *data, dlm_bastlockfunc_t *bast) { enum dlm_status status; struct dlm_lock_resource *res = NULL; struct dlm_lock *lock = NULL; int convert = 0, recovery = 0; /* yes this function is a mess. * TODO: clean this up. 
lots of common code in the * lock and convert paths, especially in the retry blocks */ if (!lksb) { dlm_error(DLM_BADARGS); return DLM_BADARGS; } status = DLM_BADPARAM; if (mode != LKM_EXMODE && mode != LKM_PRMODE && mode != LKM_NLMODE) { dlm_error(status); goto error; } if (flags & ~LKM_VALID_FLAGS) { dlm_error(status); goto error; } convert = (flags & LKM_CONVERT); recovery = (flags & LKM_RECOVERY); if (recovery && (!dlm_is_recovery_lock(name, namelen) || convert) ) { dlm_error(status); goto error; } if (convert && (flags & LKM_LOCAL)) { mlog(ML_ERROR, "strange LOCAL convert request!\n"); goto error; } if (convert) { /* CONVERT request */ /* if converting, must pass in a valid dlm_lock */ lock = lksb->lockid; if (!lock) { mlog(ML_ERROR, "NULL lock pointer in convert " "request\n"); goto error; } res = lock->lockres; if (!res) { mlog(ML_ERROR, "NULL lockres pointer in convert " "request\n"); goto error; } dlm_lockres_get(res); /* XXX: for ocfs2 purposes, the ast/bast/astdata/lksb are * static after the original lock call. convert requests will * ensure that everything is the same, or return DLM_BADARGS. * this means that DLM_DENIED_NOASTS will never be returned. */ if (lock->lksb != lksb || lock->ast != ast || lock->bast != bast || lock->astdata != data) { status = DLM_BADARGS; mlog(ML_ERROR, "new args: lksb=%p, ast=%p, bast=%p, " "astdata=%p\n", lksb, ast, bast, data); mlog(ML_ERROR, "orig args: lksb=%p, ast=%p, bast=%p, " "astdata=%p\n", lock->lksb, lock->ast, lock->bast, lock->astdata); goto error; } retry_convert: dlm_wait_for_recovery(dlm); if (res->owner == dlm->node_num) status = dlmconvert_master(dlm, res, lock, flags, mode); else status = dlmconvert_remote(dlm, res, lock, flags, mode); if (status == DLM_RECOVERING || status == DLM_MIGRATING || status == DLM_FORWARD) { /* for now, see how this works without sleeping * and just retry right away. 
I suspect the reco * or migration will complete fast enough that * no waiting will be necessary */ mlog(0, "retrying convert with migration/recovery/" "in-progress\n"); msleep(100); goto retry_convert; } } else { u64 tmpcookie; /* LOCK request */ status = DLM_BADARGS; if (!name) { dlm_error(status); goto error; } status = DLM_IVBUFLEN; if (namelen > DLM_LOCKID_NAME_MAX || namelen < 1) { dlm_error(status); goto error; } dlm_get_next_cookie(dlm->node_num, &tmpcookie); lock = dlm_new_lock(mode, dlm->node_num, tmpcookie, lksb); if (!lock) { dlm_error(status); goto error; } if (!recovery) dlm_wait_for_recovery(dlm); /* find or create the lock resource */ res = dlm_get_lock_resource(dlm, name, namelen, flags); if (!res) { status = DLM_IVLOCKID; dlm_error(status); goto error; } mlog(0, "type=%d, flags = 0x%x\n", mode, flags); mlog(0, "creating lock: lock=%p res=%p\n", lock, res); dlm_lock_attach_lockres(lock, res); lock->ast = ast; lock->bast = bast; lock->astdata = data; retry_lock: if (flags & LKM_VALBLK) { mlog(0, "LKM_VALBLK passed by caller\n"); /* LVB requests for non PR, PW or EX locks are * ignored. */ if (mode < LKM_PRMODE) flags &= ~LKM_VALBLK; else { flags |= LKM_GET_LVB; lock->lksb->flags |= DLM_LKSB_GET_LVB; } } if (res->owner == dlm->node_num) status = dlmlock_master(dlm, res, lock, flags); else status = dlmlock_remote(dlm, res, lock, flags); if (status == DLM_RECOVERING || status == DLM_MIGRATING || status == DLM_FORWARD) { mlog(0, "retrying lock with migration/" "recovery/in progress\n"); msleep(100); /* no waiting for dlm_reco_thread */ if (recovery) { if (status != DLM_RECOVERING) goto retry_lock; mlog(0, "%s: got RECOVERING " "for $RECOVERY lock, master " "was %u\n", dlm->name, res->owner); /* wait to see the node go down, then * drop down and allow the lockres to * get cleaned up. need to remaster. 
*/ dlm_wait_for_node_death(dlm, res->owner, DLM_NODE_DEATH_WAIT_MAX); } else { dlm_wait_for_recovery(dlm); goto retry_lock; } } if (status != DLM_NORMAL) { lock->lksb->flags &= ~DLM_LKSB_GET_LVB; if (status != DLM_NOTQUEUED) dlm_error(status); goto error; } } error: if (status != DLM_NORMAL) { if (lock && !convert) dlm_lock_put(lock); // this is kind of unnecessary lksb->status = status; } /* put lockres ref from the convert path * or from dlm_get_lock_resource */ if (res) dlm_lockres_put(res); return status; } EXPORT_SYMBOL_GPL(dlmlock);
gpl-2.0
litepro/DK_S2_ICS_KERNEL
arch/x86/boot/compressed/mkpiggy.c
3213
2833
/* ----------------------------------------------------------------------- *
 *
 *   Copyright (C) 2009 Intel Corporation. All rights reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License version
 *   2 as published by the Free Software Foundation.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 *   02110-1301, USA.
 *
 *   H. Peter Anvin <hpa@linux.intel.com>
 *
 * ----------------------------------------------------------------------- */

/*
 * Compute the desired load offset from a compressed program; outputs
 * a small assembly wrapper with the appropriate symbols defined.
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

/*
 * Read a 32-bit little-endian value from a possibly unaligned buffer.
 * The last four bytes of a gzip stream hold the uncompressed length,
 * stored little-endian regardless of the host's byte order.
 */
static uint32_t getle32(const void *p)
{
	const uint8_t *cp = p;

	return (uint32_t)cp[0] + ((uint32_t)cp[1] << 8) +
		((uint32_t)cp[2] << 16) + ((uint32_t)cp[3] << 24);
}

int main(int argc, char *argv[])
{
	uint32_t olen;		/* uncompressed (output) size */
	long ilen;		/* compressed (input) size */
	unsigned long offs;
	FILE *f;

	if (argc < 2) {
		fprintf(stderr, "Usage: %s compressed_file\n", argv[0]);
		return 1;
	}

	/* Get the information for the compressed kernel image first */

	f = fopen(argv[1], "r");
	if (!f) {
		perror(argv[1]);
		return 1;
	}

	if (fseek(f, -4L, SEEK_END)) {
		perror(argv[1]);
		fclose(f);
		/*
		 * BUGFIX: the original fell through after a failed seek and
		 * went on to fread()/ftell() at an undefined file position,
		 * emitting garbage symbol values instead of failing the build.
		 */
		return 1;
	}

	if (fread(&olen, sizeof(olen), 1, f) != 1) {
		perror(argv[1]);
		fclose(f);
		return 1;
	}

	/* After reading the 4-byte trailer we are at EOF, so ftell() is
	 * the total compressed file size. */
	ilen = ftell(f);
	olen = getle32(&olen);
	fclose(f);

	/*
	 * Now we have the input (compressed) and output (uncompressed)
	 * sizes, compute the necessary decompression offset...
	 */

	offs = (olen > ilen) ? olen - ilen : 0;
	offs += olen >> 12;	/* Add 8 bytes for each 32K block */
	offs += 64*1024 + 128;	/* Add 64K + 128 bytes slack */
	offs = (offs+4095) & ~4095; /* Round to a 4K boundary */

	printf(".section \".rodata..compressed\",\"a\",@progbits\n");
	printf(".globl z_input_len\n");
	/* BUGFIX: ilen is a (signed) long; cast to match the %lu spec. */
	printf("z_input_len = %lu\n", (unsigned long)ilen);
	printf(".globl z_output_len\n");
	printf("z_output_len = %lu\n", (unsigned long)olen);
	printf(".globl z_extract_offset\n");
	printf("z_extract_offset = 0x%lx\n", offs);
	/* z_extract_offset_negative allows simplification of head_32.S */
	printf(".globl z_extract_offset_negative\n");
	printf("z_extract_offset_negative = -0x%lx\n", offs);

	printf(".globl input_data, input_data_end\n");
	printf("input_data:\n");
	printf(".incbin \"%s\"\n", argv[1]);
	printf("input_data_end:\n");

	return 0;
}
gpl-2.0
NoelMacwan/SXDNickiSS
drivers/net/wireless/p54/txrx.c
3981
25900
/* * Common code for mac80211 Prism54 drivers * * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> * Copyright (c) 2007-2009, Christian Lamparter <chunkeey@web.de> * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> * * Based on: * - the islsm (softmac prism54) driver, which is: * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al. * - stlc45xx driver * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies). * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/export.h> #include <linux/init.h> #include <linux/firmware.h> #include <linux/etherdevice.h> #include <asm/div64.h> #include <net/mac80211.h> #include "p54.h" #include "lmac.h" #ifdef P54_MM_DEBUG static void p54_dump_tx_queue(struct p54_common *priv) { unsigned long flags; struct ieee80211_tx_info *info; struct p54_tx_info *range; struct sk_buff *skb; struct p54_hdr *hdr; unsigned int i = 0; u32 prev_addr; u32 largest_hole = 0, free; spin_lock_irqsave(&priv->tx_queue.lock, flags); wiphy_debug(priv->hw->wiphy, "/ --- tx queue dump (%d entries) ---\n", skb_queue_len(&priv->tx_queue)); prev_addr = priv->rx_start; skb_queue_walk(&priv->tx_queue, skb) { info = IEEE80211_SKB_CB(skb); range = (void *) info->rate_driver_data; hdr = (void *) skb->data; free = range->start_addr - prev_addr; wiphy_debug(priv->hw->wiphy, "| [%02d] => [skb:%p skb_len:0x%04x " "hdr:{flags:%02x len:%04x req_id:%04x type:%02x} " "mem:{start:%04x end:%04x, free:%d}]\n", i++, skb, skb->len, le16_to_cpu(hdr->flags), le16_to_cpu(hdr->len), le32_to_cpu(hdr->req_id), le16_to_cpu(hdr->type), range->start_addr, range->end_addr, free); prev_addr = range->end_addr; largest_hole = max(largest_hole, free); } free = priv->rx_end - prev_addr; largest_hole = max(largest_hole, free); wiphy_debug(priv->hw->wiphy, "\\ --- [free: %d], largest free block: %d ---\n", free, 
largest_hole); spin_unlock_irqrestore(&priv->tx_queue.lock, flags); } #endif /* P54_MM_DEBUG */ /* * So, the firmware is somewhat stupid and doesn't know what places in its * memory incoming data should go to. By poking around in the firmware, we * can find some unused memory to upload our packets to. However, data that we * want the card to TX needs to stay intact until the card has told us that * it is done with it. This function finds empty places we can upload to and * marks allocated areas as reserved if necessary. p54_find_and_unlink_skb or * p54_free_skb frees allocated areas. */ static int p54_assign_address(struct p54_common *priv, struct sk_buff *skb) { struct sk_buff *entry, *target_skb = NULL; struct ieee80211_tx_info *info; struct p54_tx_info *range; struct p54_hdr *data = (void *) skb->data; unsigned long flags; u32 last_addr = priv->rx_start; u32 target_addr = priv->rx_start; u16 len = priv->headroom + skb->len + priv->tailroom + 3; info = IEEE80211_SKB_CB(skb); range = (void *) info->rate_driver_data; len = (range->extra_len + len) & ~0x3; spin_lock_irqsave(&priv->tx_queue.lock, flags); if (unlikely(skb_queue_len(&priv->tx_queue) == 32)) { /* * The tx_queue is now really full. * * TODO: check if the device has crashed and reset it. 
*/ spin_unlock_irqrestore(&priv->tx_queue.lock, flags); return -EBUSY; } skb_queue_walk(&priv->tx_queue, entry) { u32 hole_size; info = IEEE80211_SKB_CB(entry); range = (void *) info->rate_driver_data; hole_size = range->start_addr - last_addr; if (!target_skb && hole_size >= len) { target_skb = entry->prev; hole_size -= len; target_addr = last_addr; break; } last_addr = range->end_addr; } if (unlikely(!target_skb)) { if (priv->rx_end - last_addr >= len) { target_skb = priv->tx_queue.prev; if (!skb_queue_empty(&priv->tx_queue)) { info = IEEE80211_SKB_CB(target_skb); range = (void *)info->rate_driver_data; target_addr = range->end_addr; } } else { spin_unlock_irqrestore(&priv->tx_queue.lock, flags); return -ENOSPC; } } info = IEEE80211_SKB_CB(skb); range = (void *) info->rate_driver_data; range->start_addr = target_addr; range->end_addr = target_addr + len; data->req_id = cpu_to_le32(target_addr + priv->headroom); if (IS_DATA_FRAME(skb) && unlikely(GET_HW_QUEUE(skb) == P54_QUEUE_BEACON)) priv->beacon_req_id = data->req_id; __skb_queue_after(&priv->tx_queue, target_skb, skb); spin_unlock_irqrestore(&priv->tx_queue.lock, flags); return 0; } static void p54_tx_pending(struct p54_common *priv) { struct sk_buff *skb; int ret; skb = skb_dequeue(&priv->tx_pending); if (unlikely(!skb)) return ; ret = p54_assign_address(priv, skb); if (unlikely(ret)) skb_queue_head(&priv->tx_pending, skb); else priv->tx(priv->hw, skb); } static void p54_wake_queues(struct p54_common *priv) { unsigned long flags; unsigned int i; if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED)) return ; p54_tx_pending(priv); spin_lock_irqsave(&priv->tx_stats_lock, flags); for (i = 0; i < priv->hw->queues; i++) { if (priv->tx_stats[i + P54_QUEUE_DATA].len < priv->tx_stats[i + P54_QUEUE_DATA].limit) ieee80211_wake_queue(priv->hw, i); } spin_unlock_irqrestore(&priv->tx_stats_lock, flags); } static int p54_tx_qos_accounting_alloc(struct p54_common *priv, struct sk_buff *skb, const u16 p54_queue) { struct 
p54_tx_queue_stats *queue; unsigned long flags; if (WARN_ON(p54_queue >= P54_QUEUE_NUM)) return -EINVAL; queue = &priv->tx_stats[p54_queue]; spin_lock_irqsave(&priv->tx_stats_lock, flags); if (unlikely(queue->len >= queue->limit && IS_QOS_QUEUE(p54_queue))) { spin_unlock_irqrestore(&priv->tx_stats_lock, flags); return -ENOSPC; } queue->len++; queue->count++; if (unlikely(queue->len == queue->limit && IS_QOS_QUEUE(p54_queue))) { u16 ac_queue = p54_queue - P54_QUEUE_DATA; ieee80211_stop_queue(priv->hw, ac_queue); } spin_unlock_irqrestore(&priv->tx_stats_lock, flags); return 0; } static void p54_tx_qos_accounting_free(struct p54_common *priv, struct sk_buff *skb) { if (IS_DATA_FRAME(skb)) { unsigned long flags; spin_lock_irqsave(&priv->tx_stats_lock, flags); priv->tx_stats[GET_HW_QUEUE(skb)].len--; spin_unlock_irqrestore(&priv->tx_stats_lock, flags); if (unlikely(GET_HW_QUEUE(skb) == P54_QUEUE_BEACON)) { if (priv->beacon_req_id == GET_REQ_ID(skb)) { /* this is the active beacon set anymore */ priv->beacon_req_id = 0; } complete(&priv->beacon_comp); } } p54_wake_queues(priv); } void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb) { struct p54_common *priv = dev->priv; if (unlikely(!skb)) return ; skb_unlink(skb, &priv->tx_queue); p54_tx_qos_accounting_free(priv, skb); ieee80211_free_txskb(dev, skb); } EXPORT_SYMBOL_GPL(p54_free_skb); static struct sk_buff *p54_find_and_unlink_skb(struct p54_common *priv, const __le32 req_id) { struct sk_buff *entry; unsigned long flags; spin_lock_irqsave(&priv->tx_queue.lock, flags); skb_queue_walk(&priv->tx_queue, entry) { struct p54_hdr *hdr = (struct p54_hdr *) entry->data; if (hdr->req_id == req_id) { __skb_unlink(entry, &priv->tx_queue); spin_unlock_irqrestore(&priv->tx_queue.lock, flags); p54_tx_qos_accounting_free(priv, entry); return entry; } } spin_unlock_irqrestore(&priv->tx_queue.lock, flags); return NULL; } void p54_tx(struct p54_common *priv, struct sk_buff *skb) { skb_queue_tail(&priv->tx_pending, skb); 
p54_tx_pending(priv); } static int p54_rssi_to_dbm(struct p54_common *priv, int rssi) { if (priv->rxhw != 5) { return ((rssi * priv->cur_rssi->mul) / 64 + priv->cur_rssi->add) / 4; } else { /* * TODO: find the correct formula */ return rssi / 2 - 110; } } /* * Even if the firmware is capable of dealing with incoming traffic, * while dozing, we have to prepared in case mac80211 uses PS-POLL * to retrieve outstanding frames from our AP. * (see comment in net/mac80211/mlme.c @ line 1993) */ static void p54_pspoll_workaround(struct p54_common *priv, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (void *) skb->data; struct ieee80211_tim_ie *tim_ie; u8 *tim; u8 tim_len; bool new_psm; /* only beacons have a TIM IE */ if (!ieee80211_is_beacon(hdr->frame_control)) return; if (!priv->aid) return; /* only consider beacons from the associated BSSID */ if (compare_ether_addr(hdr->addr3, priv->bssid)) return; tim = p54_find_ie(skb, WLAN_EID_TIM); if (!tim) return; tim_len = tim[1]; tim_ie = (struct ieee80211_tim_ie *) &tim[2]; new_psm = ieee80211_check_tim(tim_ie, tim_len, priv->aid); if (new_psm != priv->powersave_override) { priv->powersave_override = new_psm; p54_set_ps(priv); } } static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb) { struct p54_rx_data *hdr = (struct p54_rx_data *) skb->data; struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); u16 freq = le16_to_cpu(hdr->freq); size_t header_len = sizeof(*hdr); u32 tsf32; u8 rate = hdr->rate & 0xf; /* * If the device is in a unspecified state we have to * ignore all data frames. Else we could end up with a * nasty crash. 
*/ if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED)) return 0; if (!(hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_IN_FCS_GOOD))) return 0; if (hdr->decrypt_status == P54_DECRYPT_OK) rx_status->flag |= RX_FLAG_DECRYPTED; if ((hdr->decrypt_status == P54_DECRYPT_FAIL_MICHAEL) || (hdr->decrypt_status == P54_DECRYPT_FAIL_TKIP)) rx_status->flag |= RX_FLAG_MMIC_ERROR; rx_status->signal = p54_rssi_to_dbm(priv, hdr->rssi); if (hdr->rate & 0x10) rx_status->flag |= RX_FLAG_SHORTPRE; if (priv->hw->conf.channel->band == IEEE80211_BAND_5GHZ) rx_status->rate_idx = (rate < 4) ? 0 : rate - 4; else rx_status->rate_idx = rate; rx_status->freq = freq; rx_status->band = priv->hw->conf.channel->band; rx_status->antenna = hdr->antenna; tsf32 = le32_to_cpu(hdr->tsf32); if (tsf32 < priv->tsf_low32) priv->tsf_high32++; rx_status->mactime = ((u64)priv->tsf_high32) << 32 | tsf32; priv->tsf_low32 = tsf32; rx_status->flag |= RX_FLAG_MACTIME_MPDU; if (hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN)) header_len += hdr->align[0]; skb_pull(skb, header_len); skb_trim(skb, le16_to_cpu(hdr->len)); if (unlikely(priv->hw->conf.flags & IEEE80211_CONF_PS)) p54_pspoll_workaround(priv, skb); ieee80211_rx_irqsafe(priv->hw, skb); ieee80211_queue_delayed_work(priv->hw, &priv->work, msecs_to_jiffies(P54_STATISTICS_UPDATE)); return -1; } static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb) { struct p54_hdr *hdr = (struct p54_hdr *) skb->data; struct p54_frame_sent *payload = (struct p54_frame_sent *) hdr->data; struct ieee80211_tx_info *info; struct p54_hdr *entry_hdr; struct p54_tx_data *entry_data; struct sk_buff *entry; unsigned int pad = 0, frame_len; int count, idx; entry = p54_find_and_unlink_skb(priv, hdr->req_id); if (unlikely(!entry)) return ; frame_len = entry->len; info = IEEE80211_SKB_CB(entry); entry_hdr = (struct p54_hdr *) entry->data; entry_data = (struct p54_tx_data *) entry_hdr->data; priv->stats.dot11ACKFailureCount += payload->tries - 1; /* * Frames in 
P54_QUEUE_FWSCAN and P54_QUEUE_BEACON are * generated by the driver. Therefore tx_status is bogus * and we don't want to confuse the mac80211 stack. */ if (unlikely(entry_data->hw_queue < P54_QUEUE_FWSCAN)) { dev_kfree_skb_any(entry); return ; } /* * Clear manually, ieee80211_tx_info_clear_status would * clear the counts too and we need them. */ memset(&info->status.ampdu_ack_len, 0, sizeof(struct ieee80211_tx_info) - offsetof(struct ieee80211_tx_info, status.ampdu_ack_len)); BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, status.ampdu_ack_len) != 23); if (entry_hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN)) pad = entry_data->align[0]; /* walk through the rates array and adjust the counts */ count = payload->tries; for (idx = 0; idx < 4; idx++) { if (count >= info->status.rates[idx].count) { count -= info->status.rates[idx].count; } else if (count > 0) { info->status.rates[idx].count = count; count = 0; } else { info->status.rates[idx].idx = -1; info->status.rates[idx].count = 0; } } if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && !(payload->status & P54_TX_FAILED)) info->flags |= IEEE80211_TX_STAT_ACK; if (payload->status & P54_TX_PSM_CANCELLED) info->flags |= IEEE80211_TX_STAT_TX_FILTERED; info->status.ack_signal = p54_rssi_to_dbm(priv, (int)payload->ack_rssi); /* Undo all changes to the frame. */ switch (entry_data->key_type) { case P54_CRYPTO_TKIPMICHAEL: { u8 *iv = (u8 *)(entry_data->align + pad + entry_data->crypt_offset); /* Restore the original TKIP IV. 
*/ iv[2] = iv[0]; iv[0] = iv[1]; iv[1] = (iv[0] | 0x20) & 0x7f; /* WEPSeed - 8.3.2.2 */ frame_len -= 12; /* remove TKIP_MMIC + TKIP_ICV */ break; } case P54_CRYPTO_AESCCMP: frame_len -= 8; /* remove CCMP_MIC */ break; case P54_CRYPTO_WEP: frame_len -= 4; /* remove WEP_ICV */ break; } skb_trim(entry, frame_len); skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data)); ieee80211_tx_status_irqsafe(priv->hw, entry); } static void p54_rx_eeprom_readback(struct p54_common *priv, struct sk_buff *skb) { struct p54_hdr *hdr = (struct p54_hdr *) skb->data; struct p54_eeprom_lm86 *eeprom = (struct p54_eeprom_lm86 *) hdr->data; struct sk_buff *tmp; if (!priv->eeprom) return ; if (priv->fw_var >= 0x509) { memcpy(priv->eeprom, eeprom->v2.data, le16_to_cpu(eeprom->v2.len)); } else { memcpy(priv->eeprom, eeprom->v1.data, le16_to_cpu(eeprom->v1.len)); } priv->eeprom = NULL; tmp = p54_find_and_unlink_skb(priv, hdr->req_id); dev_kfree_skb_any(tmp); complete(&priv->eeprom_comp); } static void p54_rx_stats(struct p54_common *priv, struct sk_buff *skb) { struct p54_hdr *hdr = (struct p54_hdr *) skb->data; struct p54_statistics *stats = (struct p54_statistics *) hdr->data; struct sk_buff *tmp; struct ieee80211_channel *chan; unsigned int i, rssi, tx, cca, dtime, dtotal, dcca, dtx, drssi, unit; u32 tsf32; if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED)) return ; tsf32 = le32_to_cpu(stats->tsf32); if (tsf32 < priv->tsf_low32) priv->tsf_high32++; priv->tsf_low32 = tsf32; priv->stats.dot11RTSFailureCount = le32_to_cpu(stats->rts_fail); priv->stats.dot11RTSSuccessCount = le32_to_cpu(stats->rts_success); priv->stats.dot11FCSErrorCount = le32_to_cpu(stats->rx_bad_fcs); priv->noise = p54_rssi_to_dbm(priv, le32_to_cpu(stats->noise)); /* * STSW450X LMAC API page 26 - 3.8 Statistics * "The exact measurement period can be derived from the * timestamp member". 
*/ dtime = tsf32 - priv->survey_raw.timestamp; /* * STSW450X LMAC API page 26 - 3.8.1 Noise histogram * The LMAC samples RSSI, CCA and transmit state at regular * periods (typically 8 times per 1k [as in 1024] usec). */ cca = le32_to_cpu(stats->sample_cca); tx = le32_to_cpu(stats->sample_tx); rssi = 0; for (i = 0; i < ARRAY_SIZE(stats->sample_noise); i++) rssi += le32_to_cpu(stats->sample_noise[i]); dcca = cca - priv->survey_raw.cached_cca; drssi = rssi - priv->survey_raw.cached_rssi; dtx = tx - priv->survey_raw.cached_tx; dtotal = dcca + drssi + dtx; /* * update statistics when more than a second is over since the * last call, or when a update is badly needed. */ if (dtotal && (priv->update_stats || dtime >= USEC_PER_SEC) && dtime >= dtotal) { priv->survey_raw.timestamp = tsf32; priv->update_stats = false; unit = dtime / dtotal; if (dcca) { priv->survey_raw.cca += dcca * unit; priv->survey_raw.cached_cca = cca; } if (dtx) { priv->survey_raw.tx += dtx * unit; priv->survey_raw.cached_tx = tx; } if (drssi) { priv->survey_raw.rssi += drssi * unit; priv->survey_raw.cached_rssi = rssi; } /* 1024 usec / 8 times = 128 usec / time */ if (!(priv->phy_ps || priv->phy_idle)) priv->survey_raw.active += dtotal * unit; else priv->survey_raw.active += (dcca + dtx) * unit; } chan = priv->curchan; if (chan) { struct survey_info *survey = &priv->survey[chan->hw_value]; survey->noise = clamp_t(s8, priv->noise, -128, 127); survey->channel_time = priv->survey_raw.active; survey->channel_time_tx = priv->survey_raw.tx; survey->channel_time_busy = priv->survey_raw.tx + priv->survey_raw.cca; do_div(survey->channel_time, 1024); do_div(survey->channel_time_tx, 1024); do_div(survey->channel_time_busy, 1024); } tmp = p54_find_and_unlink_skb(priv, hdr->req_id); dev_kfree_skb_any(tmp); complete(&priv->stat_comp); } static void p54_rx_trap(struct p54_common *priv, struct sk_buff *skb) { struct p54_hdr *hdr = (struct p54_hdr *) skb->data; struct p54_trap *trap = (struct p54_trap *) hdr->data; u16 
event = le16_to_cpu(trap->event); u16 freq = le16_to_cpu(trap->frequency); switch (event) { case P54_TRAP_BEACON_TX: break; case P54_TRAP_RADAR: wiphy_info(priv->hw->wiphy, "radar (freq:%d MHz)\n", freq); break; case P54_TRAP_NO_BEACON: if (priv->vif) ieee80211_beacon_loss(priv->vif); break; case P54_TRAP_SCAN: break; case P54_TRAP_TBTT: break; case P54_TRAP_TIMER: break; case P54_TRAP_FAA_RADIO_OFF: wiphy_rfkill_set_hw_state(priv->hw->wiphy, true); break; case P54_TRAP_FAA_RADIO_ON: wiphy_rfkill_set_hw_state(priv->hw->wiphy, false); break; default: wiphy_info(priv->hw->wiphy, "received event:%x freq:%d\n", event, freq); break; } } static int p54_rx_control(struct p54_common *priv, struct sk_buff *skb) { struct p54_hdr *hdr = (struct p54_hdr *) skb->data; switch (le16_to_cpu(hdr->type)) { case P54_CONTROL_TYPE_TXDONE: p54_rx_frame_sent(priv, skb); break; case P54_CONTROL_TYPE_TRAP: p54_rx_trap(priv, skb); break; case P54_CONTROL_TYPE_BBP: break; case P54_CONTROL_TYPE_STAT_READBACK: p54_rx_stats(priv, skb); break; case P54_CONTROL_TYPE_EEPROM_READBACK: p54_rx_eeprom_readback(priv, skb); break; default: wiphy_debug(priv->hw->wiphy, "not handling 0x%02x type control frame\n", le16_to_cpu(hdr->type)); break; } return 0; } /* returns zero if skb can be reused */ int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb) { struct p54_common *priv = dev->priv; u16 type = le16_to_cpu(*((__le16 *)skb->data)); if (type & P54_HDR_FLAG_CONTROL) return p54_rx_control(priv, skb); else return p54_rx_data(priv, skb); } EXPORT_SYMBOL_GPL(p54_rx); static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb, struct ieee80211_tx_info *info, u8 *queue, u32 *extra_len, u16 *flags, u16 *aid, bool *burst_possible) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; if (ieee80211_is_data_qos(hdr->frame_control)) *burst_possible = true; else *burst_possible = false; if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) *flags |= P54_HDR_FLAG_DATA_OUT_SEQNR; if 
(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) *flags |= P54_HDR_FLAG_DATA_OUT_NOCANCEL; if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) *flags |= P54_HDR_FLAG_DATA_OUT_NOCANCEL; *queue = skb_get_queue_mapping(skb) + P54_QUEUE_DATA; switch (priv->mode) { case NL80211_IFTYPE_MONITOR: /* * We have to set P54_HDR_FLAG_DATA_OUT_PROMISC for * every frame in promiscuous/monitor mode. * see STSW45x0C LMAC API - page 12. */ *aid = 0; *flags |= P54_HDR_FLAG_DATA_OUT_PROMISC; break; case NL80211_IFTYPE_STATION: *aid = 1; break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_MESH_POINT: if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { *aid = 0; *queue = P54_QUEUE_CAB; return; } if (unlikely(ieee80211_is_mgmt(hdr->frame_control))) { if (ieee80211_is_probe_resp(hdr->frame_control)) { *aid = 0; *flags |= P54_HDR_FLAG_DATA_OUT_TIMESTAMP | P54_HDR_FLAG_DATA_OUT_NOCANCEL; return; } else if (ieee80211_is_beacon(hdr->frame_control)) { *aid = 0; if (info->flags & IEEE80211_TX_CTL_INJECTED) { /* * Injecting beacons on top of a AP is * not a good idea... nevertheless, * it should be doable. 
*/ return; } *flags |= P54_HDR_FLAG_DATA_OUT_TIMESTAMP; *queue = P54_QUEUE_BEACON; *extra_len = IEEE80211_MAX_TIM_LEN; return; } } if (info->control.sta) *aid = info->control.sta->aid; break; } } static u8 p54_convert_algo(u32 cipher) { switch (cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: return P54_CRYPTO_WEP; case WLAN_CIPHER_SUITE_TKIP: return P54_CRYPTO_TKIPMICHAEL; case WLAN_CIPHER_SUITE_CCMP: return P54_CRYPTO_AESCCMP; default: return 0; } } void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb) { struct p54_common *priv = dev->priv; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct p54_tx_info *p54info; struct p54_hdr *hdr; struct p54_tx_data *txhdr; unsigned int padding, len, extra_len = 0; int i, j, ridx; u16 hdr_flags = 0, aid = 0; u8 rate, queue = 0, crypt_offset = 0; u8 cts_rate = 0x20; u8 rc_flags; u8 calculated_tries[4]; u8 nrates = 0, nremaining = 8; bool burst_allowed = false; p54_tx_80211_header(priv, skb, info, &queue, &extra_len, &hdr_flags, &aid, &burst_allowed); if (p54_tx_qos_accounting_alloc(priv, skb, queue)) { ieee80211_free_txskb(dev, skb); return; } padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3; len = skb->len; if (info->control.hw_key) { crypt_offset = ieee80211_get_hdrlen_from_skb(skb); if (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) { u8 *iv = (u8 *)(skb->data + crypt_offset); /* * The firmware excepts that the IV has to have * this special format */ iv[1] = iv[0]; iv[0] = iv[2]; iv[2] = 0; } } txhdr = (struct p54_tx_data *) skb_push(skb, sizeof(*txhdr) + padding); hdr = (struct p54_hdr *) skb_push(skb, sizeof(*hdr)); if (padding) hdr_flags |= P54_HDR_FLAG_DATA_ALIGN; hdr->type = cpu_to_le16(aid); hdr->rts_tries = info->control.rates[0].count; /* * we register the rates in perfect order, and * RTS/CTS won't happen on 5 GHz */ cts_rate = info->control.rts_cts_rate_idx; memset(&txhdr->rateset, 0, sizeof(txhdr->rateset)); /* see how many rates got 
used */ for (i = 0; i < dev->max_rates; i++) { if (info->control.rates[i].idx < 0) break; nrates++; } /* limit tries to 8/nrates per rate */ for (i = 0; i < nrates; i++) { /* * The magic expression here is equivalent to 8/nrates for * all values that matter, but avoids division and jumps. * Note that nrates can only take the values 1 through 4. */ calculated_tries[i] = min_t(int, ((15 >> nrates) | 1) + 1, info->control.rates[i].count); nremaining -= calculated_tries[i]; } /* if there are tries left, distribute from back to front */ for (i = nrates - 1; nremaining > 0 && i >= 0; i--) { int tmp = info->control.rates[i].count - calculated_tries[i]; if (tmp <= 0) continue; /* RC requested more tries at this rate */ tmp = min_t(int, tmp, nremaining); calculated_tries[i] += tmp; nremaining -= tmp; } ridx = 0; for (i = 0; i < nrates && ridx < 8; i++) { /* we register the rates in perfect order */ rate = info->control.rates[i].idx; if (info->band == IEEE80211_BAND_5GHZ) rate += 4; /* store the count we actually calculated for TX status */ info->control.rates[i].count = calculated_tries[i]; rc_flags = info->control.rates[i].flags; if (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) { rate |= 0x10; cts_rate |= 0x10; } if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) { burst_allowed = false; rate |= 0x40; } else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { rate |= 0x20; burst_allowed = false; } for (j = 0; j < calculated_tries[i] && ridx < 8; j++) { txhdr->rateset[ridx] = rate; ridx++; } } if (burst_allowed) hdr_flags |= P54_HDR_FLAG_DATA_OUT_BURST; /* TODO: enable bursting */ hdr->flags = cpu_to_le16(hdr_flags); hdr->tries = ridx; txhdr->rts_rate_idx = 0; if (info->control.hw_key) { txhdr->key_type = p54_convert_algo(info->control.hw_key->cipher); txhdr->key_len = min((u8)16, info->control.hw_key->keylen); memcpy(txhdr->key, info->control.hw_key->key, txhdr->key_len); if (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) { /* reserve space for the MIC key */ len += 8; 
memcpy(skb_put(skb, 8), &(info->control.hw_key->key [NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY]), 8); } /* reserve some space for ICV */ len += info->control.hw_key->icv_len; memset(skb_put(skb, info->control.hw_key->icv_len), 0, info->control.hw_key->icv_len); } else { txhdr->key_type = 0; txhdr->key_len = 0; } txhdr->crypt_offset = crypt_offset; txhdr->hw_queue = queue; txhdr->backlog = priv->tx_stats[queue].len - 1; memset(txhdr->durations, 0, sizeof(txhdr->durations)); txhdr->tx_antenna = ((info->antenna_sel_tx == 0) ? 2 : info->antenna_sel_tx - 1) & priv->tx_diversity_mask; if (priv->rxhw == 5) { txhdr->longbow.cts_rate = cts_rate; txhdr->longbow.output_power = cpu_to_le16(priv->output_power); } else { txhdr->normal.output_power = priv->output_power; txhdr->normal.cts_rate = cts_rate; } if (padding) txhdr->align[0] = padding; hdr->len = cpu_to_le16(len); /* modifies skb->cb and with it info, so must be last! */ p54info = (void *) info->rate_driver_data; p54info->extra_len = extra_len; p54_tx(priv, skb); }
gpl-2.0
JijonHyuni/HyperKernel-JB
drivers/input/mouse/amimouse.c
4237
3618
/*
 * Amiga mouse driver for Linux/m68k
 *
 * Copyright (c) 2000-2002 Vojtech Pavlik
 *
 * Based on the work of:
 *	Michael Rausch		James Banks
 *	Matther Dillon		David Giller
 *	Nathan Laredo		Linus Torvalds
 *	Johan Myreen		Jes Sorensen
 *	Russell King
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

#include <asm/irq.h>
#include <asm/setup.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/amigahw.h>
#include <asm/amigaints.h>

MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Amiga mouse driver");
MODULE_LICENSE("GPL");

/* Last sampled hardware counter values, used to compute per-interrupt
 * deltas in amimouse_interrupt(). */
static int amimouse_lastx, amimouse_lasty;

/*
 * Vertical-blank interrupt handler: sample the mouse counter register,
 * turn the raw 8-bit counters into relative X/Y deltas, read the button
 * states, and report everything to the input core.
 */
static irqreturn_t amimouse_interrupt(int irq, void *data)
{
	struct input_dev *dev = data;
	unsigned short joy0dat, potgor;
	int nx, ny, dx, dy;

	/* joy0dat packs two free-running 8-bit counters: X in the low
	 * byte, Y in the high byte. */
	joy0dat = amiga_custom.joy0dat;

	nx = joy0dat & 0xff;
	ny = joy0dat >> 8;

	dx = nx - amimouse_lastx;
	dy = ny - amimouse_lasty;

	/* The counters wrap at 256; a jump larger than 127 in either
	 * direction means a wrap occurred, so correct the delta. */
	if (dx < -127) dx = (256 + nx) - amimouse_lastx;
	if (dx > 127) dx = (nx - 256) - amimouse_lastx;
	if (dy < -127) dy = (256 + ny) - amimouse_lasty;
	if (dy > 127) dy = (ny - 256) - amimouse_lasty;

	amimouse_lastx = nx;
	amimouse_lasty = ny;

	potgor = amiga_custom.potgor;

	input_report_rel(dev, REL_X, dx);
	input_report_rel(dev, REL_Y, dy);

	/* Button bits: left on CIA-A port A bit 6, middle/right on the
	 * potgor register. */
	input_report_key(dev, BTN_LEFT, ciaa.pra & 0x40);
	input_report_key(dev, BTN_MIDDLE, potgor & 0x0100);
	input_report_key(dev, BTN_RIGHT, potgor & 0x0400);

	input_sync(dev);

	return IRQ_HANDLED;
}

/*
 * First-open hook: seed the last-counter state from the current hardware
 * value (so the first interrupt does not report a spurious jump) and grab
 * the vertical-blank interrupt.
 */
static int amimouse_open(struct input_dev *dev)
{
	unsigned short joy0dat;
	int error;

	joy0dat = amiga_custom.joy0dat;

	amimouse_lastx = joy0dat & 0xff;
	amimouse_lasty = joy0dat >> 8;

	error = request_irq(IRQ_AMIGA_VERTB, amimouse_interrupt, 0, "amimouse",
			    dev);
	if (error)
		dev_err(&dev->dev, "Can't allocate irq %d\n", IRQ_AMIGA_VERTB);

	return error;
}

/* Last-close hook: release the vertical-blank interrupt. */
static void amimouse_close(struct input_dev *dev)
{
	free_irq(IRQ_AMIGA_VERTB, dev);
}

/*
 * Allocate, describe and register the input device for the platform
 * device. The IRQ is only requested on first open, not here.
 */
static int __init amimouse_probe(struct platform_device *pdev)
{
	int err;
	struct input_dev *dev;

	dev = input_allocate_device();
	if (!dev)
		return -ENOMEM;

	dev->name = pdev->name;
	dev->phys = "amimouse/input0";
	dev->id.bustype = BUS_AMIGA;
	dev->id.vendor = 0x0001;
	dev->id.product = 0x0002;
	dev->id.version = 0x0100;

	/* Two relative axes and three buttons. */
	dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
	dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y);
	dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) |
		BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT);
	dev->open = amimouse_open;
	dev->close = amimouse_close;
	dev->dev.parent = &pdev->dev;

	err = input_register_device(dev);
	if (err) {
		input_free_device(dev);
		return err;
	}

	platform_set_drvdata(pdev, dev);

	return 0;
}

/* Tear down: unregister the input device (which also frees it). */
static int __exit amimouse_remove(struct platform_device *pdev)
{
	struct input_dev *dev = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);
	input_unregister_device(dev);

	return 0;
}

/* No .probe here: the driver is bound once via platform_driver_probe()
 * below, so the probe routine may live in __init memory. */
static struct platform_driver amimouse_driver = {
	.remove = __exit_p(amimouse_remove),
	.driver   = {
		.name	= "amiga-mouse",
		.owner	= THIS_MODULE,
	},
};

static int __init amimouse_init(void)
{
	return platform_driver_probe(&amimouse_driver, amimouse_probe);
}

module_init(amimouse_init);

static void __exit amimouse_exit(void)
{
	platform_driver_unregister(&amimouse_driver);
}

module_exit(amimouse_exit);

MODULE_ALIAS("platform:amiga-mouse");
gpl-2.0
superr/android_kernel_lge_w5c
arch/arm/mach-mmp/flint.c
5005
2678
/*
 *  linux/arch/arm/mach-mmp/flint.c
 *
 *  Support for the Marvell Flint Development Platform.
 *
 *  Copyright (C) 2009 Marvell International Ltd.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/smc91x.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>

#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/addr-map.h>
#include <mach/mfp-mmp2.h>
#include <mach/mmp2.h>
#include <mach/irqs.h>

#include "common.h"

/* IRQ space for this board: all MMP2 on-chip IRQs plus 48 extra lines. */
#define FLINT_NR_IRQS	(MMP_NR_IRQS + 48)

/* Multi-function pin configuration applied once at boot by flint_init(). */
static unsigned long flint_pin_config[] __initdata = {
	/* UART1 */
	GPIO45_UART1_RXD,
	GPIO46_UART1_TXD,

	/* UART2 */
	GPIO47_UART2_RXD,
	GPIO48_UART2_TXD,

	/* SMC (static memory controller, used by the SMC91x Ethernet) */
	GPIO151_SMC_SCLK,
	GPIO145_SMC_nCS0,
	GPIO146_SMC_nCS1,
	GPIO152_SMC_BE0,
	GPIO153_SMC_BE1,
	GPIO154_SMC_IRQ,
	GPIO113_SMC_RDY,

	/* Ethernet interrupt line (GPIO155, see smc91x_resources below) */
	GPIO155_GPIO,

	/* DFI (NAND data/control interface) */
	GPIO168_DFI_D0,
	GPIO167_DFI_D1,
	GPIO166_DFI_D2,
	GPIO165_DFI_D3,
	GPIO107_DFI_D4,
	GPIO106_DFI_D5,
	GPIO105_DFI_D6,
	GPIO104_DFI_D7,
	GPIO111_DFI_D8,
	GPIO164_DFI_D9,
	GPIO163_DFI_D10,
	GPIO162_DFI_D11,
	GPIO161_DFI_D12,
	GPIO110_DFI_D13,
	GPIO109_DFI_D14,
	GPIO108_DFI_D15,
	GPIO143_ND_nCS0,
	GPIO144_ND_nCS1,
	GPIO147_ND_nWE,
	GPIO148_ND_nRE,
	GPIO150_ND_ALE,
	GPIO149_ND_CLE,
	GPIO112_ND_RDY0,
	GPIO160_ND_RDY1,
};

/* SMC91x Ethernet controller wired to static chip-select 1. */
static struct smc91x_platdata flint_smc91x_info = {
	.flags	= SMC91X_USE_16BIT | SMC91X_NOWAIT,
};

static struct resource smc91x_resources[] = {
	[0] = {
		.start	= SMC_CS1_PHYS_BASE + 0x300,
		.end	= SMC_CS1_PHYS_BASE + 0xfffff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		/* Interrupt comes in on GPIO155 (configured above). */
		.start	= MMP_GPIO_TO_IRQ(155),
		.end	= MMP_GPIO_TO_IRQ(155),
		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
	}
};

static struct platform_device smc91x_device = {
	.name		= "smc91x",
	.id		= 0,
	.dev		= {
		.platform_data = &flint_smc91x_info,
	},
	.num_resources	= ARRAY_SIZE(smc91x_resources),
	.resource	= smc91x_resources,
};

/* Board init: program the pin mux, then register on- and off-chip devices. */
static void __init flint_init(void)
{
	mfp_config(ARRAY_AND_SIZE(flint_pin_config));

	/* on-chip devices */
	mmp2_add_uart(1);
	mmp2_add_uart(2);
	platform_device_register(&mmp2_device_gpio);

	/* off-chip devices */
	platform_device_register(&smc91x_device);
}

MACHINE_START(FLINT, "Flint Development Platform")
	.map_io		= mmp_map_io,
	.nr_irqs	= FLINT_NR_IRQS,
	.init_irq	= mmp2_init_irq,
	.timer		= &mmp2_timer,
	.init_machine	= flint_init,
	.restart	= mmp_restart,
MACHINE_END
gpl-2.0
XMelancholy/android_kernel_sony_u8500
arch/arm/mach-ixp4xx/omixp-setup.c
5005
6349
/*
 * arch/arm/mach-ixp4xx/omixp-setup.c
 *
 * omicron ixp4xx board setup
 *      Copyright (C) 2009 OMICRON electronics GmbH
 *
 * based nslu2-setup.c, ixdp425-setup.c:
 *      Copyright (C) 2003-2004 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#ifdef CONFIG_LEDS_CLASS
#include <linux/leds.h>
#endif

#include <asm/setup.h>
#include <asm/memory.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>

/* Flash windows; start/end are filled in at runtime by omixp_init()
 * from the expansion-bus chip-select bases. */
static struct resource omixp_flash_resources[] = {
	{
		.flags	= IORESOURCE_MEM,
	}, {
		.flags	= IORESOURCE_MEM,
	},
};

/* Fixed partition layout of the 16 MiB boot flash. */
static struct mtd_partition omixp_partitions[] = {
	{
		.name	= "Recovery Bootloader",
		.size	= 0x00020000,
		.offset	= 0,
	}, {
		.name	= "Calibration Data",
		.size	= 0x00020000,
		.offset	= 0x00020000,
	}, {
		.name	= "Recovery FPGA",
		.size	= 0x00020000,
		.offset	= 0x00040000,
	}, {
		.name	= "Release Bootloader",
		.size	= 0x00020000,
		.offset	= 0x00060000,
	}, {
		.name	= "Release FPGA",
		.size	= 0x00020000,
		.offset	= 0x00080000,
	}, {
		.name	= "Kernel",
		.size	= 0x00160000,
		.offset	= 0x000a0000,
	}, {
		.name	= "Filesystem",
		.size	= 0x00C00000,
		.offset	= 0x00200000,
	}, {
		.name	= "Persistent Storage",
		.size	= 0x00200000,
		.offset	= 0x00E00000,
	},
};

/* Two flash banks: bank 0 uses the fixed partition table above, bank 1
 * (data flash) is probed without a predefined layout. */
static struct flash_platform_data omixp_flash_data[] = {
	{
		.map_name	= "cfi_probe",
		.parts		= omixp_partitions,
		.nr_parts	= ARRAY_SIZE(omixp_partitions),
	}, {
		.map_name	= "cfi_probe",
		.parts		= NULL,
		.nr_parts	= 0,
	},
};

static struct platform_device omixp_flash_device[] = {
	{
		.name		= "IXP4XX-Flash",
		.id		= 0,
		.dev		= {
			.platform_data = &omixp_flash_data[0],
		},
		.resource	= &omixp_flash_resources[0],
		.num_resources	= 1,
	}, {
		.name		= "IXP4XX-Flash",
		.id		= 1,
		.dev		= {
			.platform_data = &omixp_flash_data[1],
		},
		.resource	= &omixp_flash_resources[1],
		.num_resources	= 1,
	},
};

/* Swap UART's - These boards have the console on UART2. The following
 * configuration is used:
 *	ttyS0 .. UART2
 *	ttyS1 .. UART1
 * This way standard images can be used with the kernel that expect
 * the console on ttyS0.
 */
static struct resource omixp_uart_resources[] = {
	{
		.start	= IXP4XX_UART2_BASE_PHYS,
		.end	= IXP4XX_UART2_BASE_PHYS + 0x0fff,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= IXP4XX_UART1_BASE_PHYS,
		.end	= IXP4XX_UART1_BASE_PHYS + 0x0fff,
		.flags	= IORESOURCE_MEM,
	},
};

static struct plat_serial8250_port omixp_uart_data[] = {
	{
		.mapbase	= IXP4XX_UART2_BASE_PHYS,
		.membase	= (char *)IXP4XX_UART2_BASE_VIRT + REG_OFFSET,
		.irq		= IRQ_IXP4XX_UART2,
		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
		.iotype		= UPIO_MEM,
		.regshift	= 2,
		.uartclk	= IXP4XX_UART_XTAL,
	}, {
		.mapbase	= IXP4XX_UART1_BASE_PHYS,
		.membase	= (char *)IXP4XX_UART1_BASE_VIRT + REG_OFFSET,
		.irq		= IRQ_IXP4XX_UART1,
		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
		.iotype		= UPIO_MEM,
		.regshift	= 2,
		.uartclk	= IXP4XX_UART_XTAL,
	}, {
		/* list termination */
	}
};

static struct platform_device omixp_uart = {
	.name			= "serial8250",
	.id			= PLAT8250_DEV_PLATFORM,
	.dev.platform_data	= omixp_uart_data,
	.num_resources		= 2,
	.resource		= omixp_uart_resources,
};

/* Single GPIO LED, present only on the MIC256 board variant. */
static struct gpio_led mic256_led_pins[] = {
	{
		.name		= "LED-A",
		.gpio		= 7,
	},
};

static struct gpio_led_platform_data mic256_led_data = {
	.num_leds		= ARRAY_SIZE(mic256_led_pins),
	.leds			= mic256_led_pins,
};

static struct platform_device mic256_leds = {
	.name			= "leds-gpio",
	.id			= -1,
	.dev.platform_data	= &mic256_led_data,
};

/* Built-in 10/100 Ethernet MAC interfaces */
static struct eth_plat_info ixdp425_plat_eth[] = {
	{
		.phy		= 0,
		.rxq		= 3,
		.txreadyq	= 20,
	}, {
		.phy		= 1,
		.rxq		= 4,
		.txreadyq	= 21,
	},
};

static struct platform_device ixdp425_eth[] = {
	{
		.name			= "ixp4xx_eth",
		.id			= IXP4XX_ETH_NPEB,
		.dev.platform_data	= ixdp425_plat_eth,
	}, {
		.name			= "ixp4xx_eth",
		.id			= IXP4XX_ETH_NPEC,
		.dev.platform_data	= ixdp425_plat_eth + 1,
	},
};

/* Per-board device lists; the right one is picked in omixp_init(). */
static struct platform_device *devixp_pldev[] __initdata = {
	&omixp_uart,
	&omixp_flash_device[0],
	&ixdp425_eth[0],
	&ixdp425_eth[1],
};

static struct platform_device *mic256_pldev[] __initdata = {
	&omixp_uart,
	&omixp_flash_device[0],
	&mic256_leds,
	&ixdp425_eth[0],
	&ixdp425_eth[1],
};

static struct platform_device *miccpt_pldev[] __initdata = {
	&omixp_uart,
	&omixp_flash_device[0],
	&omixp_flash_device[1],
	&ixdp425_eth[0],
	&ixdp425_eth[1],
};

/* Shared init for all three Omicron board variants: fill in the flash
 * windows from the expansion bus map, then register the board-specific
 * device set. */
static void __init omixp_init(void)
{
	ixp4xx_sys_init();

	/* 16MiB Boot Flash */
	omixp_flash_resources[0].start = IXP4XX_EXP_BUS_BASE(0);
	omixp_flash_resources[0].end   = IXP4XX_EXP_BUS_END(0);

	/* 32 MiB Data Flash */
	omixp_flash_resources[1].start = IXP4XX_EXP_BUS_BASE(2);
	omixp_flash_resources[1].end   = IXP4XX_EXP_BUS_END(2);

	if (machine_is_devixp())
		platform_add_devices(devixp_pldev, ARRAY_SIZE(devixp_pldev));
	else if (machine_is_miccpt())
		platform_add_devices(miccpt_pldev, ARRAY_SIZE(miccpt_pldev));
	else if (machine_is_mic256())
		platform_add_devices(mic256_pldev, ARRAY_SIZE(mic256_pldev));
}

#ifdef CONFIG_MACH_DEVIXP
MACHINE_START(DEVIXP, "Omicron DEVIXP")
	.atag_offset    = 0x100,
	.map_io		= ixp4xx_map_io,
	.init_early	= ixp4xx_init_early,
	.init_irq	= ixp4xx_init_irq,
	.timer          = &ixp4xx_timer,
	.init_machine	= omixp_init,
	.restart	= ixp4xx_restart,
MACHINE_END
#endif

#ifdef CONFIG_MACH_MICCPT
MACHINE_START(MICCPT, "Omicron MICCPT")
	.atag_offset    = 0x100,
	.map_io		= ixp4xx_map_io,
	.init_early	= ixp4xx_init_early,
	.init_irq	= ixp4xx_init_irq,
	.timer          = &ixp4xx_timer,
	.init_machine	= omixp_init,
#if defined(CONFIG_PCI)
	.dma_zone_size	= SZ_64M,
#endif
	.restart	= ixp4xx_restart,
MACHINE_END
#endif

#ifdef CONFIG_MACH_MIC256
MACHINE_START(MIC256, "Omicron MIC256")
	.atag_offset    = 0x100,
	.map_io		= ixp4xx_map_io,
	.init_early	= ixp4xx_init_early,
	.init_irq	= ixp4xx_init_irq,
	.timer          = &ixp4xx_timer,
	.init_machine	= omixp_init,
	.restart	= ixp4xx_restart,
MACHINE_END
#endif
gpl-2.0
drewx2/android_kernel_htc_dlx
arch/avr32/mm/cache.c
10125
3817
/* * Copyright (C) 2004-2006 Atmel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/highmem.h> #include <linux/unistd.h> #include <asm/cacheflush.h> #include <asm/cachectl.h> #include <asm/processor.h> #include <asm/uaccess.h> #include <asm/syscalls.h> /* * If you attempt to flush anything more than this, you need superuser * privileges. The value is completely arbitrary. */ #define CACHEFLUSH_MAX_LEN 1024 void invalidate_dcache_region(void *start, size_t size) { unsigned long v, begin, end, linesz, mask; linesz = boot_cpu_data.dcache.linesz; mask = linesz - 1; /* when first and/or last cachelines are shared, flush them * instead of invalidating ... never discard valid data! */ begin = (unsigned long)start; end = begin + size; if (begin & mask) { flush_dcache_line(start); begin += linesz; } if (end & mask) { flush_dcache_line((void *)end); end &= ~mask; } /* remaining cachelines only need invalidation */ for (v = begin; v < end; v += linesz) invalidate_dcache_line((void *)v); flush_write_buffer(); } void clean_dcache_region(void *start, size_t size) { unsigned long v, begin, end, linesz; linesz = boot_cpu_data.dcache.linesz; begin = (unsigned long)start & ~(linesz - 1); end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1); for (v = begin; v < end; v += linesz) clean_dcache_line((void *)v); flush_write_buffer(); } void flush_dcache_region(void *start, size_t size) { unsigned long v, begin, end, linesz; linesz = boot_cpu_data.dcache.linesz; begin = (unsigned long)start & ~(linesz - 1); end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1); for (v = begin; v < end; v += linesz) flush_dcache_line((void *)v); flush_write_buffer(); } void invalidate_icache_region(void *start, size_t size) { unsigned long v, begin, end, linesz; linesz = boot_cpu_data.icache.linesz; begin = (unsigned 
long)start & ~(linesz - 1); end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1); for (v = begin; v < end; v += linesz) invalidate_icache_line((void *)v); } static inline void __flush_icache_range(unsigned long start, unsigned long end) { unsigned long v, linesz; linesz = boot_cpu_data.dcache.linesz; for (v = start; v < end; v += linesz) { clean_dcache_line((void *)v); invalidate_icache_line((void *)v); } flush_write_buffer(); } /* * This one is called after a module has been loaded. */ void flush_icache_range(unsigned long start, unsigned long end) { unsigned long linesz; linesz = boot_cpu_data.dcache.linesz; __flush_icache_range(start & ~(linesz - 1), (end + linesz - 1) & ~(linesz - 1)); } /* * This one is called from __do_fault() and do_swap_page(). */ void flush_icache_page(struct vm_area_struct *vma, struct page *page) { if (vma->vm_flags & VM_EXEC) { void *v = page_address(page); __flush_icache_range((unsigned long)v, (unsigned long)v + PAGE_SIZE); } } asmlinkage int sys_cacheflush(int operation, void __user *addr, size_t len) { int ret; if (len > CACHEFLUSH_MAX_LEN) { ret = -EPERM; if (!capable(CAP_SYS_ADMIN)) goto out; } ret = -EFAULT; if (!access_ok(VERIFY_WRITE, addr, len)) goto out; switch (operation) { case CACHE_IFLUSH: flush_icache_range((unsigned long)addr, (unsigned long)addr + len); ret = 0; break; default: ret = -EINVAL; } out: return ret; } void copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len) { memcpy(dst, src, len); if (vma->vm_flags & VM_EXEC) flush_icache_range((unsigned long)dst, (unsigned long)dst + len); }
gpl-2.0
kornyone/htc-kernel-doubleshot_old
arch/x86/mm/kmmio.c
11405
15945
/* Support for MMIO probes. * Benfit many code from kprobes * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>. * 2007 Alexander Eichner * 2008 Pekka Paalanen <pq@iki.fi> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/list.h> #include <linux/rculist.h> #include <linux/spinlock.h> #include <linux/hash.h> #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/uaccess.h> #include <linux/ptrace.h> #include <linux/preempt.h> #include <linux/percpu.h> #include <linux/kdebug.h> #include <linux/mutex.h> #include <linux/io.h> #include <linux/slab.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <linux/errno.h> #include <asm/debugreg.h> #include <linux/mmiotrace.h> #define KMMIO_PAGE_HASH_BITS 4 #define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS) struct kmmio_fault_page { struct list_head list; struct kmmio_fault_page *release_next; unsigned long page; /* location of the fault page */ pteval_t old_presence; /* page presence prior to arming */ bool armed; /* * Number of times this page has been registered as a part * of a probe. If zero, page is disarmed and this may be freed. * Used only by writers (RCU) and post_kmmio_handler(). * Protected by kmmio_lock, when linked into kmmio_page_table. */ int count; bool scheduled_for_release; }; struct kmmio_delayed_release { struct rcu_head rcu; struct kmmio_fault_page *release_list; }; struct kmmio_context { struct kmmio_fault_page *fpage; struct kmmio_probe *probe; unsigned long saved_flags; unsigned long addr; int active; }; static DEFINE_SPINLOCK(kmmio_lock); /* Protected by kmmio_lock */ unsigned int kmmio_count; /* Read-protected by RCU, write-protected by kmmio_lock. 
*/ static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE]; static LIST_HEAD(kmmio_probes); static struct list_head *kmmio_page_list(unsigned long page) { return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)]; } /* Accessed per-cpu */ static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx); /* * this is basically a dynamic stabbing problem: * Could use the existing prio tree code or * Possible better implementations: * The Interval Skip List: A Data Structure for Finding All Intervals That * Overlap a Point (might be simple) * Space Efficient Dynamic Stabbing with Fast Queries - Mikkel Thorup */ /* Get the kmmio at this addr (if any). You must be holding RCU read lock. */ static struct kmmio_probe *get_kmmio_probe(unsigned long addr) { struct kmmio_probe *p; list_for_each_entry_rcu(p, &kmmio_probes, list) { if (addr >= p->addr && addr < (p->addr + p->len)) return p; } return NULL; } /* You must be holding RCU read lock. */ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page) { struct list_head *head; struct kmmio_fault_page *f; page &= PAGE_MASK; head = kmmio_page_list(page); list_for_each_entry_rcu(f, head, list) { if (f->page == page) return f; } return NULL; } static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old) { pmdval_t v = pmd_val(*pmd); if (clear) { *old = v & _PAGE_PRESENT; v &= ~_PAGE_PRESENT; } else /* presume this has been called with clear==true previously */ v |= *old; set_pmd(pmd, __pmd(v)); } static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old) { pteval_t v = pte_val(*pte); if (clear) { *old = v & _PAGE_PRESENT; v &= ~_PAGE_PRESENT; } else /* presume this has been called with clear==true previously */ v |= *old; set_pte_atomic(pte, __pte(v)); } static int clear_page_presence(struct kmmio_fault_page *f, bool clear) { unsigned int level; pte_t *pte = lookup_address(f->page, &level); if (!pte) { pr_err("no pte for page 0x%08lx\n", f->page); return -1; } switch (level) { case 
PG_LEVEL_2M: clear_pmd_presence((pmd_t *)pte, clear, &f->old_presence); break; case PG_LEVEL_4K: clear_pte_presence(pte, clear, &f->old_presence); break; default: pr_err("unexpected page level 0x%x.\n", level); return -1; } __flush_tlb_one(f->page); return 0; } /* * Mark the given page as not present. Access to it will trigger a fault. * * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the * protection is ignored here. RCU read lock is assumed held, so the struct * will not disappear unexpectedly. Furthermore, the caller must guarantee, * that double arming the same virtual address (page) cannot occur. * * Double disarming on the other hand is allowed, and may occur when a fault * and mmiotrace shutdown happen simultaneously. */ static int arm_kmmio_fault_page(struct kmmio_fault_page *f) { int ret; WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n")); if (f->armed) { pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n", f->page, f->count, !!f->old_presence); } ret = clear_page_presence(f, true); WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"), f->page); f->armed = true; return ret; } /** Restore the given page to saved presence state. */ static void disarm_kmmio_fault_page(struct kmmio_fault_page *f) { int ret = clear_page_presence(f, false); WARN_ONCE(ret < 0, KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page); f->armed = false; } /* * This is being called from do_page_fault(). * * We may be in an interrupt or a critical section. Also prefecthing may * trigger a page fault. We may be in the middle of process switch. * We cannot take any locks, because we could be executing especially * within a kmmio critical section. * * Local interrupts are disabled, so preemption cannot happen. * Do not enable interrupts, do not sleep, and watch out for other CPUs. */ /* * Interrupts are disabled on entry as trap3 is an interrupt gate * and they remain disabled throughout this function. 
*/ int kmmio_handler(struct pt_regs *regs, unsigned long addr) { struct kmmio_context *ctx; struct kmmio_fault_page *faultpage; int ret = 0; /* default to fault not handled */ /* * Preemption is now disabled to prevent process switch during * single stepping. We can only handle one active kmmio trace * per cpu, so ensure that we finish it before something else * gets to run. We also hold the RCU read lock over single * stepping to avoid looking up the probe and kmmio_fault_page * again. */ preempt_disable(); rcu_read_lock(); faultpage = get_kmmio_fault_page(addr); if (!faultpage) { /* * Either this page fault is not caused by kmmio, or * another CPU just pulled the kmmio probe from under * our feet. The latter case should not be possible. */ goto no_kmmio; } ctx = &get_cpu_var(kmmio_ctx); if (ctx->active) { if (addr == ctx->addr) { /* * A second fault on the same page means some other * condition needs handling by do_page_fault(), the * page really not being present is the most common. */ pr_debug("secondary hit for 0x%08lx CPU %d.\n", addr, smp_processor_id()); if (!faultpage->old_presence) pr_info("unexpected secondary hit for address 0x%08lx on CPU %d.\n", addr, smp_processor_id()); } else { /* * Prevent overwriting already in-flight context. * This should not happen, let's hope disarming at * least prevents a panic. */ pr_emerg("recursive probe hit on CPU %d, for address 0x%08lx. Ignoring.\n", smp_processor_id(), addr); pr_emerg("previous hit was at 0x%08lx.\n", ctx->addr); disarm_kmmio_fault_page(faultpage); } goto no_kmmio_ctx; } ctx->active++; ctx->fpage = faultpage; ctx->probe = get_kmmio_probe(addr); ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF)); ctx->addr = addr; if (ctx->probe && ctx->probe->pre_handler) ctx->probe->pre_handler(ctx->probe, regs, addr); /* * Enable single-stepping and disable interrupts for the faulting * context. Local interrupts must not get enabled during stepping. 
*/ regs->flags |= X86_EFLAGS_TF; regs->flags &= ~X86_EFLAGS_IF; /* Now we set present bit in PTE and single step. */ disarm_kmmio_fault_page(ctx->fpage); /* * If another cpu accesses the same page while we are stepping, * the access will not be caught. It will simply succeed and the * only downside is we lose the event. If this becomes a problem, * the user should drop to single cpu before tracing. */ put_cpu_var(kmmio_ctx); return 1; /* fault handled */ no_kmmio_ctx: put_cpu_var(kmmio_ctx); no_kmmio: rcu_read_unlock(); preempt_enable_no_resched(); return ret; } /* * Interrupts are disabled on entry as trap1 is an interrupt gate * and they remain disabled throughout this function. * This must always get called as the pair to kmmio_handler(). */ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs) { int ret = 0; struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx); if (!ctx->active) { /* * debug traps without an active context are due to either * something external causing them (f.e. using a debugger while * mmio tracing enabled), or erroneous behaviour */ pr_warning("unexpected debug trap on CPU %d.\n", smp_processor_id()); goto out; } if (ctx->probe && ctx->probe->post_handler) ctx->probe->post_handler(ctx->probe, condition, regs); /* Prevent racing against release_kmmio_fault_page(). */ spin_lock(&kmmio_lock); if (ctx->fpage->count) arm_kmmio_fault_page(ctx->fpage); spin_unlock(&kmmio_lock); regs->flags &= ~X86_EFLAGS_TF; regs->flags |= ctx->saved_flags; /* These were acquired in kmmio_handler(). */ ctx->active--; BUG_ON(ctx->active); rcu_read_unlock(); preempt_enable_no_resched(); /* * if somebody else is singlestepping across a probe point, flags * will have TF set, in which case, continue the remaining processing * of do_debug, as if this is not a probe hit. */ if (!(regs->flags & X86_EFLAGS_TF)) ret = 1; out: put_cpu_var(kmmio_ctx); return ret; } /* You must be holding kmmio_lock. 
*/ static int add_kmmio_fault_page(unsigned long page) { struct kmmio_fault_page *f; page &= PAGE_MASK; f = get_kmmio_fault_page(page); if (f) { if (!f->count) arm_kmmio_fault_page(f); f->count++; return 0; } f = kzalloc(sizeof(*f), GFP_ATOMIC); if (!f) return -1; f->count = 1; f->page = page; if (arm_kmmio_fault_page(f)) { kfree(f); return -1; } list_add_rcu(&f->list, kmmio_page_list(f->page)); return 0; } /* You must be holding kmmio_lock. */ static void release_kmmio_fault_page(unsigned long page, struct kmmio_fault_page **release_list) { struct kmmio_fault_page *f; page &= PAGE_MASK; f = get_kmmio_fault_page(page); if (!f) return; f->count--; BUG_ON(f->count < 0); if (!f->count) { disarm_kmmio_fault_page(f); if (!f->scheduled_for_release) { f->release_next = *release_list; *release_list = f; f->scheduled_for_release = true; } } } /* * With page-unaligned ioremaps, one or two armed pages may contain * addresses from outside the intended mapping. Events for these addresses * are currently silently dropped. The events may result only from programming * mistakes by accessing addresses before the beginning or past the end of a * mapping. */ int register_kmmio_probe(struct kmmio_probe *p) { unsigned long flags; int ret = 0; unsigned long size = 0; const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK); spin_lock_irqsave(&kmmio_lock, flags); if (get_kmmio_probe(p->addr)) { ret = -EEXIST; goto out; } kmmio_count++; list_add_rcu(&p->list, &kmmio_probes); while (size < size_lim) { if (add_kmmio_fault_page(p->addr + size)) pr_err("Unable to set page fault.\n"); size += PAGE_SIZE; } out: spin_unlock_irqrestore(&kmmio_lock, flags); /* * XXX: What should I do here? * Here was a call to global_flush_tlb(), but it does not exist * anymore. It seems it's not needed after all. 
*/ return ret; } EXPORT_SYMBOL(register_kmmio_probe); static void rcu_free_kmmio_fault_pages(struct rcu_head *head) { struct kmmio_delayed_release *dr = container_of( head, struct kmmio_delayed_release, rcu); struct kmmio_fault_page *f = dr->release_list; while (f) { struct kmmio_fault_page *next = f->release_next; BUG_ON(f->count); kfree(f); f = next; } kfree(dr); } static void remove_kmmio_fault_pages(struct rcu_head *head) { struct kmmio_delayed_release *dr = container_of(head, struct kmmio_delayed_release, rcu); struct kmmio_fault_page *f = dr->release_list; struct kmmio_fault_page **prevp = &dr->release_list; unsigned long flags; spin_lock_irqsave(&kmmio_lock, flags); while (f) { if (!f->count) { list_del_rcu(&f->list); prevp = &f->release_next; } else { *prevp = f->release_next; f->release_next = NULL; f->scheduled_for_release = false; } f = *prevp; } spin_unlock_irqrestore(&kmmio_lock, flags); /* This is the real RCU destroy call. */ call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages); } /* * Remove a kmmio probe. You have to synchronize_rcu() before you can be * sure that the callbacks will not be called anymore. Only after that * you may actually release your struct kmmio_probe. * * Unregistering a kmmio fault page has three steps: * 1. release_kmmio_fault_page() * Disarm the page, wait a grace period to let all faults finish. * 2. remove_kmmio_fault_pages() * Remove the pages from kmmio_page_table. * 3. rcu_free_kmmio_fault_pages() * Actually free the kmmio_fault_page structs as with RCU. 
*/ void unregister_kmmio_probe(struct kmmio_probe *p) { unsigned long flags; unsigned long size = 0; const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK); struct kmmio_fault_page *release_list = NULL; struct kmmio_delayed_release *drelease; spin_lock_irqsave(&kmmio_lock, flags); while (size < size_lim) { release_kmmio_fault_page(p->addr + size, &release_list); size += PAGE_SIZE; } list_del_rcu(&p->list); kmmio_count--; spin_unlock_irqrestore(&kmmio_lock, flags); if (!release_list) return; drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC); if (!drelease) { pr_crit("leaking kmmio_fault_page objects.\n"); return; } drelease->release_list = release_list; /* * This is not really RCU here. We have just disarmed a set of * pages so that they cannot trigger page faults anymore. However, * we cannot remove the pages from kmmio_page_table, * because a probe hit might be in flight on another CPU. The * pages are collected into a list, and they will be removed from * kmmio_page_table when it is certain that no probe hit related to * these pages can be in flight. RCU grace period sounds like a * good choice. * * If we removed the pages too early, kmmio page fault handler might * not find the respective kmmio_fault_page and determine it's not * a kmmio fault, when it actually is. This would lead to madness. 
*/ call_rcu(&drelease->rcu, remove_kmmio_fault_pages); } EXPORT_SYMBOL(unregister_kmmio_probe); static int kmmio_die_notifier(struct notifier_block *nb, unsigned long val, void *args) { struct die_args *arg = args; unsigned long* dr6_p = (unsigned long *)ERR_PTR(arg->err); if (val == DIE_DEBUG && (*dr6_p & DR_STEP)) if (post_kmmio_handler(*dr6_p, arg->regs) == 1) { /* * Reset the BS bit in dr6 (pointed by args->err) to * denote completion of processing */ *dr6_p &= ~DR_STEP; return NOTIFY_STOP; } return NOTIFY_DONE; } static struct notifier_block nb_die = { .notifier_call = kmmio_die_notifier }; int kmmio_init(void) { int i; for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++) INIT_LIST_HEAD(&kmmio_page_table[i]); return register_die_notifier(&nb_die); } void kmmio_cleanup(void) { int i; unregister_die_notifier(&nb_die); for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++) { WARN_ONCE(!list_empty(&kmmio_page_table[i]), KERN_ERR "kmmio_page_table not empty at cleanup, any further tracing will leak memory.\n"); } }
gpl-2.0
Tesla-Redux-Devices/kernel_apq8064
arch/x86/mm/kmmio.c
11405
15945
/* Support for MMIO probes. * Benfit many code from kprobes * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>. * 2007 Alexander Eichner * 2008 Pekka Paalanen <pq@iki.fi> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/list.h> #include <linux/rculist.h> #include <linux/spinlock.h> #include <linux/hash.h> #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/uaccess.h> #include <linux/ptrace.h> #include <linux/preempt.h> #include <linux/percpu.h> #include <linux/kdebug.h> #include <linux/mutex.h> #include <linux/io.h> #include <linux/slab.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <linux/errno.h> #include <asm/debugreg.h> #include <linux/mmiotrace.h> #define KMMIO_PAGE_HASH_BITS 4 #define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS) struct kmmio_fault_page { struct list_head list; struct kmmio_fault_page *release_next; unsigned long page; /* location of the fault page */ pteval_t old_presence; /* page presence prior to arming */ bool armed; /* * Number of times this page has been registered as a part * of a probe. If zero, page is disarmed and this may be freed. * Used only by writers (RCU) and post_kmmio_handler(). * Protected by kmmio_lock, when linked into kmmio_page_table. */ int count; bool scheduled_for_release; }; struct kmmio_delayed_release { struct rcu_head rcu; struct kmmio_fault_page *release_list; }; struct kmmio_context { struct kmmio_fault_page *fpage; struct kmmio_probe *probe; unsigned long saved_flags; unsigned long addr; int active; }; static DEFINE_SPINLOCK(kmmio_lock); /* Protected by kmmio_lock */ unsigned int kmmio_count; /* Read-protected by RCU, write-protected by kmmio_lock. 
*/ static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE]; static LIST_HEAD(kmmio_probes); static struct list_head *kmmio_page_list(unsigned long page) { return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)]; } /* Accessed per-cpu */ static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx); /* * this is basically a dynamic stabbing problem: * Could use the existing prio tree code or * Possible better implementations: * The Interval Skip List: A Data Structure for Finding All Intervals That * Overlap a Point (might be simple) * Space Efficient Dynamic Stabbing with Fast Queries - Mikkel Thorup */ /* Get the kmmio at this addr (if any). You must be holding RCU read lock. */ static struct kmmio_probe *get_kmmio_probe(unsigned long addr) { struct kmmio_probe *p; list_for_each_entry_rcu(p, &kmmio_probes, list) { if (addr >= p->addr && addr < (p->addr + p->len)) return p; } return NULL; } /* You must be holding RCU read lock. */ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page) { struct list_head *head; struct kmmio_fault_page *f; page &= PAGE_MASK; head = kmmio_page_list(page); list_for_each_entry_rcu(f, head, list) { if (f->page == page) return f; } return NULL; } static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old) { pmdval_t v = pmd_val(*pmd); if (clear) { *old = v & _PAGE_PRESENT; v &= ~_PAGE_PRESENT; } else /* presume this has been called with clear==true previously */ v |= *old; set_pmd(pmd, __pmd(v)); } static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old) { pteval_t v = pte_val(*pte); if (clear) { *old = v & _PAGE_PRESENT; v &= ~_PAGE_PRESENT; } else /* presume this has been called with clear==true previously */ v |= *old; set_pte_atomic(pte, __pte(v)); } static int clear_page_presence(struct kmmio_fault_page *f, bool clear) { unsigned int level; pte_t *pte = lookup_address(f->page, &level); if (!pte) { pr_err("no pte for page 0x%08lx\n", f->page); return -1; } switch (level) { case 
PG_LEVEL_2M: clear_pmd_presence((pmd_t *)pte, clear, &f->old_presence); break; case PG_LEVEL_4K: clear_pte_presence(pte, clear, &f->old_presence); break; default: pr_err("unexpected page level 0x%x.\n", level); return -1; } __flush_tlb_one(f->page); return 0; } /* * Mark the given page as not present. Access to it will trigger a fault. * * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the * protection is ignored here. RCU read lock is assumed held, so the struct * will not disappear unexpectedly. Furthermore, the caller must guarantee, * that double arming the same virtual address (page) cannot occur. * * Double disarming on the other hand is allowed, and may occur when a fault * and mmiotrace shutdown happen simultaneously. */ static int arm_kmmio_fault_page(struct kmmio_fault_page *f) { int ret; WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n")); if (f->armed) { pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n", f->page, f->count, !!f->old_presence); } ret = clear_page_presence(f, true); WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"), f->page); f->armed = true; return ret; } /** Restore the given page to saved presence state. */ static void disarm_kmmio_fault_page(struct kmmio_fault_page *f) { int ret = clear_page_presence(f, false); WARN_ONCE(ret < 0, KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page); f->armed = false; } /* * This is being called from do_page_fault(). * * We may be in an interrupt or a critical section. Also prefecthing may * trigger a page fault. We may be in the middle of process switch. * We cannot take any locks, because we could be executing especially * within a kmmio critical section. * * Local interrupts are disabled, so preemption cannot happen. * Do not enable interrupts, do not sleep, and watch out for other CPUs. */ /* * Interrupts are disabled on entry as trap3 is an interrupt gate * and they remain disabled throughout this function. 
*/ int kmmio_handler(struct pt_regs *regs, unsigned long addr) { struct kmmio_context *ctx; struct kmmio_fault_page *faultpage; int ret = 0; /* default to fault not handled */ /* * Preemption is now disabled to prevent process switch during * single stepping. We can only handle one active kmmio trace * per cpu, so ensure that we finish it before something else * gets to run. We also hold the RCU read lock over single * stepping to avoid looking up the probe and kmmio_fault_page * again. */ preempt_disable(); rcu_read_lock(); faultpage = get_kmmio_fault_page(addr); if (!faultpage) { /* * Either this page fault is not caused by kmmio, or * another CPU just pulled the kmmio probe from under * our feet. The latter case should not be possible. */ goto no_kmmio; } ctx = &get_cpu_var(kmmio_ctx); if (ctx->active) { if (addr == ctx->addr) { /* * A second fault on the same page means some other * condition needs handling by do_page_fault(), the * page really not being present is the most common. */ pr_debug("secondary hit for 0x%08lx CPU %d.\n", addr, smp_processor_id()); if (!faultpage->old_presence) pr_info("unexpected secondary hit for address 0x%08lx on CPU %d.\n", addr, smp_processor_id()); } else { /* * Prevent overwriting already in-flight context. * This should not happen, let's hope disarming at * least prevents a panic. */ pr_emerg("recursive probe hit on CPU %d, for address 0x%08lx. Ignoring.\n", smp_processor_id(), addr); pr_emerg("previous hit was at 0x%08lx.\n", ctx->addr); disarm_kmmio_fault_page(faultpage); } goto no_kmmio_ctx; } ctx->active++; ctx->fpage = faultpage; ctx->probe = get_kmmio_probe(addr); ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF)); ctx->addr = addr; if (ctx->probe && ctx->probe->pre_handler) ctx->probe->pre_handler(ctx->probe, regs, addr); /* * Enable single-stepping and disable interrupts for the faulting * context. Local interrupts must not get enabled during stepping. 
*/ regs->flags |= X86_EFLAGS_TF; regs->flags &= ~X86_EFLAGS_IF; /* Now we set present bit in PTE and single step. */ disarm_kmmio_fault_page(ctx->fpage); /* * If another cpu accesses the same page while we are stepping, * the access will not be caught. It will simply succeed and the * only downside is we lose the event. If this becomes a problem, * the user should drop to single cpu before tracing. */ put_cpu_var(kmmio_ctx); return 1; /* fault handled */ no_kmmio_ctx: put_cpu_var(kmmio_ctx); no_kmmio: rcu_read_unlock(); preempt_enable_no_resched(); return ret; } /* * Interrupts are disabled on entry as trap1 is an interrupt gate * and they remain disabled throughout this function. * This must always get called as the pair to kmmio_handler(). */ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs) { int ret = 0; struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx); if (!ctx->active) { /* * debug traps without an active context are due to either * something external causing them (f.e. using a debugger while * mmio tracing enabled), or erroneous behaviour */ pr_warning("unexpected debug trap on CPU %d.\n", smp_processor_id()); goto out; } if (ctx->probe && ctx->probe->post_handler) ctx->probe->post_handler(ctx->probe, condition, regs); /* Prevent racing against release_kmmio_fault_page(). */ spin_lock(&kmmio_lock); if (ctx->fpage->count) arm_kmmio_fault_page(ctx->fpage); spin_unlock(&kmmio_lock); regs->flags &= ~X86_EFLAGS_TF; regs->flags |= ctx->saved_flags; /* These were acquired in kmmio_handler(). */ ctx->active--; BUG_ON(ctx->active); rcu_read_unlock(); preempt_enable_no_resched(); /* * if somebody else is singlestepping across a probe point, flags * will have TF set, in which case, continue the remaining processing * of do_debug, as if this is not a probe hit. */ if (!(regs->flags & X86_EFLAGS_TF)) ret = 1; out: put_cpu_var(kmmio_ctx); return ret; } /* You must be holding kmmio_lock. 
*/ static int add_kmmio_fault_page(unsigned long page) { struct kmmio_fault_page *f; page &= PAGE_MASK; f = get_kmmio_fault_page(page); if (f) { if (!f->count) arm_kmmio_fault_page(f); f->count++; return 0; } f = kzalloc(sizeof(*f), GFP_ATOMIC); if (!f) return -1; f->count = 1; f->page = page; if (arm_kmmio_fault_page(f)) { kfree(f); return -1; } list_add_rcu(&f->list, kmmio_page_list(f->page)); return 0; } /* You must be holding kmmio_lock. */ static void release_kmmio_fault_page(unsigned long page, struct kmmio_fault_page **release_list) { struct kmmio_fault_page *f; page &= PAGE_MASK; f = get_kmmio_fault_page(page); if (!f) return; f->count--; BUG_ON(f->count < 0); if (!f->count) { disarm_kmmio_fault_page(f); if (!f->scheduled_for_release) { f->release_next = *release_list; *release_list = f; f->scheduled_for_release = true; } } } /* * With page-unaligned ioremaps, one or two armed pages may contain * addresses from outside the intended mapping. Events for these addresses * are currently silently dropped. The events may result only from programming * mistakes by accessing addresses before the beginning or past the end of a * mapping. */ int register_kmmio_probe(struct kmmio_probe *p) { unsigned long flags; int ret = 0; unsigned long size = 0; const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK); spin_lock_irqsave(&kmmio_lock, flags); if (get_kmmio_probe(p->addr)) { ret = -EEXIST; goto out; } kmmio_count++; list_add_rcu(&p->list, &kmmio_probes); while (size < size_lim) { if (add_kmmio_fault_page(p->addr + size)) pr_err("Unable to set page fault.\n"); size += PAGE_SIZE; } out: spin_unlock_irqrestore(&kmmio_lock, flags); /* * XXX: What should I do here? * Here was a call to global_flush_tlb(), but it does not exist * anymore. It seems it's not needed after all. 
*/ return ret; } EXPORT_SYMBOL(register_kmmio_probe); static void rcu_free_kmmio_fault_pages(struct rcu_head *head) { struct kmmio_delayed_release *dr = container_of( head, struct kmmio_delayed_release, rcu); struct kmmio_fault_page *f = dr->release_list; while (f) { struct kmmio_fault_page *next = f->release_next; BUG_ON(f->count); kfree(f); f = next; } kfree(dr); } static void remove_kmmio_fault_pages(struct rcu_head *head) { struct kmmio_delayed_release *dr = container_of(head, struct kmmio_delayed_release, rcu); struct kmmio_fault_page *f = dr->release_list; struct kmmio_fault_page **prevp = &dr->release_list; unsigned long flags; spin_lock_irqsave(&kmmio_lock, flags); while (f) { if (!f->count) { list_del_rcu(&f->list); prevp = &f->release_next; } else { *prevp = f->release_next; f->release_next = NULL; f->scheduled_for_release = false; } f = *prevp; } spin_unlock_irqrestore(&kmmio_lock, flags); /* This is the real RCU destroy call. */ call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages); } /* * Remove a kmmio probe. You have to synchronize_rcu() before you can be * sure that the callbacks will not be called anymore. Only after that * you may actually release your struct kmmio_probe. * * Unregistering a kmmio fault page has three steps: * 1. release_kmmio_fault_page() * Disarm the page, wait a grace period to let all faults finish. * 2. remove_kmmio_fault_pages() * Remove the pages from kmmio_page_table. * 3. rcu_free_kmmio_fault_pages() * Actually free the kmmio_fault_page structs as with RCU. 
*/ void unregister_kmmio_probe(struct kmmio_probe *p) { unsigned long flags; unsigned long size = 0; const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK); struct kmmio_fault_page *release_list = NULL; struct kmmio_delayed_release *drelease; spin_lock_irqsave(&kmmio_lock, flags); while (size < size_lim) { release_kmmio_fault_page(p->addr + size, &release_list); size += PAGE_SIZE; } list_del_rcu(&p->list); kmmio_count--; spin_unlock_irqrestore(&kmmio_lock, flags); if (!release_list) return; drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC); if (!drelease) { pr_crit("leaking kmmio_fault_page objects.\n"); return; } drelease->release_list = release_list; /* * This is not really RCU here. We have just disarmed a set of * pages so that they cannot trigger page faults anymore. However, * we cannot remove the pages from kmmio_page_table, * because a probe hit might be in flight on another CPU. The * pages are collected into a list, and they will be removed from * kmmio_page_table when it is certain that no probe hit related to * these pages can be in flight. RCU grace period sounds like a * good choice. * * If we removed the pages too early, kmmio page fault handler might * not find the respective kmmio_fault_page and determine it's not * a kmmio fault, when it actually is. This would lead to madness. 
*/ call_rcu(&drelease->rcu, remove_kmmio_fault_pages); } EXPORT_SYMBOL(unregister_kmmio_probe); static int kmmio_die_notifier(struct notifier_block *nb, unsigned long val, void *args) { struct die_args *arg = args; unsigned long* dr6_p = (unsigned long *)ERR_PTR(arg->err); if (val == DIE_DEBUG && (*dr6_p & DR_STEP)) if (post_kmmio_handler(*dr6_p, arg->regs) == 1) { /* * Reset the BS bit in dr6 (pointed by args->err) to * denote completion of processing */ *dr6_p &= ~DR_STEP; return NOTIFY_STOP; } return NOTIFY_DONE; } static struct notifier_block nb_die = { .notifier_call = kmmio_die_notifier }; int kmmio_init(void) { int i; for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++) INIT_LIST_HEAD(&kmmio_page_table[i]); return register_die_notifier(&nb_die); } void kmmio_cleanup(void) { int i; unregister_die_notifier(&nb_die); for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++) { WARN_ONCE(!list_empty(&kmmio_page_table[i]), KERN_ERR "kmmio_page_table not empty at cleanup, any further tracing will leak memory.\n"); } }
gpl-2.0
cherifyass/android_kernel_lge_hammerhead
arch/powerpc/platforms/8xx/ep88xc.c
11661
4431
/* * Platform setup for the Embedded Planet EP88xC board * * Author: Scott Wood <scottwood@freescale.com> * Copyright 2007 Freescale Semiconductor, Inc. * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/init.h> #include <linux/of_platform.h> #include <asm/machdep.h> #include <asm/io.h> #include <asm/udbg.h> #include <asm/cpm1.h> #include "mpc8xx.h" struct cpm_pin { int port, pin, flags; }; static struct cpm_pin ep88xc_pins[] = { /* SMC1 */ {1, 24, CPM_PIN_INPUT}, /* RX */ {1, 25, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TX */ /* SCC2 */ {0, 12, CPM_PIN_INPUT}, /* TX */ {0, 13, CPM_PIN_INPUT}, /* RX */ {2, 8, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_GPIO}, /* CD */ {2, 9, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_GPIO}, /* CTS */ {2, 14, CPM_PIN_INPUT}, /* RTS */ /* MII1 */ {0, 0, CPM_PIN_INPUT}, {0, 1, CPM_PIN_INPUT}, {0, 2, CPM_PIN_INPUT}, {0, 3, CPM_PIN_INPUT}, {0, 4, CPM_PIN_OUTPUT}, {0, 10, CPM_PIN_OUTPUT}, {0, 11, CPM_PIN_OUTPUT}, {1, 19, CPM_PIN_INPUT}, {1, 31, CPM_PIN_INPUT}, {2, 12, CPM_PIN_INPUT}, {2, 13, CPM_PIN_INPUT}, {3, 8, CPM_PIN_INPUT}, {4, 30, CPM_PIN_OUTPUT}, {4, 31, CPM_PIN_OUTPUT}, /* MII2 */ {4, 14, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, {4, 15, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, {4, 16, CPM_PIN_OUTPUT}, {4, 17, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, {4, 18, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, {4, 19, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, {4, 20, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, {4, 21, CPM_PIN_OUTPUT}, {4, 22, CPM_PIN_OUTPUT}, {4, 23, CPM_PIN_OUTPUT}, {4, 24, CPM_PIN_OUTPUT}, {4, 25, CPM_PIN_OUTPUT}, {4, 26, CPM_PIN_OUTPUT}, {4, 27, CPM_PIN_OUTPUT}, {4, 28, CPM_PIN_OUTPUT}, {4, 29, CPM_PIN_OUTPUT}, /* USB */ {0, 6, CPM_PIN_INPUT}, /* CLK2 */ {0, 14, CPM_PIN_INPUT}, /* USBOE */ {0, 15, CPM_PIN_INPUT}, /* USBRXD */ {2, 6, CPM_PIN_OUTPUT}, /* USBTXN */ {2, 7, CPM_PIN_OUTPUT}, /* USBTXP */ 
{2, 10, CPM_PIN_INPUT}, /* USBRXN */ {2, 11, CPM_PIN_INPUT}, /* USBRXP */ /* Misc */ {1, 26, CPM_PIN_INPUT}, /* BRGO2 */ {1, 27, CPM_PIN_INPUT}, /* BRGO1 */ }; static void __init init_ioports(void) { int i; for (i = 0; i < ARRAY_SIZE(ep88xc_pins); i++) { struct cpm_pin *pin = &ep88xc_pins[i]; cpm1_set_pin(pin->port, pin->pin, pin->flags); } cpm1_clk_setup(CPM_CLK_SMC1, CPM_BRG1, CPM_CLK_RTX); cpm1_clk_setup(CPM_CLK_SCC1, CPM_CLK2, CPM_CLK_TX); /* USB */ cpm1_clk_setup(CPM_CLK_SCC1, CPM_CLK2, CPM_CLK_RX); cpm1_clk_setup(CPM_CLK_SCC2, CPM_BRG2, CPM_CLK_TX); cpm1_clk_setup(CPM_CLK_SCC2, CPM_BRG2, CPM_CLK_RX); } static u8 __iomem *ep88xc_bcsr; #define BCSR7_SCC2_ENABLE 0x10 #define BCSR8_PHY1_ENABLE 0x80 #define BCSR8_PHY1_POWER 0x40 #define BCSR8_PHY2_ENABLE 0x20 #define BCSR8_PHY2_POWER 0x10 #define BCSR9_USB_ENABLE 0x80 #define BCSR9_USB_POWER 0x40 #define BCSR9_USB_HOST 0x20 #define BCSR9_USB_FULL_SPEED_TARGET 0x10 static void __init ep88xc_setup_arch(void) { struct device_node *np; cpm_reset(); init_ioports(); np = of_find_compatible_node(NULL, NULL, "fsl,ep88xc-bcsr"); if (!np) { printk(KERN_CRIT "Could not find fsl,ep88xc-bcsr node\n"); return; } ep88xc_bcsr = of_iomap(np, 0); of_node_put(np); if (!ep88xc_bcsr) { printk(KERN_CRIT "Could not remap BCSR\n"); return; } setbits8(&ep88xc_bcsr[7], BCSR7_SCC2_ENABLE); setbits8(&ep88xc_bcsr[8], BCSR8_PHY1_ENABLE | BCSR8_PHY1_POWER | BCSR8_PHY2_ENABLE | BCSR8_PHY2_POWER); } static int __init ep88xc_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "fsl,ep88xc"); } static struct of_device_id __initdata of_bus_ids[] = { { .name = "soc", }, { .name = "cpm", }, { .name = "localbus", }, {}, }; static int __init declare_of_platform_devices(void) { /* Publish the QE devices */ of_platform_bus_probe(NULL, of_bus_ids, NULL); return 0; } machine_device_initcall(ep88xc, declare_of_platform_devices); define_machine(ep88xc) { .name = "Embedded Planet EP88xC", .probe = ep88xc_probe, 
.setup_arch = ep88xc_setup_arch, .init_IRQ = mpc8xx_pics_init, .get_irq = mpc8xx_get_irq, .restart = mpc8xx_restart, .calibrate_decr = mpc8xx_calibrate_decr, .set_rtc_time = mpc8xx_set_rtc_time, .get_rtc_time = mpc8xx_get_rtc_time, .progress = udbg_progress, };
gpl-2.0
Split-Screen/android_kernel_samsung_hlte
arch/powerpc/platforms/8xx/ep88xc.c
11661
4431
/* * Platform setup for the Embedded Planet EP88xC board * * Author: Scott Wood <scottwood@freescale.com> * Copyright 2007 Freescale Semiconductor, Inc. * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/init.h> #include <linux/of_platform.h> #include <asm/machdep.h> #include <asm/io.h> #include <asm/udbg.h> #include <asm/cpm1.h> #include "mpc8xx.h" struct cpm_pin { int port, pin, flags; }; static struct cpm_pin ep88xc_pins[] = { /* SMC1 */ {1, 24, CPM_PIN_INPUT}, /* RX */ {1, 25, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TX */ /* SCC2 */ {0, 12, CPM_PIN_INPUT}, /* TX */ {0, 13, CPM_PIN_INPUT}, /* RX */ {2, 8, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_GPIO}, /* CD */ {2, 9, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_GPIO}, /* CTS */ {2, 14, CPM_PIN_INPUT}, /* RTS */ /* MII1 */ {0, 0, CPM_PIN_INPUT}, {0, 1, CPM_PIN_INPUT}, {0, 2, CPM_PIN_INPUT}, {0, 3, CPM_PIN_INPUT}, {0, 4, CPM_PIN_OUTPUT}, {0, 10, CPM_PIN_OUTPUT}, {0, 11, CPM_PIN_OUTPUT}, {1, 19, CPM_PIN_INPUT}, {1, 31, CPM_PIN_INPUT}, {2, 12, CPM_PIN_INPUT}, {2, 13, CPM_PIN_INPUT}, {3, 8, CPM_PIN_INPUT}, {4, 30, CPM_PIN_OUTPUT}, {4, 31, CPM_PIN_OUTPUT}, /* MII2 */ {4, 14, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, {4, 15, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, {4, 16, CPM_PIN_OUTPUT}, {4, 17, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, {4, 18, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, {4, 19, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, {4, 20, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, {4, 21, CPM_PIN_OUTPUT}, {4, 22, CPM_PIN_OUTPUT}, {4, 23, CPM_PIN_OUTPUT}, {4, 24, CPM_PIN_OUTPUT}, {4, 25, CPM_PIN_OUTPUT}, {4, 26, CPM_PIN_OUTPUT}, {4, 27, CPM_PIN_OUTPUT}, {4, 28, CPM_PIN_OUTPUT}, {4, 29, CPM_PIN_OUTPUT}, /* USB */ {0, 6, CPM_PIN_INPUT}, /* CLK2 */ {0, 14, CPM_PIN_INPUT}, /* USBOE */ {0, 15, CPM_PIN_INPUT}, /* USBRXD */ {2, 6, CPM_PIN_OUTPUT}, /* USBTXN */ {2, 7, CPM_PIN_OUTPUT}, /* USBTXP */ 
{2, 10, CPM_PIN_INPUT}, /* USBRXN */ {2, 11, CPM_PIN_INPUT}, /* USBRXP */ /* Misc */ {1, 26, CPM_PIN_INPUT}, /* BRGO2 */ {1, 27, CPM_PIN_INPUT}, /* BRGO1 */ }; static void __init init_ioports(void) { int i; for (i = 0; i < ARRAY_SIZE(ep88xc_pins); i++) { struct cpm_pin *pin = &ep88xc_pins[i]; cpm1_set_pin(pin->port, pin->pin, pin->flags); } cpm1_clk_setup(CPM_CLK_SMC1, CPM_BRG1, CPM_CLK_RTX); cpm1_clk_setup(CPM_CLK_SCC1, CPM_CLK2, CPM_CLK_TX); /* USB */ cpm1_clk_setup(CPM_CLK_SCC1, CPM_CLK2, CPM_CLK_RX); cpm1_clk_setup(CPM_CLK_SCC2, CPM_BRG2, CPM_CLK_TX); cpm1_clk_setup(CPM_CLK_SCC2, CPM_BRG2, CPM_CLK_RX); } static u8 __iomem *ep88xc_bcsr; #define BCSR7_SCC2_ENABLE 0x10 #define BCSR8_PHY1_ENABLE 0x80 #define BCSR8_PHY1_POWER 0x40 #define BCSR8_PHY2_ENABLE 0x20 #define BCSR8_PHY2_POWER 0x10 #define BCSR9_USB_ENABLE 0x80 #define BCSR9_USB_POWER 0x40 #define BCSR9_USB_HOST 0x20 #define BCSR9_USB_FULL_SPEED_TARGET 0x10 static void __init ep88xc_setup_arch(void) { struct device_node *np; cpm_reset(); init_ioports(); np = of_find_compatible_node(NULL, NULL, "fsl,ep88xc-bcsr"); if (!np) { printk(KERN_CRIT "Could not find fsl,ep88xc-bcsr node\n"); return; } ep88xc_bcsr = of_iomap(np, 0); of_node_put(np); if (!ep88xc_bcsr) { printk(KERN_CRIT "Could not remap BCSR\n"); return; } setbits8(&ep88xc_bcsr[7], BCSR7_SCC2_ENABLE); setbits8(&ep88xc_bcsr[8], BCSR8_PHY1_ENABLE | BCSR8_PHY1_POWER | BCSR8_PHY2_ENABLE | BCSR8_PHY2_POWER); } static int __init ep88xc_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "fsl,ep88xc"); } static struct of_device_id __initdata of_bus_ids[] = { { .name = "soc", }, { .name = "cpm", }, { .name = "localbus", }, {}, }; static int __init declare_of_platform_devices(void) { /* Publish the QE devices */ of_platform_bus_probe(NULL, of_bus_ids, NULL); return 0; } machine_device_initcall(ep88xc, declare_of_platform_devices); define_machine(ep88xc) { .name = "Embedded Planet EP88xC", .probe = ep88xc_probe, 
.setup_arch = ep88xc_setup_arch, .init_IRQ = mpc8xx_pics_init, .get_irq = mpc8xx_get_irq, .restart = mpc8xx_restart, .calibrate_decr = mpc8xx_calibrate_decr, .set_rtc_time = mpc8xx_set_rtc_time, .get_rtc_time = mpc8xx_get_rtc_time, .progress = udbg_progress, };
gpl-2.0
NovaFusion/twrp_kernel
drivers/char/hw_random/intel-rng.c
12173
11445
/* * RNG driver for Intel RNGs * * Copyright 2005 (c) MontaVista Software, Inc. * * with the majority of the code coming from: * * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG) * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com> * * derived from * * Hardware driver for the AMD 768 Random Number Generator (RNG) * (c) Copyright 2001 Red Hat Inc * * derived from * * Hardware driver for Intel i810 Random Number Generator (RNG) * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com> * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/hw_random.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/stop_machine.h> #include <linux/delay.h> #include <linux/slab.h> #include <asm/io.h> #define PFX KBUILD_MODNAME ": " /* * RNG registers */ #define INTEL_RNG_HW_STATUS 0 #define INTEL_RNG_PRESENT 0x40 #define INTEL_RNG_ENABLED 0x01 #define INTEL_RNG_STATUS 1 #define INTEL_RNG_DATA_PRESENT 0x01 #define INTEL_RNG_DATA 2 /* * Magic address at which Intel PCI bridges locate the RNG */ #define INTEL_RNG_ADDR 0xFFBC015F #define INTEL_RNG_ADDR_LEN 3 /* * LPC bridge PCI config space registers */ #define FWH_DEC_EN1_REG_OLD 0xe3 #define FWH_DEC_EN1_REG_NEW 0xd9 /* high byte of 16-bit register */ #define FWH_F8_EN_MASK 0x80 #define BIOS_CNTL_REG_OLD 0x4e #define BIOS_CNTL_REG_NEW 0xdc #define BIOS_CNTL_WRITE_ENABLE_MASK 0x01 #define BIOS_CNTL_LOCK_ENABLE_MASK 0x02 /* * Magic address at which Intel Firmware Hubs get accessed */ #define INTEL_FWH_ADDR 0xffff0000 #define INTEL_FWH_ADDR_LEN 2 /* * Intel Firmware Hub command codes (write to any address inside the device) */ #define INTEL_FWH_RESET_CMD 0xff /* aka READ_ARRAY */ #define INTEL_FWH_READ_ID_CMD 0x90 /* * Intel Firmware Hub Read ID command result addresses */ 
#define INTEL_FWH_MANUFACTURER_CODE_ADDRESS 0x000000 #define INTEL_FWH_DEVICE_CODE_ADDRESS 0x000001 /* * Intel Firmware Hub Read ID command result values */ #define INTEL_FWH_MANUFACTURER_CODE 0x89 #define INTEL_FWH_DEVICE_CODE_8M 0xac #define INTEL_FWH_DEVICE_CODE_4M 0xad /* * Data for PCI driver interface * * This data only exists for exporting the supported * PCI ids via MODULE_DEVICE_TABLE. We do not actually * register a pci_driver, because someone else might one day * want to register another driver on the same PCI id. */ static const struct pci_device_id pci_tbl[] = { /* AA { PCI_DEVICE(0x8086, 0x2418) }, */ { PCI_DEVICE(0x8086, 0x2410) }, /* AA */ /* AB { PCI_DEVICE(0x8086, 0x2428) }, */ { PCI_DEVICE(0x8086, 0x2420) }, /* AB */ /* ?? { PCI_DEVICE(0x8086, 0x2430) }, */ /* BAM, CAM, DBM, FBM, GxM { PCI_DEVICE(0x8086, 0x2448) }, */ { PCI_DEVICE(0x8086, 0x244c) }, /* BAM */ { PCI_DEVICE(0x8086, 0x248c) }, /* CAM */ { PCI_DEVICE(0x8086, 0x24cc) }, /* DBM */ { PCI_DEVICE(0x8086, 0x2641) }, /* FBM */ { PCI_DEVICE(0x8086, 0x27b9) }, /* GxM */ { PCI_DEVICE(0x8086, 0x27bd) }, /* GxM DH */ /* BA, CA, DB, Ex, 6300, Fx, 631x/632x, Gx { PCI_DEVICE(0x8086, 0x244e) }, */ { PCI_DEVICE(0x8086, 0x2440) }, /* BA */ { PCI_DEVICE(0x8086, 0x2480) }, /* CA */ { PCI_DEVICE(0x8086, 0x24c0) }, /* DB */ { PCI_DEVICE(0x8086, 0x24d0) }, /* Ex */ { PCI_DEVICE(0x8086, 0x25a1) }, /* 6300 */ { PCI_DEVICE(0x8086, 0x2640) }, /* Fx */ { PCI_DEVICE(0x8086, 0x2670) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x2671) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x2672) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x2673) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x2674) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x2675) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x2676) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x2677) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x2678) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x2679) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x267a) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x267b) }, /* 
631x/632x */ { PCI_DEVICE(0x8086, 0x267c) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x267d) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x267e) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x267f) }, /* 631x/632x */ { PCI_DEVICE(0x8086, 0x27b8) }, /* Gx */ /* E { PCI_DEVICE(0x8086, 0x245e) }, */ { PCI_DEVICE(0x8086, 0x2450) }, /* E */ { 0, }, /* terminate list */ }; MODULE_DEVICE_TABLE(pci, pci_tbl); static __initdata int no_fwh_detect; module_param(no_fwh_detect, int, 0); MODULE_PARM_DESC(no_fwh_detect, "Skip FWH detection:\n" " positive value - skip if FWH space locked read-only\n" " negative value - skip always"); static inline u8 hwstatus_get(void __iomem *mem) { return readb(mem + INTEL_RNG_HW_STATUS); } static inline u8 hwstatus_set(void __iomem *mem, u8 hw_status) { writeb(hw_status, mem + INTEL_RNG_HW_STATUS); return hwstatus_get(mem); } static int intel_rng_data_present(struct hwrng *rng, int wait) { void __iomem *mem = (void __iomem *)rng->priv; int data, i; for (i = 0; i < 20; i++) { data = !!(readb(mem + INTEL_RNG_STATUS) & INTEL_RNG_DATA_PRESENT); if (data || !wait) break; udelay(10); } return data; } static int intel_rng_data_read(struct hwrng *rng, u32 *data) { void __iomem *mem = (void __iomem *)rng->priv; *data = readb(mem + INTEL_RNG_DATA); return 1; } static int intel_rng_init(struct hwrng *rng) { void __iomem *mem = (void __iomem *)rng->priv; u8 hw_status; int err = -EIO; hw_status = hwstatus_get(mem); /* turn RNG h/w on, if it's off */ if ((hw_status & INTEL_RNG_ENABLED) == 0) hw_status = hwstatus_set(mem, hw_status | INTEL_RNG_ENABLED); if ((hw_status & INTEL_RNG_ENABLED) == 0) { printk(KERN_ERR PFX "cannot enable RNG, aborting\n"); goto out; } err = 0; out: return err; } static void intel_rng_cleanup(struct hwrng *rng) { void __iomem *mem = (void __iomem *)rng->priv; u8 hw_status; hw_status = hwstatus_get(mem); if (hw_status & INTEL_RNG_ENABLED) hwstatus_set(mem, hw_status & ~INTEL_RNG_ENABLED); else printk(KERN_WARNING PFX "unusual: RNG already 
disabled\n"); } static struct hwrng intel_rng = { .name = "intel", .init = intel_rng_init, .cleanup = intel_rng_cleanup, .data_present = intel_rng_data_present, .data_read = intel_rng_data_read, }; struct intel_rng_hw { struct pci_dev *dev; void __iomem *mem; u8 bios_cntl_off; u8 bios_cntl_val; u8 fwh_dec_en1_off; u8 fwh_dec_en1_val; }; static int __init intel_rng_hw_init(void *_intel_rng_hw) { struct intel_rng_hw *intel_rng_hw = _intel_rng_hw; u8 mfc, dvc; /* interrupts disabled in stop_machine call */ if (!(intel_rng_hw->fwh_dec_en1_val & FWH_F8_EN_MASK)) pci_write_config_byte(intel_rng_hw->dev, intel_rng_hw->fwh_dec_en1_off, intel_rng_hw->fwh_dec_en1_val | FWH_F8_EN_MASK); if (!(intel_rng_hw->bios_cntl_val & BIOS_CNTL_WRITE_ENABLE_MASK)) pci_write_config_byte(intel_rng_hw->dev, intel_rng_hw->bios_cntl_off, intel_rng_hw->bios_cntl_val | BIOS_CNTL_WRITE_ENABLE_MASK); writeb(INTEL_FWH_RESET_CMD, intel_rng_hw->mem); writeb(INTEL_FWH_READ_ID_CMD, intel_rng_hw->mem); mfc = readb(intel_rng_hw->mem + INTEL_FWH_MANUFACTURER_CODE_ADDRESS); dvc = readb(intel_rng_hw->mem + INTEL_FWH_DEVICE_CODE_ADDRESS); writeb(INTEL_FWH_RESET_CMD, intel_rng_hw->mem); if (!(intel_rng_hw->bios_cntl_val & (BIOS_CNTL_LOCK_ENABLE_MASK|BIOS_CNTL_WRITE_ENABLE_MASK))) pci_write_config_byte(intel_rng_hw->dev, intel_rng_hw->bios_cntl_off, intel_rng_hw->bios_cntl_val); if (!(intel_rng_hw->fwh_dec_en1_val & FWH_F8_EN_MASK)) pci_write_config_byte(intel_rng_hw->dev, intel_rng_hw->fwh_dec_en1_off, intel_rng_hw->fwh_dec_en1_val); if (mfc != INTEL_FWH_MANUFACTURER_CODE || (dvc != INTEL_FWH_DEVICE_CODE_8M && dvc != INTEL_FWH_DEVICE_CODE_4M)) { printk(KERN_NOTICE PFX "FWH not detected\n"); return -ENODEV; } return 0; } static int __init intel_init_hw_struct(struct intel_rng_hw *intel_rng_hw, struct pci_dev *dev) { intel_rng_hw->bios_cntl_val = 0xff; intel_rng_hw->fwh_dec_en1_val = 0xff; intel_rng_hw->dev = dev; /* Check for Intel 82802 */ if (dev->device < 0x2640) { intel_rng_hw->fwh_dec_en1_off = 
FWH_DEC_EN1_REG_OLD; intel_rng_hw->bios_cntl_off = BIOS_CNTL_REG_OLD; } else { intel_rng_hw->fwh_dec_en1_off = FWH_DEC_EN1_REG_NEW; intel_rng_hw->bios_cntl_off = BIOS_CNTL_REG_NEW; } pci_read_config_byte(dev, intel_rng_hw->fwh_dec_en1_off, &intel_rng_hw->fwh_dec_en1_val); pci_read_config_byte(dev, intel_rng_hw->bios_cntl_off, &intel_rng_hw->bios_cntl_val); if ((intel_rng_hw->bios_cntl_val & (BIOS_CNTL_LOCK_ENABLE_MASK|BIOS_CNTL_WRITE_ENABLE_MASK)) == BIOS_CNTL_LOCK_ENABLE_MASK) { static __initdata /*const*/ char warning[] = KERN_WARNING PFX "Firmware space is locked read-only. If you can't or\n" PFX "don't want to disable this in firmware setup, and if\n" PFX "you are certain that your system has a functional\n" PFX "RNG, try using the 'no_fwh_detect' option.\n"; if (no_fwh_detect) return -ENODEV; printk(warning); return -EBUSY; } intel_rng_hw->mem = ioremap_nocache(INTEL_FWH_ADDR, INTEL_FWH_ADDR_LEN); if (intel_rng_hw->mem == NULL) return -EBUSY; return 0; } static int __init mod_init(void) { int err = -ENODEV; int i; struct pci_dev *dev = NULL; void __iomem *mem = mem; u8 hw_status; struct intel_rng_hw *intel_rng_hw; for (i = 0; !dev && pci_tbl[i].vendor; ++i) dev = pci_get_device(pci_tbl[i].vendor, pci_tbl[i].device, NULL); if (!dev) goto out; /* Device not found. */ if (no_fwh_detect < 0) { pci_dev_put(dev); goto fwh_done; } intel_rng_hw = kmalloc(sizeof(*intel_rng_hw), GFP_KERNEL); if (!intel_rng_hw) { pci_dev_put(dev); goto out; } err = intel_init_hw_struct(intel_rng_hw, dev); if (err) { pci_dev_put(dev); kfree(intel_rng_hw); if (err == -ENODEV) goto fwh_done; goto out; } /* * Since the BIOS code/data is going to disappear from its normal * location with the Read ID command, all activity on the system * must be stopped until the state is back to normal. * * Use stop_machine because IPIs can be blocked by disabling * interrupts. 
*/ err = stop_machine(intel_rng_hw_init, intel_rng_hw, NULL); pci_dev_put(dev); iounmap(intel_rng_hw->mem); kfree(intel_rng_hw); if (err) goto out; fwh_done: err = -ENOMEM; mem = ioremap(INTEL_RNG_ADDR, INTEL_RNG_ADDR_LEN); if (!mem) goto out; intel_rng.priv = (unsigned long)mem; /* Check for Random Number Generator */ err = -ENODEV; hw_status = hwstatus_get(mem); if ((hw_status & INTEL_RNG_PRESENT) == 0) { iounmap(mem); goto out; } printk(KERN_INFO "Intel 82802 RNG detected\n"); err = hwrng_register(&intel_rng); if (err) { printk(KERN_ERR PFX "RNG registering failed (%d)\n", err); iounmap(mem); } out: return err; } static void __exit mod_exit(void) { void __iomem *mem = (void __iomem *)intel_rng.priv; hwrng_unregister(&intel_rng); iounmap(mem); } module_init(mod_init); module_exit(mod_exit); MODULE_DESCRIPTION("H/W RNG driver for Intel chipsets"); MODULE_LICENSE("GPL");
gpl-2.0
kaber/net-next-netlink-mmap
arch/score/mm/pgtable.c
13709
1690
/* * arch/score/mm/pgtable-32.c * * Score Processor version. * * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. * Lennox Wu <lennox.wu@sunplusct.com> * Chen Liqin <liqin.chen@sunplusct.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/bootmem.h> #include <linux/init.h> #include <linux/pfn.h> #include <linux/mm.h> void pgd_init(unsigned long page) { unsigned long *p = (unsigned long *) page; int i; for (i = 0; i < USER_PTRS_PER_PGD; i += 8) { p[i + 0] = (unsigned long) invalid_pte_table; p[i + 1] = (unsigned long) invalid_pte_table; p[i + 2] = (unsigned long) invalid_pte_table; p[i + 3] = (unsigned long) invalid_pte_table; p[i + 4] = (unsigned long) invalid_pte_table; p[i + 5] = (unsigned long) invalid_pte_table; p[i + 6] = (unsigned long) invalid_pte_table; p[i + 7] = (unsigned long) invalid_pte_table; } } void __init pagetable_init(void) { /* Initialize the entire pgd. */ pgd_init((unsigned long)swapper_pg_dir); }
gpl-2.0
embest-tech/rowboat-kernel
drivers/net/wireless/orinoco/spectrum_cs.c
142
15809
/* * Driver for 802.11b cards using RAM-loadable Symbol firmware, such as * Symbol Wireless Networker LA4137, CompactFlash cards by Socket * Communications and Intel PRO/Wireless 2011B. * * The driver implements Symbol firmware download. The rest is handled * in hermes.c and orinoco.c. * * Utilities for downloading the Symbol firmware are available at * http://sourceforge.net/projects/orinoco/ * * Copyright (C) 2002-2005 Pavel Roskin <proski@gnu.org> * Portions based on orinoco_cs.c: * Copyright (C) David Gibson, Linuxcare Australia * Portions based on Spectrum24tDnld.c from original spectrum24 driver: * Copyright (C) Symbol Technologies. * * See copyright notice in file orinoco.c. */ #define DRIVER_NAME "spectrum_cs" #define PFX DRIVER_NAME ": " #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <pcmcia/cs_types.h> #include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> #include "orinoco.h" /********************************************************************/ /* Module stuff */ /********************************************************************/ MODULE_AUTHOR("Pavel Roskin <proski@gnu.org>"); MODULE_DESCRIPTION("Driver for Symbol Spectrum24 Trilogy cards with firmware downloader"); MODULE_LICENSE("Dual MPL/GPL"); /* Module parameters */ /* Some D-Link cards have buggy CIS. They do work at 5v properly, but * don't have any CIS entry for it. This workaround it... 
*/ static int ignore_cis_vcc; /* = 0 */ module_param(ignore_cis_vcc, int, 0); MODULE_PARM_DESC(ignore_cis_vcc, "Allow voltage mismatch between card and socket"); /********************************************************************/ /* Data structures */ /********************************************************************/ /* PCMCIA specific device information (goes in the card field of * struct orinoco_private */ struct orinoco_pccard { struct pcmcia_device *p_dev; dev_node_t node; }; /********************************************************************/ /* Function prototypes */ /********************************************************************/ static int spectrum_cs_config(struct pcmcia_device *link); static void spectrum_cs_release(struct pcmcia_device *link); /* Constants for the CISREG_CCSR register */ #define HCR_RUN 0x07 /* run firmware after reset */ #define HCR_IDLE 0x0E /* don't run firmware after reset */ #define HCR_MEM16 0x10 /* memory width bit, should be preserved */ #define CS_CHECK(fn, ret) \ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) /* * Reset the card using configuration registers COR and CCSR. * If IDLE is 1, stop the firmware, so that it can be safely rewritten. 
*/ static int spectrum_reset(struct pcmcia_device *link, int idle) { int last_ret, last_fn; conf_reg_t reg; u_int save_cor; /* Doing it if hardware is gone is guaranteed crash */ if (!pcmcia_dev_present(link)) return -ENODEV; /* Save original COR value */ reg.Function = 0; reg.Action = CS_READ; reg.Offset = CISREG_COR; CS_CHECK(AccessConfigurationRegister, pcmcia_access_configuration_register(link, &reg)); save_cor = reg.Value; /* Soft-Reset card */ reg.Action = CS_WRITE; reg.Offset = CISREG_COR; reg.Value = (save_cor | COR_SOFT_RESET); CS_CHECK(AccessConfigurationRegister, pcmcia_access_configuration_register(link, &reg)); udelay(1000); /* Read CCSR */ reg.Action = CS_READ; reg.Offset = CISREG_CCSR; CS_CHECK(AccessConfigurationRegister, pcmcia_access_configuration_register(link, &reg)); /* * Start or stop the firmware. Memory width bit should be * preserved from the value we've just read. */ reg.Action = CS_WRITE; reg.Offset = CISREG_CCSR; reg.Value = (idle ? HCR_IDLE : HCR_RUN) | (reg.Value & HCR_MEM16); CS_CHECK(AccessConfigurationRegister, pcmcia_access_configuration_register(link, &reg)); udelay(1000); /* Restore original COR configuration index */ reg.Action = CS_WRITE; reg.Offset = CISREG_COR; reg.Value = (save_cor & ~COR_SOFT_RESET); CS_CHECK(AccessConfigurationRegister, pcmcia_access_configuration_register(link, &reg)); udelay(1000); return 0; cs_failed: cs_error(link, last_fn, last_ret); return -ENODEV; } /********************************************************************/ /* Device methods */ /********************************************************************/ static int spectrum_cs_hard_reset(struct orinoco_private *priv) { struct orinoco_pccard *card = priv->card; struct pcmcia_device *link = card->p_dev; /* Soft reset using COR and HCR */ spectrum_reset(link, 0); return 0; } static int spectrum_cs_stop_firmware(struct orinoco_private *priv, int idle) { struct orinoco_pccard *card = priv->card; struct pcmcia_device *link = card->p_dev; return 
spectrum_reset(link, idle); } /********************************************************************/ /* PCMCIA stuff */ /********************************************************************/ /* * This creates an "instance" of the driver, allocating local data * structures for one device. The device is registered with Card * Services. * * The dev_link structure is initialized, but we don't actually * configure the card at this point -- we wait until we receive a card * insertion event. */ static int spectrum_cs_probe(struct pcmcia_device *link) { struct net_device *dev; struct orinoco_private *priv; struct orinoco_pccard *card; dev = alloc_orinocodev(sizeof(*card), &handle_to_dev(link), spectrum_cs_hard_reset, spectrum_cs_stop_firmware); if (! dev) return -ENOMEM; priv = netdev_priv(dev); card = priv->card; /* Link both structures together */ card->p_dev = link; link->priv = dev; /* Interrupt setup */ link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; link->irq.IRQInfo1 = IRQ_LEVEL_ID; link->irq.Handler = orinoco_interrupt; link->irq.Instance = dev; /* General socket configuration defaults can go here. In this * client, we assume very little, and rely on the CIS for * almost everything. In most clients, many details (i.e., * number, sizes, and attributes of IO windows) are fixed by * the nature of the device, and can be hard-wired here. */ link->conf.Attributes = 0; link->conf.IntType = INT_MEMORY_AND_IO; return spectrum_cs_config(link); } /* spectrum_cs_attach */ /* * This deletes a driver "instance". The device is de-registered with * Card Services. If it has been released, all local data structures * are freed. Otherwise, the structures will be freed when the device * is released. 
*/ static void spectrum_cs_detach(struct pcmcia_device *link) { struct net_device *dev = link->priv; if (link->dev_node) unregister_netdev(dev); spectrum_cs_release(link); free_orinocodev(dev); } /* spectrum_cs_detach */ /* * spectrum_cs_config() is scheduled to run after a CARD_INSERTION * event is received, to configure the PCMCIA socket, and to make the * device available to the system. */ static int spectrum_cs_config_check(struct pcmcia_device *p_dev, cistpl_cftable_entry_t *cfg, cistpl_cftable_entry_t *dflt, unsigned int vcc, void *priv_data) { if (cfg->index == 0) goto next_entry; /* Use power settings for Vcc and Vpp if present */ /* Note that the CIS values need to be rescaled */ if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) { if (vcc != cfg->vcc.param[CISTPL_POWER_VNOM] / 10000) { DEBUG(2, "%s: Vcc mismatch (vcc = %d, CIS = %d)\n", __func__, vcc, cfg->vcc.param[CISTPL_POWER_VNOM] / 10000); if (!ignore_cis_vcc) goto next_entry; } } else if (dflt->vcc.present & (1 << CISTPL_POWER_VNOM)) { if (vcc != dflt->vcc.param[CISTPL_POWER_VNOM] / 10000) { DEBUG(2, "%s: Vcc mismatch (vcc = %d, CIS = %d)\n", __func__, vcc, dflt->vcc.param[CISTPL_POWER_VNOM] / 10000); if (!ignore_cis_vcc) goto next_entry; } } if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM)) p_dev->conf.Vpp = cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000; else if (dflt->vpp1.present & (1 << CISTPL_POWER_VNOM)) p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM] / 10000; /* Do we need to allocate an interrupt? */ p_dev->conf.Attributes |= CONF_ENABLE_IRQ; /* IO window settings */ p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0; if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { cistpl_io_t *io = (cfg->io.nwin) ? 
&cfg->io : &dflt->io; p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; if (!(io->flags & CISTPL_IO_8BIT)) p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16; if (!(io->flags & CISTPL_IO_16BIT)) p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8; p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK; p_dev->io.BasePort1 = io->win[0].base; p_dev->io.NumPorts1 = io->win[0].len; if (io->nwin > 1) { p_dev->io.Attributes2 = p_dev->io.Attributes1; p_dev->io.BasePort2 = io->win[1].base; p_dev->io.NumPorts2 = io->win[1].len; } /* This reserves IO space but doesn't actually enable it */ if (pcmcia_request_io(p_dev, &p_dev->io) != 0) goto next_entry; } return 0; next_entry: pcmcia_disable_device(p_dev); return -ENODEV; }; static int spectrum_cs_config(struct pcmcia_device *link) { struct net_device *dev = link->priv; struct orinoco_private *priv = netdev_priv(dev); struct orinoco_pccard *card = priv->card; hermes_t *hw = &priv->hw; int last_fn, last_ret; void __iomem *mem; /* * In this loop, we scan the CIS for configuration table * entries, each of which describes a valid card * configuration, including voltage, IO window, memory window, * and interrupt settings. * * We make no assumptions about the card to be configured: we * use just the information available in the CIS. In an ideal * world, this would work for any PCMCIA card, but it requires * a complete and accurate CIS. In practice, a driver usually * "knows" most of these things without consulting the CIS, * and most client drivers will only use the CIS to fill in * implementation-defined details. */ last_ret = pcmcia_loop_config(link, spectrum_cs_config_check, NULL); if (last_ret) { if (!ignore_cis_vcc) printk(KERN_ERR PFX "GetNextTuple(): No matching " "CIS configuration. Maybe you need the " "ignore_cis_vcc=1 parameter.\n"); cs_error(link, RequestIO, last_ret); goto failed; } /* * Allocate an interrupt line. 
Note that this does not assign * a handler to the interrupt, unless the 'Handler' member of * the irq structure is initialized. */ CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); /* We initialize the hermes structure before completing PCMCIA * configuration just in case the interrupt handler gets * called. */ mem = ioport_map(link->io.BasePort1, link->io.NumPorts1); if (!mem) goto cs_failed; hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING); /* * This actually configures the PCMCIA socket -- setting up * the I/O windows and the interrupt mapping, and putting the * card and host interface into "Memory and IO" mode. */ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); /* Ok, we have the configuration, prepare to register the netdev */ dev->base_addr = link->io.BasePort1; dev->irq = link->irq.AssignedIRQ; card->node.major = card->node.minor = 0; /* Reset card */ if (spectrum_cs_hard_reset(priv) != 0) { goto failed; } SET_NETDEV_DEV(dev, &handle_to_dev(link)); /* Tell the stack we exist */ if (register_netdev(dev) != 0) { printk(KERN_ERR PFX "register_netdev() failed\n"); goto failed; } /* At this point, the dev_node_t structure(s) needs to be * initialized and arranged in a linked list at link->dev_node. */ strcpy(card->node.dev_name, dev->name); link->dev_node = &card->node; /* link->dev_node being non-NULL is also used to indicate that the net_device has been registered */ /* Finally, report what we've done */ printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s, irq %d, io " "0x%04x-0x%04x\n", dev->name, dev_name(dev->dev.parent), link->irq.AssignedIRQ, link->io.BasePort1, link->io.BasePort1 + link->io.NumPorts1 - 1); return 0; cs_failed: cs_error(link, last_fn, last_ret); failed: spectrum_cs_release(link); return -ENODEV; } /* spectrum_cs_config */ /* * After a card is removed, spectrum_cs_release() will unregister the * device, and release the PCMCIA configuration. 
If the device is * still open, this will be postponed until it is closed. */ static void spectrum_cs_release(struct pcmcia_device *link) { struct net_device *dev = link->priv; struct orinoco_private *priv = netdev_priv(dev); unsigned long flags; /* We're committed to taking the device away now, so mark the * hardware as unavailable */ spin_lock_irqsave(&priv->lock, flags); priv->hw_unavailable++; spin_unlock_irqrestore(&priv->lock, flags); pcmcia_disable_device(link); if (priv->hw.iobase) ioport_unmap(priv->hw.iobase); } /* spectrum_cs_release */ static int spectrum_cs_suspend(struct pcmcia_device *link) { struct net_device *dev = link->priv; struct orinoco_private *priv = netdev_priv(dev); unsigned long flags; int err = 0; /* Mark the device as stopped, to block IO until later */ spin_lock_irqsave(&priv->lock, flags); err = __orinoco_down(dev); if (err) printk(KERN_WARNING "%s: Error %d downing interface\n", dev->name, err); netif_device_detach(dev); priv->hw_unavailable++; spin_unlock_irqrestore(&priv->lock, flags); return err; } static int spectrum_cs_resume(struct pcmcia_device *link) { struct net_device *dev = link->priv; struct orinoco_private *priv = netdev_priv(dev); unsigned long flags; int err; err = orinoco_reinit_firmware(dev); if (err) { printk(KERN_ERR "%s: Error %d re-initializing firmware\n", dev->name, err); return -EIO; } spin_lock_irqsave(&priv->lock, flags); netif_device_attach(dev); priv->hw_unavailable--; if (priv->open && !priv->hw_unavailable) { err = __orinoco_up(dev); if (err) printk(KERN_ERR "%s: Error %d restarting card\n", dev->name, err); } spin_unlock_irqrestore(&priv->lock, flags); return 0; } /********************************************************************/ /* Module initialization */ /********************************************************************/ /* Can't be declared "const" or the whole __initdata section will * become const */ static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION " (Pavel Roskin 
<proski@gnu.org>," " David Gibson <hermes@gibson.dropbear.id.au>, et al)"; static struct pcmcia_device_id spectrum_cs_ids[] = { PCMCIA_DEVICE_MANF_CARD(0x026c, 0x0001), /* Symbol Spectrum24 LA4137 */ PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0001), /* Socket Communications CF */ PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless LAN PC Card", 0x816cc815, 0x6fbf459a), /* 2011B, not 2011 */ PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, spectrum_cs_ids); static struct pcmcia_driver orinoco_driver = { .owner = THIS_MODULE, .drv = { .name = DRIVER_NAME, }, .probe = spectrum_cs_probe, .remove = spectrum_cs_detach, .suspend = spectrum_cs_suspend, .resume = spectrum_cs_resume, .id_table = spectrum_cs_ids, }; static int __init init_spectrum_cs(void) { printk(KERN_DEBUG "%s\n", version); return pcmcia_register_driver(&orinoco_driver); } static void __exit exit_spectrum_cs(void) { pcmcia_unregister_driver(&orinoco_driver); } module_init(init_spectrum_cs); module_exit(exit_spectrum_cs);
gpl-2.0
kakazhang/kernel
drivers/gpu/drm/nouveau/nvc0_vm.c
654
3825
/* * Copyright 2010 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_vm.h" void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index, struct nouveau_gpuobj *pgt[2]) { u32 pde[2] = { 0, 0 }; if (pgt[0]) pde[1] = 0x00000001 | (pgt[0]->vinst >> 8); if (pgt[1]) pde[0] = 0x00000001 | (pgt[1]->vinst >> 8); nv_wo32(pgd, (index * 8) + 0, pde[0]); nv_wo32(pgd, (index * 8) + 4, pde[1]); } static inline u64 nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target) { phys >>= 8; phys |= 0x00000001; /* present */ if (vma->access & NV_MEM_ACCESS_SYS) phys |= 0x00000002; phys |= ((u64)target << 32); phys |= ((u64)memtype << 36); return phys; } void nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta) { u32 next = 1 << (vma->node->type - 8); phys = nvc0_vm_addr(vma, phys, mem->memtype, 0); pte <<= 3; while (cnt--) { nv_wo32(pgt, pte + 0, lower_32_bits(phys)); nv_wo32(pgt, pte + 4, upper_32_bits(phys)); phys += next; pte += 8; } } void nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) { pte <<= 3; while (cnt--) { u64 phys = nvc0_vm_addr(vma, *list++, mem->memtype, 5); nv_wo32(pgt, pte + 0, lower_32_bits(phys)); nv_wo32(pgt, pte + 4, upper_32_bits(phys)); pte += 8; } } void nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt) { pte <<= 3; while (cnt--) { nv_wo32(pgt, pte + 0, 0x00000000); nv_wo32(pgt, pte + 4, 0x00000000); pte += 8; } } void nvc0_vm_flush(struct nouveau_vm *vm) { struct drm_nouveau_private *dev_priv = vm->dev->dev_private; struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; struct drm_device *dev = vm->dev; struct nouveau_vm_pgd *vpgd; unsigned long flags; u32 engine; engine = 1; if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm) engine |= 4; pinstmem->flush(vm->dev); spin_lock_irqsave(&dev_priv->vm_lock, flags); list_for_each_entry(vpgd, 
&vm->pgd_list, head) { /* looks like maybe a "free flush slots" counter, the * faster you write to 0x100cbc to more it decreases */ if (!nv_wait_ne(dev, 0x100c80, 0x00ff0000, 0x00000000)) { NV_ERROR(dev, "vm timeout 0: 0x%08x %d\n", nv_rd32(dev, 0x100c80), engine); } nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8); nv_wr32(dev, 0x100cbc, 0x80000000 | engine); /* wait for flush to be queued? */ if (!nv_wait(dev, 0x100c80, 0x00008000, 0x00008000)) { NV_ERROR(dev, "vm timeout 1: 0x%08x %d\n", nv_rd32(dev, 0x100c80), engine); } } spin_unlock_irqrestore(&dev_priv->vm_lock, flags); }
gpl-2.0
MCherifiOSS/linux
fs/coda/psdev.c
1678
10510
/* * An implementation of a loadable kernel mode driver providing * multiple kernel/user space bidirectional communications links. * * Author: Alan Cox <alan@lxorguk.ukuu.org.uk> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Adapted to become the Linux 2.0 Coda pseudo device * Peter Braam <braam@maths.ox.ac.uk> * Michael Callahan <mjc@emmy.smith.edu> * * Changes for Linux 2.1 * Copyright (c) 1997 Carnegie-Mellon University */ #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/major.h> #include <linux/time.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/ioport.h> #include <linux/fcntl.h> #include <linux/delay.h> #include <linux/skbuff.h> #include <linux/proc_fs.h> #include <linux/vmalloc.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/poll.h> #include <linux/init.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/pid_namespace.h> #include <asm/io.h> #include <asm/poll.h> #include <linux/uaccess.h> #include <linux/coda.h> #include <linux/coda_psdev.h> #include "coda_linux.h" #include "coda_int.h" /* statistics */ int coda_hard; /* allows signals during upcalls */ unsigned long coda_timeout = 30; /* .. 
secs, then signals will dequeue */ struct venus_comm coda_comms[MAX_CODADEVS]; static struct class *coda_psdev_class; /* * Device operations */ static unsigned int coda_psdev_poll(struct file *file, poll_table * wait) { struct venus_comm *vcp = (struct venus_comm *) file->private_data; unsigned int mask = POLLOUT | POLLWRNORM; poll_wait(file, &vcp->vc_waitq, wait); mutex_lock(&vcp->vc_mutex); if (!list_empty(&vcp->vc_pending)) mask |= POLLIN | POLLRDNORM; mutex_unlock(&vcp->vc_mutex); return mask; } static long coda_psdev_ioctl(struct file * filp, unsigned int cmd, unsigned long arg) { unsigned int data; switch(cmd) { case CIOC_KERNEL_VERSION: data = CODA_KERNEL_VERSION; return put_user(data, (int __user *) arg); default: return -ENOTTY; } return 0; } /* * Receive a message written by Venus to the psdev */ static ssize_t coda_psdev_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *off) { struct venus_comm *vcp = (struct venus_comm *) file->private_data; struct upc_req *req = NULL; struct upc_req *tmp; struct list_head *lh; struct coda_in_hdr hdr; ssize_t retval = 0, count = 0; int error; /* Peek at the opcode, uniquefier */ if (copy_from_user(&hdr, buf, 2 * sizeof(u_long))) return -EFAULT; if (DOWNCALL(hdr.opcode)) { union outputArgs *dcbuf; int size = sizeof(*dcbuf); if ( nbytes < sizeof(struct coda_out_hdr) ) { pr_warn("coda_downcall opc %d uniq %d, not enough!\n", hdr.opcode, hdr.unique); count = nbytes; goto out; } if ( nbytes > size ) { pr_warn("downcall opc %d, uniq %d, too much!", hdr.opcode, hdr.unique); nbytes = size; } CODA_ALLOC(dcbuf, union outputArgs *, nbytes); if (copy_from_user(dcbuf, buf, nbytes)) { CODA_FREE(dcbuf, nbytes); retval = -EFAULT; goto out; } /* what downcall errors does Venus handle ? 
*/ error = coda_downcall(vcp, hdr.opcode, dcbuf); CODA_FREE(dcbuf, nbytes); if (error) { pr_warn("%s: coda_downcall error: %d\n", __func__, error); retval = error; goto out; } count = nbytes; goto out; } /* Look for the message on the processing queue. */ mutex_lock(&vcp->vc_mutex); list_for_each(lh, &vcp->vc_processing) { tmp = list_entry(lh, struct upc_req , uc_chain); if (tmp->uc_unique == hdr.unique) { req = tmp; list_del(&req->uc_chain); break; } } mutex_unlock(&vcp->vc_mutex); if (!req) { pr_warn("%s: msg (%d, %d) not found\n", __func__, hdr.opcode, hdr.unique); retval = -ESRCH; goto out; } /* move data into response buffer. */ if (req->uc_outSize < nbytes) { pr_warn("%s: too much cnt: %d, cnt: %ld, opc: %d, uniq: %d.\n", __func__, req->uc_outSize, (long)nbytes, hdr.opcode, hdr.unique); nbytes = req->uc_outSize; /* don't have more space! */ } if (copy_from_user(req->uc_data, buf, nbytes)) { req->uc_flags |= CODA_REQ_ABORT; wake_up(&req->uc_sleep); retval = -EFAULT; goto out; } /* adjust outsize. is this useful ?? */ req->uc_outSize = nbytes; req->uc_flags |= CODA_REQ_WRITE; count = nbytes; /* Convert filedescriptor into a file handle */ if (req->uc_opcode == CODA_OPEN_BY_FD) { struct coda_open_by_fd_out *outp = (struct coda_open_by_fd_out *)req->uc_data; if (!outp->oh.result) outp->fh = fget(outp->fd); } wake_up(&req->uc_sleep); out: return(count ? 
count : retval); } /* * Read a message from the kernel to Venus */ static ssize_t coda_psdev_read(struct file * file, char __user * buf, size_t nbytes, loff_t *off) { DECLARE_WAITQUEUE(wait, current); struct venus_comm *vcp = (struct venus_comm *) file->private_data; struct upc_req *req; ssize_t retval = 0, count = 0; if (nbytes == 0) return 0; mutex_lock(&vcp->vc_mutex); add_wait_queue(&vcp->vc_waitq, &wait); set_current_state(TASK_INTERRUPTIBLE); while (list_empty(&vcp->vc_pending)) { if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; break; } if (signal_pending(current)) { retval = -ERESTARTSYS; break; } mutex_unlock(&vcp->vc_mutex); schedule(); mutex_lock(&vcp->vc_mutex); } set_current_state(TASK_RUNNING); remove_wait_queue(&vcp->vc_waitq, &wait); if (retval) goto out; req = list_entry(vcp->vc_pending.next, struct upc_req,uc_chain); list_del(&req->uc_chain); /* Move the input args into userspace */ count = req->uc_inSize; if (nbytes < req->uc_inSize) { pr_warn("%s: Venus read %ld bytes of %d in message\n", __func__, (long)nbytes, req->uc_inSize); count = nbytes; } if (copy_to_user(buf, req->uc_data, count)) retval = -EFAULT; /* If request was not a signal, enqueue and don't free */ if (!(req->uc_flags & CODA_REQ_ASYNC)) { req->uc_flags |= CODA_REQ_READ; list_add_tail(&(req->uc_chain), &vcp->vc_processing); goto out; } CODA_FREE(req->uc_data, sizeof(struct coda_in_hdr)); kfree(req); out: mutex_unlock(&vcp->vc_mutex); return (count ? 
count : retval); } static int coda_psdev_open(struct inode * inode, struct file * file) { struct venus_comm *vcp; int idx, err; if (task_active_pid_ns(current) != &init_pid_ns) return -EINVAL; if (current_user_ns() != &init_user_ns) return -EINVAL; idx = iminor(inode); if (idx < 0 || idx >= MAX_CODADEVS) return -ENODEV; err = -EBUSY; vcp = &coda_comms[idx]; mutex_lock(&vcp->vc_mutex); if (!vcp->vc_inuse) { vcp->vc_inuse++; INIT_LIST_HEAD(&vcp->vc_pending); INIT_LIST_HEAD(&vcp->vc_processing); init_waitqueue_head(&vcp->vc_waitq); vcp->vc_sb = NULL; vcp->vc_seq = 0; file->private_data = vcp; err = 0; } mutex_unlock(&vcp->vc_mutex); return err; } static int coda_psdev_release(struct inode * inode, struct file * file) { struct venus_comm *vcp = (struct venus_comm *) file->private_data; struct upc_req *req, *tmp; if (!vcp || !vcp->vc_inuse ) { pr_warn("%s: Not open.\n", __func__); return -1; } mutex_lock(&vcp->vc_mutex); /* Wakeup clients so they can return. */ list_for_each_entry_safe(req, tmp, &vcp->vc_pending, uc_chain) { list_del(&req->uc_chain); /* Async requests need to be freed here */ if (req->uc_flags & CODA_REQ_ASYNC) { CODA_FREE(req->uc_data, sizeof(struct coda_in_hdr)); kfree(req); continue; } req->uc_flags |= CODA_REQ_ABORT; wake_up(&req->uc_sleep); } list_for_each_entry_safe(req, tmp, &vcp->vc_processing, uc_chain) { list_del(&req->uc_chain); req->uc_flags |= CODA_REQ_ABORT; wake_up(&req->uc_sleep); } file->private_data = NULL; vcp->vc_inuse--; mutex_unlock(&vcp->vc_mutex); return 0; } static const struct file_operations coda_psdev_fops = { .owner = THIS_MODULE, .read = coda_psdev_read, .write = coda_psdev_write, .poll = coda_psdev_poll, .unlocked_ioctl = coda_psdev_ioctl, .open = coda_psdev_open, .release = coda_psdev_release, .llseek = noop_llseek, }; static int init_coda_psdev(void) { int i, err = 0; if (register_chrdev(CODA_PSDEV_MAJOR, "coda", &coda_psdev_fops)) { pr_err("%s: unable to get major %d\n", __func__, CODA_PSDEV_MAJOR); return -EIO; } 
coda_psdev_class = class_create(THIS_MODULE, "coda"); if (IS_ERR(coda_psdev_class)) { err = PTR_ERR(coda_psdev_class); goto out_chrdev; } for (i = 0; i < MAX_CODADEVS; i++) { mutex_init(&(&coda_comms[i])->vc_mutex); device_create(coda_psdev_class, NULL, MKDEV(CODA_PSDEV_MAJOR, i), NULL, "cfs%d", i); } coda_sysctl_init(); goto out; out_chrdev: unregister_chrdev(CODA_PSDEV_MAJOR, "coda"); out: return err; } MODULE_AUTHOR("Jan Harkes, Peter J. Braam"); MODULE_DESCRIPTION("Coda Distributed File System VFS interface"); MODULE_ALIAS_CHARDEV_MAJOR(CODA_PSDEV_MAJOR); MODULE_LICENSE("GPL"); MODULE_VERSION("6.6"); static int __init init_coda(void) { int status; int i; status = coda_init_inodecache(); if (status) goto out2; status = init_coda_psdev(); if ( status ) { pr_warn("Problem (%d) in init_coda_psdev\n", status); goto out1; } status = register_filesystem(&coda_fs_type); if (status) { pr_warn("failed to register filesystem!\n"); goto out; } return 0; out: for (i = 0; i < MAX_CODADEVS; i++) device_destroy(coda_psdev_class, MKDEV(CODA_PSDEV_MAJOR, i)); class_destroy(coda_psdev_class); unregister_chrdev(CODA_PSDEV_MAJOR, "coda"); coda_sysctl_clean(); out1: coda_destroy_inodecache(); out2: return status; } static void __exit exit_coda(void) { int err, i; err = unregister_filesystem(&coda_fs_type); if (err != 0) pr_warn("failed to unregister filesystem\n"); for (i = 0; i < MAX_CODADEVS; i++) device_destroy(coda_psdev_class, MKDEV(CODA_PSDEV_MAJOR, i)); class_destroy(coda_psdev_class); unregister_chrdev(CODA_PSDEV_MAJOR, "coda"); coda_sysctl_clean(); coda_destroy_inodecache(); } module_init(init_coda); module_exit(exit_coda);
gpl-2.0
Radium-Devices/Radium_yu
drivers/gpu/drm/drm_edid.c
1678
90799
/* * Copyright (c) 2006 Luc Verhaegen (quirks list) * Copyright (c) 2007-2008 Intel Corporation * Jesse Barnes <jesse.barnes@intel.com> * Copyright 2010 Red Hat, Inc. * * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from * FB layer. * Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sub license, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/hdmi.h> #include <linux/i2c.h> #include <linux/module.h> #include <drm/drmP.h> #include <drm/drm_edid.h> #define version_greater(edid, maj, min) \ (((edid)->version > (maj)) || \ ((edid)->version == (maj) && (edid)->revision > (min))) #define EDID_EST_TIMINGS 16 #define EDID_STD_TIMINGS 8 #define EDID_DETAILED_TIMINGS 4 /* * EDID blocks out in the wild have a variety of bugs, try to collect * them here (note that userspace may work around broken monitors first, * but fixes should make their way here so that the kernel "just works" * on as many displays as possible). */ /* First detailed mode wrong, use largest 60Hz mode */ #define EDID_QUIRK_PREFER_LARGE_60 (1 << 0) /* Reported 135MHz pixel clock is too high, needs adjustment */ #define EDID_QUIRK_135_CLOCK_TOO_HIGH (1 << 1) /* Prefer the largest mode at 75 Hz */ #define EDID_QUIRK_PREFER_LARGE_75 (1 << 2) /* Detail timing is in cm not mm */ #define EDID_QUIRK_DETAILED_IN_CM (1 << 3) /* Detailed timing descriptors have bogus size values, so just take the * maximum size and use that. */ #define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4) /* Monitor forgot to set the first detailed is preferred bit. 
*/ #define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5) /* use +hsync +vsync for detailed mode */ #define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6) /* Force reduced-blanking timings for detailed modes */ #define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7) /* Force 8bpc */ #define EDID_QUIRK_FORCE_8BPC (1 << 8) struct detailed_mode_closure { struct drm_connector *connector; struct edid *edid; bool preferred; u32 quirks; int modes; }; #define LEVEL_DMT 0 #define LEVEL_GTF 1 #define LEVEL_GTF2 2 #define LEVEL_CVT 3 static struct edid_quirk { char vendor[4]; int product_id; u32 quirks; } edid_quirk_list[] = { /* Acer AL1706 */ { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 }, /* Acer F51 */ { "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 }, /* Unknown Acer */ { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED }, /* Belinea 10 15 55 */ { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, /* Envision Peripherals, Inc. EN-7100e */ { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH }, /* Envision EN2028 */ { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 }, /* Funai Electronics PM36B */ { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 | EDID_QUIRK_DETAILED_IN_CM }, /* LG Philips LCD LP154W01-A5 */ { "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE }, { "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE }, /* Philips 107p5 CRT */ { "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED }, /* Proview AY765C */ { "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED }, /* Samsung SyncMaster 205BW. Note: irony */ { "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP }, /* Samsung SyncMaster 22[5-6]BW */ { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 }, { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 }, /* ViewSonic VA2026w */ { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING }, /* Medion MD 30217 PG */ { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 }, /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */ { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC }, }; /* * Autogenerated from the DMT spec. 
* This table is copied from xfree86/modes/xf86EdidModes.c. */ static const struct drm_display_mode drm_dmt_modes[] = { /* 640x350@85Hz */ { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672, 736, 832, 0, 350, 382, 385, 445, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x400@85Hz */ { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672, 736, 832, 0, 400, 401, 404, 445, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@85Hz */ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756, 828, 936, 0, 400, 401, 404, 446, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 640x480@60Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, 752, 800, 0, 480, 489, 492, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664, 704, 832, 0, 480, 489, 492, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656, 720, 840, 0, 480, 481, 484, 500, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@85Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696, 752, 832, 0, 480, 481, 484, 509, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 800x600@56Hz */ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824, 896, 1024, 0, 600, 601, 603, 625, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856, 976, 1040, 0, 600, 637, 643, 666, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816, 896, 1056, 0, 600, 601, 604, 625, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@85Hz */ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 
800, 832, 896, 1048, 0, 600, 601, 604, 631, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@120Hz RB */ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848, 880, 960, 0, 600, 603, 607, 636, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 848x480@60Hz */ { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864, 976, 1088, 0, 480, 486, 494, 517, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@43Hz, interlace */ { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032, 1208, 1264, 0, 768, 768, 772, 817, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@60Hz */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, 1184, 1344, 0, 768, 771, 777, 806, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048, 1184, 1328, 0, 768, 771, 777, 806, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@75Hz */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040, 1136, 1312, 0, 768, 769, 772, 800, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@85Hz */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072, 1168, 1376, 0, 768, 769, 772, 808, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@120Hz RB */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072, 1104, 1184, 0, 768, 771, 775, 813, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1152x864@75Hz */ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, 1344, 1600, 0, 864, 865, 868, 900, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x768@60Hz RB */ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328, 1360, 1440, 0, 768, 771, 778, 790, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1280x768@60Hz */ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344, 1472, 1664, 0, 768, 771, 778, 798, 0, DRM_MODE_FLAG_NHSYNC | 
DRM_MODE_FLAG_PVSYNC) }, /* 1280x768@75Hz */ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360, 1488, 1696, 0, 768, 771, 778, 805, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1280x768@85Hz */ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360, 1496, 1712, 0, 768, 771, 778, 809, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x768@120Hz RB */ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328, 1360, 1440, 0, 768, 771, 778, 813, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1280x800@60Hz RB */ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328, 1360, 1440, 0, 800, 803, 809, 823, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1280x800@60Hz */ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352, 1480, 1680, 0, 800, 803, 809, 831, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1280x800@75Hz */ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360, 1488, 1696, 0, 800, 803, 809, 838, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x800@85Hz */ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360, 1496, 1712, 0, 800, 803, 809, 843, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x800@120Hz RB */ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328, 1360, 1440, 0, 800, 803, 809, 847, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1280x960@60Hz */ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376, 1488, 1800, 0, 960, 961, 964, 1000, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x960@85Hz */ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344, 1504, 1728, 0, 960, 961, 964, 1011, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x960@120Hz RB */ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328, 1360, 1440, 0, 960, 963, 967, 1017, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1280x1024@60Hz */ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 
1280, 1328, 1440, 1688, 0, 1024, 1025, 1028, 1066, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296, 1440, 1688, 0, 1024, 1025, 1028, 1066, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@85Hz */ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344, 1504, 1728, 0, 1024, 1025, 1028, 1072, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@120Hz RB */ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328, 1360, 1440, 0, 1024, 1027, 1034, 1084, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1360x768@60Hz */ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424, 1536, 1792, 0, 768, 771, 777, 795, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1360x768@120Hz RB */ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408, 1440, 1520, 0, 768, 771, 776, 813, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1400x1050@60Hz RB */ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448, 1480, 1560, 0, 1050, 1053, 1057, 1080, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1400x1050@60Hz */ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488, 1632, 1864, 0, 1050, 1053, 1057, 1089, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1400x1050@75Hz */ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504, 1648, 1896, 0, 1050, 1053, 1057, 1099, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1400x1050@85Hz */ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504, 1656, 1912, 0, 1050, 1053, 1057, 1105, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1400x1050@120Hz RB */ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448, 1480, 1560, 0, 1050, 1053, 1057, 1112, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1440x900@60Hz RB */ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488, 1520, 1600, 0, 900, 903, 909, 926, 0, 
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1440x900@60Hz */ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520, 1672, 1904, 0, 900, 903, 909, 934, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1440x900@75Hz */ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536, 1688, 1936, 0, 900, 903, 909, 942, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1440x900@85Hz */ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544, 1696, 1952, 0, 900, 903, 909, 948, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1440x900@120Hz RB */ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488, 1520, 1600, 0, 900, 903, 909, 953, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1600x1200@60Hz */ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664, 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1600x1200@65Hz */ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664, 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1600x1200@70Hz */ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664, 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1600x1200@75Hz */ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664, 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1600x1200@85Hz */ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664, 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1600x1200@120Hz RB */ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648, 1680, 1760, 0, 1200, 1203, 1207, 1271, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1680x1050@60Hz RB */ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728, 1760, 1840, 0, 1050, 1053, 1059, 1080, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1680x1050@60Hz 
*/ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784, 1960, 2240, 0, 1050, 1053, 1059, 1089, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1680x1050@75Hz */ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800, 1976, 2272, 0, 1050, 1053, 1059, 1099, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1680x1050@85Hz */ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808, 1984, 2288, 0, 1050, 1053, 1059, 1105, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1680x1050@120Hz RB */ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728, 1760, 1840, 0, 1050, 1053, 1059, 1112, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1792x1344@60Hz */ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920, 2120, 2448, 0, 1344, 1345, 1348, 1394, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1792x1344@75Hz */ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888, 2104, 2456, 0, 1344, 1345, 1348, 1417, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1792x1344@120Hz RB */ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840, 1872, 1952, 0, 1344, 1347, 1351, 1423, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1856x1392@60Hz */ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952, 2176, 2528, 0, 1392, 1393, 1396, 1439, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1856x1392@75Hz */ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984, 2208, 2560, 0, 1392, 1395, 1399, 1500, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1856x1392@120Hz RB */ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904, 1936, 2016, 0, 1392, 1395, 1399, 1474, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1920x1200@60Hz RB */ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968, 2000, 2080, 0, 1200, 1203, 1209, 1235, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1920x1200@60Hz */ { DRM_MODE("1920x1200", 
DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056, 2256, 2592, 0, 1200, 1203, 1209, 1245, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1920x1200@75Hz */ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056, 2264, 2608, 0, 1200, 1203, 1209, 1255, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1920x1200@85Hz */ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064, 2272, 2624, 0, 1200, 1203, 1209, 1262, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1920x1200@120Hz RB */ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968, 2000, 2080, 0, 1200, 1203, 1209, 1271, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1920x1440@60Hz */ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048, 2256, 2600, 0, 1440, 1441, 1444, 1500, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1920x1440@75Hz */ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064, 2288, 2640, 0, 1440, 1441, 1444, 1500, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1920x1440@120Hz RB */ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968, 2000, 2080, 0, 1440, 1443, 1447, 1525, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 2560x1600@60Hz RB */ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608, 2640, 2720, 0, 1600, 1603, 1609, 1646, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 2560x1600@60Hz */ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752, 3032, 3504, 0, 1600, 1603, 1609, 1658, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 2560x1600@75HZ */ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768, 3048, 3536, 0, 1600, 1603, 1609, 1672, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 2560x1600@85HZ */ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768, 3048, 3536, 0, 1600, 1603, 1609, 1682, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 2560x1600@120Hz RB */ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 
2608, 2640, 2720, 0, 1600, 1603, 1609, 1694, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, }; static const struct drm_display_mode edid_est_modes[] = { { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824, 896, 1024, 0, 600, 601, 603, 625, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656, 720, 840, 0, 480, 481, 484, 500, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664, 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704, 768, 864, 0, 480, 483, 486, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656, 752, 800, 0, 480, 490, 492, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738, 846, 900, 0, 400, 421, 423, 449, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738, 846, 900, 0, 400, 412, 414, 449, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296, 1440, 1688, 0, 1024, 1025, 1028, 1066, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040, 1136, 1312, 0, 768, 769, 772, 800, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048, 1184, 1328, 0, 768, 771, 777, 806, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */ { 
DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, 1184, 1344, 0, 768, 771, 777, 806, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */ { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032, 1208, 1264, 0, 768, 768, 776, 817, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864, 928, 1152, 0, 624, 625, 628, 667, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816, 896, 1056, 0, 600, 601, 604, 625, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856, 976, 1040, 0, 600, 637, 643, 666, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, 1344, 1600, 0, 864, 865, 868, 900, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */ }; struct minimode { short w; short h; short r; short rb; }; static const struct minimode est3_modes[] = { /* byte 6 */ { 640, 350, 85, 0 }, { 640, 400, 85, 0 }, { 720, 400, 85, 0 }, { 640, 480, 85, 0 }, { 848, 480, 60, 0 }, { 800, 600, 85, 0 }, { 1024, 768, 85, 0 }, { 1152, 864, 75, 0 }, /* byte 7 */ { 1280, 768, 60, 1 }, { 1280, 768, 60, 0 }, { 1280, 768, 75, 0 }, { 1280, 768, 85, 0 }, { 1280, 960, 60, 0 }, { 1280, 960, 85, 0 }, { 1280, 1024, 60, 0 }, { 1280, 1024, 85, 0 }, /* byte 8 */ { 1360, 768, 60, 0 }, { 1440, 900, 60, 1 }, { 1440, 900, 60, 0 }, { 1440, 900, 75, 0 }, { 1440, 900, 85, 0 }, { 1400, 1050, 60, 1 }, { 1400, 1050, 60, 0 }, { 1400, 1050, 75, 0 }, /* byte 9 */ { 1400, 1050, 85, 0 }, { 1680, 1050, 60, 1 }, { 1680, 1050, 60, 0 }, { 1680, 1050, 75, 0 }, { 1680, 1050, 85, 0 }, { 1600, 1200, 60, 0 }, { 1600, 1200, 65, 0 }, { 1600, 1200, 70, 0 }, /* byte 10 */ { 1600, 1200, 75, 0 }, { 1600, 1200, 85, 0 }, { 1792, 1344, 60, 0 }, { 1792, 1344, 
85, 0 }, { 1856, 1392, 60, 0 }, { 1856, 1392, 75, 0 }, { 1920, 1200, 60, 1 }, { 1920, 1200, 60, 0 }, /* byte 11 */ { 1920, 1200, 75, 0 }, { 1920, 1200, 85, 0 }, { 1920, 1440, 60, 0 }, { 1920, 1440, 75, 0 }, }; static const struct minimode extra_modes[] = { { 1024, 576, 60, 0 }, { 1366, 768, 60, 0 }, { 1600, 900, 60, 0 }, { 1680, 945, 60, 0 }, { 1920, 1080, 60, 0 }, { 2048, 1152, 60, 0 }, { 2048, 1536, 60, 0 }, }; /* * Probably taken from CEA-861 spec. * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c. */ static const struct drm_display_mode edid_cea_modes[] = { /* 1 - 640x480@60Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, 752, 800, 0, 480, 490, 492, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 60, }, /* 2 - 720x480@60Hz */ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736, 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 60, }, /* 3 - 720x480@60Hz */ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736, 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 60, }, /* 4 - 1280x720@60Hz */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390, 1430, 1650, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 60, }, /* 5 - 1920x1080i@60Hz */ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE), .vrefresh = 60, }, /* 6 - 1440x480i@60Hz */ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), .vrefresh = 60, }, /* 7 - 1440x480i@60Hz */ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE 
| DRM_MODE_FLAG_DBLCLK), .vrefresh = 60, }, /* 8 - 1440x240@60Hz */ { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, 1602, 1716, 0, 240, 244, 247, 262, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_DBLCLK), .vrefresh = 60, }, /* 9 - 1440x240@60Hz */ { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, 1602, 1716, 0, 240, 244, 247, 262, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_DBLCLK), .vrefresh = 60, }, /* 10 - 2880x480i@60Hz */ { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, 3204, 3432, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE), .vrefresh = 60, }, /* 11 - 2880x480i@60Hz */ { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, 3204, 3432, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE), .vrefresh = 60, }, /* 12 - 2880x240@60Hz */ { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, 3204, 3432, 0, 240, 244, 247, 262, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 60, }, /* 13 - 2880x240@60Hz */ { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, 3204, 3432, 0, 240, 244, 247, 262, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 60, }, /* 14 - 1440x480@60Hz */ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472, 1596, 1716, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 60, }, /* 15 - 1440x480@60Hz */ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472, 1596, 1716, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 60, }, /* 16 - 1920x1080@60Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 60, }, /* 17 - 720x576@50Hz */ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732, 796, 864, 0, 576, 581, 586, 625, 0, 
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 50, }, /* 18 - 720x576@50Hz */ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732, 796, 864, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 50, }, /* 19 - 1280x720@50Hz */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720, 1760, 1980, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 50, }, /* 20 - 1920x1080i@50Hz */ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE), .vrefresh = 50, }, /* 21 - 1440x576i@50Hz */ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), .vrefresh = 50, }, /* 22 - 1440x576i@50Hz */ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), .vrefresh = 50, }, /* 23 - 1440x288@50Hz */ { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 1590, 1728, 0, 288, 290, 293, 312, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_DBLCLK), .vrefresh = 50, }, /* 24 - 1440x288@50Hz */ { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 1590, 1728, 0, 288, 290, 293, 312, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_DBLCLK), .vrefresh = 50, }, /* 25 - 2880x576i@50Hz */ { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 3180, 3456, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE), .vrefresh = 50, }, /* 26 - 2880x576i@50Hz */ { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 3180, 3456, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 
DRM_MODE_FLAG_INTERLACE), .vrefresh = 50, }, /* 27 - 2880x288@50Hz */ { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 3180, 3456, 0, 288, 290, 293, 312, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 50, }, /* 28 - 2880x288@50Hz */ { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 3180, 3456, 0, 288, 290, 293, 312, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 50, }, /* 29 - 1440x576@50Hz */ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, 1592, 1728, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 50, }, /* 30 - 1440x576@50Hz */ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, 1592, 1728, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 50, }, /* 31 - 1920x1080@50Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 50, }, /* 32 - 1920x1080@24Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558, 2602, 2750, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 24, }, /* 33 - 1920x1080@25Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 25, }, /* 34 - 1920x1080@30Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 30, }, /* 35 - 2880x480@60Hz */ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944, 3192, 3432, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 60, }, /* 36 - 2880x480@60Hz */ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944, 3192, 3432, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 60, }, /* 37 - 
2880x576@50Hz */ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928, 3184, 3456, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 50, }, /* 38 - 2880x576@50Hz */ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928, 3184, 3456, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 50, }, /* 39 - 1920x1080i@50Hz */ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952, 2120, 2304, 0, 1080, 1126, 1136, 1250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE), .vrefresh = 50, }, /* 40 - 1920x1080i@100Hz */ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE), .vrefresh = 100, }, /* 41 - 1280x720@100Hz */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720, 1760, 1980, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 100, }, /* 42 - 720x576@100Hz */ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732, 796, 864, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 100, }, /* 43 - 720x576@100Hz */ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732, 796, 864, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 100, }, /* 44 - 1440x576i@100Hz */ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_DBLCLK), .vrefresh = 100, }, /* 45 - 1440x576i@100Hz */ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_DBLCLK), .vrefresh = 100, }, /* 46 - 1920x1080i@120Hz */ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | 
DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE), .vrefresh = 120, }, /* 47 - 1280x720@120Hz */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390, 1430, 1650, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 120, }, /* 48 - 720x480@120Hz */ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736, 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 120, }, /* 49 - 720x480@120Hz */ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736, 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 120, }, /* 50 - 1440x480i@120Hz */ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), .vrefresh = 120, }, /* 51 - 1440x480i@120Hz */ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), .vrefresh = 120, }, /* 52 - 720x576@200Hz */ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732, 796, 864, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 200, }, /* 53 - 720x576@200Hz */ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732, 796, 864, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 200, }, /* 54 - 1440x576i@200Hz */ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), .vrefresh = 200, }, /* 55 - 1440x576i@200Hz */ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), .vrefresh = 200, }, 
/* 56 - 720x480@240Hz */ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736, 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 240, }, /* 57 - 720x480@240Hz */ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736, 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 240, }, /* 58 - 1440x480i@240 */ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), .vrefresh = 240, }, /* 59 - 1440x480i@240 */ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), .vrefresh = 240, }, /* 60 - 1280x720@24Hz */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040, 3080, 3300, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 24, }, /* 61 - 1280x720@25Hz */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700, 3740, 3960, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 25, }, /* 62 - 1280x720@30Hz */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040, 3080, 3300, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 30, }, /* 63 - 1920x1080@120Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 120, }, /* 64 - 1920x1080@100Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 100, }, }; /*** DDC fetch and block validation ***/ static const u8 edid_header[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; /* * Sanity check the 
header of the base EDID block. Return 8 if the header * is perfect, down to 0 if it's totally wrong. */ int drm_edid_header_is_valid(const u8 *raw_edid) { int i, score = 0; for (i = 0; i < sizeof(edid_header); i++) if (raw_edid[i] == edid_header[i]) score++; return score; } EXPORT_SYMBOL(drm_edid_header_is_valid); static int edid_fixup __read_mostly = 6; module_param_named(edid_fixup, edid_fixup, int, 0400); MODULE_PARM_DESC(edid_fixup, "Minimum number of valid EDID header bytes (0-8, default 6)"); /* * Sanity check the EDID block (base or extension). Return 0 if the block * doesn't check out, or 1 if it's valid. */ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid) { int i; u8 csum = 0; struct edid *edid = (struct edid *)raw_edid; if (edid_fixup > 8 || edid_fixup < 0) edid_fixup = 6; if (block == 0) { int score = drm_edid_header_is_valid(raw_edid); if (score == 8) ; else if (score >= edid_fixup) { DRM_DEBUG("Fixing EDID header, your hardware may be failing\n"); memcpy(raw_edid, edid_header, sizeof(edid_header)); } else { goto bad; } } for (i = 0; i < EDID_LENGTH; i++) csum += raw_edid[i]; if (csum) { if (print_bad_edid) { DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum); } /* allow CEA to slide through, switches mangle this */ if (raw_edid[0] != 0x02) goto bad; } /* per-block-type checks */ switch (raw_edid[0]) { case 0: /* base */ if (edid->version != 1) { DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version); goto bad; } if (edid->revision > 4) DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n"); break; default: break; } return 1; bad: if (raw_edid && print_bad_edid) { printk(KERN_ERR "Raw EDID:\n"); print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1, raw_edid, EDID_LENGTH, false); } return 0; } EXPORT_SYMBOL(drm_edid_block_valid); /** * drm_edid_is_valid - sanity check EDID data * @edid: EDID data * * Sanity-check an entire EDID record (including extensions) */ bool drm_edid_is_valid(struct edid 
*edid) { int i; u8 *raw = (u8 *)edid; if (!edid) return false; for (i = 0; i <= edid->extensions; i++) if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true)) return false; return true; } EXPORT_SYMBOL(drm_edid_is_valid); #define DDC_SEGMENT_ADDR 0x30 /** * Get EDID information via I2C. * * \param adapter : i2c device adaptor * \param buf : EDID data buffer to be filled * \param len : EDID data buffer length * \return 0 on success or -1 on failure. * * Try to fetch EDID information by calling i2c driver function. */ static int drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf, int block, int len) { unsigned char start = block * EDID_LENGTH; unsigned char segment = block >> 1; unsigned char xfers = segment ? 3 : 2; int ret, retries = 5; /* The core i2c driver will automatically retry the transfer if the * adapter reports EAGAIN. However, we find that bit-banging transfers * are susceptible to errors under a heavily loaded machine and * generate spurious NAKs and timeouts. Retrying the transfer * of the individual block a few times seems to overcome this. */ do { struct i2c_msg msgs[] = { { .addr = DDC_SEGMENT_ADDR, .flags = 0, .len = 1, .buf = &segment, }, { .addr = DDC_ADDR, .flags = 0, .len = 1, .buf = &start, }, { .addr = DDC_ADDR, .flags = I2C_M_RD, .len = len, .buf = buf, } }; /* * Avoid sending the segment addr to not upset non-compliant ddc * monitors. */ ret = i2c_transfer(adapter, &msgs[3 - xfers], xfers); if (ret == -ENXIO) { DRM_DEBUG_KMS("drm: skipping non-existent adapter %s\n", adapter->name); break; } } while (ret != xfers && --retries); return ret == xfers ? 
0 : -1; } static bool drm_edid_is_zero(u8 *in_edid, int length) { if (memchr_inv(in_edid, 0, length)) return false; return true; } static u8 * drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) { int i, j = 0, valid_extensions = 0; u8 *block, *new; bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS); if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) return NULL; /* base block fetch */ for (i = 0; i < 4; i++) { if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH)) goto out; if (drm_edid_block_valid(block, 0, print_bad_edid)) break; if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) { connector->null_edid_counter++; goto carp; } } if (i == 4) goto carp; /* if there's no extensions, we're done */ if (block[0x7e] == 0) return block; new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL); if (!new) goto out; block = new; for (j = 1; j <= block[0x7e]; j++) { for (i = 0; i < 4; i++) { if (drm_do_probe_ddc_edid(adapter, block + (valid_extensions + 1) * EDID_LENGTH, j, EDID_LENGTH)) goto out; if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j, print_bad_edid)) { valid_extensions++; break; } } if (i == 4 && print_bad_edid) { dev_warn(connector->dev->dev, "%s: Ignoring invalid EDID block %d.\n", drm_get_connector_name(connector), j); connector->bad_edid_counter++; } } if (valid_extensions != block[0x7e]) { block[EDID_LENGTH-1] += block[0x7e] - valid_extensions; block[0x7e] = valid_extensions; new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL); if (!new) goto out; block = new; } return block; carp: if (print_bad_edid) { dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n", drm_get_connector_name(connector), j); } connector->bad_edid_counter++; out: kfree(block); return NULL; } /** * Probe DDC presence. 
* * \param adapter : i2c device adaptor * \return 1 on success */ bool drm_probe_ddc(struct i2c_adapter *adapter) { unsigned char out; return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0); } EXPORT_SYMBOL(drm_probe_ddc); /** * drm_get_edid - get EDID data, if available * @connector: connector we're probing * @adapter: i2c adapter to use for DDC * * Poke the given i2c channel to grab EDID data if possible. If found, * attach it to the connector. * * Return edid data or NULL if we couldn't find any. */ struct edid *drm_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) { struct edid *edid = NULL; if (drm_probe_ddc(adapter)) edid = (struct edid *)drm_do_get_edid(connector, adapter); return edid; } EXPORT_SYMBOL(drm_get_edid); /*** EDID parsing ***/ /** * edid_vendor - match a string against EDID's obfuscated vendor field * @edid: EDID to match * @vendor: vendor string * * Returns true if @vendor is in @edid, false otherwise */ static bool edid_vendor(struct edid *edid, char *vendor) { char edid_vendor[3]; edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@'; edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) | ((edid->mfg_id[1] & 0xe0) >> 5)) + '@'; edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@'; return !strncmp(edid_vendor, vendor, 3); } /** * edid_get_quirks - return quirk flags for a given EDID * @edid: EDID to process * * This tells subsequent routines what fixes they need to apply. 
 */
static u32 edid_get_quirks(struct edid *edid)
{
	struct edid_quirk *quirk;
	int i;

	/* linear scan of the (vendor, product id) quirk table */
	for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
		quirk = &edid_quirk_list[i];

		if (edid_vendor(edid, quirk->vendor) &&
		    (EDID_PRODUCT_ID(edid) == quirk->product_id))
			return quirk->quirks;
	}

	return 0;
}

#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
/* NOTE(review): ignores its r argument and reads target_refresh from the
 * enclosing function's scope -- only usable inside edid_fixup_preferred() */
#define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - target_refresh))

/**
 * edid_fixup_preferred - set preferred modes based on quirk list
 * @connector: has mode list to fix up
 * @quirks: quirks list
 *
 * Walk the mode list for @connector, clearing the preferred status
 * on existing modes and setting it anew for the right mode ala @quirks.
 */
static void edid_fixup_preferred(struct drm_connector *connector,
				 u32 quirks)
{
	struct drm_display_mode *t, *cur_mode, *preferred_mode;
	int target_refresh = 0;

	if (list_empty(&connector->probed_modes))
		return;

	if (quirks & EDID_QUIRK_PREFER_LARGE_60)
		target_refresh = 60;
	if (quirks & EDID_QUIRK_PREFER_LARGE_75)
		target_refresh = 75;

	preferred_mode = list_first_entry(&connector->probed_modes,
					  struct drm_display_mode, head);

	list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) {
		cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED;

		if (cur_mode == preferred_mode)
			continue;

		/* Largest mode is preferred */
		if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
			preferred_mode = cur_mode;

		/* At a given size, try to get closest to target refresh */
		if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
		    MODE_REFRESH_DIFF(cur_mode, target_refresh) <
		    MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
			preferred_mode = cur_mode;
		}
	}

	preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
}

/* Does this mode carry the CVT reduced-blanking timing signature? */
static bool
mode_is_rb(const struct drm_display_mode *mode)
{
	return (mode->htotal - mode->hdisplay == 160) &&
	       (mode->hsync_end - mode->hdisplay == 80) &&
	       (mode->hsync_end - mode->hsync_start == 32) &&
	       (mode->vsync_start - mode->vdisplay == 3);
}

/*
 * drm_mode_find_dmt - Create a copy of a mode if present in DMT
 * @dev:
Device to duplicate against
 * @hsize: Mode width
 * @vsize: Mode height
 * @fresh: Mode refresh rate
 * @rb: Mode reduced-blanking-ness
 *
 * Walk the DMT mode list looking for a match for the given parameters.
 * Return a newly allocated copy of the mode, or NULL if not found.
 */
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
					   int hsize, int vsize,
					   int fresh, bool rb)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) {
		const struct drm_display_mode *ptr = &drm_dmt_modes[i];
		if (hsize != ptr->hdisplay)
			continue;
		if (vsize != ptr->vdisplay)
			continue;
		if (fresh != drm_mode_vrefresh(ptr))
			continue;
		if (rb != mode_is_rb(ptr))
			continue;

		return drm_mode_duplicate(dev, ptr);
	}

	return NULL;
}
EXPORT_SYMBOL(drm_mode_find_dmt);

/* Callback type for walking 18-byte detailed timing descriptors. */
typedef void detailed_cb(struct detailed_timing *timing, void *closure);

/* Invoke @cb on each detailed timing descriptor of a CEA extension block. */
static void
cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
{
	int i, n = 0;
	u8 d = ext[0x02];	/* offset of the first descriptor in the block */
	u8 *det_base = ext + d;

	n = (127 - d) / 18;
	for (i = 0; i < n; i++)
		cb((struct detailed_timing *)(det_base + 18 * i), closure);
}

/* Invoke @cb on each detailed timing descriptor of a VTB extension block. */
static void
vtb_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
{
	unsigned int i, n = min((int)ext[0x02], 6);
	u8 *det_base = ext + 5;

	if (ext[0x01] != 1)
		return; /* unknown version */

	for (i = 0; i < n; i++)
		cb((struct detailed_timing *)(det_base + 18 * i), closure);
}

/* Invoke @cb on every detailed timing descriptor in the base EDID and
 * all of its CEA/VTB extension blocks. */
static void
drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure)
{
	int i;
	struct edid *edid = (struct edid *)raw_edid;

	if (edid == NULL)
		return;

	for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
		cb(&(edid->detailed_timings[i]), closure);

	for (i = 1; i <= raw_edid[0x7e]; i++) {
		u8 *ext = raw_edid + (i * EDID_LENGTH);
		switch (*ext) {
		case CEA_EXT:
			cea_for_each_detailed_block(ext, cb, closure);
			break;
		case VTB_EXT:
			vtb_for_each_detailed_block(ext, cb, closure);
			break;
		default:
			break;
		}
	}
}

/* Set *(bool *)data if this range descriptor flags reduced-blanking
 * support (byte 15, bit 4). */
static void
is_rb(struct detailed_timing *t, void *data)
{
	u8 *r = (u8 *)t;
	if (r[3] == EDID_DETAIL_MONITOR_RANGE)
		if (r[15] & 0x10)
			*(bool *)data = true;
}

/* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */
static bool
drm_monitor_supports_rb(struct edid *edid)
{
	if (edid->revision >= 4) {
		bool ret = false;
		drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
		return ret;
	}

	return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0);
}

/* Remember the range descriptor if it declares a secondary GTF curve. */
static void
find_gtf2(struct detailed_timing *t, void *data)
{
	u8 *r = (u8 *)t;
	if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02)
		*(u8 **)data = r;
}

/* Secondary GTF curve kicks in above some break frequency */
static int
drm_gtf2_hbreak(struct edid *edid)
{
	u8 *r = NULL;
	drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
	return r ? (r[12] * 2) : 0;
}

/* Secondary GTF C*2 parameter, or 0 if no secondary curve. */
static int
drm_gtf2_2c(struct edid *edid)
{
	u8 *r = NULL;
	drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
	return r ? r[13] : 0;
}

/* Secondary GTF M parameter (16-bit, little-endian), or 0. */
static int
drm_gtf2_m(struct edid *edid)
{
	u8 *r = NULL;
	drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
	return r ? (r[15] << 8) + r[14] : 0;
}

/* Secondary GTF K parameter, or 0. */
static int
drm_gtf2_k(struct edid *edid)
{
	u8 *r = NULL;
	drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
	return r ? r[16] : 0;
}

/* Secondary GTF J*2 parameter, or 0. */
static int
drm_gtf2_2j(struct edid *edid)
{
	u8 *r = NULL;
	drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
	return r ? r[17] : 0;
}

/**
 * standard_timing_level - get std. timing level(CVT/GTF/DMT)
 * @edid: EDID block to scan
 */
static int standard_timing_level(struct edid *edid)
{
	if (edid->revision >= 2) {
		if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
			return LEVEL_CVT;
		if (drm_gtf2_hbreak(edid))
			return LEVEL_GTF2;
		return LEVEL_GTF;
	}
	return LEVEL_DMT;
}

/*
 * 0 is reserved.  The spec says 0x01 fill for unused timings.  Some old
 * monitors fill with ascii space (0x20) instead.
*/ static int bad_std_timing(u8 a, u8 b) { return (a == 0x00 && b == 0x00) || (a == 0x01 && b == 0x01) || (a == 0x20 && b == 0x20); } /** * drm_mode_std - convert standard mode info (width, height, refresh) into mode * @t: standard timing params * @timing_level: standard timing level * * Take the standard timing params (in this case width, aspect, and refresh) * and convert them into a real mode using CVT/GTF/DMT. */ static struct drm_display_mode * drm_mode_std(struct drm_connector *connector, struct edid *edid, struct std_timing *t, int revision) { struct drm_device *dev = connector->dev; struct drm_display_mode *m, *mode = NULL; int hsize, vsize; int vrefresh_rate; unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK) >> EDID_TIMING_ASPECT_SHIFT; unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK) >> EDID_TIMING_VFREQ_SHIFT; int timing_level = standard_timing_level(edid); if (bad_std_timing(t->hsize, t->vfreq_aspect)) return NULL; /* According to the EDID spec, the hdisplay = hsize * 8 + 248 */ hsize = t->hsize * 8 + 248; /* vrefresh_rate = vfreq + 60 */ vrefresh_rate = vfreq + 60; /* the vdisplay is calculated based on the aspect ratio */ if (aspect_ratio == 0) { if (revision < 3) vsize = hsize; else vsize = (hsize * 10) / 16; } else if (aspect_ratio == 1) vsize = (hsize * 3) / 4; else if (aspect_ratio == 2) vsize = (hsize * 4) / 5; else vsize = (hsize * 9) / 16; /* HDTV hack, part 1 */ if (vrefresh_rate == 60 && ((hsize == 1360 && vsize == 765) || (hsize == 1368 && vsize == 769))) { hsize = 1366; vsize = 768; } /* * If this connector already has a mode for this size and refresh * rate (because it came from detailed or CVT info), use that * instead. This way we don't have to guess at interlace or * reduced blanking. 
*/ list_for_each_entry(m, &connector->probed_modes, head) if (m->hdisplay == hsize && m->vdisplay == vsize && drm_mode_vrefresh(m) == vrefresh_rate) return NULL; /* HDTV hack, part 2 */ if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) { mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0, false); mode->hdisplay = 1366; mode->hsync_start = mode->hsync_start - 1; mode->hsync_end = mode->hsync_end - 1; return mode; } /* check whether it can be found in default mode table */ if (drm_monitor_supports_rb(edid)) { mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate, true); if (mode) return mode; } mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate, false); if (mode) return mode; /* okay, generate it */ switch (timing_level) { case LEVEL_DMT: break; case LEVEL_GTF: mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); break; case LEVEL_GTF2: /* * This is potentially wrong if there's ever a monitor with * more than one ranges section, each claiming a different * secondary GTF curve. Please don't do that. */ mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); if (!mode) return NULL; if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) { drm_mode_destroy(dev, mode); mode = drm_gtf_mode_complex(dev, hsize, vsize, vrefresh_rate, 0, 0, drm_gtf2_m(edid), drm_gtf2_2c(edid), drm_gtf2_k(edid), drm_gtf2_2j(edid)); } break; case LEVEL_CVT: mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0, false); break; } return mode; } /* * EDID is delightfully ambiguous about how interlaced modes are to be * encoded. Our internal representation is of frame height, but some * HDTV detailed timings are encoded as field height. * * The format list here is from CEA, in frame size. Technically we * should be checking refresh rate too. Whatever. 
 */
static void
drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
			    struct detailed_pixel_timing *pt)
{
	int i;
	static const struct {
		int w, h;
	} cea_interlaced[] = {
		{ 1920, 1080 },
		{  720,  480 },
		{ 1440,  480 },
		{ 2880,  480 },
		{  720,  576 },
		{ 1440,  576 },
		{ 2880,  576 },
	};

	if (!(pt->misc & DRM_EDID_PT_INTERLACED))
		return;

	for (i = 0; i < ARRAY_SIZE(cea_interlaced); i++) {
		/* field-height match: double vertical values back to frame */
		if ((mode->hdisplay == cea_interlaced[i].w) &&
		    (mode->vdisplay == cea_interlaced[i].h / 2)) {
			mode->vdisplay *= 2;
			mode->vsync_start *= 2;
			mode->vsync_end *= 2;
			mode->vtotal *= 2;
			mode->vtotal |= 1;
		}
	}

	mode->flags |= DRM_MODE_FLAG_INTERLACE;
}

/**
 * drm_mode_detailed - create a new mode from an EDID detailed timing section
 * @dev: DRM device (needed to create new mode)
 * @edid: EDID block
 * @timing: EDID detailed timing info
 * @quirks: quirks to apply
 *
 * An EDID detailed timing block contains enough info for us to create and
 * return a new struct drm_display_mode.
 */
static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
						  struct edid *edid,
						  struct detailed_timing *timing,
						  u32 quirks)
{
	struct drm_display_mode *mode;
	struct detailed_pixel_timing *pt = &timing->data.pixel_data;
	/* 12-bit fields split across a low byte and a high nibble */
	unsigned hactive = (pt->hactive_hblank_hi & 0xf0) << 4 | pt->hactive_lo;
	unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo;
	unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo;
	unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
	unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
	unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
	unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4;
	unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);

	/* ignore tiny modes */
	if (hactive < 64 || vactive < 64)
		return NULL;

	if (pt->misc & DRM_EDID_PT_STEREO) {
		printk(KERN_WARNING "stereo mode not supported\n");
		return NULL;
	}
	if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
		printk(KERN_WARNING "composite sync not supported\n");
	}

	/* it is incorrect if hsync/vsync width is zero */
	if (!hsync_pulse_width || !vsync_pulse_width) {
		DRM_DEBUG_KMS("Incorrect Detailed timing. "
				"Wrong Hsync/Vsync pulse width\n");
		return NULL;
	}

	if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
		/* replace the monitor's timings with CVT-RB ones outright */
		mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
		if (!mode)
			return NULL;

		goto set_size;
	}

	mode = drm_mode_create(dev);
	if (!mode)
		return NULL;

	if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
		timing->pixel_clock = cpu_to_le16(1088);

	mode->clock = le16_to_cpu(timing->pixel_clock) * 10;

	mode->hdisplay = hactive;
	mode->hsync_start = mode->hdisplay + hsync_offset;
	mode->hsync_end = mode->hsync_start + hsync_pulse_width;
	mode->htotal = mode->hdisplay + hblank;

	mode->vdisplay = vactive;
	mode->vsync_start = mode->vdisplay + vsync_offset;
	mode->vsync_end = mode->vsync_start + vsync_pulse_width;
	mode->vtotal = mode->vdisplay + vblank;

	/* Some EDIDs have bogus h/vtotal values */
	if (mode->hsync_end > mode->htotal)
		mode->htotal = mode->hsync_end + 1;
	if (mode->vsync_end > mode->vtotal)
		mode->vtotal = mode->vsync_end + 1;

	drm_mode_do_interlace_quirk(mode, pt);

	if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
		pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
	}

	mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
		DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
	mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
		DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;

set_size:
	mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
	mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;

	if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
		mode->width_mm *= 10;
		mode->height_mm *= 10;
	}

	if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
		mode->width_mm = edid->width_cm * 10;
		mode->height_mm = edid->height_cm * 10;
	}

	mode->type = DRM_MODE_TYPE_DRIVER;
	mode->vrefresh = drm_mode_vrefresh(mode);
	drm_mode_set_name(mode);

	return mode;
}

/* Is the mode's hsync rate inside the range descriptor @t?  EDID 1.4
 * adds optional +255 offset flags in byte 4. */
static bool
mode_in_hsync_range(const struct drm_display_mode *mode,
		    struct edid *edid, u8 *t)
{
	int hsync, hmin, hmax;

	hmin = t[7];
	if (edid->revision >= 4)
	    hmin += ((t[4] & 0x04) ? 255 : 0);
	hmax = t[8];
	if (edid->revision >= 4)
	    hmax += ((t[4] & 0x08) ? 255 : 0);
	hsync = drm_mode_hsync(mode);

	return (hsync <= hmax && hsync >= hmin);
}

/* Is the mode's vertical refresh inside the range descriptor @t? */
static bool
mode_in_vsync_range(const struct drm_display_mode *mode,
		    struct edid *edid, u8 *t)
{
	int vsync, vmin, vmax;

	vmin = t[5];
	if (edid->revision >= 4)
	    vmin += ((t[4] & 0x01) ? 255 : 0);
	vmax = t[6];
	if (edid->revision >= 4)
	    vmax += ((t[4] & 0x02) ? 255 : 0);
	vsync = drm_mode_vrefresh(mode);

	return (vsync <= vmax && vsync >= vmin);
}

/* Max pixel clock (kHz) allowed by the range descriptor, 0 if unspecified. */
static u32
range_pixel_clock(struct edid *edid, u8 *t)
{
	/* unspecified */
	if (t[9] == 0 || t[9] == 255)
		return 0;

	/* 1.4 with CVT support gives us real precision, yay */
	if (edid->revision >= 4 && t[10] == 0x04)
		return (t[9] * 10000) - ((t[12] >> 2) * 250);

	/* 1.3 is pathetic, so fuzz up a bit */
	return t[9] * 10000 + 5001;
}

/* Does @mode fit entirely within the monitor range descriptor @timing? */
static bool
mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
	      struct detailed_timing *timing)
{
	u32 max_clock;
	u8 *t = (u8 *)timing;

	if (!mode_in_hsync_range(mode, edid, t))
		return false;

	if (!mode_in_vsync_range(mode, edid, t))
		return false;

	if ((max_clock = range_pixel_clock(edid, t)))
		if (mode->clock > max_clock)
			return false;

	/* 1.4 max horizontal check */
	if (edid->revision >= 4 && t[10] == 0x04)
		if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3))))
			return false;

	if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid))
		return false;

	return true;
}

/* An inferred mode is acceptable if it is not a duplicate of a probed
 * mode and does not exceed every probed mode in both dimensions. */
static bool valid_inferred_mode(const struct drm_connector *connector,
				const struct drm_display_mode *mode)
{
	struct drm_display_mode *m;
	bool ok = false;

	list_for_each_entry(m, &connector->probed_modes, head) {
		if (mode->hdisplay == m->hdisplay &&
		    mode->vdisplay == m->vdisplay &&
		    drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
			return false; /* duplicated */
		if (mode->hdisplay <= m->hdisplay &&
		    mode->vdisplay <= m->vdisplay)
			ok = true;
	}
	return ok;
}

/* Add every DMT mode that fits the range descriptor; returns the count. */
static int
drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
			struct detailed_timing *timing)
{
	int i, modes = 0;
	struct drm_display_mode *newmode;
	struct drm_device *dev = connector->dev;

	for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) {
		if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
		    valid_inferred_mode(connector, drm_dmt_modes + i)) {
			newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
			if (newmode) {
				drm_mode_probed_add(connector, newmode);
				modes++;
			}
		}
	}

	return modes;
}

/* fix up 1366x768 mode
from 1368x768;
 * GFT/CVT can't express 1366 width which isn't dividable by 8 */
static void fixup_mode_1366x768(struct drm_display_mode *mode)
{
	if (mode->hdisplay == 1368 && mode->vdisplay == 768) {
		mode->hdisplay = 1366;
		mode->hsync_start--;
		mode->hsync_end--;
		drm_mode_set_name(mode);
	}
}

/* Generate GTF modes from the extra_modes table that fit the monitor's
 * range descriptor; returns the number of modes added. */
static int
drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
			struct detailed_timing *timing)
{
	int i, modes = 0;
	struct drm_display_mode *newmode;
	struct drm_device *dev = connector->dev;

	for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
		const struct minimode *m = &extra_modes[i];
		newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0);
		if (!newmode)
			return modes;

		fixup_mode_1366x768(newmode);
		if (!mode_in_range(newmode, edid, timing) ||
		    !valid_inferred_mode(connector, newmode)) {
			drm_mode_destroy(dev, newmode);
			continue;
		}

		drm_mode_probed_add(connector, newmode);
		modes++;
	}

	return modes;
}

/* As above, but using the CVT formula (reduced blanking if supported). */
static int
drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
			struct detailed_timing *timing)
{
	int i, modes = 0;
	struct drm_display_mode *newmode;
	struct drm_device *dev = connector->dev;
	bool rb = drm_monitor_supports_rb(edid);

	for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
		const struct minimode *m = &extra_modes[i];
		newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0);
		if (!newmode)
			return modes;

		fixup_mode_1366x768(newmode);
		if (!mode_in_range(newmode, edid, timing) ||
		    !valid_inferred_mode(connector, newmode)) {
			drm_mode_destroy(dev, newmode);
			continue;
		}

		drm_mode_probed_add(connector, newmode);
		modes++;
	}

	return modes;
}

/* Detailed-block callback: infer extra modes from a range descriptor,
 * choosing the formula (GTF/GTF2/CVT) the descriptor advertises. */
static void
do_inferred_modes(struct detailed_timing *timing, void *c)
{
	struct detailed_mode_closure *closure = c;
	struct detailed_non_pixel *data = &timing->data.other_data;
	struct detailed_data_monitor_range *range = &data->data.range;

	if (data->type != EDID_DETAIL_MONITOR_RANGE)
		return;

	closure->modes += drm_dmt_modes_for_range(closure->connector,
						  closure->edid,
						  timing);

	if (!version_greater(closure->edid, 1, 1))
		return; /* GTF not defined yet */

	switch (range->flags) {
	case 0x02: /* secondary gtf, XXX could do more */
	case 0x00: /* default gtf */
		closure->modes += drm_gtf_modes_for_range(closure->connector,
							  closure->edid,
							  timing);
		break;
	case 0x04: /* cvt, only in 1.4+ */
		if (!version_greater(closure->edid, 1, 3))
			break;

		closure->modes += drm_cvt_modes_for_range(closure->connector,
							  closure->edid,
							  timing);
		break;
	case 0x01: /* just the ranges, no formula */
	default:
		break;
	}
}

/* Walk all detailed blocks and add modes inferred from range descriptors. */
static int
add_inferred_modes(struct drm_connector *connector, struct edid *edid)
{
	struct detailed_mode_closure closure = {
		connector, edid, 0, 0, 0
	};

	if (version_greater(edid, 1, 0))
		drm_for_each_detailed_block((u8 *)edid, do_inferred_modes,
					    &closure);

	return closure.modes;
}

/* Add modes from an Established Timings III descriptor (6 bitmap bytes
 * starting at offset 5, indexing into est3_modes).
 * NOTE(review): the inner loop stops at j > 0, so bit 0 of each byte is
 * never examined -- confirm against the intended est3_modes coverage. */
static int
drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
{
	int i, j, m, modes = 0;
	struct drm_display_mode *mode;
	u8 *est = ((u8 *)timing) + 5;

	for (i = 0; i < 6; i++) {
		for (j = 7; j > 0; j--) {
			m = (i * 8) + (7 - j);
			if (m >= ARRAY_SIZE(est3_modes))
				break;
			if (est[i] & (1 << j)) {
				mode = drm_mode_find_dmt(connector->dev,
							 est3_modes[m].w,
							 est3_modes[m].h,
							 est3_modes[m].r,
							 est3_modes[m].rb);
				if (mode) {
					drm_mode_probed_add(connector, mode);
					modes++;
				}
			}
		}
	}

	return modes;
}

/* Detailed-block callback: handle Established Timings III descriptors. */
static void
do_established_modes(struct detailed_timing *timing, void *c)
{
	struct detailed_mode_closure *closure = c;
	struct detailed_non_pixel *data = &timing->data.other_data;

	if (data->type == EDID_DETAIL_EST_TIMINGS)
		closure->modes += drm_est3_modes(closure->connector, timing);
}

/**
 * add_established_modes - get est. modes from EDID and add them
 * @edid: EDID block to scan
 *
 * Each EDID block contains a bitmap of the supported "established modes" list
 * (defined above).  Tease them out and add them to the global modes list.
 */
static int
add_established_modes(struct drm_connector *connector, struct edid *edid)
{
	struct drm_device *dev = connector->dev;
	/* t1 | t2 | mfg byte form a 17-bit established-timings bitmap */
	unsigned long est_bits = edid->established_timings.t1 |
		(edid->established_timings.t2 << 8) |
		((edid->established_timings.mfg_rsvd & 0x80) << 9);
	int i, modes = 0;
	struct detailed_mode_closure closure = {
		connector, edid, 0, 0, 0
	};

	for (i = 0; i <= EDID_EST_TIMINGS; i++) {
		if (est_bits & (1<<i)) {
			struct drm_display_mode *newmode;
			newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
			if (newmode) {
				drm_mode_probed_add(connector, newmode);
				modes++;
			}
		}
	}

	if (version_greater(edid, 1, 0))
		    drm_for_each_detailed_block((u8 *)edid,
						do_established_modes, &closure);

	return modes + closure.modes;
}

/* Detailed-block callback: convert a Standard Timings descriptor
 * (6 std_timing entries) into modes. */
static void
do_standard_modes(struct detailed_timing *timing, void *c)
{
	struct detailed_mode_closure *closure = c;
	struct detailed_non_pixel *data = &timing->data.other_data;
	struct drm_connector *connector = closure->connector;
	struct edid *edid = closure->edid;

	if (data->type == EDID_DETAIL_STD_MODES) {
		int i;
		for (i = 0; i < 6; i++) {
			struct std_timing *std;
			struct drm_display_mode *newmode;

			std = &data->data.timings[i];
			newmode = drm_mode_std(connector, edid, std,
					       edid->revision);
			if (newmode) {
				drm_mode_probed_add(connector, newmode);
				closure->modes++;
			}
		}
	}
}

/**
 * add_standard_modes - get std. modes from EDID and add them
 * @edid: EDID block to scan
 *
 * Standard modes can be calculated using the appropriate standard (DMT,
 * GTF or CVT. Grab them from @edid and add them to the list.
 */
static int
add_standard_modes(struct drm_connector *connector, struct edid *edid)
{
	int i, modes = 0;
	struct detailed_mode_closure closure = {
		connector, edid, 0, 0, 0
	};

	for (i = 0; i < EDID_STD_TIMINGS; i++) {
		struct drm_display_mode *newmode;

		newmode = drm_mode_std(connector, edid,
				       &edid->standard_timings[i],
				       edid->revision);
		if (newmode) {
			drm_mode_probed_add(connector, newmode);
			modes++;
		}
	}

	if (version_greater(edid, 1, 0))
		drm_for_each_detailed_block((u8 *)edid,
					    do_standard_modes, &closure);

	/* XXX should also look for standard codes in VTB blocks */

	return modes + closure.modes;
}

/* Decode a CVT 3-byte-code descriptor (4 codes per descriptor) into
 * modes, one per supported refresh-rate bit.
 * NOTE(review): j starts at 1, so the "j == 0" reduced-blanking argument
 * is always false and rates[0] is never used -- confirm intent. */
static int drm_cvt_modes(struct drm_connector *connector,
			 struct detailed_timing *timing)
{
	int i, j, modes = 0;
	struct drm_display_mode *newmode;
	struct drm_device *dev = connector->dev;
	struct cvt_timing *cvt;
	const int rates[] = { 60, 85, 75, 60, 50 };
	const u8 empty[3] = { 0, 0, 0 };

	for (i = 0; i < 4; i++) {
		int uninitialized_var(width), height;
		cvt = &(timing->data.other_data.data.cvt[i]);

		if (!memcmp(cvt->code, empty, 3))
			continue;

		height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
		switch (cvt->code[1] & 0x0c) {
		case 0x00:
			width = height * 4 / 3;
			break;
		case 0x04:
			width = height * 16 / 9;
			break;
		case 0x08:
			width = height * 16 / 10;
			break;
		case 0x0c:
			width = height * 15 / 9;
			break;
		}

		for (j = 1; j < 5; j++) {
			if (cvt->code[2] & (1 << j)) {
				newmode = drm_cvt_mode(dev, width, height,
						       rates[j], j == 0,
						       false, false);
				if (newmode) {
					drm_mode_probed_add(connector, newmode);
					modes++;
				}
			}
		}
	}

	return modes;
}

/* Detailed-block callback: handle CVT 3-byte-code descriptors. */
static void
do_cvt_mode(struct detailed_timing *timing, void *c)
{
	struct detailed_mode_closure *closure = c;
	struct detailed_non_pixel *data = &timing->data.other_data;

	if (data->type == EDID_DETAIL_CVT_3BYTE)
		closure->modes += drm_cvt_modes(closure->connector, timing);
}

/* Walk all detailed blocks and add modes from CVT 3-byte codes (1.3+). */
static int
add_cvt_modes(struct drm_connector *connector, struct edid *edid)
{
	struct detailed_mode_closure closure = {
		connector, edid, 0, 0, 0
	};

	if (version_greater(edid, 1, 2))
		drm_for_each_detailed_block((u8 *)edid, do_cvt_mode, &closure);

	/* XXX should also look for CVT codes in VTB blocks */

	return closure.modes;
}

/* Detailed-block callback: turn a pixel-timing descriptor into a mode.
 * The first such mode may be flagged preferred (closure->preferred). */
static void
do_detailed_mode(struct detailed_timing *timing, void *c)
{
	struct detailed_mode_closure *closure = c;
	struct drm_display_mode *newmode;

	if (timing->pixel_clock) {
		newmode = drm_mode_detailed(closure->connector->dev,
					    closure->edid, timing,
					    closure->quirks);
		if (!newmode)
			return;

		if (closure->preferred)
			newmode->type |= DRM_MODE_TYPE_PREFERRED;

		drm_mode_probed_add(closure->connector, newmode);
		closure->modes++;
		closure->preferred = 0;
	}
}

/*
 * add_detailed_modes - Add modes from detailed timings
 * @connector: attached connector
 * @edid: EDID block to scan
 * @quirks: quirks to apply
 */
static int
add_detailed_modes(struct drm_connector *connector, struct edid *edid,
		   u32 quirks)
{
	struct detailed_mode_closure closure = {
		connector,
		edid,
		1,
		quirks,
		0
	};

	/* pre-1.4, "preferred" depends on the feature flag */
	if (closure.preferred && !version_greater(edid, 1, 3))
		closure.preferred =
		    (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);

	drm_for_each_detailed_block((u8 *)edid, do_detailed_mode, &closure);

	return closure.modes;
}

#define HDMI_IDENTIFIER 0x000C03
#define AUDIO_BLOCK	0x01
#define VIDEO_BLOCK     0x02
#define VENDOR_BLOCK    0x03
#define SPEAKER_BLOCK	0x04
#define VIDEO_CAPABILITY_BLOCK	0x07
#define EDID_BASIC_AUDIO	(1 << 6)
#define EDID_CEA_YCRCB444	(1 << 5)
#define EDID_CEA_YCRCB422	(1 << 4)
#define EDID_CEA_VCDB_QS	(1 << 6)

/**
 * Search EDID for CEA extension block.
 */
u8 *drm_find_cea_extension(struct edid *edid)
{
	u8 *edid_ext = NULL;
	int i;

	/* No EDID or EDID extensions */
	if (edid == NULL || edid->extensions == 0)
		return NULL;

	/* Find CEA extension */
	for (i = 0; i < edid->extensions; i++) {
		edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
		if (edid_ext[0] == CEA_EXT)
			break;
	}

	if (i == edid->extensions)
		return NULL;

	return edid_ext;
}
EXPORT_SYMBOL(drm_find_cea_extension);

/**
 * drm_match_cea_mode - look for a CEA mode matching given mode
 * @to_match: display mode
 *
 * Returns the CEA Video ID (VIC) of the mode or 0 if it isn't a CEA-861
 * mode.
 */
u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
{
	u8 mode;

	if (!to_match->clock)
		return 0;

	for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) {
		const struct drm_display_mode *cea_mode = &edid_cea_modes[mode];
		unsigned int clock1, clock2;

		clock1 = clock2 = cea_mode->clock;

		/* Check both 60Hz and 59.94Hz */
		if (cea_mode->vrefresh % 6 == 0) {
			/*
			 * edid_cea_modes contains the 59.94Hz
			 * variant for 240 and 480 line modes,
			 * and the 60Hz variant otherwise.
			 */
			if (cea_mode->vdisplay == 240 ||
			    cea_mode->vdisplay == 480)
				clock1 = clock1 * 1001 / 1000;
			else
				clock2 = DIV_ROUND_UP(clock2 * 1000, 1001);
		}

		if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
		     KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
		    drm_mode_equal_no_clocks(to_match, cea_mode))
			return mode + 1;
	}
	return 0;
}
EXPORT_SYMBOL(drm_match_cea_mode);

/* Add modes for each VIC listed in a CEA Video Data Block payload. */
static int
do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
{
	struct drm_device *dev = connector->dev;
	u8 * mode, cea_mode;
	int modes = 0;

	for (mode = db; mode < db + len; mode++) {
		cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */
		if (cea_mode < ARRAY_SIZE(edid_cea_modes)) {
			struct drm_display_mode *newmode;
			newmode = drm_mode_duplicate(dev,
						     &edid_cea_modes[cea_mode]);
			if (newmode) {
				newmode->vrefresh = 0;
				drm_mode_probed_add(connector, newmode);
				modes++;
			}
		}
	}

	return modes;
}

/* Data block payload length: low 5 bits of the header byte. */
static int
cea_db_payload_len(const u8 *db)
{
	return db[0] & 0x1f;
}

/* Data block tag: high 3 bits of the header byte. */
static int
cea_db_tag(const u8 *db)
{
	return db[0] >> 5;
}

/* CEA extension block revision (byte 1). */
static int
cea_revision(const u8 *cea)
{
	return cea[1];
}

/* Compute [start, end) offsets of the data block collection within a
 * CEA extension block; returns -ERANGE on an invalid DTD offset. */
static int
cea_db_offsets(const u8 *cea, int *start, int *end)
{
	/* Data block offset in CEA extension block */
	*start = 4;
	*end = cea[2];
	if (*end == 0)
		*end = 127;
	if (*end < 4 || *end > 127)
		return -ERANGE;
	return 0;
}

#define for_each_cea_db(cea, i, start, end) \
	for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)

/* Add all modes found in the CEA extension's Video Data Blocks. */
static int
add_cea_modes(struct drm_connector *connector, struct edid *edid)
{
	u8 * cea = drm_find_cea_extension(edid);
	u8 * db, dbl;
	int modes = 0;

	if (cea && cea_revision(cea) >= 3) {
		int i, start, end;

		if (cea_db_offsets(cea, &start, &end))
			return 0;

		for_each_cea_db(cea, i, start, end) {
			db = &cea[i];
			dbl = cea_db_payload_len(db);

			if (cea_db_tag(db) == VIDEO_BLOCK)
				modes += do_cea_modes (connector, db+1, dbl);
		}
	}

	return modes;
}

/* Extract DVI-dual, max TMDS clock and A/V latency info from an HDMI
 * Vendor-Specific Data Block into the connector. */
static void
parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
{
	u8 len = cea_db_payload_len(db);

	if (len >= 6) {
		connector->eld[5] |= (db[6] >> 7) << 1;  /* Supports_AI */
		connector->dvi_dual = db[6] & 1;
	}
	if (len >= 7)
		connector->max_tmds_clock = db[7] * 5;
	if (len >= 8) {
		connector->latency_present[0] = db[8] >> 7;
		connector->latency_present[1] = (db[8] >> 6) & 1;
	}
	if (len >= 9)
		connector->video_latency[0] = db[9];
	if (len >= 10)
		connector->audio_latency[0] = db[10];
	if (len >= 11)
		connector->video_latency[1] = db[11];
	if (len >= 12)
		connector->audio_latency[1] = db[12];

	DRM_DEBUG_KMS("HDMI: DVI dual %d, "
		    "max TMDS clock %d, "
		    "latency present %d %d, "
		    "video latency %d %d, "
		    "audio latency %d %d\n",
		    connector->dvi_dual,
		    connector->max_tmds_clock,
	      (int) connector->latency_present[0],
	      (int) connector->latency_present[1],
		    connector->video_latency[0],
		    connector->video_latency[1],
		    connector->audio_latency[0],
		    connector->audio_latency[1]);
}

/* Detailed-block callback: remember the monitor name string pointer. */
static void
monitor_name(struct detailed_timing *t, void *data)
{
	if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME)
		*(u8 **)data = t->data.other_data.data.str.str;
}

/* Is this data block the HDMI Vendor-Specific Data Block (IEEE OUI 0x000C03)? */
static bool cea_db_is_hdmi_vsdb(const u8 *db)
{
	int hdmi_id;

	if (cea_db_tag(db) != VENDOR_BLOCK)
		return false;

	if (cea_db_payload_len(db) < 5)
		return false;

	hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);

	return hdmi_id == HDMI_IDENTIFIER;
}

/**
 * drm_edid_to_eld - build ELD from EDID
 * @connector: connector corresponding to the HDMI/DP sink
 * @edid: EDID to parse
 *
 * Fill the ELD (EDID-Like Data) buffer for passing to the audio driver.
 * Some ELD fields are left to the graphics driver caller:
 * - Conn_Type
 * - HDCP
 * - Port_ID
 */
void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
{
	uint8_t *eld = connector->eld;
	u8 *cea;
	u8 *name;
	u8 *db;
	int sad_count = 0;
	int mnl;
	int dbl;

	memset(eld, 0, sizeof(connector->eld));

	cea = drm_find_cea_extension(edid);
	if (!cea) {
		DRM_DEBUG_KMS("ELD: no CEA Extension found\n");
		return;
	}

	/* monitor name (up to 13 chars, 0x0a-terminated) goes at offset 20 */
	name = NULL;
	drm_for_each_detailed_block((u8 *)edid, monitor_name, &name);
	for (mnl = 0; name && mnl < 13; mnl++) {
		if (name[mnl] == 0x0a)
			break;
		eld[20 + mnl] = name[mnl];
	}
	eld[4] = (cea[1] << 5) | mnl;
	DRM_DEBUG_KMS("ELD monitor %s\n", eld + 20);

	eld[0] = 2 << 3;		/* ELD version: 2 */

	eld[16] = edid->mfg_id[0];
	eld[17] = edid->mfg_id[1];
	eld[18] = edid->prod_code[0];
	eld[19] = edid->prod_code[1];

	if (cea_revision(cea) >= 3) {
		int i, start, end;

		if (cea_db_offsets(cea, &start, &end)) {
			start = 0;
			end = 0;
		}

		for_each_cea_db(cea, i, start, end) {
			db = &cea[i];
			dbl = cea_db_payload_len(db);

			switch (cea_db_tag(db)) {
			case AUDIO_BLOCK:
				/* Audio Data Block, contains SADs */
				sad_count = dbl / 3;
				if (dbl >= 1)
					memcpy(eld + 20 + mnl, &db[1], dbl);
				break;
			case SPEAKER_BLOCK:
				/* Speaker Allocation Data Block */
				if (dbl >= 1)
					eld[7] = db[1];
				break;
			case VENDOR_BLOCK:
				/* HDMI Vendor-Specific Data Block */
				if (cea_db_is_hdmi_vsdb(db))
					parse_hdmi_vsdb(connector, db);
				break;
			default:
				break;
			}
		}
	}
	eld[5] |= sad_count << 4;
	/* baseline ELD length in 4-byte units, rounded up */
	eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;

	DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", (int)eld[2], sad_count);
}
EXPORT_SYMBOL(drm_edid_to_eld);

/**
 * drm_edid_to_sad - extracts SADs from EDID
 * @edid: EDID to parse
 * @sads: pointer that will be set to the extracted SADs
 *
 * Looks for CEA EDID block and extracts SADs (Short Audio Descriptors) from it.
 * Note: returned pointer needs to be kfreed
 *
 * Return number of found SADs or negative number on error.
 */
int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
{
	int count = 0;
	int i, start, end, dbl;
	u8 *cea;

	cea = drm_find_cea_extension(edid);
	if (!cea) {
		DRM_DEBUG_KMS("SAD: no CEA Extension found\n");
		return -ENOENT;
	}

	if (cea_revision(cea) < 3) {
		DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
		return -ENOTSUPP;
	}

	if (cea_db_offsets(cea, &start, &end)) {
		DRM_DEBUG_KMS("SAD: invalid data block offsets\n");
		return -EPROTO;
	}

	for_each_cea_db(cea, i, start, end) {
		u8 *db = &cea[i];

		if (cea_db_tag(db) == AUDIO_BLOCK) {
			int j;
			dbl = cea_db_payload_len(db);

			count = dbl / 3; /* SAD is 3B */
			*sads = kcalloc(count, sizeof(**sads), GFP_KERNEL);
			if (!*sads)
				return -ENOMEM;
			for (j = 0; j < count; j++) {
				u8 *sad = &db[1 + j * 3];

				(*sads)[j].format = (sad[0] & 0x78) >> 3;
				(*sads)[j].channels = sad[0] & 0x7;
				(*sads)[j].freq = sad[1] & 0x7F;
				(*sads)[j].byte2 = sad[2];
			}
			break;
		}
	}

	return count;
}
EXPORT_SYMBOL(drm_edid_to_sad);

/**
 * drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond
 * @connector: connector associated with the HDMI/DP sink
 * @mode: the display mode
 */
int drm_av_sync_delay(struct drm_connector *connector,
		      struct drm_display_mode *mode)
{
	/* index 1 holds the interlaced latencies, when present */
	int i = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
	int a, v;

	if (!connector->latency_present[0])
		return 0;
	if (!connector->latency_present[1])
		i = 0;

	a = connector->audio_latency[i];
	v = connector->video_latency[i];

	/*
	 * HDMI/DP sink doesn't support audio or video?
	 */
	if (a == 255 || v == 255)
		return 0;

	/*
	 * Convert raw EDID values to millisecond.
	 * Treat unknown latency as 0ms.
	 */
	if (a)
		a = min(2 * (a - 1), 500);
	if (v)
		v = min(2 * (v - 1), 500);

	return max(v - a, 0);
}
EXPORT_SYMBOL(drm_av_sync_delay);

/**
 * drm_select_eld - select one ELD from multiple HDMI/DP sinks
 * @encoder: the encoder just changed display mode
 * @mode: the adjusted display mode
 *
 * It's possible for one encoder to be associated with multiple HDMI/DP sinks.
 * The policy is now hard coded to simply use the first HDMI/DP sink's ELD.
*/ struct drm_connector *drm_select_eld(struct drm_encoder *encoder, struct drm_display_mode *mode) { struct drm_connector *connector; struct drm_device *dev = encoder->dev; list_for_each_entry(connector, &dev->mode_config.connector_list, head) if (connector->encoder == encoder && connector->eld[0]) return connector; return NULL; } EXPORT_SYMBOL(drm_select_eld); /** * drm_detect_hdmi_monitor - detect whether monitor is hdmi. * @edid: monitor EDID information * * Parse the CEA extension according to CEA-861-B. * Return true if HDMI, false if not or unknown. */ bool drm_detect_hdmi_monitor(struct edid *edid) { u8 *edid_ext; int i; int start_offset, end_offset; edid_ext = drm_find_cea_extension(edid); if (!edid_ext) return false; if (cea_db_offsets(edid_ext, &start_offset, &end_offset)) return false; /* * Because HDMI identifier is in Vendor Specific Block, * search it from all data blocks of CEA extension. */ for_each_cea_db(edid_ext, i, start_offset, end_offset) { if (cea_db_is_hdmi_vsdb(&edid_ext[i])) return true; } return false; } EXPORT_SYMBOL(drm_detect_hdmi_monitor); /** * drm_detect_monitor_audio - check monitor audio capability * * Monitor should have CEA extension block. * If monitor has 'basic audio', but no CEA audio blocks, it's 'basic * audio' only. If there is any audio extension block and supported * audio format, assume at least 'basic audio' support, even if 'basic * audio' is not defined in EDID. 
* */ bool drm_detect_monitor_audio(struct edid *edid) { u8 *edid_ext; int i, j; bool has_audio = false; int start_offset, end_offset; edid_ext = drm_find_cea_extension(edid); if (!edid_ext) goto end; has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0); if (has_audio) { DRM_DEBUG_KMS("Monitor has basic audio support\n"); goto end; } if (cea_db_offsets(edid_ext, &start_offset, &end_offset)) goto end; for_each_cea_db(edid_ext, i, start_offset, end_offset) { if (cea_db_tag(&edid_ext[i]) == AUDIO_BLOCK) { has_audio = true; for (j = 1; j < cea_db_payload_len(&edid_ext[i]) + 1; j += 3) DRM_DEBUG_KMS("CEA audio format %d\n", (edid_ext[i + j] >> 3) & 0xf); goto end; } } end: return has_audio; } EXPORT_SYMBOL(drm_detect_monitor_audio); /** * drm_rgb_quant_range_selectable - is RGB quantization range selectable? * * Check whether the monitor reports the RGB quantization range selection * as supported. The AVI infoframe can then be used to inform the monitor * which quantization range (full or limited) is used. */ bool drm_rgb_quant_range_selectable(struct edid *edid) { u8 *edid_ext; int i, start, end; edid_ext = drm_find_cea_extension(edid); if (!edid_ext) return false; if (cea_db_offsets(edid_ext, &start, &end)) return false; for_each_cea_db(edid_ext, i, start, end) { if (cea_db_tag(&edid_ext[i]) == VIDEO_CAPABILITY_BLOCK && cea_db_payload_len(&edid_ext[i]) == 2) { DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", edid_ext[i + 2]); return edid_ext[i + 2] & EDID_CEA_VCDB_QS; } } return false; } EXPORT_SYMBOL(drm_rgb_quant_range_selectable); /** * drm_add_display_info - pull display info out if present * @edid: EDID data * @info: display info (attached to connector) * * Grab any available display info and stuff it into the drm_display_info * structure that's part of the connector. Useful for tracking bpp and * color spaces. 
*/ static void drm_add_display_info(struct edid *edid, struct drm_display_info *info) { u8 *edid_ext; info->width_mm = edid->width_cm * 10; info->height_mm = edid->height_cm * 10; /* driver figures it out in this case */ info->bpc = 0; info->color_formats = 0; if (edid->revision < 3) return; if (!(edid->input & DRM_EDID_INPUT_DIGITAL)) return; /* Get data from CEA blocks if present */ edid_ext = drm_find_cea_extension(edid); if (edid_ext) { info->cea_rev = edid_ext[1]; /* The existence of a CEA block should imply RGB support */ info->color_formats = DRM_COLOR_FORMAT_RGB444; if (edid_ext[3] & EDID_CEA_YCRCB444) info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; if (edid_ext[3] & EDID_CEA_YCRCB422) info->color_formats |= DRM_COLOR_FORMAT_YCRCB422; } /* Only defined for 1.4 with digital displays */ if (edid->revision < 4) return; switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) { case DRM_EDID_DIGITAL_DEPTH_6: info->bpc = 6; break; case DRM_EDID_DIGITAL_DEPTH_8: info->bpc = 8; break; case DRM_EDID_DIGITAL_DEPTH_10: info->bpc = 10; break; case DRM_EDID_DIGITAL_DEPTH_12: info->bpc = 12; break; case DRM_EDID_DIGITAL_DEPTH_14: info->bpc = 14; break; case DRM_EDID_DIGITAL_DEPTH_16: info->bpc = 16; break; case DRM_EDID_DIGITAL_DEPTH_UNDEF: default: info->bpc = 0; break; } info->color_formats |= DRM_COLOR_FORMAT_RGB444; if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444) info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422) info->color_formats |= DRM_COLOR_FORMAT_YCRCB422; } /** * drm_add_edid_modes - add modes from EDID data, if available * @connector: connector we're probing * @edid: edid data * * Add the specified modes to the connector's mode list. * * Return number of modes added or 0 if we couldn't find any. 
*/ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) { int num_modes = 0; u32 quirks; if (edid == NULL) { return 0; } if (!drm_edid_is_valid(edid)) { dev_warn(connector->dev->dev, "%s: EDID invalid.\n", drm_get_connector_name(connector)); return 0; } quirks = edid_get_quirks(edid); /* * EDID spec says modes should be preferred in this order: * - preferred detailed mode * - other detailed modes from base block * - detailed modes from extension blocks * - CVT 3-byte code modes * - standard timing codes * - established timing codes * - modes inferred from GTF or CVT range information * * We get this pretty much right. * * XXX order for additional mode types in extension blocks? */ num_modes += add_detailed_modes(connector, edid, quirks); num_modes += add_cvt_modes(connector, edid); num_modes += add_standard_modes(connector, edid); num_modes += add_established_modes(connector, edid); if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF) num_modes += add_inferred_modes(connector, edid); num_modes += add_cea_modes(connector, edid); if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75)) edid_fixup_preferred(connector, quirks); drm_add_display_info(edid, &connector->display_info); if (quirks & EDID_QUIRK_FORCE_8BPC) connector->display_info.bpc = 8; return num_modes; } EXPORT_SYMBOL(drm_add_edid_modes); /** * drm_add_modes_noedid - add modes for the connectors without EDID * @connector: connector we're probing * @hdisplay: the horizontal display limit * @vdisplay: the vertical display limit * * Add the specified modes to the connector's mode list. Only when the * hdisplay/vdisplay is not beyond the given limit, it will be added. * * Return number of modes added or 0 if we couldn't find any. 
*/ int drm_add_modes_noedid(struct drm_connector *connector, int hdisplay, int vdisplay) { int i, count, num_modes = 0; struct drm_display_mode *mode; struct drm_device *dev = connector->dev; count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode); if (hdisplay < 0) hdisplay = 0; if (vdisplay < 0) vdisplay = 0; for (i = 0; i < count; i++) { const struct drm_display_mode *ptr = &drm_dmt_modes[i]; if (hdisplay && vdisplay) { /* * Only when two are valid, they will be used to check * whether the mode should be added to the mode list of * the connector. */ if (ptr->hdisplay > hdisplay || ptr->vdisplay > vdisplay) continue; } if (drm_mode_vrefresh(ptr) > 61) continue; mode = drm_mode_duplicate(dev, ptr); if (mode) { drm_mode_probed_add(connector, mode); num_modes++; } } return num_modes; } EXPORT_SYMBOL(drm_add_modes_noedid); /** * drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with * data from a DRM display mode * @frame: HDMI AVI infoframe * @mode: DRM display mode * * Returns 0 on success or a negative error code on failure. */ int drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, const struct drm_display_mode *mode) { int err; if (!frame || !mode) return -EINVAL; err = hdmi_avi_infoframe_init(frame); if (err < 0) return err; frame->video_code = drm_match_cea_mode(mode); if (!frame->video_code) return 0; frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE; frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE; return 0; } EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
gpl-2.0
Hacker432-Y550/android_kernel_huawei_msm8916
sound/oss/sb_ess.c
2446
52660
#undef FKS_LOGGING #undef FKS_TEST /* * tabs should be 4 spaces, in vi(m): set tabstop=4 * * TODO: consistency speed calculations!! * cleanup! * ????: Did I break MIDI support? * * History: * * Rolf Fokkens (Dec 20 1998): ES188x recording level support on a per * fokkensr@vertis.nl input basis. * (Dec 24 1998): Recognition of ES1788, ES1887, ES1888, * ES1868, ES1869 and ES1878. Could be used for * specific handling in the future. All except * ES1887 and ES1888 and ES688 are handled like * ES1688. * (Dec 27 1998): RECLEV for all (?) ES1688+ chips. ES188x now * have the "Dec 20" support + RECLEV * (Jan 2 1999): Preparation for Full Duplex. This means * Audio 2 is now used for playback when dma16 * is specified. The next step would be to use * Audio 1 and Audio 2 at the same time. * (Jan 9 1999): Put all ESS stuff into sb_ess.[ch], this * includes both the ESS stuff that has been in * sb_*[ch] before I touched it and the ESS support * I added later * (Jan 23 1999): Full Duplex seems to work. I wrote a small * test proggy which works OK. Haven't found * any applications to test it though. So why did * I bother to create it anyway?? :) Just for * fun. * (May 2 1999): I tried to be too smart by "introducing" * ess_calc_best_speed (). The idea was that two * dividers could be used to setup a samplerate, * ess_calc_best_speed () would choose the best. * This works for playback, but results in * recording problems for high samplerates. I * fixed this by removing ess_calc_best_speed () * and just doing what the documentation says. * Andy Sloane (Jun 4 1999): Stole some code from ALSA to fix the playback * andy@guildsoftware.com speed on ES1869, ES1879, ES1887, and ES1888. * 1879's were previously ignored by this driver; * added (untested) support for those. * Cvetan Ivanov (Oct 27 1999): Fixed ess_dsp_init to call ess_set_dma_hw for * zezo@inet.bg _ALL_ ESS models, not only ES1887 * * This files contains ESS chip specifics. 
It's based on the existing ESS * handling as it resided in sb_common.c, sb_mixer.c and sb_audio.c. This * file adds features like: * - Chip Identification (as shown in /proc/sound) * - RECLEV support for ES1688 and later * - 6 bits playback level support chips later than ES1688 * - Recording level support on a per-device basis for ES1887 * - Full-Duplex for ES1887 * * Full duplex is enabled by specifying dma16. While the normal dma must * be one of 0, 1 or 3, dma16 can be one of 0, 1, 3 or 5. DMA 5 is a 16 bit * DMA channel, while the others are 8 bit.. * * ESS detection isn't full proof (yet). If it fails an additional module * parameter esstype can be specified to be one of the following: * -1, 0, 688, 1688, 1868, 1869, 1788, 1887, 1888 * -1 means: mimic 2.0 behaviour, * 0 means: auto detect. * others: explicitly specify chip * -1 is default, cause auto detect still doesn't work. */ /* * About the documentation * * I don't know if the chips all are OK, but the documentation is buggy. 'cause * I don't have all the cips myself, there's a lot I cannot verify. I'll try to * keep track of my latest insights about his here. If you have additional info, * please enlighten me (fokkensr@vertis.nl)! * * I had the impression that ES1688 also has 6 bit master volume control. The * documentation about ES1888 (rev C, october '95) claims that ES1888 has * the following features ES1688 doesn't have: * - 6 bit master volume * - Full Duplex * So ES1688 apparently doesn't have 6 bit master volume control, but the * ES1688 does have RECLEV control. Makes me wonder: does ES688 have it too? * Without RECLEV ES688 won't be much fun I guess. * * From the ES1888 (rev C, october '95) documentation I got the impression * that registers 0x68 to 0x6e don't exist which means: no recording volume * controls. To my surprise the ES888 documentation (1/14/96) claims that * ES888 does have these record mixer registers, but that ES1888 doesn't have * 0x69 and 0x6b. So the rest should be there. 
* * I'm trying to get ES1887 Full Duplex. Audio 2 is playback only, while Audio 2 * is both record and playback. I think I should use Audio 2 for all playback. * * The documentation is an adventure: it's close but not fully accurate. I * found out that after a reset some registers are *NOT* reset, though the * docs say the would be. Interesting ones are 0x7f, 0x7d and 0x7a. They are * related to the Audio 2 channel. I also was surprised about the consequences * of writing 0x00 to 0x7f (which should be done by reset): The ES1887 moves * into ES1888 mode. This means that it claims IRQ 11, which happens to be my * ISDN adapter. Needless to say it no longer worked. I now understand why * after rebooting 0x7f already was 0x05, the value of my choice: the BIOS * did it. * * Oh, and this is another trap: in ES1887 docs mixer register 0x70 is * described as if it's exactly the same as register 0xa1. This is *NOT* true. * The description of 0x70 in ES1869 docs is accurate however. * Well, the assumption about ES1869 was wrong: register 0x70 is very much * like register 0xa1, except that bit 7 is always 1, whatever you want * it to be. * * When using audio 2 mixer register 0x72 seems te be meaningless. Only 0xa2 * has effect. * * Software reset not being able to reset all registers is great! Especially * the fact that register 0x78 isn't reset is great when you wanna change back * to single dma operation (simplex): audio 2 is still operational, and uses * the same dma as audio 1: your ess changes into a funny echo machine. * * Received the news that ES1688 is detected as a ES1788. Did some thinking: * the ES1887 detection scheme suggests in step 2 to try if bit 3 of register * 0x64 can be changed. This is inaccurate, first I inverted the * check: "If * can be modified, it's a 1688", which lead to a correct detection * of my ES1887. It resulted however in bad detection of 1688 (reported by mail) * and 1868 (if no PnP detection first): they result in a 1788 being detected. 
* I don't have docs on 1688, but I do have docs on 1868: The documentation is * probably inaccurate in the fact that I should check bit 2, not bit 3. This * is what I do now. */ /* * About recognition of ESS chips * * The distinction of ES688, ES1688, ES1788, ES1887 and ES1888 is described in * a (preliminary ??) datasheet on ES1887. Its aim is to identify ES1887, but * during detection the text claims that "this chip may be ..." when a step * fails. This scheme is used to distinct between the above chips. * It appears however that some PnP chips like ES1868 are recognized as ES1788 * by the ES1887 detection scheme. These PnP chips can be detected in another * way however: ES1868, ES1869 and ES1878 can be recognized (full proof I think) * by repeatedly reading mixer register 0x40. This is done by ess_identify in * sb_common.c. * This results in the following detection steps: * - distinct between ES688 and ES1688+ (as always done in this driver) * if ES688 we're ready * - try to detect ES1868, ES1869 or ES1878 * if successful we're ready * - try to detect ES1888, ES1887 or ES1788 * if successful we're ready * - Dunno. Must be 1688. Will do in general * * About RECLEV support: * * The existing ES1688 support didn't take care of the ES1688+ recording * levels very well. Whenever a device was selected (recmask) for recording * its recording level was loud, and it couldn't be changed. The fact that * internal register 0xb4 could take care of RECLEV, didn't work meaning until * its value was restored every time the chip was reset; this reset the * value of 0xb4 too. I guess that's what 4front also had (have?) trouble with. * * About ES1887 support: * * The ES1887 has separate registers to control the recording levels, for all * inputs. The ES1887 specific software makes these levels the same as their * corresponding playback levels, unless recmask says they aren't recorded. In * the latter case the recording volumes are 0. 
* Now recording levels of inputs can be controlled, by changing the playback * levels. Furthermore several devices can be recorded together (which is not * possible with the ES1688). * Besides the separate recording level control for each input, the common * recording level can also be controlled by RECLEV as described above. * * Not only ES1887 have this recording mixer. I know the following from the * documentation: * ES688 no * ES1688 no * ES1868 no * ES1869 yes * ES1878 no * ES1879 yes * ES1888 no/yes Contradicting documentation; most recent: yes * ES1946 yes This is a PCI chip; not handled by this driver */ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include "sound_config.h" #include "sb_mixer.h" #include "sb.h" #include "sb_ess.h" #define ESSTYPE_LIKE20 -1 /* Mimic 2.0 behaviour */ #define ESSTYPE_DETECT 0 /* Mimic 2.0 behaviour */ #define SUBMDL_ES1788 0x10 /* Subtype ES1788 for specific handling */ #define SUBMDL_ES1868 0x11 /* Subtype ES1868 for specific handling */ #define SUBMDL_ES1869 0x12 /* Subtype ES1869 for specific handling */ #define SUBMDL_ES1878 0x13 /* Subtype ES1878 for specific handling */ #define SUBMDL_ES1879 0x16 /* ES1879 was initially forgotten */ #define SUBMDL_ES1887 0x14 /* Subtype ES1887 for specific handling */ #define SUBMDL_ES1888 0x15 /* Subtype ES1888 for specific handling */ #define SB_CAP_ES18XX_RATE 0x100 #define ES1688_CLOCK1 795444 /* 128 - div */ #define ES1688_CLOCK2 397722 /* 256 - div */ #define ES18XX_CLOCK1 793800 /* 128 - div */ #define ES18XX_CLOCK2 768000 /* 256 - div */ #ifdef FKS_LOGGING static void ess_show_mixerregs (sb_devc *devc); #endif static int ess_read (sb_devc * devc, unsigned char reg); static int ess_write (sb_devc * devc, unsigned char reg, unsigned char data); static void ess_chgmixer (sb_devc * devc, unsigned int reg, unsigned int mask, unsigned int val); /**************************************************************************** * * * ESS audio * * * 
****************************************************************************/ struct ess_command {short cmd; short data;}; /* * Commands for initializing Audio 1 for input (record) */ static struct ess_command ess_i08m[] = /* input 8 bit mono */ { {0xb7, 0x51}, {0xb7, 0xd0}, {-1, 0} }; static struct ess_command ess_i16m[] = /* input 16 bit mono */ { {0xb7, 0x71}, {0xb7, 0xf4}, {-1, 0} }; static struct ess_command ess_i08s[] = /* input 8 bit stereo */ { {0xb7, 0x51}, {0xb7, 0x98}, {-1, 0} }; static struct ess_command ess_i16s[] = /* input 16 bit stereo */ { {0xb7, 0x71}, {0xb7, 0xbc}, {-1, 0} }; static struct ess_command *ess_inp_cmds[] = { ess_i08m, ess_i16m, ess_i08s, ess_i16s }; /* * Commands for initializing Audio 1 for output (playback) */ static struct ess_command ess_o08m[] = /* output 8 bit mono */ { {0xb6, 0x80}, {0xb7, 0x51}, {0xb7, 0xd0}, {-1, 0} }; static struct ess_command ess_o16m[] = /* output 16 bit mono */ { {0xb6, 0x00}, {0xb7, 0x71}, {0xb7, 0xf4}, {-1, 0} }; static struct ess_command ess_o08s[] = /* output 8 bit stereo */ { {0xb6, 0x80}, {0xb7, 0x51}, {0xb7, 0x98}, {-1, 0} }; static struct ess_command ess_o16s[] = /* output 16 bit stereo */ { {0xb6, 0x00}, {0xb7, 0x71}, {0xb7, 0xbc}, {-1, 0} }; static struct ess_command *ess_out_cmds[] = { ess_o08m, ess_o16m, ess_o08s, ess_o16s }; static void ess_exec_commands (sb_devc *devc, struct ess_command *cmdtab[]) { struct ess_command *cmd; cmd = cmdtab [ ((devc->channels != 1) << 1) + (devc->bits != AFMT_U8) ]; while (cmd->cmd != -1) { ess_write (devc, cmd->cmd, cmd->data); cmd++; } } static void ess_change (sb_devc *devc, unsigned int reg, unsigned int mask, unsigned int val) { int value; value = ess_read (devc, reg); value = (value & ~mask) | (val & mask); ess_write (devc, reg, value); } static void ess_set_output_parms (int dev, unsigned long buf, int nr_bytes, int intrflag) { sb_devc *devc = audio_devs[dev]->devc; if (devc->duplex) { devc->trg_buf_16 = buf; devc->trg_bytes_16 = nr_bytes; 
devc->trg_intrflag_16 = intrflag; devc->irq_mode_16 = IMODE_OUTPUT; } else { devc->trg_buf = buf; devc->trg_bytes = nr_bytes; devc->trg_intrflag = intrflag; devc->irq_mode = IMODE_OUTPUT; } } static void ess_set_input_parms (int dev, unsigned long buf, int count, int intrflag) { sb_devc *devc = audio_devs[dev]->devc; devc->trg_buf = buf; devc->trg_bytes = count; devc->trg_intrflag = intrflag; devc->irq_mode = IMODE_INPUT; } static int ess_calc_div (int clock, int revert, int *speedp, int *diffp) { int divider; int speed, diff; int retval; speed = *speedp; divider = (clock + speed / 2) / speed; retval = revert - divider; if (retval > revert - 1) { retval = revert - 1; divider = revert - retval; } /* This line is suggested. Must be wrong I think *speedp = (clock + divider / 2) / divider; So I chose the next one */ *speedp = clock / divider; diff = speed - *speedp; if (diff < 0) diff =-diff; *diffp = diff; return retval; } static int ess_calc_best_speed (int clock1, int rev1, int clock2, int rev2, int *divp, int *speedp) { int speed1 = *speedp, speed2 = *speedp; int div1, div2; int diff1, diff2; int retval; div1 = ess_calc_div (clock1, rev1, &speed1, &diff1); div2 = ess_calc_div (clock2, rev2, &speed2, &diff2); if (diff1 < diff2) { *divp = div1; *speedp = speed1; retval = 1; } else { /* *divp = div2; */ *divp = 0x80 | div2; *speedp = speed2; retval = 2; } return retval; } /* * Depending on the audiochannel ESS devices can * have different clock settings. These are made consistent for duplex * however. * callers of ess_speed only do an audionum suggestion, which means * input suggests 1, output suggests 2. This suggestion is only true * however when doing duplex. 
*/ static void ess_common_speed (sb_devc *devc, int *speedp, int *divp) { int diff = 0, div; if (devc->duplex) { /* * The 0x80 is important for the first audio channel */ if (devc->submodel == SUBMDL_ES1888) { div = 0x80 | ess_calc_div (795500, 256, speedp, &diff); } else { div = 0x80 | ess_calc_div (795500, 128, speedp, &diff); } } else if(devc->caps & SB_CAP_ES18XX_RATE) { if (devc->submodel == SUBMDL_ES1888) { ess_calc_best_speed(397700, 128, 795500, 256, &div, speedp); } else { ess_calc_best_speed(ES18XX_CLOCK1, 128, ES18XX_CLOCK2, 256, &div, speedp); } } else { if (*speedp > 22000) { div = 0x80 | ess_calc_div (ES1688_CLOCK1, 256, speedp, &diff); } else { div = 0x00 | ess_calc_div (ES1688_CLOCK2, 128, speedp, &diff); } } *divp = div; } static void ess_speed (sb_devc *devc, int audionum) { int speed; int div, div2; ess_common_speed (devc, &(devc->speed), &div); #ifdef FKS_REG_LOGGING printk (KERN_INFO "FKS: ess_speed (%d) b speed = %d, div=%x\n", audionum, devc->speed, div); #endif /* Set filter roll-off to 90% of speed/2 */ speed = (devc->speed * 9) / 20; div2 = 256 - 7160000 / (speed * 82); if (!devc->duplex) audionum = 1; if (audionum == 1) { /* Change behaviour of register A1 * sb_chg_mixer(devc, 0x71, 0x20, 0x20) * For ES1869 only??? */ ess_write (devc, 0xa1, div); ess_write (devc, 0xa2, div2); } else { ess_setmixer (devc, 0x70, div); /* * FKS: fascinating: 0x72 doesn't seem to work. 
*/ ess_write (devc, 0xa2, div2); ess_setmixer (devc, 0x72, div2); } } static int ess_audio_prepare_for_input(int dev, int bsize, int bcount) { sb_devc *devc = audio_devs[dev]->devc; ess_speed(devc, 1); sb_dsp_command(devc, DSP_CMD_SPKOFF); ess_write (devc, 0xb8, 0x0e); /* Auto init DMA mode */ ess_change (devc, 0xa8, 0x03, 3 - devc->channels); /* Mono/stereo */ ess_write (devc, 0xb9, 2); /* Demand mode (4 bytes/DMA request) */ ess_exec_commands (devc, ess_inp_cmds); ess_change (devc, 0xb1, 0xf0, 0x50); ess_change (devc, 0xb2, 0xf0, 0x50); devc->trigger_bits = 0; return 0; } static int ess_audio_prepare_for_output_audio1 (int dev, int bsize, int bcount) { sb_devc *devc = audio_devs[dev]->devc; sb_dsp_reset(devc); ess_speed(devc, 1); ess_write (devc, 0xb8, 4); /* Auto init DMA mode */ ess_change (devc, 0xa8, 0x03, 3 - devc->channels); /* Mono/stereo */ ess_write (devc, 0xb9, 2); /* Demand mode (4 bytes/request) */ ess_exec_commands (devc, ess_out_cmds); ess_change (devc, 0xb1, 0xf0, 0x50); /* Enable DMA */ ess_change (devc, 0xb2, 0xf0, 0x50); /* Enable IRQ */ sb_dsp_command(devc, DSP_CMD_SPKON); /* There be sound! */ devc->trigger_bits = 0; return 0; } static int ess_audio_prepare_for_output_audio2 (int dev, int bsize, int bcount) { sb_devc *devc = audio_devs[dev]->devc; unsigned char bits; /* FKS: qqq sb_dsp_reset(devc); */ /* * Auto-Initialize: * DMA mode + demand mode (8 bytes/request, yes I want it all!) * But leave 16-bit DMA bit untouched! */ ess_chgmixer (devc, 0x78, 0xd0, 0xd0); ess_speed(devc, 2); /* bits 4:3 on ES1887 represent recording source. Keep them! */ bits = ess_getmixer (devc, 0x7a) & 0x18; /* Set stereo/mono */ if (devc->channels != 1) bits |= 0x02; /* Init DACs; UNSIGNED mode for 8 bit; SIGNED mode for 16 bit */ if (devc->bits != AFMT_U8) bits |= 0x05; /* 16 bit */ /* Enable DMA, IRQ will be shared (hopefully)*/ bits |= 0x60; ess_setmixer (devc, 0x7a, bits); ess_mixer_reload (devc, SOUND_MIXER_PCM); /* There be sound! 
*/ devc->trigger_bits = 0; return 0; } static int ess_audio_prepare_for_output(int dev, int bsize, int bcount) { sb_devc *devc = audio_devs[dev]->devc; #ifdef FKS_REG_LOGGING printk(KERN_INFO "ess_audio_prepare_for_output: dma_out=%d,dma_in=%d\n" , audio_devs[dev]->dmap_out->dma, audio_devs[dev]->dmap_in->dma); #endif if (devc->duplex) { return ess_audio_prepare_for_output_audio2 (dev, bsize, bcount); } else { return ess_audio_prepare_for_output_audio1 (dev, bsize, bcount); } } static void ess_audio_halt_xfer(int dev) { unsigned long flags; sb_devc *devc = audio_devs[dev]->devc; spin_lock_irqsave(&devc->lock, flags); sb_dsp_reset(devc); spin_unlock_irqrestore(&devc->lock, flags); /* * Audio 2 may still be operational! Creates awful sounds! */ if (devc->duplex) ess_chgmixer(devc, 0x78, 0x03, 0x00); } static void ess_audio_start_input (int dev, unsigned long buf, int nr_bytes, int intrflag) { int count = nr_bytes; sb_devc *devc = audio_devs[dev]->devc; short c = -nr_bytes; /* * Start a DMA input to the buffer pointed by dmaqtail */ if (audio_devs[dev]->dmap_in->dma > 3) count >>= 1; count--; devc->irq_mode = IMODE_INPUT; ess_write (devc, 0xa4, (unsigned char) ((unsigned short) c & 0xff)); ess_write (devc, 0xa5, (unsigned char) (((unsigned short) c >> 8) & 0xff)); ess_change (devc, 0xb8, 0x0f, 0x0f); /* Go */ devc->intr_active = 1; } static void ess_audio_output_block_audio1 (int dev, unsigned long buf, int nr_bytes, int intrflag) { int count = nr_bytes; sb_devc *devc = audio_devs[dev]->devc; short c = -nr_bytes; if (audio_devs[dev]->dmap_out->dma > 3) count >>= 1; count--; devc->irq_mode = IMODE_OUTPUT; ess_write (devc, 0xa4, (unsigned char) ((unsigned short) c & 0xff)); ess_write (devc, 0xa5, (unsigned char) (((unsigned short) c >> 8) & 0xff)); ess_change (devc, 0xb8, 0x05, 0x05); /* Go */ devc->intr_active = 1; } static void ess_audio_output_block_audio2 (int dev, unsigned long buf, int nr_bytes, int intrflag) { int count = nr_bytes; sb_devc *devc = 
audio_devs[dev]->devc; short c = -nr_bytes; if (audio_devs[dev]->dmap_out->dma > 3) count >>= 1; count--; ess_setmixer (devc, 0x74, (unsigned char) ((unsigned short) c & 0xff)); ess_setmixer (devc, 0x76, (unsigned char) (((unsigned short) c >> 8) & 0xff)); ess_chgmixer (devc, 0x78, 0x03, 0x03); /* Go */ devc->irq_mode_16 = IMODE_OUTPUT; devc->intr_active_16 = 1; } static void ess_audio_output_block (int dev, unsigned long buf, int nr_bytes, int intrflag) { sb_devc *devc = audio_devs[dev]->devc; if (devc->duplex) { ess_audio_output_block_audio2 (dev, buf, nr_bytes, intrflag); } else { ess_audio_output_block_audio1 (dev, buf, nr_bytes, intrflag); } } /* * FKS: the if-statements for both bits and bits_16 are quite alike. * Combine this... */ static void ess_audio_trigger(int dev, int bits) { sb_devc *devc = audio_devs[dev]->devc; int bits_16 = bits & devc->irq_mode_16; bits &= devc->irq_mode; if (!bits && !bits_16) { /* FKS oh oh.... wrong?? for dma 16? */ sb_dsp_command(devc, 0xd0); /* Halt DMA */ } if (bits) { switch (devc->irq_mode) { case IMODE_INPUT: ess_audio_start_input(dev, devc->trg_buf, devc->trg_bytes, devc->trg_intrflag); break; case IMODE_OUTPUT: ess_audio_output_block(dev, devc->trg_buf, devc->trg_bytes, devc->trg_intrflag); break; } } if (bits_16) { switch (devc->irq_mode_16) { case IMODE_INPUT: ess_audio_start_input(dev, devc->trg_buf_16, devc->trg_bytes_16, devc->trg_intrflag_16); break; case IMODE_OUTPUT: ess_audio_output_block(dev, devc->trg_buf_16, devc->trg_bytes_16, devc->trg_intrflag_16); break; } } devc->trigger_bits = bits | bits_16; } static int ess_audio_set_speed(int dev, int speed) { sb_devc *devc = audio_devs[dev]->devc; int minspeed, maxspeed, dummydiv; if (speed > 0) { minspeed = (devc->duplex ? 6215 : 5000 ); maxspeed = (devc->duplex ? 
44100 : 48000); if (speed < minspeed) speed = minspeed; if (speed > maxspeed) speed = maxspeed; ess_common_speed (devc, &speed, &dummydiv); devc->speed = speed; } return devc->speed; } /* * FKS: This is a one-on-one copy of sb1_audio_set_bits */ static unsigned int ess_audio_set_bits(int dev, unsigned int bits) { sb_devc *devc = audio_devs[dev]->devc; if (bits != 0) { if (bits == AFMT_U8 || bits == AFMT_S16_LE) { devc->bits = bits; } else { devc->bits = AFMT_U8; } } return devc->bits; } /* * FKS: This is a one-on-one copy of sbpro_audio_set_channels * (*) Modified it!! */ static short ess_audio_set_channels(int dev, short channels) { sb_devc *devc = audio_devs[dev]->devc; if (channels == 1 || channels == 2) devc->channels = channels; return devc->channels; } static struct audio_driver ess_audio_driver = /* ESS ES688/1688 */ { .owner = THIS_MODULE, .open = sb_audio_open, .close = sb_audio_close, .output_block = ess_set_output_parms, .start_input = ess_set_input_parms, .prepare_for_input = ess_audio_prepare_for_input, .prepare_for_output = ess_audio_prepare_for_output, .halt_io = ess_audio_halt_xfer, .trigger = ess_audio_trigger, .set_speed = ess_audio_set_speed, .set_bits = ess_audio_set_bits, .set_channels = ess_audio_set_channels }; /* * ess_audio_init must be called from sb_audio_init */ struct audio_driver *ess_audio_init (sb_devc *devc, int *audio_flags, int *format_mask) { *audio_flags = DMA_AUTOMODE; *format_mask |= AFMT_S16_LE; if (devc->duplex) { int tmp_dma; /* * sb_audio_init thinks dma8 is for playback and * dma16 is for record. Not now! So swap them. 
*/ tmp_dma = devc->dma16; devc->dma16 = devc->dma8; devc->dma8 = tmp_dma; *audio_flags |= DMA_DUPLEX; } return &ess_audio_driver; } /**************************************************************************** * * * ESS common * * * ****************************************************************************/ static void ess_handle_channel (char *channel, int dev, int intr_active, unsigned char flag, int irq_mode) { if (!intr_active || !flag) return; #ifdef FKS_REG_LOGGING printk(KERN_INFO "FKS: ess_handle_channel %s irq_mode=%d\n", channel, irq_mode); #endif switch (irq_mode) { case IMODE_OUTPUT: DMAbuf_outputintr (dev, 1); break; case IMODE_INPUT: DMAbuf_inputintr (dev); break; case IMODE_INIT: break; default:; /* printk(KERN_WARNING "ESS: Unexpected interrupt\n"); */ } } /* * FKS: TODO!!! Finish this! * * I think midi stuff uses uart401, without interrupts. * So IMODE_MIDI isn't a value for devc->irq_mode. */ void ess_intr (sb_devc *devc) { int status; unsigned char src; if (devc->submodel == SUBMDL_ES1887) { src = ess_getmixer (devc, 0x7f) >> 4; } else { src = 0xff; } #ifdef FKS_REG_LOGGING printk(KERN_INFO "FKS: sbintr src=%x\n",(int)src); #endif ess_handle_channel ( "Audio 1" , devc->dev, devc->intr_active , src & 0x01, devc->irq_mode ); ess_handle_channel ( "Audio 2" , devc->dev, devc->intr_active_16, src & 0x02, devc->irq_mode_16); /* * Acknowledge interrupts */ if (devc->submodel == SUBMDL_ES1887 && (src & 0x02)) { ess_chgmixer (devc, 0x7a, 0x80, 0x00); } if (src & 0x01) { status = inb(DSP_DATA_AVAIL); } } static void ess_extended (sb_devc * devc) { /* Enable extended mode */ sb_dsp_command(devc, 0xc6); } static int ess_write (sb_devc * devc, unsigned char reg, unsigned char data) { #ifdef FKS_REG_LOGGING printk(KERN_INFO "FKS: write reg %x: %x\n", reg, data); #endif /* Write a byte to an extended mode register of ES1688 */ if (!sb_dsp_command(devc, reg)) return 0; return sb_dsp_command(devc, data); } static int ess_read (sb_devc * devc, unsigned char reg) 
{ /* Read a byte from an extended mode register of ES1688 */ /* Read register command */ if (!sb_dsp_command(devc, 0xc0)) return -1; if (!sb_dsp_command(devc, reg )) return -1; return sb_dsp_get_byte(devc); } int ess_dsp_reset(sb_devc * devc) { int loopc; #ifdef FKS_REG_LOGGING printk(KERN_INFO "FKS: ess_dsp_reset 1\n"); ess_show_mixerregs (devc); #endif DEB(printk("Entered ess_dsp_reset()\n")); outb(3, DSP_RESET); /* Reset FIFO too */ udelay(10); outb(0, DSP_RESET); udelay(30); for (loopc = 0; loopc < 1000 && !(inb(DSP_DATA_AVAIL) & 0x80); loopc++); if (inb(DSP_READ) != 0xAA) { DDB(printk("sb: No response to RESET\n")); return 0; /* Sorry */ } ess_extended (devc); DEB(printk("sb_dsp_reset() OK\n")); #ifdef FKS_LOGGING printk(KERN_INFO "FKS: dsp_reset 2\n"); ess_show_mixerregs (devc); #endif return 1; } static int ess_irq_bits (int irq) { switch (irq) { case 2: case 9: return 0; case 5: return 1; case 7: return 2; case 10: return 3; default: printk(KERN_ERR "ESS1688: Invalid IRQ %d\n", irq); return -1; } } /* * Set IRQ configuration register for all ESS models */ static int ess_common_set_irq_hw (sb_devc * devc) { int irq_bits; if ((irq_bits = ess_irq_bits (devc->irq)) == -1) return 0; if (!ess_write (devc, 0xb1, 0x50 | (irq_bits << 2))) { printk(KERN_ERR "ES1688: Failed to write to IRQ config register\n"); return 0; } return 1; } /* * I wanna use modern ES1887 mixer irq handling. Funny is the * fact that my BIOS wants the same. But suppose someone's BIOS * doesn't do this! * This is independent of duplex. If there's a 1887 this will * prevent it from going into 1888 mode. 
*/ static void ess_es1887_set_irq_hw (sb_devc * devc) { int irq_bits; if ((irq_bits = ess_irq_bits (devc->irq)) == -1) return; ess_chgmixer (devc, 0x7f, 0x0f, 0x01 | ((irq_bits + 1) << 1)); } static int ess_set_irq_hw (sb_devc * devc) { if (devc->submodel == SUBMDL_ES1887) ess_es1887_set_irq_hw (devc); return ess_common_set_irq_hw (devc); } #ifdef FKS_TEST /* * FKS_test: * for ES1887: 00, 18, non wr bits: 0001 1000 * for ES1868: 00, b8, non wr bits: 1011 1000 * for ES1888: 00, f8, non wr bits: 1111 1000 * for ES1688: 00, f8, non wr bits: 1111 1000 * + ES968 */ static void FKS_test (sb_devc * devc) { int val1, val2; val1 = ess_getmixer (devc, 0x64); ess_setmixer (devc, 0x64, ~val1); val2 = ess_getmixer (devc, 0x64) ^ ~val1; ess_setmixer (devc, 0x64, val1); val1 ^= ess_getmixer (devc, 0x64); printk (KERN_INFO "FKS: FKS_test %02x, %02x\n", (val1 & 0x0ff), (val2 & 0x0ff)); }; #endif static unsigned int ess_identify (sb_devc * devc) { unsigned int val; unsigned long flags; spin_lock_irqsave(&devc->lock, flags); outb(((unsigned char) (0x40 & 0xff)), MIXER_ADDR); udelay(20); val = inb(MIXER_DATA) << 8; udelay(20); val |= inb(MIXER_DATA); udelay(20); spin_unlock_irqrestore(&devc->lock, flags); return val; } /* * ESS technology describes a detection scheme in their docs. It involves * fiddling with the bits in certain mixer registers. ess_probe is supposed * to help. * * FKS: tracing shows ess_probe writes wrong value to 0x64. Bit 3 reads 1, but * should be written 0 only. Check this. */ static int ess_probe (sb_devc * devc, int reg, int xorval) { int val1, val2, val3; val1 = ess_getmixer (devc, reg); val2 = val1 ^ xorval; ess_setmixer (devc, reg, val2); val3 = ess_getmixer (devc, reg); ess_setmixer (devc, reg, val1); return (val2 == val3); } int ess_init(sb_devc * devc, struct address_info *hw_config) { unsigned char cfg; int ess_major = 0, ess_minor = 0; int i; static char name[100], modelname[10]; /* * Try to detect ESS chips. 
*/ sb_dsp_command(devc, 0xe7); /* Return identification */ for (i = 1000; i; i--) { if (inb(DSP_DATA_AVAIL) & 0x80) { if (ess_major == 0) { ess_major = inb(DSP_READ); } else { ess_minor = inb(DSP_READ); break; } } } if (ess_major == 0) return 0; if (ess_major == 0x48 && (ess_minor & 0xf0) == 0x80) { sprintf(name, "ESS ES488 AudioDrive (rev %d)", ess_minor & 0x0f); hw_config->name = name; devc->model = MDL_SBPRO; return 1; } /* * This the detection heuristic of ESS technology, though somewhat * changed to actually make it work. * This results in the following detection steps: * - distinct between ES688 and ES1688+ (as always done in this driver) * if ES688 we're ready * - try to detect ES1868, ES1869 or ES1878 (ess_identify) * if successful we're ready * - try to detect ES1888, ES1887 or ES1788 (aim: detect ES1887) * if successful we're ready * - Dunno. Must be 1688. Will do in general * * This is the most BETA part of the software: Will the detection * always work? */ devc->model = MDL_ESS; devc->submodel = ess_minor & 0x0f; if (ess_major == 0x68 && (ess_minor & 0xf0) == 0x80) { char *chip = NULL; int submodel = -1; switch (devc->sbmo.esstype) { case ESSTYPE_DETECT: case ESSTYPE_LIKE20: break; case 688: submodel = 0x00; break; case 1688: submodel = 0x08; break; case 1868: submodel = SUBMDL_ES1868; break; case 1869: submodel = SUBMDL_ES1869; break; case 1788: submodel = SUBMDL_ES1788; break; case 1878: submodel = SUBMDL_ES1878; break; case 1879: submodel = SUBMDL_ES1879; break; case 1887: submodel = SUBMDL_ES1887; break; case 1888: submodel = SUBMDL_ES1888; break; default: printk (KERN_ERR "Invalid esstype=%d specified\n", devc->sbmo.esstype); return 0; } if (submodel != -1) { devc->submodel = submodel; sprintf (modelname, "ES%d", devc->sbmo.esstype); chip = modelname; } if (chip == NULL && (ess_minor & 0x0f) < 8) { chip = "ES688"; } #ifdef FKS_TEST FKS_test (devc); #endif /* * If Nothing detected yet, and we want 2.0 behaviour... * Then let's assume it's ES1688. 
*/ if (chip == NULL && devc->sbmo.esstype == ESSTYPE_LIKE20) { chip = "ES1688"; } if (chip == NULL) { int type; type = ess_identify (devc); switch (type) { case 0x1868: chip = "ES1868"; devc->submodel = SUBMDL_ES1868; break; case 0x1869: chip = "ES1869"; devc->submodel = SUBMDL_ES1869; break; case 0x1878: chip = "ES1878"; devc->submodel = SUBMDL_ES1878; break; case 0x1879: chip = "ES1879"; devc->submodel = SUBMDL_ES1879; break; default: if ((type & 0x00ff) != ((type >> 8) & 0x00ff)) { printk ("ess_init: Unrecognized %04x\n", type); } } } #if 0 /* * this one failed: * the probing of bit 4 is another thought: from ES1788 and up, all * chips seem to have hardware volume control. Bit 4 is readonly to * check if a hardware volume interrupt has fired. * Cause ES688/ES1688 don't have this feature, bit 4 might be writeable * for these chips. */ if (chip == NULL && !ess_probe(devc, 0x64, (1 << 4))) { #endif /* * the probing of bit 2 is my idea. The ES1887 docs want me to probe * bit 3. This results in ES1688 being detected as ES1788. * Bit 2 is for "Enable HWV IRQE", but as ES(1)688 chips don't have * HardWare Volume, I think they don't have this IRQE. */ if (chip == NULL && ess_probe(devc, 0x64, (1 << 2))) { if (ess_probe (devc, 0x70, 0x7f)) { if (ess_probe (devc, 0x64, (1 << 5))) { chip = "ES1887"; devc->submodel = SUBMDL_ES1887; } else { chip = "ES1888"; devc->submodel = SUBMDL_ES1888; } } else { chip = "ES1788"; devc->submodel = SUBMDL_ES1788; } } if (chip == NULL) { chip = "ES1688"; } printk ( KERN_INFO "ESS chip %s %s%s\n" , chip , ( devc->sbmo.esstype == ESSTYPE_DETECT || devc->sbmo.esstype == ESSTYPE_LIKE20 ? "detected" : "specified" ) , ( devc->sbmo.esstype == ESSTYPE_LIKE20 ? 
" (kernel 2.0 compatible)" : "" ) ); sprintf(name,"ESS %s AudioDrive (rev %d)", chip, ess_minor & 0x0f); } else { strcpy(name, "Jazz16"); } /* AAS: info stolen from ALSA: these boards have different clocks */ switch(devc->submodel) { /* APPARENTLY NOT 1869 AND 1887 case SUBMDL_ES1869: case SUBMDL_ES1887: */ case SUBMDL_ES1888: devc->caps |= SB_CAP_ES18XX_RATE; break; } hw_config->name = name; /* FKS: sb_dsp_reset to enable extended mode???? */ sb_dsp_reset(devc); /* Turn on extended mode */ /* * Enable joystick and OPL3 */ cfg = ess_getmixer (devc, 0x40); ess_setmixer (devc, 0x40, cfg | 0x03); if (devc->submodel >= 8) { /* ES1688 */ devc->caps |= SB_NO_MIDI; /* ES1688 uses MPU401 MIDI mode */ } sb_dsp_reset (devc); /* * This is important! If it's not done, the IRQ probe in sb_dsp_init * may fail. */ return ess_set_irq_hw (devc); } static int ess_set_dma_hw(sb_devc * devc) { unsigned char cfg, dma_bits = 0, dma16_bits; int dma; #ifdef FKS_LOGGING printk(KERN_INFO "ess_set_dma_hw: dma8=%d,dma16=%d,dup=%d\n" , devc->dma8, devc->dma16, devc->duplex); #endif /* * FKS: It seems as if this duplex flag isn't set yet. Check it. */ dma = devc->dma8; if (dma > 3 || dma < 0 || dma == 2) { dma_bits = 0; printk(KERN_ERR "ESS1688: Invalid DMA8 %d\n", dma); return 0; } else { /* Extended mode DMA enable */ cfg = 0x50; if (dma == 3) { dma_bits = 3; } else { dma_bits = dma + 1; } } if (!ess_write (devc, 0xb2, cfg | (dma_bits << 2))) { printk(KERN_ERR "ESS1688: Failed to write to DMA config register\n"); return 0; } if (devc->duplex) { dma = devc->dma16; dma16_bits = 0; if (dma >= 0) { switch (dma) { case 0: dma_bits = 0x04; break; case 1: dma_bits = 0x05; break; case 3: dma_bits = 0x06; break; case 5: dma_bits = 0x07; dma16_bits = 0x20; break; default: printk(KERN_ERR "ESS1887: Invalid DMA16 %d\n", dma); return 0; } ess_chgmixer (devc, 0x78, 0x20, dma16_bits); ess_chgmixer (devc, 0x7d, 0x07, dma_bits); } } return 1; } /* * This one is called from sb_dsp_init. 
* * Return values: * 0: Failed * 1: Succeeded or doesn't apply (not SUBMDL_ES1887) */ int ess_dsp_init (sb_devc *devc, struct address_info *hw_config) { /* * Caller also checks this, but anyway */ if (devc->model != MDL_ESS) { printk (KERN_INFO "ess_dsp_init for non ESS chip\n"); return 1; } /* * This for ES1887 to run Full Duplex. Actually ES1888 * is allowed to do so too. I have no idea yet if this * will work for ES1888 however. * * For SB16 having both dma8 and dma16 means enable * Full Duplex. Let's try this for ES1887 too * */ if (devc->submodel == SUBMDL_ES1887) { if (hw_config->dma2 != -1) { devc->dma16 = hw_config->dma2; } /* * devc->duplex initialization is put here, cause * ess_set_dma_hw needs it. */ if (devc->dma8 != devc->dma16 && devc->dma16 != -1) { devc->duplex = 1; } } if (!ess_set_dma_hw (devc)) { free_irq(devc->irq, devc); return 0; } return 1; } /**************************************************************************** * * * ESS mixer * * * ****************************************************************************/ #define ES688_RECORDING_DEVICES \ ( SOUND_MASK_LINE | SOUND_MASK_MIC | SOUND_MASK_CD ) #define ES688_MIXER_DEVICES \ ( SOUND_MASK_SYNTH | SOUND_MASK_PCM | SOUND_MASK_LINE \ | SOUND_MASK_MIC | SOUND_MASK_CD | SOUND_MASK_VOLUME \ | SOUND_MASK_LINE2 | SOUND_MASK_SPEAKER ) #define ES1688_RECORDING_DEVICES \ ( ES688_RECORDING_DEVICES ) #define ES1688_MIXER_DEVICES \ ( ES688_MIXER_DEVICES | SOUND_MASK_RECLEV ) #define ES1887_RECORDING_DEVICES \ ( ES1688_RECORDING_DEVICES | SOUND_MASK_LINE2 | SOUND_MASK_SYNTH) #define ES1887_MIXER_DEVICES \ ( ES1688_MIXER_DEVICES ) /* * Mixer registers of ES1887 * * These registers specifically take care of recording levels. 
To make the * mapping from playback devices to recording devices every recording * devices = playback device + ES_REC_MIXER_RECDIFF */ #define ES_REC_MIXER_RECBASE (SOUND_MIXER_LINE3 + 1) #define ES_REC_MIXER_RECDIFF (ES_REC_MIXER_RECBASE - SOUND_MIXER_SYNTH) #define ES_REC_MIXER_RECSYNTH (SOUND_MIXER_SYNTH + ES_REC_MIXER_RECDIFF) #define ES_REC_MIXER_RECPCM (SOUND_MIXER_PCM + ES_REC_MIXER_RECDIFF) #define ES_REC_MIXER_RECSPEAKER (SOUND_MIXER_SPEAKER + ES_REC_MIXER_RECDIFF) #define ES_REC_MIXER_RECLINE (SOUND_MIXER_LINE + ES_REC_MIXER_RECDIFF) #define ES_REC_MIXER_RECMIC (SOUND_MIXER_MIC + ES_REC_MIXER_RECDIFF) #define ES_REC_MIXER_RECCD (SOUND_MIXER_CD + ES_REC_MIXER_RECDIFF) #define ES_REC_MIXER_RECIMIX (SOUND_MIXER_IMIX + ES_REC_MIXER_RECDIFF) #define ES_REC_MIXER_RECALTPCM (SOUND_MIXER_ALTPCM + ES_REC_MIXER_RECDIFF) #define ES_REC_MIXER_RECRECLEV (SOUND_MIXER_RECLEV + ES_REC_MIXER_RECDIFF) #define ES_REC_MIXER_RECIGAIN (SOUND_MIXER_IGAIN + ES_REC_MIXER_RECDIFF) #define ES_REC_MIXER_RECOGAIN (SOUND_MIXER_OGAIN + ES_REC_MIXER_RECDIFF) #define ES_REC_MIXER_RECLINE1 (SOUND_MIXER_LINE1 + ES_REC_MIXER_RECDIFF) #define ES_REC_MIXER_RECLINE2 (SOUND_MIXER_LINE2 + ES_REC_MIXER_RECDIFF) #define ES_REC_MIXER_RECLINE3 (SOUND_MIXER_LINE3 + ES_REC_MIXER_RECDIFF) static mixer_tab es688_mix = { MIX_ENT(SOUND_MIXER_VOLUME, 0x32, 7, 4, 0x32, 3, 4), MIX_ENT(SOUND_MIXER_BASS, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_TREBLE, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_SYNTH, 0x36, 7, 4, 0x36, 3, 4), MIX_ENT(SOUND_MIXER_PCM, 0x14, 7, 4, 0x14, 3, 4), MIX_ENT(SOUND_MIXER_SPEAKER, 0x3c, 2, 3, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_LINE, 0x3e, 7, 4, 0x3e, 3, 4), MIX_ENT(SOUND_MIXER_MIC, 0x1a, 7, 4, 0x1a, 3, 4), MIX_ENT(SOUND_MIXER_CD, 0x38, 7, 4, 0x38, 3, 4), MIX_ENT(SOUND_MIXER_IMIX, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_ALTPCM, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_RECLEV, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_IGAIN, 0x00, 0, 0, 0x00, 0, 0), 
MIX_ENT(SOUND_MIXER_OGAIN, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_LINE1, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_LINE2, 0x3a, 7, 4, 0x3a, 3, 4), MIX_ENT(SOUND_MIXER_LINE3, 0x00, 0, 0, 0x00, 0, 0) }; /* * The ES1688 specifics... hopefully correct... * - 6 bit master volume * I was wrong, ES1888 docs say ES1688 didn't have it. * - RECLEV control * These may apply to ES688 too. I have no idea. */ static mixer_tab es1688_mix = { MIX_ENT(SOUND_MIXER_VOLUME, 0x32, 7, 4, 0x32, 3, 4), MIX_ENT(SOUND_MIXER_BASS, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_TREBLE, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_SYNTH, 0x36, 7, 4, 0x36, 3, 4), MIX_ENT(SOUND_MIXER_PCM, 0x14, 7, 4, 0x14, 3, 4), MIX_ENT(SOUND_MIXER_SPEAKER, 0x3c, 2, 3, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_LINE, 0x3e, 7, 4, 0x3e, 3, 4), MIX_ENT(SOUND_MIXER_MIC, 0x1a, 7, 4, 0x1a, 3, 4), MIX_ENT(SOUND_MIXER_CD, 0x38, 7, 4, 0x38, 3, 4), MIX_ENT(SOUND_MIXER_IMIX, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_ALTPCM, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_RECLEV, 0xb4, 7, 4, 0xb4, 3, 4), MIX_ENT(SOUND_MIXER_IGAIN, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_OGAIN, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_LINE1, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_LINE2, 0x3a, 7, 4, 0x3a, 3, 4), MIX_ENT(SOUND_MIXER_LINE3, 0x00, 0, 0, 0x00, 0, 0) }; static mixer_tab es1688later_mix = { MIX_ENT(SOUND_MIXER_VOLUME, 0x60, 5, 6, 0x62, 5, 6), MIX_ENT(SOUND_MIXER_BASS, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_TREBLE, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_SYNTH, 0x36, 7, 4, 0x36, 3, 4), MIX_ENT(SOUND_MIXER_PCM, 0x14, 7, 4, 0x14, 3, 4), MIX_ENT(SOUND_MIXER_SPEAKER, 0x3c, 2, 3, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_LINE, 0x3e, 7, 4, 0x3e, 3, 4), MIX_ENT(SOUND_MIXER_MIC, 0x1a, 7, 4, 0x1a, 3, 4), MIX_ENT(SOUND_MIXER_CD, 0x38, 7, 4, 0x38, 3, 4), MIX_ENT(SOUND_MIXER_IMIX, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_ALTPCM, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_RECLEV, 0xb4, 7, 4, 0xb4, 3, 4), 
MIX_ENT(SOUND_MIXER_IGAIN, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_OGAIN, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_LINE1, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_LINE2, 0x3a, 7, 4, 0x3a, 3, 4), MIX_ENT(SOUND_MIXER_LINE3, 0x00, 0, 0, 0x00, 0, 0) }; /* * This one is for all ESS chips with a record mixer. * It's not used (yet) however */ static mixer_tab es_rec_mix = { MIX_ENT(SOUND_MIXER_VOLUME, 0x60, 5, 6, 0x62, 5, 6), MIX_ENT(SOUND_MIXER_BASS, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_TREBLE, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_SYNTH, 0x36, 7, 4, 0x36, 3, 4), MIX_ENT(SOUND_MIXER_PCM, 0x14, 7, 4, 0x14, 3, 4), MIX_ENT(SOUND_MIXER_SPEAKER, 0x3c, 2, 3, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_LINE, 0x3e, 7, 4, 0x3e, 3, 4), MIX_ENT(SOUND_MIXER_MIC, 0x1a, 7, 4, 0x1a, 3, 4), MIX_ENT(SOUND_MIXER_CD, 0x38, 7, 4, 0x38, 3, 4), MIX_ENT(SOUND_MIXER_IMIX, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_ALTPCM, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_RECLEV, 0xb4, 7, 4, 0xb4, 3, 4), MIX_ENT(SOUND_MIXER_IGAIN, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_OGAIN, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_LINE1, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_LINE2, 0x3a, 7, 4, 0x3a, 3, 4), MIX_ENT(SOUND_MIXER_LINE3, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(ES_REC_MIXER_RECSYNTH, 0x6b, 7, 4, 0x6b, 3, 4), MIX_ENT(ES_REC_MIXER_RECPCM, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(ES_REC_MIXER_RECSPEAKER, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(ES_REC_MIXER_RECLINE, 0x6e, 7, 4, 0x6e, 3, 4), MIX_ENT(ES_REC_MIXER_RECMIC, 0x68, 7, 4, 0x68, 3, 4), MIX_ENT(ES_REC_MIXER_RECCD, 0x6a, 7, 4, 0x6a, 3, 4), MIX_ENT(ES_REC_MIXER_RECIMIX, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(ES_REC_MIXER_RECALTPCM, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(ES_REC_MIXER_RECRECLEV, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(ES_REC_MIXER_RECIGAIN, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(ES_REC_MIXER_RECOGAIN, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(ES_REC_MIXER_RECLINE1, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(ES_REC_MIXER_RECLINE2, 0x6c, 7, 4, 0x6c, 3, 4), 
MIX_ENT(ES_REC_MIXER_RECLINE3, 0x00, 0, 0, 0x00, 0, 0) }; /* * This one is for ES1887. It's little different from es_rec_mix: it * has 0x7c for PCM playback level. This is because ES1887 uses * Audio 2 for playback. */ static mixer_tab es1887_mix = { MIX_ENT(SOUND_MIXER_VOLUME, 0x60, 5, 6, 0x62, 5, 6), MIX_ENT(SOUND_MIXER_BASS, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_TREBLE, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_SYNTH, 0x36, 7, 4, 0x36, 3, 4), MIX_ENT(SOUND_MIXER_PCM, 0x7c, 7, 4, 0x7c, 3, 4), MIX_ENT(SOUND_MIXER_SPEAKER, 0x3c, 2, 3, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_LINE, 0x3e, 7, 4, 0x3e, 3, 4), MIX_ENT(SOUND_MIXER_MIC, 0x1a, 7, 4, 0x1a, 3, 4), MIX_ENT(SOUND_MIXER_CD, 0x38, 7, 4, 0x38, 3, 4), MIX_ENT(SOUND_MIXER_IMIX, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_ALTPCM, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_RECLEV, 0xb4, 7, 4, 0xb4, 3, 4), MIX_ENT(SOUND_MIXER_IGAIN, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_OGAIN, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_LINE1, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(SOUND_MIXER_LINE2, 0x3a, 7, 4, 0x3a, 3, 4), MIX_ENT(SOUND_MIXER_LINE3, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(ES_REC_MIXER_RECSYNTH, 0x6b, 7, 4, 0x6b, 3, 4), MIX_ENT(ES_REC_MIXER_RECPCM, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(ES_REC_MIXER_RECSPEAKER, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(ES_REC_MIXER_RECLINE, 0x6e, 7, 4, 0x6e, 3, 4), MIX_ENT(ES_REC_MIXER_RECMIC, 0x68, 7, 4, 0x68, 3, 4), MIX_ENT(ES_REC_MIXER_RECCD, 0x6a, 7, 4, 0x6a, 3, 4), MIX_ENT(ES_REC_MIXER_RECIMIX, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(ES_REC_MIXER_RECALTPCM, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(ES_REC_MIXER_RECRECLEV, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(ES_REC_MIXER_RECIGAIN, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(ES_REC_MIXER_RECOGAIN, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(ES_REC_MIXER_RECLINE1, 0x00, 0, 0, 0x00, 0, 0), MIX_ENT(ES_REC_MIXER_RECLINE2, 0x6c, 7, 4, 0x6c, 3, 4), MIX_ENT(ES_REC_MIXER_RECLINE3, 0x00, 0, 0, 0x00, 0, 0) }; static int ess_has_rec_mixer (int submodel) { switch (submodel) { case SUBMDL_ES1887: 
return 1; default: return 0; }; }; #ifdef FKS_LOGGING static int ess_mixer_mon_regs[] = { 0x70, 0x71, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7d, 0x7f , 0xa1, 0xa2, 0xa4, 0xa5, 0xa8, 0xa9 , 0xb1, 0xb2, 0xb4, 0xb5, 0xb6, 0xb7, 0xb9 , 0x00}; static void ess_show_mixerregs (sb_devc *devc) { int *mp = ess_mixer_mon_regs; return; while (*mp != 0) { printk (KERN_INFO "res (%x)=%x\n", *mp, (int)(ess_getmixer (devc, *mp))); mp++; } } #endif void ess_setmixer (sb_devc * devc, unsigned int port, unsigned int value) { unsigned long flags; #ifdef FKS_LOGGING printk(KERN_INFO "FKS: write mixer %x: %x\n", port, value); #endif spin_lock_irqsave(&devc->lock, flags); if (port >= 0xa0) { ess_write (devc, port, value); } else { outb(((unsigned char) (port & 0xff)), MIXER_ADDR); udelay(20); outb(((unsigned char) (value & 0xff)), MIXER_DATA); udelay(20); } spin_unlock_irqrestore(&devc->lock, flags); } unsigned int ess_getmixer (sb_devc * devc, unsigned int port) { unsigned int val; unsigned long flags; spin_lock_irqsave(&devc->lock, flags); if (port >= 0xa0) { val = ess_read (devc, port); } else { outb(((unsigned char) (port & 0xff)), MIXER_ADDR); udelay(20); val = inb(MIXER_DATA); udelay(20); } spin_unlock_irqrestore(&devc->lock, flags); return val; } static void ess_chgmixer (sb_devc * devc, unsigned int reg, unsigned int mask, unsigned int val) { int value; value = ess_getmixer (devc, reg); value = (value & ~mask) | (val & mask); ess_setmixer (devc, reg, value); } /* * ess_mixer_init must be called from sb_mixer_init */ void ess_mixer_init (sb_devc * devc) { devc->mixer_caps = SOUND_CAP_EXCL_INPUT; /* * Take care of ES1887 specifics... 
*/ switch (devc->submodel) { case SUBMDL_ES1887: devc->supported_devices = ES1887_MIXER_DEVICES; devc->supported_rec_devices = ES1887_RECORDING_DEVICES; #ifdef FKS_LOGGING printk (KERN_INFO "FKS: ess_mixer_init dup = %d\n", devc->duplex); #endif if (devc->duplex) { devc->iomap = &es1887_mix; devc->iomap_sz = ARRAY_SIZE(es1887_mix); } else { devc->iomap = &es_rec_mix; devc->iomap_sz = ARRAY_SIZE(es_rec_mix); } break; default: if (devc->submodel < 8) { devc->supported_devices = ES688_MIXER_DEVICES; devc->supported_rec_devices = ES688_RECORDING_DEVICES; devc->iomap = &es688_mix; devc->iomap_sz = ARRAY_SIZE(es688_mix); } else { /* * es1688 has 4 bits master vol. * later chips have 6 bits (?) */ devc->supported_devices = ES1688_MIXER_DEVICES; devc->supported_rec_devices = ES1688_RECORDING_DEVICES; if (devc->submodel < 0x10) { devc->iomap = &es1688_mix; devc->iomap_sz = ARRAY_SIZE(es688_mix); } else { devc->iomap = &es1688later_mix; devc->iomap_sz = ARRAY_SIZE(es1688later_mix); } } } } /* * Changing playback levels at an ESS chip with record mixer means having to * take care of recording levels of recorded inputs (devc->recmask) too! */ int ess_mixer_set(sb_devc *devc, int dev, int left, int right) { if (ess_has_rec_mixer (devc->submodel) && (devc->recmask & (1 << dev))) { sb_common_mixer_set (devc, dev + ES_REC_MIXER_RECDIFF, left, right); } return sb_common_mixer_set (devc, dev, left, right); } /* * After a sb_dsp_reset extended register 0xb4 (RECLEV) is reset too. After * sb_dsp_reset RECLEV has to be restored. This is where ess_mixer_reload * helps. 
*/ void ess_mixer_reload (sb_devc *devc, int dev) { int left, right, value; value = devc->levels[dev]; left = value & 0x000000ff; right = (value & 0x0000ff00) >> 8; sb_common_mixer_set(devc, dev, left, right); } static int es_rec_set_recmask(sb_devc * devc, int mask) { int i, i_mask, cur_mask, diff_mask; int value, left, right; #ifdef FKS_LOGGING printk (KERN_INFO "FKS: es_rec_set_recmask mask = %x\n", mask); #endif /* * Changing the recmask on an ESS chip with recording mixer means: * (1) Find the differences * (2) For "turned-on" inputs: make the recording level the playback level * (3) For "turned-off" inputs: make the recording level zero */ cur_mask = devc->recmask; diff_mask = (cur_mask ^ mask); for (i = 0; i < 32; i++) { i_mask = (1 << i); if (diff_mask & i_mask) { /* Difference? (1) */ if (mask & i_mask) { /* Turn it on (2) */ value = devc->levels[i]; left = value & 0x000000ff; right = (value & 0x0000ff00) >> 8; } else { /* Turn it off (3) */ left = 0; right = 0; } sb_common_mixer_set(devc, i + ES_REC_MIXER_RECDIFF, left, right); } } return mask; } int ess_set_recmask(sb_devc * devc, int *mask) { /* This applies to ESS chips with record mixers only! */ if (ess_has_rec_mixer (devc->submodel)) { *mask = es_rec_set_recmask (devc, *mask); return 1; /* Applied */ } else { return 0; /* Not applied */ } } /* * ess_mixer_reset must be called from sb_mixer_reset */ int ess_mixer_reset (sb_devc * devc) { /* * Separate actions for ESS chips with a record mixer: */ if (ess_has_rec_mixer (devc->submodel)) { switch (devc->submodel) { case SUBMDL_ES1887: /* * Separate actions for ES1887: * Change registers 7a and 1c to make the record mixer the * actual recording source. */ ess_chgmixer(devc, 0x7a, 0x18, 0x08); ess_chgmixer(devc, 0x1c, 0x07, 0x07); break; } /* * Call set_recmask for proper initialization */ devc->recmask = devc->supported_rec_devices; es_rec_set_recmask(devc, 0); devc->recmask = 0; return 1; /* We took care of recmask. 
*/ } else { return 0; /* We didn't take care; caller do it */ } } /**************************************************************************** * * * ESS midi * * * ****************************************************************************/ /* * FKS: IRQ may be shared. Hm. And if so? Then What? */ int ess_midi_init(sb_devc * devc, struct address_info *hw_config) { unsigned char cfg, tmp; cfg = ess_getmixer (devc, 0x40) & 0x03; if (devc->submodel < 8) { ess_setmixer (devc, 0x40, cfg | 0x03); /* Enable OPL3 & joystick */ return 0; /* ES688 doesn't support MPU401 mode */ } tmp = (hw_config->io_base & 0x0f0) >> 4; if (tmp > 3) { ess_setmixer (devc, 0x40, cfg); return 0; } cfg |= tmp << 3; tmp = 1; /* MPU enabled without interrupts */ /* May be shared: if so the value is -ve */ switch (abs(hw_config->irq)) { case 9: tmp = 0x4; break; case 5: tmp = 0x5; break; case 7: tmp = 0x6; break; case 10: tmp = 0x7; break; default: return 0; } cfg |= tmp << 5; ess_setmixer (devc, 0x40, cfg | 0x03); return 1; }
gpl-2.0
yun3195/android_kernel_ZTE_Z5S
drivers/staging/iio/meter/ade7854-spi.c
5006
7982
/*
 * ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC Driver (SPI Bus)
 *
 * Copyright 2010 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/module.h>

#include "../iio.h"
#include "ade7854.h"

/*
 * ade7854_spi_write_reg() - write an ADE7854 register over SPI.
 * @dev:		bus device backing the IIO device
 * @reg_address:	16-bit register address
 * @value:		value to write; only the low @count bytes are used
 * @count:		register width in bytes (1, 2, 3 or 4)
 *
 * Frame layout (one transfer): write opcode, address high byte, address
 * low byte, then @count data bytes most-significant byte first — the same
 * framing the former per-width write functions produced by hand.
 *
 * Returns 0 on success or the negative error code from spi_sync().
 */
static int ade7854_spi_write_reg(struct device *dev,
		u16 reg_address,
		u32 value,
		unsigned int count)
{
	int ret;
	unsigned int i;
	struct spi_message msg;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ade7854_state *st = iio_priv(indio_dev);
	struct spi_transfer xfer = {
		.tx_buf = st->tx,
		.bits_per_word = 8,
		.len = 3 + count,
	};

	/* buf_lock serializes use of the shared st->tx scratch buffer. */
	mutex_lock(&st->buf_lock);
	st->tx[0] = ADE7854_WRITE_REG;
	st->tx[1] = (reg_address >> 8) & 0xFF;
	st->tx[2] = reg_address & 0xFF;
	/* Data goes on the wire big-endian (MSB first). */
	for (i = 0; i < count; i++)
		st->tx[3 + i] = (value >> (8 * (count - 1 - i))) & 0xFF;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	ret = spi_sync(st->spi, &msg);
	mutex_unlock(&st->buf_lock);

	return ret;
}

/* Fixed-width wrappers matching the ade7854_state write_reg_* hooks. */
static int ade7854_spi_write_reg_8(struct device *dev,
		u16 reg_address,
		u8 value)
{
	return ade7854_spi_write_reg(dev, reg_address, value, 1);
}

static int ade7854_spi_write_reg_16(struct device *dev,
		u16 reg_address,
		u16 value)
{
	return ade7854_spi_write_reg(dev, reg_address, value, 2);
}

static int ade7854_spi_write_reg_24(struct device *dev,
		u16 reg_address,
		u32 value)
{
	return ade7854_spi_write_reg(dev, reg_address, value, 3);
}

static int ade7854_spi_write_reg_32(struct device *dev,
		u16 reg_address,
		u32 value)
{
	return ade7854_spi_write_reg(dev, reg_address, value, 4);
}

/*
 * ade7854_spi_read_reg() - read an ADE7854 register over SPI.
 * @dev:		bus device backing the IIO device
 * @reg_address:	16-bit register address
 * @val:		result; on success holds the register value,
 *			untouched on error
 * @count:		register width in bytes (1, 2, 3 or 4)
 *
 * Two chained transfers: a 3-byte command (read opcode plus big-endian
 * address) followed by a @count-byte receive. The device returns data
 * MSB first, so the bytes are folded into *val in big-endian order —
 * equivalent to the be16/be32 conversions the per-width readers used.
 *
 * Returns 0 on success or the negative error code from spi_sync().
 */
static int ade7854_spi_read_reg(struct device *dev,
		u16 reg_address,
		u32 *val,
		unsigned int count)
{
	struct spi_message msg;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ade7854_state *st = iio_priv(indio_dev);
	unsigned int i;
	int ret;
	struct spi_transfer xfers[] = {
		{
			.tx_buf = st->tx,
			.bits_per_word = 8,
			.len = 3,
		}, {
			.rx_buf = st->rx,
			.bits_per_word = 8,
			.len = count,
		}
	};

	mutex_lock(&st->buf_lock);
	st->tx[0] = ADE7854_READ_REG;
	st->tx[1] = (reg_address >> 8) & 0xFF;
	st->tx[2] = reg_address & 0xFF;

	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);
	ret = spi_sync(st->spi, &msg);
	if (ret) {
		dev_err(&st->spi->dev,
			"problem when reading %u bit register 0x%02X",
			8 * count, reg_address);
		goto error_ret;
	}

	/* Assemble the big-endian payload into host order. */
	*val = 0;
	for (i = 0; i < count; i++)
		*val = (*val << 8) | st->rx[i];

error_ret:
	mutex_unlock(&st->buf_lock);
	return ret;
}

/* Fixed-width wrappers matching the ade7854_state read_reg_* hooks. */
static int ade7854_spi_read_reg_8(struct device *dev,
		u16 reg_address,
		u8 *val)
{
	u32 tmp;
	int ret;

	ret = ade7854_spi_read_reg(dev, reg_address, &tmp, 1);
	if (!ret)
		*val = tmp;
	return ret;
}

static int ade7854_spi_read_reg_16(struct device *dev,
		u16 reg_address,
		u16 *val)
{
	u32 tmp;
	int ret;

	ret = ade7854_spi_read_reg(dev, reg_address, &tmp, 2);
	if (!ret)
		*val = tmp;
	return ret;
}

static int ade7854_spi_read_reg_24(struct device *dev,
		u16 reg_address,
		u32 *val)
{
	return ade7854_spi_read_reg(dev, reg_address, val, 3);
}

static int ade7854_spi_read_reg_32(struct device *dev,
		u16 reg_address,
		u32 *val)
{
	return ade7854_spi_read_reg(dev, reg_address, val, 4);
}

/*
 * ade7854_spi_probe() - bind the SPI variant of the ADE7854 driver.
 *
 * Allocates the IIO device, wires the SPI register accessors into the
 * chip-common state, and hands off to the bus-independent ade7854_probe().
 */
static int __devinit ade7854_spi_probe(struct spi_device *spi)
{
	int ret;
	struct ade7854_state *st;
	struct iio_dev *indio_dev;

	indio_dev = iio_allocate_device(sizeof(*st));
	if (indio_dev == NULL)
		return -ENOMEM;
	st = iio_priv(indio_dev);
	spi_set_drvdata(spi, indio_dev);
	st->read_reg_8 = ade7854_spi_read_reg_8;
	st->read_reg_16 = ade7854_spi_read_reg_16;
	st->read_reg_24 = ade7854_spi_read_reg_24;
	st->read_reg_32 = ade7854_spi_read_reg_32;
	st->write_reg_8 = ade7854_spi_write_reg_8;
	st->write_reg_16 = ade7854_spi_write_reg_16;
	st->write_reg_24 = ade7854_spi_write_reg_24;
	st->write_reg_32 = ade7854_spi_write_reg_32;
	st->irq = spi->irq;
	st->spi = spi;

	ret = ade7854_probe(indio_dev, &spi->dev);
	if (ret)
		iio_free_device(indio_dev);

	/*
	 * Bug fix: the original returned 0 unconditionally, so a failed
	 * ade7854_probe() (with indio_dev already freed above) was still
	 * reported as success to the SPI core. Propagate the error.
	 */
	return ret;
}

static int ade7854_spi_remove(struct spi_device *spi)
{
	ade7854_remove(spi_get_drvdata(spi));

	return 0;
}

static const struct spi_device_id ade7854_id[] = {
	{ "ade7854", 0 },
	{ "ade7858", 0 },
	{ "ade7868", 0 },
	{ "ade7878", 0 },
	{ }
};
MODULE_DEVICE_TABLE(spi, ade7854_id);

static struct spi_driver ade7854_driver = {
	.driver = {
		.name = "ade7854",
		.owner = THIS_MODULE,
	},
	.probe = ade7854_spi_probe,
	.remove = __devexit_p(ade7854_spi_remove),
	.id_table = ade7854_id,
};
module_spi_driver(ade7854_driver);

MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADE7854/58/68/78 SPI Driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
darklordIN/android_kernel_sony_nicki-1
fs/reiserfs/xattr_security.c
7822
3005
#include "reiserfs.h"
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/xattr.h>
#include <linux/slab.h>
#include "xattr.h"
#include <linux/security.h>
#include <asm/uaccess.h>

/*
 * Read a "security.*" extended attribute.  The xattr core strips
 * nothing here: @name is the full attribute name including the
 * "security." prefix, so it must be strictly longer than the prefix
 * (sizeof includes the NUL, hence '<').  Private (internal) inodes
 * never expose xattrs.
 */
static int
security_get(struct dentry *dentry, const char *name, void *buffer,
	     size_t size, int handler_flags)
{
	if (strlen(name) < sizeof(XATTR_SECURITY_PREFIX))
		return -EINVAL;

	if (IS_PRIVATE(dentry->d_inode))
		return -EPERM;

	return reiserfs_xattr_get(dentry->d_inode, name, buffer, size);
}

/*
 * Set or remove a "security.*" extended attribute.  Same name/private
 * checks as security_get(); @flags is passed through to honour
 * XATTR_CREATE/XATTR_REPLACE semantics.
 */
static int
security_set(struct dentry *dentry, const char *name, const void *buffer,
	     size_t size, int flags, int handler_flags)
{
	if (strlen(name) < sizeof(XATTR_SECURITY_PREFIX))
		return -EINVAL;

	if (IS_PRIVATE(dentry->d_inode))
		return -EPERM;

	return reiserfs_xattr_set(dentry->d_inode, name, buffer, size, flags);
}

/*
 * Contribute one name to a listxattr() enumeration.  Returns the space
 * the entry needs (name + NUL); copies it only when @list is non-NULL
 * and there is room.  Private inodes contribute nothing.
 */
static size_t security_list(struct dentry *dentry, char *list, size_t list_len,
			    const char *name, size_t namelen, int handler_flags)
{
	const size_t len = namelen + 1;

	if (IS_PRIVATE(dentry->d_inode))
		return 0;

	if (list && len <= list_len) {
		memcpy(list, name, namelen);
		list[namelen] = '\0';
	}

	return len;
}

/* Initializes the security context for a new inode and returns the number
 * of blocks needed for the transaction. If successful, reiserfs_security
 * must be released using reiserfs_security_free when the caller is done.
*/ int reiserfs_security_init(struct inode *dir, struct inode *inode, const struct qstr *qstr, struct reiserfs_security_handle *sec) { int blocks = 0; int error; sec->name = NULL; /* Don't add selinux attributes on xattrs - they'll never get used */ if (IS_PRIVATE(dir)) return 0; error = security_old_inode_init_security(inode, dir, qstr, &sec->name, &sec->value, &sec->length); if (error) { if (error == -EOPNOTSUPP) error = 0; sec->name = NULL; sec->value = NULL; sec->length = 0; return error; } if (sec->length && reiserfs_xattrs_initialized(inode->i_sb)) { blocks = reiserfs_xattr_jcreate_nblocks(inode) + reiserfs_xattr_nblocks(inode, sec->length); /* We don't want to count the directories twice if we have * a default ACL. */ REISERFS_I(inode)->i_flags |= i_has_xattr_dir; } return blocks; } int reiserfs_security_write(struct reiserfs_transaction_handle *th, struct inode *inode, struct reiserfs_security_handle *sec) { int error; if (strlen(sec->name) < sizeof(XATTR_SECURITY_PREFIX)) return -EINVAL; error = reiserfs_xattr_set_handle(th, inode, sec->name, sec->value, sec->length, XATTR_CREATE); if (error == -ENODATA || error == -EOPNOTSUPP) error = 0; return error; } void reiserfs_security_free(struct reiserfs_security_handle *sec) { kfree(sec->name); kfree(sec->value); sec->name = NULL; sec->value = NULL; } const struct xattr_handler reiserfs_xattr_security_handler = { .prefix = XATTR_SECURITY_PREFIX, .get = security_get, .set = security_set, .list = security_list, };
gpl-2.0
antaril/AGK-LOLLIPOP_GPE
drivers/char/pc8736x_gpio.c
8334
8996
/* linux/drivers/char/pc8736x_gpio.c

   National Semiconductor PC8736x GPIO driver.  Allows a user space
   process to play with the GPIO pins.

   Copyright (c) 2005,2006 Jim Cromie <jim.cromie@gmail.com>

   adapted from linux/drivers/char/scx200_gpio.c
   Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>,
*/

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/cdev.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/mutex.h>
#include <linux/nsc_gpio.h>
#include <linux/platform_device.h>
#include <asm/uaccess.h>

#define DEVNAME "pc8736x_gpio"

MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>");
MODULE_DESCRIPTION("NatSemi/Winbond PC-8736x GPIO Pin Driver");
MODULE_LICENSE("GPL");

static int major;		/* default to dynamic major */
module_param(major, int, 0);
MODULE_PARM_DESC(major, "Major device number");

/* serializes the read-modify-write of a pin's config register */
static DEFINE_MUTEX(pc8736x_gpio_config_lock);
static unsigned pc8736x_gpio_base;
/* last value read back from each port's data register; see _gpio_set() */
static u8 pc8736x_gpio_shadow[4];

#define SIO_BASE1       0x2E	/* 1st command-reg to check */
#define SIO_BASE2       0x4E	/* alt command-reg to check */

#define SIO_SID		0x20	/* SuperI/O ID Register */
#define SIO_SID_PC87365	0xe5	/* Expected value in ID Register for PC87365 */
#define SIO_SID_PC87366	0xe9	/* Expected value in ID Register for PC87366 */

#define SIO_CF1		0x21	/* chip config, bit0 is chip enable */

#define PC8736X_GPIO_RANGE	16 /* ioaddr range */
#define PC8736X_GPIO_CT		32 /* minors matching 4 8 bit ports */

#define SIO_UNIT_SEL	0x7	/* unit select reg */
#define SIO_UNIT_ACT	0x30	/* unit enable */
#define SIO_GPIO_UNIT	0x7	/* unit number of GPIO */
#define SIO_VLM_UNIT	0x0D
#define SIO_TMS_UNIT	0x0E

/* config-space addrs to read/write each unit's runtime addr */
#define SIO_BASE_HADDR		0x60
#define SIO_BASE_LADDR		0x61

/* GPIO config-space pin-control addresses */
#define SIO_GPIO_PIN_SELECT	0xF0
#define SIO_GPIO_PIN_CONFIG     0xF1
#define SIO_GPIO_PIN_EVENT      0xF2

/* index/data port pair of the SuperI/O, probed at init time */
static unsigned char superio_cmd = 0;
static unsigned char selected_device = 0xFF;	/* bogus start val */

/* GPIO port runtime access, functionality */
static int port_offset[] = { 0, 4, 8, 10 };	/* non-uniform offsets ! */
/* static int event_capable[] = { 1, 1, 0, 0 };   ports 2,3 are hobbled */

#define PORT_OUT	0
#define PORT_IN		1
#define PORT_EVT_EN	2
#define PORT_EVT_STST	3

static struct platform_device *pdev;  /* use in dev_*() */

/* write @val to SuperI/O config register @addr via the index/data pair */
static inline void superio_outb(int addr, int val)
{
	outb_p(addr, superio_cmd);
	outb_p(val, superio_cmd + 1);
}

/* read SuperI/O config register @addr via the index/data pair */
static inline int superio_inb(int addr)
{
	outb_p(addr, superio_cmd);
	return inb_p(superio_cmd + 1);
}

/*
 * Detect a PC87365/87366 SuperI/O at either of its two possible
 * command-register addresses.  Side effect: leaves superio_cmd set to
 * the address that answered.  Returns that address, or 0 if absent.
 */
static int pc8736x_superio_present(void)
{
	int id;

	/* try the 2 possible values, read a hardware reg to verify */
	superio_cmd = SIO_BASE1;
	id = superio_inb(SIO_SID);
	if (id == SIO_SID_PC87365 || id == SIO_SID_PC87366)
		return superio_cmd;

	superio_cmd = SIO_BASE2;
	id = superio_inb(SIO_SID);
	if (id == SIO_SID_PC87365 || id == SIO_SID_PC87366)
		return superio_cmd;

	return 0;
}

/* route subsequent config accesses to logical device @devldn */
static void device_select(unsigned devldn)
{
	superio_outb(SIO_UNIT_SEL, devldn);
	selected_device = devldn;
}

static void select_pin(unsigned iminor)
{
	/* select GPIO port/pin from device minor number */
	/* port (minor>>3) lands in the high nibble, pin in bits 0-2 */
	device_select(SIO_GPIO_UNIT);
	superio_outb(SIO_GPIO_PIN_SELECT,
		     ((iminor << 1) & 0xF0) | (iminor & 0x7));
}

/*
 * Read-modify-write the per-pin register @func_slct for pin @index:
 * new = (old & mask) | bits.  Returns the PREVIOUS value.  The mutex
 * covers the select-pin + read + write sequence, which must not be
 * interleaved with another config access.
 */
static inline u32 pc8736x_gpio_configure_fn(unsigned index, u32 mask, u32 bits,
					    u32 func_slct)
{
	u32 config, new_config;

	mutex_lock(&pc8736x_gpio_config_lock);

	device_select(SIO_GPIO_UNIT);
	select_pin(index);

	/* read current config value */
	config = superio_inb(func_slct);

	/* set new config */
	new_config = (config & mask) | bits;
	superio_outb(func_slct, new_config);

	mutex_unlock(&pc8736x_gpio_config_lock);

	return config;
}

/* public config hook: operates on the PIN_CONFIG register */
static u32 pc8736x_gpio_configure(unsigned index, u32 mask, u32 bits)
{
	return pc8736x_gpio_configure_fn(index, mask, bits,
					 SIO_GPIO_PIN_CONFIG);
}

/* sample the input level of pin @minor (0 or 1) from the runtime regs */
static int pc8736x_gpio_get(unsigned minor)
{
	int port, bit, val;

	port = minor >> 3;
	bit = minor & 7;
	val = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_IN);
	val >>= bit;
	val &= 1;

	dev_dbg(&pdev->dev, "_gpio_get(%d from %x bit %d) == val %d\n",
		minor, pc8736x_gpio_base + port_offset[port] + PORT_IN, bit,
		val);

	return val;
}

/*
 * Drive pin @minor to @val: read-modify-write the port's output
 * register, then read back both OUT and IN and refresh the shadow
 * copy from the input register.
 */
static void pc8736x_gpio_set(unsigned minor, int val)
{
	int port, bit, curval;

	minor &= 0x1f;
	port = minor >> 3;
	bit = minor & 7;
	curval = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_OUT);

	dev_dbg(&pdev->dev,
		"addr:%x cur:%x bit-pos:%d cur-bit:%x + new:%d -> bit-new:%d\n",
		pc8736x_gpio_base + port_offset[port] + PORT_OUT,
		curval, bit, (curval & ~(1 << bit)), val, (val << bit));

	val = (curval & ~(1 << bit)) | (val << bit);

	dev_dbg(&pdev->dev, "gpio_set(minor:%d port:%d bit:%d)"
		" %2x -> %2x\n", minor, port, bit, curval, val);

	outb_p(val, pc8736x_gpio_base + port_offset[port] + PORT_OUT);

	curval = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_OUT);
	val = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_IN);

	dev_dbg(&pdev->dev, "wrote %x, read: %x\n", curval, val);
	pc8736x_gpio_shadow[port] = val;
}

/* pin state as last captured in the shadow copy (no hardware access) */
static int pc8736x_gpio_current(unsigned minor)
{
	int port, bit;
	minor &= 0x1f;
	port = minor >> 3;
	bit = minor & 7;
	return ((pc8736x_gpio_shadow[port] >> bit) & 0x01);
}

/* toggle pin @index based on its shadowed state */
static void pc8736x_gpio_change(unsigned index)
{
	pc8736x_gpio_set(index, !pc8736x_gpio_current(index));
}

/* ops vector handed to the shared nsc_gpio read/write/dump code */
static struct nsc_gpio_ops pc8736x_gpio_ops = {
	.owner		= THIS_MODULE,
	.gpio_config	= pc8736x_gpio_configure,
	.gpio_dump	= nsc_gpio_dump,
	.gpio_get	= pc8736x_gpio_get,
	.gpio_set	= pc8736x_gpio_set,
	.gpio_change	= pc8736x_gpio_change,
	.gpio_current	= pc8736x_gpio_current
};

/* char-dev open: each minor is one pin; reject minors past the last */
static int pc8736x_gpio_open(struct inode *inode, struct file *file)
{
	unsigned m = iminor(inode);
	file->private_data = &pc8736x_gpio_ops;

	dev_dbg(&pdev->dev, "open %d\n", m);

	if (m >= PC8736X_GPIO_CT)
		return -EINVAL;
	return nonseekable_open(inode, file);
}

static const struct file_operations pc8736x_gpio_fileops = {
	.owner	= THIS_MODULE,
	.open	= pc8736x_gpio_open,
	.write	= nsc_gpio_write,
	.read	= nsc_gpio_read,
	.llseek = no_llseek,
};

static void __init pc8736x_init_shadow(void)
{
	int port;

	/* read the current values driven on the GPIO signals */
	for (port = 0; port < 4; ++port)
		pc8736x_gpio_shadow[port]
		    = inb_p(pc8736x_gpio_base + port_offset[port]
			    + PORT_OUT);
}

static struct cdev pc8736x_gpio_cdev;

/*
 * Module init: create a platform device for dev_*() logging, probe the
 * SuperI/O, verify chip + GPIO unit enables, reserve the runtime I/O
 * range, grab a char-dev major (fixed or dynamic) and register the
 * cdev.  Unwinds via gotos in reverse order on failure.
 */
static int __init pc8736x_gpio_init(void)
{
	int rc;
	dev_t devid;

	pdev = platform_device_alloc(DEVNAME, 0);
	if (!pdev)
		return -ENOMEM;

	rc = platform_device_add(pdev);
	if (rc) {
		rc = -ENODEV;
		goto undo_platform_dev_alloc;
	}
	dev_info(&pdev->dev, "NatSemi pc8736x GPIO Driver Initializing\n");

	if (!pc8736x_superio_present()) {
		rc = -ENODEV;
		dev_err(&pdev->dev, "no device found\n");
		goto undo_platform_dev_add;
	}
	pc8736x_gpio_ops.dev = &pdev->dev;

	/* Verify that chip and it's GPIO unit are both enabled.
	   My BIOS does this, so I take minimum action here
	 */
	rc = superio_inb(SIO_CF1);
	if (!(rc & 0x01)) {
		rc = -ENODEV;
		dev_err(&pdev->dev, "device not enabled\n");
		goto undo_platform_dev_add;
	}
	device_select(SIO_GPIO_UNIT);
	if (!superio_inb(SIO_UNIT_ACT)) {
		rc = -ENODEV;
		dev_err(&pdev->dev, "GPIO unit not enabled\n");
		goto undo_platform_dev_add;
	}

	/* read the GPIO unit base addr that chip responds to */
	pc8736x_gpio_base = (superio_inb(SIO_BASE_HADDR) << 8
			     | superio_inb(SIO_BASE_LADDR));

	if (!request_region(pc8736x_gpio_base, PC8736X_GPIO_RANGE, DEVNAME)) {
		rc = -ENODEV;
		dev_err(&pdev->dev, "GPIO ioport %x busy\n",
			pc8736x_gpio_base);
		goto undo_platform_dev_add;
	}
	dev_info(&pdev->dev, "GPIO ioport %x reserved\n", pc8736x_gpio_base);

	if (major) {
		devid = MKDEV(major, 0);
		rc = register_chrdev_region(devid, PC8736X_GPIO_CT, DEVNAME);
	} else {
		rc = alloc_chrdev_region(&devid, 0, PC8736X_GPIO_CT, DEVNAME);
		major = MAJOR(devid);
	}

	if (rc < 0) {
		dev_err(&pdev->dev, "register-chrdev failed: %d\n", rc);
		goto undo_request_region;
	}
	if (!major) {
		major = rc;
		dev_dbg(&pdev->dev, "got dynamic major %d\n", major);
	}

	pc8736x_init_shadow();

	/* ignore minor errs, and succeed */
	cdev_init(&pc8736x_gpio_cdev, &pc8736x_gpio_fileops);
	cdev_add(&pc8736x_gpio_cdev, devid, PC8736X_GPIO_CT);

	return 0;

undo_request_region:
	release_region(pc8736x_gpio_base, PC8736X_GPIO_RANGE);
undo_platform_dev_add:
	platform_device_del(pdev);
undo_platform_dev_alloc:
	platform_device_put(pdev);

	return rc;
}

/* Module exit: tear down in exact reverse order of init. */
static void __exit pc8736x_gpio_cleanup(void)
{
	dev_dbg(&pdev->dev, "cleanup\n");

	cdev_del(&pc8736x_gpio_cdev);
	unregister_chrdev_region(MKDEV(major,0), PC8736X_GPIO_CT);
	release_region(pc8736x_gpio_base, PC8736X_GPIO_RANGE);

	platform_device_del(pdev);
	platform_device_put(pdev);
}

module_init(pc8736x_gpio_init);
module_exit(pc8736x_gpio_cleanup);
gpl-2.0
martyborya/N3-CM-Unified
drivers/char/snsc_event.c
11662
7425
/*
 * SN Platform system controller communication support
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2006 Silicon Graphics, Inc. All rights reserved.
 */

/*
 * System controller event handler
 *
 * These routines deal with environmental events arriving from the
 * system controllers.
 */

#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <asm/sn/sn_sal.h>
#include <asm/unaligned.h>
#include "snsc.h"

/* subchannel state for the single event channel, set up in _event_init() */
static struct subch_data_s *event_sd;

void scdrv_event(unsigned long);
DECLARE_TASKLET(sn_sysctl_event, scdrv_event, 0);

/*
 * scdrv_event_interrupt
 *
 * Pull incoming environmental events off the physical link to the
 * system controller and put them in a temporary holding area in SAL.
 * Schedule scdrv_event() to move them along to their ultimate
 * destination.
 */
static irqreturn_t
scdrv_event_interrupt(int irq, void *subch_data)
{
	struct subch_data_s *sd = subch_data;
	unsigned long flags;
	int status;

	spin_lock_irqsave(&sd->sd_rlock, flags);
	status = ia64_sn_irtr_intr(sd->sd_nasid, sd->sd_subch);

	/* only schedule the tasklet when SAL reports data was received */
	if ((status > 0) && (status & SAL_IROUTER_INTR_RECV)) {
		tasklet_schedule(&sn_sysctl_event);
	}
	spin_unlock_irqrestore(&sd->sd_rlock, flags);
	return IRQ_HANDLED;
}

/*
 * scdrv_parse_event
 *
 * Break an event (as read from SAL) into useful pieces so we can decide
 * what to do with it.
 *
 * Packet layout (all integers big-endian, read unaligned):
 *   be32 source, be32 code, u8 argc (must be 2),
 *   u8 IR_ARG_INT + be32 esp_code,
 *   u8 IR_ARG_ASCII + CR/LF + description string.
 * Returns 0 on success, -1 on any format mismatch.
 */
static int
scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc)
{
	char *desc_end;

	/* record event source address */
	*src = get_unaligned_be32(event);
	event += 4; 			/* move on to event code */

	/* record the system controller's event code */
	*code = get_unaligned_be32(event);
	event += 4;			/* move on to event arguments */

	/* how many arguments are in the packet? */
	if (*event++ != 2) {
		/* if not 2, give up */
		return -1;
	}

	/* parse out the ESP code */
	if (*event++ != IR_ARG_INT) {
		/* not an integer argument, so give up */
		return -1;
	}
	*esp_code = get_unaligned_be32(event);
	event += 4;

	/* parse out the event description */
	if (*event++ != IR_ARG_ASCII) {
		/* not an ASCII string, so give up */
		return -1;
	}
	event[CHUNKSIZE-1] = '\0';	/* ensure this string ends! */
	event += 2; 			/* skip leading CR/LF */
	desc_end = desc + sprintf(desc, "%s", event);

	/* strip trailing CR/LF (if any) */
	for (desc_end--;
	     (desc_end != desc) && ((*desc_end == 0xd) || (*desc_end == 0xa));
	     desc_end--) {
		*desc_end = '\0';
	}

	return 0;
}

/*
 * scdrv_event_severity
 *
 * Figure out how urgent a message we should write to the console/syslog
 * via printk.  Maps the event's class/severity bits to a printk level
 * string; defaults to KERN_NOTICE for unrecognized combinations.
 */
static char *
scdrv_event_severity(int code)
{
	int ev_class = (code & EV_CLASS_MASK);
	int ev_severity = (code & EV_SEVERITY_MASK);
	char *pk_severity = KERN_NOTICE;

	switch (ev_class) {
	case EV_CLASS_POWER:
		switch (ev_severity) {
		case EV_SEVERITY_POWER_LOW_WARNING:
		case EV_SEVERITY_POWER_HIGH_WARNING:
			pk_severity = KERN_WARNING;
			break;
		case EV_SEVERITY_POWER_HIGH_FAULT:
		case EV_SEVERITY_POWER_LOW_FAULT:
			pk_severity = KERN_ALERT;
			break;
		}
		break;
	case EV_CLASS_FAN:
		switch (ev_severity) {
		case EV_SEVERITY_FAN_WARNING:
			pk_severity = KERN_WARNING;
			break;
		case EV_SEVERITY_FAN_FAULT:
			pk_severity = KERN_CRIT;
			break;
		}
		break;
	case EV_CLASS_TEMP:
		switch (ev_severity) {
		case EV_SEVERITY_TEMP_ADVISORY:
			pk_severity = KERN_WARNING;
			break;
		case EV_SEVERITY_TEMP_CRITICAL:
			pk_severity = KERN_CRIT;
			break;
		case EV_SEVERITY_TEMP_FAULT:
			pk_severity = KERN_ALERT;
			break;
		}
		break;
	case EV_CLASS_ENV:
		pk_severity = KERN_ALERT;
		break;
	case EV_CLASS_TEST_FAULT:
		pk_severity = KERN_ALERT;
		break;
	case EV_CLASS_TEST_WARNING:
		pk_severity = KERN_WARNING;
		break;
	case EV_CLASS_PWRD_NOTIFY:
		pk_severity = KERN_ALERT;
		break;
	}

	return pk_severity;
}

/*
 * scdrv_dispatch_event
 *
 * Do the right thing with an incoming event.  That's often nothing
 * more than printing it to the system log.  For power-down notifications
 * we start a graceful shutdown.
 */
static void
scdrv_dispatch_event(char *event, int len)
{
	/* latch so a second power-down event doesn't re-signal init */
	static int snsc_shutting_down = 0;
	int code, esp_code, src, class;
	char desc[CHUNKSIZE];
	char *severity;

	if (scdrv_parse_event(event, &src, &code, &esp_code, desc) < 0) {
		/* ignore uninterpretible event */
		return;
	}

	/* how urgent is the message? */
	severity = scdrv_event_severity(code);

	class = (code & EV_CLASS_MASK);

	if (class == EV_CLASS_PWRD_NOTIFY || code == ENV_PWRDN_PEND) {
		if (snsc_shutting_down)
			return;

		snsc_shutting_down = 1;

		/* give a message for each type of event */
		if (class == EV_CLASS_PWRD_NOTIFY)
			printk(KERN_NOTICE "Power off indication received."
			       " Sending SIGPWR to init...\n");
		else if (code == ENV_PWRDN_PEND)
			printk(KERN_CRIT "WARNING: Shutting down the system"
			       " due to a critical environmental condition."
			       " Sending SIGPWR to init...\n");

		/* give a SIGPWR signal to init proc */
		kill_cad_pid(SIGPWR, 0);
	} else {
		/* print to system log */
		printk("%s|$(0x%x)%s\n", severity, esp_code, desc);
	}
}

/*
 * scdrv_event
 *
 * Called as a tasklet when an event arrives from the L1.  Read the event
 * from where it's temporarily stored in SAL and call scdrv_dispatch_event()
 * to send it on its way.  Keep trying to read events until SAL indicates
 * that there are no more immediately available.
 */
void
scdrv_event(unsigned long dummy)
{
	int status;
	int len;
	unsigned long flags;
	struct subch_data_s *sd = event_sd;

	/* anything to read? */
	/* the lock is dropped around dispatch since printk/signal work
	 * must not run under sd_rlock */
	len = CHUNKSIZE;
	spin_lock_irqsave(&sd->sd_rlock, flags);
	status = ia64_sn_irtr_recv(sd->sd_nasid, sd->sd_subch,
				   sd->sd_rb, &len);

	while (!(status < 0)) {
		spin_unlock_irqrestore(&sd->sd_rlock, flags);
		scdrv_dispatch_event(sd->sd_rb, len);
		len = CHUNKSIZE;
		spin_lock_irqsave(&sd->sd_rlock, flags);
		status = ia64_sn_irtr_recv(sd->sd_nasid, sd->sd_subch,
					   sd->sd_rb, &len);
	}
	spin_unlock_irqrestore(&sd->sd_rlock, flags);
}

/*
 * scdrv_event_init
 *
 * Sets up a system controller subchannel to begin receiving event
 * messages. This is sort of a specialized version of scdrv_open()
 * in drivers/char/sn_sysctl.c.
 */
void
scdrv_event_init(struct sysctl_data_s *scd)
{
	int rv;

	event_sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL);
	if (event_sd == NULL) {
		printk(KERN_WARNING "%s: couldn't allocate subchannel info"
		       " for event monitoring\n", __func__);
		return;
	}

	/* initialize subch_data_s fields */
	event_sd->sd_nasid = scd->scd_nasid;
	spin_lock_init(&event_sd->sd_rlock);

	/* ask the system controllers to send events to this node */
	event_sd->sd_subch = ia64_sn_sysctl_event_init(scd->scd_nasid);

	if (event_sd->sd_subch < 0) {
		kfree(event_sd);
		printk(KERN_WARNING "%s: couldn't open event subchannel\n",
		       __func__);
		return;
	}

	/* hook event subchannel up to the system controller interrupt */
	rv = request_irq(SGI_UART_VECTOR, scdrv_event_interrupt,
			 IRQF_SHARED | IRQF_DISABLED,
			 "system controller events", event_sd);
	if (rv) {
		printk(KERN_WARNING "%s: irq request failed (%d)\n",
		       __func__, rv);
		ia64_sn_irtr_close(event_sd->sd_nasid, event_sd->sd_subch);
		kfree(event_sd);
		return;
	}
}
gpl-2.0
CaptainThrowback/kernel_htc_m8gpe
arch/frv/mm/dma-alloc.c
12942
4594
/* dma-alloc.c: consistent DMA memory allocation * * Derived from arch/ppc/mm/cachemap.c * * PowerPC version derived from arch/arm/mm/consistent.c * Copyright (C) 2001 Dan Malek (dmalek@jlc.net) * * linux/arch/arm/mm/consistent.c * * Copyright (C) 2000 Russell King * * Consistent memory allocators. Used for DMA devices that want to * share uncached memory with the processor core. The function return * is the virtual address and 'dma_handle' is the physical address. * Mostly stolen from the ARM port, with some changes for PowerPC. * -- Dan * Modified for 36-bit support. -Matt * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/stddef.h> #include <linux/vmalloc.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/hardirq.h> #include <linux/gfp.h> #include <asm/pgalloc.h> #include <asm/io.h> #include <asm/mmu_context.h> #include <asm/pgtable.h> #include <asm/mmu.h> #include <asm/uaccess.h> #include <asm/smp.h> static int map_page(unsigned long va, unsigned long pa, pgprot_t prot) { pgd_t *pge; pud_t *pue; pmd_t *pme; pte_t *pte; int err = -ENOMEM; /* Use upper 10 bits of VA to index the first level map */ pge = pgd_offset_k(va); pue = pud_offset(pge, va); pme = pmd_offset(pue, va); /* Use middle 10 bits of VA to index the second-level map */ pte = pte_alloc_kernel(pme, va); if (pte != 0) { err = 0; set_pte(pte, mk_pte_phys(pa & PAGE_MASK, prot)); } return err; } /* * This function will allocate the requested contiguous pages and * map them into the kernel's vmalloc() space. 
This is done so we * get unique mapping for these pages, outside of the kernel's 1:1 * virtual:physical mapping. This is necessary so we can cover large * portions of the kernel with single large page TLB entries, and * still get unique uncached pages for consistent DMA. */ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle) { struct vm_struct *area; unsigned long page, va, pa; void *ret; int order, err, i; if (in_interrupt()) BUG(); /* only allocate page size areas */ size = PAGE_ALIGN(size); order = get_order(size); page = __get_free_pages(gfp, order); if (!page) { BUG(); return NULL; } /* allocate some common virtual space to map the new pages */ area = get_vm_area(size, VM_ALLOC); if (area == 0) { free_pages(page, order); return NULL; } va = VMALLOC_VMADDR(area->addr); ret = (void *) va; /* this gives us the real physical address of the first page */ *dma_handle = pa = virt_to_bus((void *) page); /* set refcount=1 on all pages in an order>0 allocation so that vfree() will actually free * all pages that were allocated. */ if (order > 0) { struct page *rpage = virt_to_page(page); split_page(rpage, order); } err = 0; for (i = 0; i < size && err == 0; i += PAGE_SIZE) err = map_page(va + i, pa + i, PAGE_KERNEL_NOCACHE); if (err) { vfree((void *) va); return NULL; } /* we need to ensure that there are no cachelines in use, or worse dirty in this area * - can't do until after virtual address mappings are created */ frv_cache_invalidate(va, va + size); return ret; } /* * free page(s) as defined by the above mapping. */ void consistent_free(void *vaddr) { if (in_interrupt()) BUG(); vfree(vaddr); } /* * make an area consistent. 
*/ void consistent_sync(void *vaddr, size_t size, int direction) { unsigned long start = (unsigned long) vaddr; unsigned long end = start + size; switch (direction) { case PCI_DMA_NONE: BUG(); case PCI_DMA_FROMDEVICE: /* invalidate only */ frv_cache_invalidate(start, end); break; case PCI_DMA_TODEVICE: /* writeback only */ frv_dcache_writeback(start, end); break; case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */ frv_dcache_writeback(start, end); break; } } /* * consistent_sync_page make a page are consistent. identical * to consistent_sync, but takes a struct page instead of a virtual address */ void consistent_sync_page(struct page *page, unsigned long offset, size_t size, int direction) { void *start; start = page_address(page) + offset; consistent_sync(start, size, direction); }
gpl-2.0
Alphix/linuxtv
fs/xfs/xfs_qm.c
143
52440
/* * Copyright (c) 2000-2005 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_shared.h" #include "xfs_format.h" #include "xfs_log_format.h" #include "xfs_trans_resv.h" #include "xfs_bit.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_mount.h" #include "xfs_inode.h" #include "xfs_ialloc.h" #include "xfs_itable.h" #include "xfs_quota.h" #include "xfs_error.h" #include "xfs_bmap.h" #include "xfs_bmap_btree.h" #include "xfs_trans.h" #include "xfs_trans_space.h" #include "xfs_qm.h" #include "xfs_trace.h" #include "xfs_icache.h" #include "xfs_cksum.h" #include "xfs_dinode.h" /* * The global quota manager. There is only one of these for the entire * system, _not_ one per file system. XQM keeps track of the overall * quota functionality, including maintaining the freelist and hash * tables of dquots. */ STATIC int xfs_qm_init_quotainos(xfs_mount_t *); STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp); /* * We use the batch lookup interface to iterate over the dquots as it * currently is the only interface into the radix tree code that allows * fuzzy lookups instead of exact matches. Holding the lock over multiple * operations is fine as all callers are used either during mount/umount * or quotaoff. 
*/ #define XFS_DQ_LOOKUP_BATCH 32 STATIC int xfs_qm_dquot_walk( struct xfs_mount *mp, int type, int (*execute)(struct xfs_dquot *dqp, void *data), void *data) { struct xfs_quotainfo *qi = mp->m_quotainfo; struct radix_tree_root *tree = xfs_dquot_tree(qi, type); uint32_t next_index; int last_error = 0; int skipped; int nr_found; restart: skipped = 0; next_index = 0; nr_found = 0; while (1) { struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH]; int error = 0; int i; mutex_lock(&qi->qi_tree_lock); nr_found = radix_tree_gang_lookup(tree, (void **)batch, next_index, XFS_DQ_LOOKUP_BATCH); if (!nr_found) { mutex_unlock(&qi->qi_tree_lock); break; } for (i = 0; i < nr_found; i++) { struct xfs_dquot *dqp = batch[i]; next_index = be32_to_cpu(dqp->q_core.d_id) + 1; error = execute(batch[i], data); if (error == EAGAIN) { skipped++; continue; } if (error && last_error != EFSCORRUPTED) last_error = error; } mutex_unlock(&qi->qi_tree_lock); /* bail out if the filesystem is corrupted. */ if (last_error == EFSCORRUPTED) { skipped = 0; break; } } if (skipped) { delay(1); goto restart; } return last_error; } /* * Purge a dquot from all tracking data structures and free it. */ STATIC int xfs_qm_dqpurge( struct xfs_dquot *dqp, void *data) { struct xfs_mount *mp = dqp->q_mount; struct xfs_quotainfo *qi = mp->m_quotainfo; xfs_dqlock(dqp); if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) { xfs_dqunlock(dqp); return EAGAIN; } dqp->dq_flags |= XFS_DQ_FREEING; xfs_dqflock(dqp); /* * If we are turning this type of quotas off, we don't care * about the dirty metadata sitting in this dquot. OTOH, if * we're unmounting, we do care, so we flush it and wait. */ if (XFS_DQ_IS_DIRTY(dqp)) { struct xfs_buf *bp = NULL; int error; /* * We don't care about getting disk errors here. We need * to purge this dquot anyway, so we go ahead regardless. 
*/ error = xfs_qm_dqflush(dqp, &bp); if (error) { xfs_warn(mp, "%s: dquot %p flush failed", __func__, dqp); } else { error = xfs_bwrite(bp); xfs_buf_relse(bp); } xfs_dqflock(dqp); } ASSERT(atomic_read(&dqp->q_pincount) == 0); ASSERT(XFS_FORCED_SHUTDOWN(mp) || !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL)); xfs_dqfunlock(dqp); xfs_dqunlock(dqp); radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags), be32_to_cpu(dqp->q_core.d_id)); qi->qi_dquots--; /* * We move dquots to the freelist as soon as their reference count * hits zero, so it really should be on the freelist here. */ ASSERT(!list_empty(&dqp->q_lru)); list_lru_del(&qi->qi_lru, &dqp->q_lru); XFS_STATS_DEC(xs_qm_dquot_unused); xfs_qm_dqdestroy(dqp); return 0; } /* * Release the group or project dquot pointers the user dquots maybe carrying * around as a hint, and proceed to purge the user dquot cache if requested. */ STATIC int xfs_qm_dqpurge_hints( struct xfs_dquot *dqp, void *data) { struct xfs_dquot *gdqp = NULL; struct xfs_dquot *pdqp = NULL; uint flags = *((uint *)data); xfs_dqlock(dqp); if (dqp->dq_flags & XFS_DQ_FREEING) { xfs_dqunlock(dqp); return EAGAIN; } /* If this quota has a hint attached, prepare for releasing it now */ gdqp = dqp->q_gdquot; if (gdqp) dqp->q_gdquot = NULL; pdqp = dqp->q_pdquot; if (pdqp) dqp->q_pdquot = NULL; xfs_dqunlock(dqp); if (gdqp) xfs_qm_dqrele(gdqp); if (pdqp) xfs_qm_dqrele(pdqp); if (flags & XFS_QMOPT_UQUOTA) return xfs_qm_dqpurge(dqp, NULL); return 0; } /* * Purge the dquot cache. */ void xfs_qm_dqpurge_all( struct xfs_mount *mp, uint flags) { /* * We have to release group/project dquot hint(s) from the user dquot * at first if they are there, otherwise we would run into an infinite * loop while walking through radix tree to purge other type of dquots * since their refcount is not zero if the user dquot refers to them * as hint. 
* * Call the special xfs_qm_dqpurge_hints() will end up go through the * general xfs_qm_dqpurge() against user dquot cache if requested. */ xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge_hints, &flags); if (flags & XFS_QMOPT_GQUOTA) xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL); if (flags & XFS_QMOPT_PQUOTA) xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL); } /* * Just destroy the quotainfo structure. */ void xfs_qm_unmount( struct xfs_mount *mp) { if (mp->m_quotainfo) { xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL); xfs_qm_destroy_quotainfo(mp); } } /* * This is called from xfs_mountfs to start quotas and initialize all * necessary data structures like quotainfo. This is also responsible for * running a quotacheck as necessary. We are guaranteed that the superblock * is consistently read in at this point. * * If we fail here, the mount will continue with quota turned off. We don't * need to inidicate success or failure at all. */ void xfs_qm_mount_quotas( xfs_mount_t *mp) { int error = 0; uint sbf; /* * If quotas on realtime volumes is not supported, we disable * quotas immediately. */ if (mp->m_sb.sb_rextents) { xfs_notice(mp, "Cannot turn on quotas for realtime filesystem"); mp->m_qflags = 0; goto write_changes; } ASSERT(XFS_IS_QUOTA_RUNNING(mp)); /* * Allocate the quotainfo structure inside the mount struct, and * create quotainode(s), and change/rev superblock if necessary. */ error = xfs_qm_init_quotainfo(mp); if (error) { /* * We must turn off quotas. */ ASSERT(mp->m_quotainfo == NULL); mp->m_qflags = 0; goto write_changes; } /* * If any of the quotas are not consistent, do a quotacheck. */ if (XFS_QM_NEED_QUOTACHECK(mp)) { error = xfs_qm_quotacheck(mp); if (error) { /* Quotacheck failed and disabled quotas. */ return; } } /* * If one type of quotas is off, then it will lose its * quotachecked status, since we won't be doing accounting for * that type anymore. 
*/ if (!XFS_IS_UQUOTA_ON(mp)) mp->m_qflags &= ~XFS_UQUOTA_CHKD; if (!XFS_IS_GQUOTA_ON(mp)) mp->m_qflags &= ~XFS_GQUOTA_CHKD; if (!XFS_IS_PQUOTA_ON(mp)) mp->m_qflags &= ~XFS_PQUOTA_CHKD; write_changes: /* * We actually don't have to acquire the m_sb_lock at all. * This can only be called from mount, and that's single threaded. XXX */ spin_lock(&mp->m_sb_lock); sbf = mp->m_sb.sb_qflags; mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL; spin_unlock(&mp->m_sb_lock); if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) { if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) { /* * We could only have been turning quotas off. * We aren't in very good shape actually because * the incore structures are convinced that quotas are * off, but the on disk superblock doesn't know that ! */ ASSERT(!(XFS_IS_QUOTA_RUNNING(mp))); xfs_alert(mp, "%s: Superblock update failed!", __func__); } } if (error) { xfs_warn(mp, "Failed to initialize disk quotas."); return; } } /* * Called from the vfsops layer. */ void xfs_qm_unmount_quotas( xfs_mount_t *mp) { /* * Release the dquots that root inode, et al might be holding, * before we flush quotas and blow away the quotainfo structure. */ ASSERT(mp->m_rootip); xfs_qm_dqdetach(mp->m_rootip); if (mp->m_rbmip) xfs_qm_dqdetach(mp->m_rbmip); if (mp->m_rsumip) xfs_qm_dqdetach(mp->m_rsumip); /* * Release the quota inodes. 
*/ if (mp->m_quotainfo) { if (mp->m_quotainfo->qi_uquotaip) { IRELE(mp->m_quotainfo->qi_uquotaip); mp->m_quotainfo->qi_uquotaip = NULL; } if (mp->m_quotainfo->qi_gquotaip) { IRELE(mp->m_quotainfo->qi_gquotaip); mp->m_quotainfo->qi_gquotaip = NULL; } if (mp->m_quotainfo->qi_pquotaip) { IRELE(mp->m_quotainfo->qi_pquotaip); mp->m_quotainfo->qi_pquotaip = NULL; } } } STATIC int xfs_qm_dqattach_one( xfs_inode_t *ip, xfs_dqid_t id, uint type, uint doalloc, xfs_dquot_t *udqhint, /* hint */ xfs_dquot_t **IO_idqpp) { xfs_dquot_t *dqp; int error; ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); error = 0; /* * See if we already have it in the inode itself. IO_idqpp is * &i_udquot or &i_gdquot. This made the code look weird, but * made the logic a lot simpler. */ dqp = *IO_idqpp; if (dqp) { trace_xfs_dqattach_found(dqp); return 0; } /* * udqhint is the i_udquot field in inode, and is non-NULL only * when the type arg is group/project. Its purpose is to save a * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside * the user dquot. */ if (udqhint) { ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ); xfs_dqlock(udqhint); /* * No need to take dqlock to look at the id. * * The ID can't change until it gets reclaimed, and it won't * be reclaimed as long as we have a ref from inode and we * hold the ilock. */ if (type == XFS_DQ_GROUP) dqp = udqhint->q_gdquot; else dqp = udqhint->q_pdquot; if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) { ASSERT(*IO_idqpp == NULL); *IO_idqpp = xfs_qm_dqhold(dqp); xfs_dqunlock(udqhint); return 0; } /* * We can't hold a dquot lock when we call the dqget code. * We'll deadlock in no time, because of (not conforming to) * lock ordering - the inodelock comes before any dquot lock, * and we may drop and reacquire the ilock in xfs_qm_dqget(). */ xfs_dqunlock(udqhint); } /* * Find the dquot from somewhere. This bumps the * reference count of dquot and returns it locked. 
 * This can return ENOENT if dquot didn't exist on
 * disk and we didn't ask it to allocate;
 * ESRCH if quotas got turned off suddenly.
 */
	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
			     doalloc | XFS_QMOPT_DOWARN, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}

/*
 * Given a udquot and group/project type, attach the group/project
 * dquot pointer to the udquot as a hint for future lookups.
 *
 * Any previously cached hint of the same type is released first (unless
 * it is already the dquot we want).  The udquot's lock serializes updates
 * of the hint pointers.
 */
STATIC void
xfs_qm_dqattach_hint(
	struct xfs_inode	*ip,
	int			type)
{
	struct xfs_dquot **dqhintp;
	struct xfs_dquot *dqp;
	struct xfs_dquot *udq = ip->i_udquot;

	ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);

	xfs_dqlock(udq);

	if (type == XFS_DQ_GROUP) {
		dqp = ip->i_gdquot;
		dqhintp = &udq->q_gdquot;
	} else {
		dqp = ip->i_pdquot;
		dqhintp = &udq->q_pdquot;
	}

	if (*dqhintp) {
		struct xfs_dquot *tmp;

		if (*dqhintp == dqp)
			goto done;

		tmp = *dqhintp;
		*dqhintp = NULL;
		xfs_qm_dqrele(tmp);
	}

	*dqhintp = xfs_qm_dqhold(dqp);
done:
	xfs_dqunlock(udq);
}

/*
 * Return true if the inode still needs dquots attached: quotas must be
 * running and on, the inode must not already have all required dquots,
 * and quota inodes themselves are never accounted.
 */
static bool
xfs_qm_need_dqattach(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return false;
	if (!XFS_IS_QUOTA_ON(mp))
		return false;
	if (!XFS_NOT_DQATTACHED(mp, ip))
		return false;
	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return false;
	return true;
}

/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
*/ int xfs_qm_dqattach_locked( xfs_inode_t *ip, uint flags) { xfs_mount_t *mp = ip->i_mount; uint nquotas = 0; int error = 0; if (!xfs_qm_need_dqattach(ip)) return 0; ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); if (XFS_IS_UQUOTA_ON(mp)) { error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER, flags & XFS_QMOPT_DQALLOC, NULL, &ip->i_udquot); if (error) goto done; nquotas++; } ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); if (XFS_IS_GQUOTA_ON(mp)) { error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP, flags & XFS_QMOPT_DQALLOC, ip->i_udquot, &ip->i_gdquot); /* * Don't worry about the udquot that we may have * attached above. It'll get detached, if not already. */ if (error) goto done; nquotas++; } ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); if (XFS_IS_PQUOTA_ON(mp)) { error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ, flags & XFS_QMOPT_DQALLOC, ip->i_udquot, &ip->i_pdquot); /* * Don't worry about the udquot that we may have * attached above. It'll get detached, if not already. */ if (error) goto done; nquotas++; } /* * Attach this group/project quota to the user quota as a hint. * This WON'T, in general, result in a thrash. */ if (nquotas > 1 && ip->i_udquot) { ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); ASSERT(ip->i_gdquot || !XFS_IS_GQUOTA_ON(mp)); ASSERT(ip->i_pdquot || !XFS_IS_PQUOTA_ON(mp)); /* * We do not have i_udquot locked at this point, but this check * is OK since we don't depend on the i_gdquot to be accurate * 100% all the time. It is just a hint, and this will * succeed in general. 
 */
		if (ip->i_udquot->q_gdquot != ip->i_gdquot)
			xfs_qm_dqattach_hint(ip, XFS_DQ_GROUP);

		if (ip->i_udquot->q_pdquot != ip->i_pdquot)
			xfs_qm_dqattach_hint(ip, XFS_DQ_PROJ);
	}

 done:
#ifdef DEBUG
	if (!error) {
		if (XFS_IS_UQUOTA_ON(mp))
			ASSERT(ip->i_udquot);
		if (XFS_IS_GQUOTA_ON(mp))
			ASSERT(ip->i_gdquot);
		if (XFS_IS_PQUOTA_ON(mp))
			ASSERT(ip->i_pdquot);
	}
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
#endif
	return error;
}

/*
 * Attach the required dquots to the inode.  Takes the ilock exclusively
 * around xfs_qm_dqattach_locked() for callers that don't already hold it.
 */
int
xfs_qm_dqattach(
	struct xfs_inode	*ip,
	uint			flags)
{
	int			error;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach_locked(ip, flags);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}

/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this's called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
		return;

	trace_xfs_dquot_dqdetach(ip);

	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if (ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
}

/* Scratch lists handed to the dquot LRU isolate callback by the shrinker. */
struct xfs_qm_isolate {
	struct list_head	buffers;	/* delwri buffers to submit */
	struct list_head	dispose;	/* dquots to be freed */
};

static enum lru_status
xfs_qm_dquot_isolate(
	struct list_head	*item,
	spinlock_t		*lru_lock,
	void			*arg)
{
	struct xfs_dquot	*dqp = container_of(item,
						struct xfs_dquot, q_lru);
	struct xfs_qm_isolate	*isol = arg;

	if (!xfs_dqlock_nowait(dqp))
		goto out_miss_busy;

	/*
	 * This dquot has acquired a reference in the meantime remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);
		XFS_STATS_INC(xs_qm_dqwants);

		trace_xfs_dqreclaim_want(dqp);
		list_del_init(&dqp->q_lru);
		XFS_STATS_DEC(xs_qm_dquot_unused);
		return LRU_REMOVED;
	}

	/*
	 * If the dquot is dirty, flush it. If it's already being flushed, just
	 * skip it so there is time for the IO to complete before we try to
	 * reclaim it again on the next LRU pass.
*/ if (!xfs_dqflock_nowait(dqp)) { xfs_dqunlock(dqp); goto out_miss_busy; } if (XFS_DQ_IS_DIRTY(dqp)) { struct xfs_buf *bp = NULL; int error; trace_xfs_dqreclaim_dirty(dqp); /* we have to drop the LRU lock to flush the dquot */ spin_unlock(lru_lock); error = xfs_qm_dqflush(dqp, &bp); if (error) { xfs_warn(dqp->q_mount, "%s: dquot %p flush failed", __func__, dqp); goto out_unlock_dirty; } xfs_buf_delwri_queue(bp, &isol->buffers); xfs_buf_relse(bp); goto out_unlock_dirty; } xfs_dqfunlock(dqp); /* * Prevent lookups now that we are past the point of no return. */ dqp->dq_flags |= XFS_DQ_FREEING; xfs_dqunlock(dqp); ASSERT(dqp->q_nrefs == 0); list_move_tail(&dqp->q_lru, &isol->dispose); XFS_STATS_DEC(xs_qm_dquot_unused); trace_xfs_dqreclaim_done(dqp); XFS_STATS_INC(xs_qm_dqreclaims); return LRU_REMOVED; out_miss_busy: trace_xfs_dqreclaim_busy(dqp); XFS_STATS_INC(xs_qm_dqreclaim_misses); return LRU_SKIP; out_unlock_dirty: trace_xfs_dqreclaim_busy(dqp); XFS_STATS_INC(xs_qm_dqreclaim_misses); xfs_dqunlock(dqp); spin_lock(lru_lock); return LRU_RETRY; } static unsigned long xfs_qm_shrink_scan( struct shrinker *shrink, struct shrink_control *sc) { struct xfs_quotainfo *qi = container_of(shrink, struct xfs_quotainfo, qi_shrinker); struct xfs_qm_isolate isol; unsigned long freed; int error; unsigned long nr_to_scan = sc->nr_to_scan; if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT)) return 0; INIT_LIST_HEAD(&isol.buffers); INIT_LIST_HEAD(&isol.dispose); freed = list_lru_walk_node(&qi->qi_lru, sc->nid, xfs_qm_dquot_isolate, &isol, &nr_to_scan); error = xfs_buf_delwri_submit(&isol.buffers); if (error) xfs_warn(NULL, "%s: dquot reclaim failed", __func__); while (!list_empty(&isol.dispose)) { struct xfs_dquot *dqp; dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru); list_del_init(&dqp->q_lru); xfs_qm_dqfree_one(dqp); } return freed; } static unsigned long xfs_qm_shrink_count( struct shrinker *shrink, struct shrink_control *sc) { struct xfs_quotainfo *qi 
= container_of(shrink, struct xfs_quotainfo, qi_shrinker); return list_lru_count_node(&qi->qi_lru, sc->nid); } /* * This initializes all the quota information that's kept in the * mount structure */ STATIC int xfs_qm_init_quotainfo( xfs_mount_t *mp) { xfs_quotainfo_t *qinf; int error; xfs_dquot_t *dqp; ASSERT(XFS_IS_QUOTA_RUNNING(mp)); qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP); if ((error = list_lru_init(&qinf->qi_lru))) { kmem_free(qinf); mp->m_quotainfo = NULL; return error; } /* * See if quotainodes are setup, and if not, allocate them, * and change the superblock accordingly. */ if ((error = xfs_qm_init_quotainos(mp))) { list_lru_destroy(&qinf->qi_lru); kmem_free(qinf); mp->m_quotainfo = NULL; return error; } INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS); INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS); INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS); mutex_init(&qinf->qi_tree_lock); /* mutex used to serialize quotaoffs */ mutex_init(&qinf->qi_quotaofflock); /* Precalc some constants */ qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB); qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(mp, qinf->qi_dqchunklen); mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD); /* * We try to get the limits from the superuser's limits fields. * This is quite hacky, but it is standard quota practice. * * We look at the USR dquot with id == 0 first, but if user quotas * are not enabled we goto the GRP dquot with id == 0. * We don't really care to keep separate default limits for user * and group quotas, at least not at this point. * * Since we may not have done a quotacheck by this point, just read * the dquot without attaching it to any hashtables or lists. */ error = xfs_qm_dqread(mp, 0, XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER : (XFS_IS_GQUOTA_RUNNING(mp) ? 
XFS_DQ_GROUP : XFS_DQ_PROJ), XFS_QMOPT_DOWARN, &dqp); if (!error) { xfs_disk_dquot_t *ddqp = &dqp->q_core; /* * The warnings and timers set the grace period given to * a user or group before he or she can not perform any * more writing. If it is zero, a default is used. */ qinf->qi_btimelimit = ddqp->d_btimer ? be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT; qinf->qi_itimelimit = ddqp->d_itimer ? be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT; qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ? be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT; qinf->qi_bwarnlimit = ddqp->d_bwarns ? be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT; qinf->qi_iwarnlimit = ddqp->d_iwarns ? be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT; qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ? be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT; qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit); qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit); qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit); qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit); qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit); qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit); xfs_qm_dqdestroy(dqp); } else { qinf->qi_btimelimit = XFS_QM_BTIMELIMIT; qinf->qi_itimelimit = XFS_QM_ITIMELIMIT; qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT; qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT; qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT; qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT; } qinf->qi_shrinker.count_objects = xfs_qm_shrink_count; qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan; qinf->qi_shrinker.seeks = DEFAULT_SEEKS; qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE; register_shrinker(&qinf->qi_shrinker); return 0; } /* * Gets called when unmounting a filesystem or when all quotas get * turned off. * This purges the quota inodes, destroys locks and frees itself. 
 */
void
xfs_qm_destroy_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);

	/* Stop memory reclaim from walking our LRU before we tear it down. */
	unregister_shrinker(&qi->qi_shrinker);
	list_lru_destroy(&qi->qi_lru);

	if (qi->qi_uquotaip) {
		IRELE(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		IRELE(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	if (qi->qi_pquotaip) {
		IRELE(qi->qi_pquotaip);
		qi->qi_pquotaip = NULL;
	}
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi);
	mp->m_quotainfo = NULL;
}

/*
 * Create an inode and return with a reference already taken, but unlocked.
 * This is how we create quota inodes.
 */
STATIC int
xfs_qm_qino_alloc(
	xfs_mount_t	*mp,
	xfs_inode_t	**ip,
	__int64_t	sbfields,
	uint		flags)
{
	xfs_trans_t	*tp;
	int		error;
	int		committed;

	*ip = NULL;
	/*
	 * With superblock that doesn't have separate pquotino, we
	 * share an inode between gquota and pquota. If the on-disk
	 * superblock has GQUOTA and the filesystem is now mounted
	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
	 * vice-versa.
*/ if (!xfs_sb_version_has_pquotino(&mp->m_sb) && (flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) { xfs_ino_t ino = NULLFSINO; if ((flags & XFS_QMOPT_PQUOTA) && (mp->m_sb.sb_gquotino != NULLFSINO)) { ino = mp->m_sb.sb_gquotino; ASSERT(mp->m_sb.sb_pquotino == NULLFSINO); } else if ((flags & XFS_QMOPT_GQUOTA) && (mp->m_sb.sb_pquotino != NULLFSINO)) { ino = mp->m_sb.sb_pquotino; ASSERT(mp->m_sb.sb_gquotino == NULLFSINO); } if (ino != NULLFSINO) { error = xfs_iget(mp, NULL, ino, 0, 0, ip); if (error) return error; mp->m_sb.sb_gquotino = NULLFSINO; mp->m_sb.sb_pquotino = NULLFSINO; } } tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE); error = xfs_trans_reserve(tp, &M_RES(mp)->tr_create, XFS_QM_QINOCREATE_SPACE_RES(mp), 0); if (error) { xfs_trans_cancel(tp, 0); return error; } if (!*ip) { error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, &committed); if (error) { xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); return error; } } /* * Make the changes in the superblock, and log those too. * sbfields arg may contain fields other than *QUOTINO; * VERSIONNUM for example. 
*/ spin_lock(&mp->m_sb_lock); if (flags & XFS_QMOPT_SBVERSION) { ASSERT(!xfs_sb_version_hasquota(&mp->m_sb)); ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | XFS_SB_GQUOTINO | XFS_SB_PQUOTINO | XFS_SB_QFLAGS)) == (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | XFS_SB_GQUOTINO | XFS_SB_PQUOTINO | XFS_SB_QFLAGS)); xfs_sb_version_addquota(&mp->m_sb); mp->m_sb.sb_uquotino = NULLFSINO; mp->m_sb.sb_gquotino = NULLFSINO; mp->m_sb.sb_pquotino = NULLFSINO; /* qflags will get updated fully _after_ quotacheck */ mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT; } if (flags & XFS_QMOPT_UQUOTA) mp->m_sb.sb_uquotino = (*ip)->i_ino; else if (flags & XFS_QMOPT_GQUOTA) mp->m_sb.sb_gquotino = (*ip)->i_ino; else mp->m_sb.sb_pquotino = (*ip)->i_ino; spin_unlock(&mp->m_sb_lock); xfs_mod_sb(tp, sbfields); if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) { xfs_alert(mp, "%s failed (error %d)!", __func__, error); return error; } return 0; } STATIC void xfs_qm_reset_dqcounts( xfs_mount_t *mp, xfs_buf_t *bp, xfs_dqid_t id, uint type) { struct xfs_dqblk *dqb; int j; trace_xfs_reset_dqcounts(bp, _RET_IP_); /* * Reset all counters and timers. They'll be * started afresh by xfs_qm_quotacheck. */ #ifdef DEBUG j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB); do_div(j, sizeof(xfs_dqblk_t)); ASSERT(mp->m_quotainfo->qi_dqperchunk == j); #endif dqb = bp->b_addr; for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) { struct xfs_disk_dquot *ddq; ddq = (struct xfs_disk_dquot *)&dqb[j]; /* * Do a sanity check, and if needed, repair the dqblk. Don't * output any warnings because it's perfectly possible to * find uninitialised dquot blks. See comment in xfs_dqcheck. 
*/ xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR, "xfs_quotacheck"); ddq->d_bcount = 0; ddq->d_icount = 0; ddq->d_rtbcount = 0; ddq->d_btimer = 0; ddq->d_itimer = 0; ddq->d_rtbtimer = 0; ddq->d_bwarns = 0; ddq->d_iwarns = 0; ddq->d_rtbwarns = 0; if (xfs_sb_version_hascrc(&mp->m_sb)) { xfs_update_cksum((char *)&dqb[j], sizeof(struct xfs_dqblk), XFS_DQUOT_CRC_OFF); } } } STATIC int xfs_qm_dqiter_bufs( struct xfs_mount *mp, xfs_dqid_t firstid, xfs_fsblock_t bno, xfs_filblks_t blkcnt, uint flags, struct list_head *buffer_list) { struct xfs_buf *bp; int error; int type; ASSERT(blkcnt > 0); type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER : (flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP); error = 0; /* * Blkcnt arg can be a very big number, and might even be * larger than the log itself. So, we have to break it up into * manageable-sized transactions. * Note that we don't start a permanent transaction here; we might * not be able to get a log reservation for the whole thing up front, * and we don't really care to either, because we just discard * everything if we were to crash in the middle of this loop. */ while (blkcnt--) { error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, XFS_FSB_TO_DADDR(mp, bno), mp->m_quotainfo->qi_dqchunklen, 0, &bp, &xfs_dquot_buf_ops); /* * CRC and validation errors will return a EFSCORRUPTED here. If * this occurs, re-read without CRC validation so that we can * repair the damage via xfs_qm_reset_dqcounts(). This process * will leave a trace in the log indicating corruption has * been detected. */ if (error == EFSCORRUPTED) { error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, XFS_FSB_TO_DADDR(mp, bno), mp->m_quotainfo->qi_dqchunklen, 0, &bp, NULL); } if (error) break; xfs_qm_reset_dqcounts(mp, bp, firstid, type); xfs_buf_delwri_queue(bp, buffer_list); xfs_buf_relse(bp); /* goto the next block. 
*/ bno++; firstid += mp->m_quotainfo->qi_dqperchunk; } return error; } /* * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a * caller supplied function for every chunk of dquots that we find. */ STATIC int xfs_qm_dqiterate( struct xfs_mount *mp, struct xfs_inode *qip, uint flags, struct list_head *buffer_list) { struct xfs_bmbt_irec *map; int i, nmaps; /* number of map entries */ int error; /* return value */ xfs_fileoff_t lblkno; xfs_filblks_t maxlblkcnt; xfs_dqid_t firstid; xfs_fsblock_t rablkno; xfs_filblks_t rablkcnt; error = 0; /* * This looks racy, but we can't keep an inode lock across a * trans_reserve. But, this gets called during quotacheck, and that * happens only at mount time which is single threaded. */ if (qip->i_d.di_nblocks == 0) return 0; map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP); lblkno = 0; maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes); do { uint lock_mode; nmaps = XFS_DQITER_MAP_SIZE; /* * We aren't changing the inode itself. Just changing * some of its data. No new blocks are added here, and * the inode is never added to the transaction. */ lock_mode = xfs_ilock_data_map_shared(qip); error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno, map, &nmaps, 0); xfs_iunlock(qip, lock_mode); if (error) break; ASSERT(nmaps <= XFS_DQITER_MAP_SIZE); for (i = 0; i < nmaps; i++) { ASSERT(map[i].br_startblock != DELAYSTARTBLOCK); ASSERT(map[i].br_blockcount); lblkno += map[i].br_blockcount; if (map[i].br_startblock == HOLESTARTBLOCK) continue; firstid = (xfs_dqid_t) map[i].br_startoff * mp->m_quotainfo->qi_dqperchunk; /* * Do a read-ahead on the next extent. 
*/ if ((i+1 < nmaps) && (map[i+1].br_startblock != HOLESTARTBLOCK)) { rablkcnt = map[i+1].br_blockcount; rablkno = map[i+1].br_startblock; while (rablkcnt--) { xfs_buf_readahead(mp->m_ddev_targp, XFS_FSB_TO_DADDR(mp, rablkno), mp->m_quotainfo->qi_dqchunklen, NULL); rablkno++; } } /* * Iterate thru all the blks in the extent and * reset the counters of all the dquots inside them. */ error = xfs_qm_dqiter_bufs(mp, firstid, map[i].br_startblock, map[i].br_blockcount, flags, buffer_list); if (error) goto out; } } while (nmaps > 0); out: kmem_free(map); return error; } /* * Called by dqusage_adjust in doing a quotacheck. * * Given the inode, and a dquot id this updates both the incore dqout as well * as the buffer copy. This is so that once the quotacheck is done, we can * just log all the buffers, as opposed to logging numerous updates to * individual dquots. */ STATIC int xfs_qm_quotacheck_dqadjust( struct xfs_inode *ip, xfs_dqid_t id, uint type, xfs_qcnt_t nblks, xfs_qcnt_t rtblks) { struct xfs_mount *mp = ip->i_mount; struct xfs_dquot *dqp; int error; error = xfs_qm_dqget(mp, ip, id, type, XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp); if (error) { /* * Shouldn't be able to turn off quotas here. */ ASSERT(error != ESRCH); ASSERT(error != ENOENT); return error; } trace_xfs_dqadjust(dqp); /* * Adjust the inode count and the block count to reflect this inode's * resource usage. */ be64_add_cpu(&dqp->q_core.d_icount, 1); dqp->q_res_icount++; if (nblks) { be64_add_cpu(&dqp->q_core.d_bcount, nblks); dqp->q_res_bcount += nblks; } if (rtblks) { be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks); dqp->q_res_rtbcount += rtblks; } /* * Set default limits, adjust timers (since we changed usages) * * There are no timers for the default values set in the root dquot. 
*/ if (dqp->q_core.d_id) { xfs_qm_adjust_dqlimits(mp, dqp); xfs_qm_adjust_dqtimers(mp, &dqp->q_core); } dqp->dq_flags |= XFS_DQ_DIRTY; xfs_qm_dqput(dqp); return 0; } STATIC int xfs_qm_get_rtblks( xfs_inode_t *ip, xfs_qcnt_t *O_rtblks) { xfs_filblks_t rtblks; /* total rt blks */ xfs_extnum_t idx; /* extent record index */ xfs_ifork_t *ifp; /* inode fork pointer */ xfs_extnum_t nextents; /* number of extent entries */ int error; ASSERT(XFS_IS_REALTIME_INODE(ip)); ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); if (!(ifp->if_flags & XFS_IFEXTENTS)) { if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK))) return error; } rtblks = 0; nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); for (idx = 0; idx < nextents; idx++) rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx)); *O_rtblks = (xfs_qcnt_t)rtblks; return 0; } /* * callback routine supplied to bulkstat(). Given an inumber, find its * dquots and update them to account for resources taken by that inode. */ /* ARGSUSED */ STATIC int xfs_qm_dqusage_adjust( xfs_mount_t *mp, /* mount point for filesystem */ xfs_ino_t ino, /* inode number to get data for */ void __user *buffer, /* not used */ int ubsize, /* not used */ int *ubused, /* not used */ int *res) /* result code value */ { xfs_inode_t *ip; xfs_qcnt_t nblks, rtblks = 0; int error; ASSERT(XFS_IS_QUOTA_RUNNING(mp)); /* * rootino must have its resources accounted for, not so with the quota * inodes. */ if (xfs_is_quota_inode(&mp->m_sb, ino)) { *res = BULKSTAT_RV_NOTHING; return XFS_ERROR(EINVAL); } /* * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget * interface expects the inode to be exclusively locked because that's * the case in all other instances. It's OK that we do this because * quotacheck is done only at mount time. 
 */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
	if (error) {
		*res = BULKSTAT_RV_NOTHING;
		return error;
	}

	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Walk thru the extent list and count the realtime blocks.
		 */
		error = xfs_qm_get_rtblks(ip, &rtblks);
		if (error)
			goto error0;
	}

	/* Data-device blocks are total blocks minus the realtime blocks. */
	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
						   XFS_DQ_USER, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
						   XFS_DQ_GROUP, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
						   XFS_DQ_PROJ, nblks, rtblks);
		if (error)
			goto error0;
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_DIDONE;
	return 0;

error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_GIVEUP;
	return error;
}

/*
 * Flush one dirty dquot back to disk, queueing its buffer on the
 * caller-supplied delwri list (passed via @data).  Dquots already being
 * freed or not dirty are skipped.
 */
STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	xfs_dqflock(dqp);
	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}

/*
 * Walk thru all the filesystem inodes and construct a
consistent view * of the disk quota world. If the quotacheck fails, disable quotas. */ int xfs_qm_quotacheck( xfs_mount_t *mp) { int done, count, error, error2; xfs_ino_t lastino; size_t structsz; uint flags; LIST_HEAD (buffer_list); struct xfs_inode *uip = mp->m_quotainfo->qi_uquotaip; struct xfs_inode *gip = mp->m_quotainfo->qi_gquotaip; struct xfs_inode *pip = mp->m_quotainfo->qi_pquotaip; count = INT_MAX; structsz = 1; lastino = 0; flags = 0; ASSERT(uip || gip || pip); ASSERT(XFS_IS_QUOTA_RUNNING(mp)); xfs_notice(mp, "Quotacheck needed: Please wait."); /* * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset * their counters to zero. We need a clean slate. * We don't log our changes till later. */ if (uip) { error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA, &buffer_list); if (error) goto error_return; flags |= XFS_UQUOTA_CHKD; } if (gip) { error = xfs_qm_dqiterate(mp, gip, XFS_QMOPT_GQUOTA, &buffer_list); if (error) goto error_return; flags |= XFS_GQUOTA_CHKD; } if (pip) { error = xfs_qm_dqiterate(mp, pip, XFS_QMOPT_PQUOTA, &buffer_list); if (error) goto error_return; flags |= XFS_PQUOTA_CHKD; } do { /* * Iterate thru all the inodes in the file system, * adjusting the corresponding dquot counters in core. */ error = xfs_bulkstat(mp, &lastino, &count, xfs_qm_dqusage_adjust, structsz, NULL, &done); if (error) break; } while (!done); /* * We've made all the changes that we need to make incore. Flush them * down to disk buffers if everything was updated successfully. 
*/ if (XFS_IS_UQUOTA_ON(mp)) { error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one, &buffer_list); } if (XFS_IS_GQUOTA_ON(mp)) { error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one, &buffer_list); if (!error) error = error2; } if (XFS_IS_PQUOTA_ON(mp)) { error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one, &buffer_list); if (!error) error = error2; } error2 = xfs_buf_delwri_submit(&buffer_list); if (!error) error = error2; /* * We can get this error if we couldn't do a dquot allocation inside * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the * dirty dquots that might be cached, we just want to get rid of them * and turn quotaoff. The dquots won't be attached to any of the inodes * at this point (because we intentionally didn't in dqget_noattach). */ if (error) { xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL); goto error_return; } /* * If one type of quotas is off, then it will lose its * quotachecked status, since we won't be doing accounting for * that type anymore. */ mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD; mp->m_qflags |= flags; error_return: while (!list_empty(&buffer_list)) { struct xfs_buf *bp = list_first_entry(&buffer_list, struct xfs_buf, b_list); list_del_init(&bp->b_list); xfs_buf_relse(bp); } if (error) { xfs_warn(mp, "Quotacheck: Unsuccessful (Error %d): Disabling quotas.", error); /* * We must turn off quotas. */ ASSERT(mp->m_quotainfo != NULL); xfs_qm_destroy_quotainfo(mp); if (xfs_mount_reset_sbqflags(mp)) { xfs_warn(mp, "Quotacheck: Failed to reset quota flags."); } } else xfs_notice(mp, "Quotacheck: Done."); return (error); } /* * This is called after the superblock has been read in and we're ready to * iget the quota inodes. 
*/ STATIC int xfs_qm_init_quotainos( xfs_mount_t *mp) { struct xfs_inode *uip = NULL; struct xfs_inode *gip = NULL; struct xfs_inode *pip = NULL; int error; __int64_t sbflags = 0; uint flags = 0; ASSERT(mp->m_quotainfo); /* * Get the uquota and gquota inodes */ if (xfs_sb_version_hasquota(&mp->m_sb)) { if (XFS_IS_UQUOTA_ON(mp) && mp->m_sb.sb_uquotino != NULLFSINO) { ASSERT(mp->m_sb.sb_uquotino > 0); error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &uip); if (error) return XFS_ERROR(error); } if (XFS_IS_GQUOTA_ON(mp) && mp->m_sb.sb_gquotino != NULLFSINO) { ASSERT(mp->m_sb.sb_gquotino > 0); error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &gip); if (error) goto error_rele; } if (XFS_IS_PQUOTA_ON(mp) && mp->m_sb.sb_pquotino != NULLFSINO) { ASSERT(mp->m_sb.sb_pquotino > 0); error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino, 0, 0, &pip); if (error) goto error_rele; } } else { flags |= XFS_QMOPT_SBVERSION; sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | XFS_SB_GQUOTINO | XFS_SB_PQUOTINO | XFS_SB_QFLAGS); } /* * Create the three inodes, if they don't exist already. The changes * made above will get added to a transaction and logged in one of * the qino_alloc calls below. If the device is readonly, * temporarily switch to read-write to do this. 
 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		error = xfs_qm_qino_alloc(mp, &uip,
					  sbflags | XFS_SB_UQUOTINO,
					  flags | XFS_QMOPT_UQUOTA);
		if (error)
			goto error_rele;

		/* Only the first allocation needs to rev the superblock. */
		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
		error = xfs_qm_qino_alloc(mp, &gip,
					  sbflags | XFS_SB_GQUOTINO,
					  flags | XFS_QMOPT_GQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
		error = xfs_qm_qino_alloc(mp, &pip,
					  sbflags | XFS_SB_PQUOTINO,
					  flags | XFS_QMOPT_PQUOTA);
		if (error)
			goto error_rele;
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;
	mp->m_quotainfo->qi_pquotaip = pip;

	return 0;

error_rele:
	if (uip)
		IRELE(uip);
	if (gip)
		IRELE(gip);
	if (pip)
		IRELE(pip);
	return XFS_ERROR(error);
}

/*
 * Remove the dquot from its per-type radix tree (under qi_tree_lock),
 * drop the quotainfo dquot count, and destroy the dquot itself.
 */
STATIC void
xfs_qm_dqfree_one(
	struct xfs_dquot	*dqp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	mutex_lock(&qi->qi_tree_lock);
	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));

	qi->qi_dquots--;
	mutex_unlock(&qi->qi_tree_lock);

	xfs_qm_dqdestroy(dqp);
}

/*
 * Start a transaction and write the incore superblock changes to
 * disk. flags parameter indicates which fields have changed.
 */
int
xfs_qm_write_sb_changes(
	xfs_mount_t	*mp,
	__int64_t	flags)
{
	xfs_trans_t	*tp;
	int		error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_mod_sb(tp, flags);
	error = xfs_trans_commit(tp, 0);

	return error;
}

/* --------------- utility functions for vnodeops ---------------- */

/*
 * Given an inode, a uid, gid and prid make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
* * in : inode (unlocked) * out : udquot, gdquot with references taken and unlocked */ int xfs_qm_vop_dqalloc( struct xfs_inode *ip, xfs_dqid_t uid, xfs_dqid_t gid, prid_t prid, uint flags, struct xfs_dquot **O_udqpp, struct xfs_dquot **O_gdqpp, struct xfs_dquot **O_pdqpp) { struct xfs_mount *mp = ip->i_mount; struct xfs_dquot *uq = NULL; struct xfs_dquot *gq = NULL; struct xfs_dquot *pq = NULL; int error; uint lockflags; if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) return 0; lockflags = XFS_ILOCK_EXCL; xfs_ilock(ip, lockflags); if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip)) gid = ip->i_d.di_gid; /* * Attach the dquot(s) to this inode, doing a dquot allocation * if necessary. The dquot(s) will not be locked. */ if (XFS_NOT_DQATTACHED(mp, ip)) { error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC); if (error) { xfs_iunlock(ip, lockflags); return error; } } if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) { if (ip->i_d.di_uid != uid) { /* * What we need is the dquot that has this uid, and * if we send the inode to dqget, the uid of the inode * takes priority over what's sent in the uid argument. * We must unlock inode here before calling dqget if * we're not sending the inode, because otherwise * we'll deadlock by doing trans_reserve while * holding ilock. */ xfs_iunlock(ip, lockflags); error = xfs_qm_dqget(mp, NULL, uid, XFS_DQ_USER, XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &uq); if (error) { ASSERT(error != ENOENT); return error; } /* * Get the ilock in the right order. 
*/ xfs_dqunlock(uq); lockflags = XFS_ILOCK_SHARED; xfs_ilock(ip, lockflags); } else { /* * Take an extra reference, because we'll return * this to caller */ ASSERT(ip->i_udquot); uq = xfs_qm_dqhold(ip->i_udquot); } } if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) { if (ip->i_d.di_gid != gid) { xfs_iunlock(ip, lockflags); error = xfs_qm_dqget(mp, NULL, gid, XFS_DQ_GROUP, XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &gq); if (error) { ASSERT(error != ENOENT); goto error_rele; } xfs_dqunlock(gq); lockflags = XFS_ILOCK_SHARED; xfs_ilock(ip, lockflags); } else { ASSERT(ip->i_gdquot); gq = xfs_qm_dqhold(ip->i_gdquot); } } if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) { if (xfs_get_projid(ip) != prid) { xfs_iunlock(ip, lockflags); error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid, XFS_DQ_PROJ, XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &pq); if (error) { ASSERT(error != ENOENT); goto error_rele; } xfs_dqunlock(pq); lockflags = XFS_ILOCK_SHARED; xfs_ilock(ip, lockflags); } else { ASSERT(ip->i_pdquot); pq = xfs_qm_dqhold(ip->i_pdquot); } } if (uq) trace_xfs_dquot_dqalloc(ip); xfs_iunlock(ip, lockflags); if (O_udqpp) *O_udqpp = uq; else if (uq) xfs_qm_dqrele(uq); if (O_gdqpp) *O_gdqpp = gq; else if (gq) xfs_qm_dqrele(gq); if (O_pdqpp) *O_pdqpp = pq; else if (pq) xfs_qm_dqrele(pq); return 0; error_rele: if (gq) xfs_qm_dqrele(gq); if (uq) xfs_qm_dqrele(uq); return error; } /* * Actually transfer ownership, and do dquot modifications. * These were already reserved. */ xfs_dquot_t * xfs_qm_vop_chown( xfs_trans_t *tp, xfs_inode_t *ip, xfs_dquot_t **IO_olddq, xfs_dquot_t *newdq) { xfs_dquot_t *prevdq; uint bfield = XFS_IS_REALTIME_INODE(ip) ? 
XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT; ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount)); /* old dquot */ prevdq = *IO_olddq; ASSERT(prevdq); ASSERT(prevdq != newdq); xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks)); xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1); /* the sparkling new dquot */ xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks); xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1); /* * Take an extra reference, because the inode is going to keep * this dquot pointer even after the trans_commit. */ *IO_olddq = xfs_qm_dqhold(newdq); return prevdq; } /* * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID). */ int xfs_qm_vop_chown_reserve( struct xfs_trans *tp, struct xfs_inode *ip, struct xfs_dquot *udqp, struct xfs_dquot *gdqp, struct xfs_dquot *pdqp, uint flags) { struct xfs_mount *mp = ip->i_mount; uint delblks, blkflags, prjflags = 0; struct xfs_dquot *udq_unres = NULL; struct xfs_dquot *gdq_unres = NULL; struct xfs_dquot *pdq_unres = NULL; struct xfs_dquot *udq_delblks = NULL; struct xfs_dquot *gdq_delblks = NULL; struct xfs_dquot *pdq_delblks = NULL; int error; ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); ASSERT(XFS_IS_QUOTA_RUNNING(mp)); delblks = ip->i_delayed_blks; blkflags = XFS_IS_REALTIME_INODE(ip) ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS; if (XFS_IS_UQUOTA_ON(mp) && udqp && ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) { udq_delblks = udqp; /* * If there are delayed allocation blocks, then we have to * unreserve those from the old dquot, and add them to the * new dquot. 
*/ if (delblks) { ASSERT(ip->i_udquot); udq_unres = ip->i_udquot; } } if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp && ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) { gdq_delblks = gdqp; if (delblks) { ASSERT(ip->i_gdquot); gdq_unres = ip->i_gdquot; } } if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp && xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) { prjflags = XFS_QMOPT_ENOSPC; pdq_delblks = pdqp; if (delblks) { ASSERT(ip->i_pdquot); pdq_unres = ip->i_pdquot; } } error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount, udq_delblks, gdq_delblks, pdq_delblks, ip->i_d.di_nblocks, 1, flags | blkflags | prjflags); if (error) return error; /* * Do the delayed blks reservations/unreservations now. Since, these * are done without the help of a transaction, if a reservation fails * its previous reservations won't be automatically undone by trans * code. So, we have to do it manually here. */ if (delblks) { /* * Do the reservations first. Unreservation can't fail. */ ASSERT(udq_delblks || gdq_delblks || pdq_delblks); ASSERT(udq_unres || gdq_unres || pdq_unres); error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, udq_delblks, gdq_delblks, pdq_delblks, (xfs_qcnt_t)delblks, 0, flags | blkflags | prjflags); if (error) return error; xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, udq_unres, gdq_unres, pdq_unres, -((xfs_qcnt_t)delblks), 0, blkflags); } return (0); } int xfs_qm_vop_rename_dqattach( struct xfs_inode **i_tab) { struct xfs_mount *mp = i_tab[0]->i_mount; int i; if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) return 0; for (i = 0; (i < 4 && i_tab[i]); i++) { struct xfs_inode *ip = i_tab[i]; int error; /* * Watch out for duplicate entries in the table. 
*/ if (i == 0 || ip != i_tab[i-1]) { if (XFS_NOT_DQATTACHED(mp, ip)) { error = xfs_qm_dqattach(ip, 0); if (error) return error; } } } return 0; } void xfs_qm_vop_create_dqattach( struct xfs_trans *tp, struct xfs_inode *ip, struct xfs_dquot *udqp, struct xfs_dquot *gdqp, struct xfs_dquot *pdqp) { struct xfs_mount *mp = tp->t_mountp; if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) return; ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); ASSERT(XFS_IS_QUOTA_RUNNING(mp)); if (udqp && XFS_IS_UQUOTA_ON(mp)) { ASSERT(ip->i_udquot == NULL); ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id)); ip->i_udquot = xfs_qm_dqhold(udqp); xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1); } if (gdqp && XFS_IS_GQUOTA_ON(mp)) { ASSERT(ip->i_gdquot == NULL); ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id)); ip->i_gdquot = xfs_qm_dqhold(gdqp); xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1); } if (pdqp && XFS_IS_PQUOTA_ON(mp)) { ASSERT(ip->i_pdquot == NULL); ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id)); ip->i_pdquot = xfs_qm_dqhold(pdqp); xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1); } }
gpl-2.0
nandra/omap_850_kernel
net/ipv4/ipvs/ip_vs_nq.c
143
3800
/* * IPVS: Never Queue scheduling module * * Version: $Id: ip_vs_nq.c,v 1.2 2003/06/08 09:31:19 wensong Exp $ * * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Changes: * */ /* * The NQ algorithm adopts a two-speed model. When there is an idle server * available, the job will be sent to the idle server, instead of waiting * for a fast one. When there is no idle server available, the job will be * sent to the server that minimize its expected delay (The Shortest * Expected Delay scheduling algorithm). * * See the following paper for more information: * A. Weinrib and S. Shenker, Greed is not enough: Adaptive load sharing * in large heterogeneous systems. In Proceedings IEEE INFOCOM'88, * pages 986-994, 1988. * * Thanks must go to Marko Buuri <marko@buuri.name> for talking NQ to me. * * The difference between NQ and SED is that NQ can improve overall * system utilization. * */ #include <linux/module.h> #include <linux/kernel.h> #include <net/ip_vs.h> static int ip_vs_nq_init_svc(struct ip_vs_service *svc) { return 0; } static int ip_vs_nq_done_svc(struct ip_vs_service *svc) { return 0; } static int ip_vs_nq_update_svc(struct ip_vs_service *svc) { return 0; } static inline unsigned int ip_vs_nq_dest_overhead(struct ip_vs_dest *dest) { /* * We only use the active connection number in the cost * calculation here. 
*/ return atomic_read(&dest->activeconns) + 1; } /* * Weighted Least Connection scheduling */ static struct ip_vs_dest * ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) { struct ip_vs_dest *dest, *least = NULL; unsigned int loh = 0, doh; IP_VS_DBG(6, "ip_vs_nq_schedule(): Scheduling...\n"); /* * We calculate the load of each dest server as follows: * (server expected overhead) / dest->weight * * Remember -- no floats in kernel mode!!! * The comparison of h1*w2 > h2*w1 is equivalent to that of * h1/w1 > h2/w2 * if every weight is larger than zero. * * The server with weight=0 is quiesced and will not receive any * new connections. */ list_for_each_entry(dest, &svc->destinations, n_list) { if (dest->flags & IP_VS_DEST_F_OVERLOAD || !atomic_read(&dest->weight)) continue; doh = ip_vs_nq_dest_overhead(dest); /* return the server directly if it is idle */ if (atomic_read(&dest->activeconns) == 0) { least = dest; loh = doh; goto out; } if (!least || (loh * atomic_read(&dest->weight) > doh * atomic_read(&least->weight))) { least = dest; loh = doh; } } if (!least) return NULL; out: IP_VS_DBG(6, "NQ: server %u.%u.%u.%u:%u " "activeconns %d refcnt %d weight %d overhead %d\n", NIPQUAD(least->addr), ntohs(least->port), atomic_read(&least->activeconns), atomic_read(&least->refcnt), atomic_read(&least->weight), loh); return least; } static struct ip_vs_scheduler ip_vs_nq_scheduler = { .name = "nq", .refcnt = ATOMIC_INIT(0), .module = THIS_MODULE, .init_service = ip_vs_nq_init_svc, .done_service = ip_vs_nq_done_svc, .update_service = ip_vs_nq_update_svc, .schedule = ip_vs_nq_schedule, }; static int __init ip_vs_nq_init(void) { INIT_LIST_HEAD(&ip_vs_nq_scheduler.n_list); return register_ip_vs_scheduler(&ip_vs_nq_scheduler); } static void __exit ip_vs_nq_cleanup(void) { unregister_ip_vs_scheduler(&ip_vs_nq_scheduler); } module_init(ip_vs_nq_init); module_exit(ip_vs_nq_cleanup); MODULE_LICENSE("GPL");
gpl-2.0
GargoyleSoftware/voip-client-ios
submodules/build-armv7-apple-darwin/externals/openssl/crypto/jpake/jpake_err.c
143
4221
/* crypto/jpake/jpake_err.c */ /* ==================================================================== * Copyright (c) 1999-2008 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * openssl-core@OpenSSL.org. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== * * This product includes cryptographic software written by Eric Young * (eay@cryptsoft.com). This product includes software written by Tim * Hudson (tjh@cryptsoft.com). * */ /* NOTE: this file was auto generated by the mkerr.pl script: any changes * made to it will be overwritten when the script next updates this file, * only reason strings will be preserved. */ #include <stdio.h> #include <openssl/err.h> #include <openssl/jpake.h> /* BEGIN ERROR CODES */ #ifndef OPENSSL_NO_ERR #define ERR_FUNC(func) ERR_PACK(ERR_LIB_JPAKE,func,0) #define ERR_REASON(reason) ERR_PACK(ERR_LIB_JPAKE,0,reason) static ERR_STRING_DATA JPAKE_str_functs[]= { {ERR_FUNC(JPAKE_F_JPAKE_STEP1_PROCESS), "JPAKE_STEP1_process"}, {ERR_FUNC(JPAKE_F_JPAKE_STEP2_PROCESS), "JPAKE_STEP2_process"}, {ERR_FUNC(JPAKE_F_JPAKE_STEP3A_PROCESS), "JPAKE_STEP3A_process"}, {ERR_FUNC(JPAKE_F_JPAKE_STEP3B_PROCESS), "JPAKE_STEP3B_process"}, {ERR_FUNC(JPAKE_F_VERIFY_ZKP), "VERIFY_ZKP"}, {0,NULL} }; static ERR_STRING_DATA JPAKE_str_reasons[]= { {ERR_REASON(JPAKE_R_G_TO_THE_X4_IS_ONE) ,"g to the x4 is one"}, {ERR_REASON(JPAKE_R_HASH_OF_HASH_OF_KEY_MISMATCH),"hash of hash of key mismatch"}, {ERR_REASON(JPAKE_R_HASH_OF_KEY_MISMATCH),"hash of key mismatch"}, {ERR_REASON(JPAKE_R_VERIFY_B_FAILED) ,"verify b failed"}, {ERR_REASON(JPAKE_R_VERIFY_X3_FAILED) ,"verify x3 failed"}, {ERR_REASON(JPAKE_R_VERIFY_X4_FAILED) ,"verify x4 
failed"}, {ERR_REASON(JPAKE_R_ZKP_VERIFY_FAILED) ,"zkp verify failed"}, {0,NULL} }; #endif void ERR_load_JPAKE_strings(void) { #ifndef OPENSSL_NO_ERR if (ERR_func_error_string(JPAKE_str_functs[0].error) == NULL) { ERR_load_strings(0,JPAKE_str_functs); ERR_load_strings(0,JPAKE_str_reasons); } #endif }
gpl-2.0
yiyiadam/u-boot-2012.04.01
board/esd/hh405/logo_320_240_8bpp.c
143
50010
0x1f, 0x8b, 0x08, 0x08, 0x72, 0xd1, 0xe2, 0x40, 0x00, 0x03, 0x48, 0x6f, 0x6c, 0x7a, 0x2d, 0x48, 0x65, 0x72, 0x5f, 0x64, 0x74, 0x5f, 0x33, 0x43, 0x5f, 0x33, 0x32, 0x30, 0x78, 0x32, 0x34, 0x30, 0x5f, 0x38, 0x62, 0x70, 0x70, 0x2e, 0x62, 0x6d, 0x70, 0x00, 0xec, 0x5d, 0x0f, 0x6c, 0x1b, 0xd7, 0x79, 0xff, 0x12, 0x09, 0xc5, 0xc4, 0xd2, 0x01, 0x26, 0x0a, 0x03, 0x56, 0x88, 0x1e, 0x59, 0xa9, 0x14, 0x43, 0xa8, 0xb6, 0x10, 0x08, 0x72, 0x4d, 0x10, 0xf4, 0xe6, 0x28, 0x09, 0x90, 0x2a, 0x02, 0x1c, 0x69, 0xe2, 0x98, 0x6a, 0xbc, 0xcd, 0xa3, 0x38, 0x33, 0x64, 0x4c, 0xd7, 0x54, 0xe4, 0x36, 0xd3, 0x6c, 0xd4, 0x70, 0x9a, 0xc0, 0xf0, 0xcc, 0xce, 0x73, 0xb3, 0x2d, 0xd1, 0x42, 0x25, 0xd0, 0x1c, 0x29, 0x11, 0x27, 0x42, 0x15, 0x25, 0x81, 0xf3, 0x5a, 0x36, 0x54, 0x77, 0x66, 0xa8, 0x0e, 0x72, 0xed, 0x25, 0x59, 0x24, 0xb4, 0x69, 0xfe, 0xac, 0x68, 0xa7, 0x85, 0x4a, 0x42, 0xbb, 0x89, 0x77, 0x20, 0xf6, 0xbd, 0x3f, 0x47, 0x1e, 0x29, 0xc9, 0x8a, 0x5d, 0x2b, 0x8e, 0x8b, 0xf7, 0x3b, 0xfe, 0xb9, 0xf7, 0xee, 0xbd, 0xef, 0xde, 0xfb, 0xbd, 0xdf, 0xfb, 0xbe, 0x77, 0xf4, 0x9d, 0xbc, 0xfb, 0x81, 0xb6, 0x96, 0xdb, 0x80, 0xa0, 0xad, 0x1a, 0xc0, 0x8a, 0xdf, 0x7f, 0x88, 0xc9, 0x15, 0xfc, 0xbe, 0x0d, 0x7e, 0x87, 0xe6, 0x43, 0xf3, 0x6d, 0xd0, 0xb0, 0x0d, 0xe8, 0x1b, 0x58, 0x51, 0xa8, 0xbe, 0xbd, 0x1a, 0x5a, 0x8f, 0x2e, 0x43, 0xfd, 0xee, 0x41, 0x18, 0x7a, 0xfa, 0x9f, 0xa0, 0xfb, 0xc1, 0x6e, 0x18, 0xf9, 0x8f, 0x2b, 0xd0, 0xbc, 0x77, 0x04, 0xbc, 0x67, 0x3e, 0x84, 0xba, 0x2f, 0xee, 0x84, 0xc1, 0xa3, 0xc7, 0xe0, 0xe2, 0xc5, 0x57, 0x61, 0x64, 0x7a, 0x1e, 0x9a, 0xef, 0x6c, 0x86, 0xfa, 0x7b, 0x9e, 0x84, 0xc6, 0xbd, 0x73, 0xd0, 0xf1, 0xd4, 0x7b, 0x70, 0xf1, 0x57, 0x0a, 0x34, 0x1e, 0xbc, 0x08, 0x75, 0x4d, 0x3d, 0x70, 0xf6, 0xdf, 0x52, 0xa0, 0xaf, 0x33, 0x43, 0xf7, 0xc1, 0x13, 0xd0, 0xdc, 0xf5, 0x04, 0x1c, 0x4b, 0xe6, 0xa1, 0xfd, 0x1b, 0x67, 0xa1, 0xfb, 0xe8, 0x1c, 0xcc, 0x2d, 0x2c, 0x41, 0xf0, 0xd9, 0x25, 0x70, 0xdc, 0x7d, 0x2f, 0x34, 0x3e, 0xfa, 0x36, 0xd4, 0x7f, 0xc5, 0x0b, 0x43, 0x2f, 0x9e, 0x85, 0xe5, 0xf7, 0x72, 0x50, 
0xff, 0x85, 0x7a, 0x38, 0x36, 0x7a, 0x11, 0x96, 0x7e, 0xfe, 0x36, 0x0c, 0xfd, 0xeb, 0xcf, 0xa0, 0xe3, 0xe4, 0x12, 0x9c, 0xf8, 0x9b, 0x53, 0xd0, 0xfb, 0xe7, 0x7b, 0xa1, 0xb5, 0x6d, 0x07, 0xe8, 0x7f, 0x7f, 0x3b, 0x04, 0x1f, 0x09, 0xc1, 0xd9, 0x9f, 0x2a, 0xd0, 0xf1, 0xe0, 0x1f, 0x43, 0xfd, 0x17, 0x9b, 0xe0, 0xec, 0xbf, 0x5f, 0x84, 0xc6, 0x9e, 0x97, 0xa0, 0xf1, 0xb1, 0x65, 0xe8, 0x9f, 0xcc, 0x43, 0xfd, 0xfd, 0x27, 0xa0, 0xf7, 0x1f, 0xde, 0x84, 0xe5, 0xcb, 0x1f, 0x43, 0xfb, 0xe3, 0xff, 0x05, 0x8d, 0x0f, 0xcf, 0x43, 0xf5, 0xe7, 0x6b, 0xa1, 0xdd, 0x77, 0x02, 0xea, 0xda, 0x82, 0xf0, 0xce, 0xbb, 0xef, 0xc2, 0xd0, 0xb9, 0x8f, 0xa1, 0xff, 0xd0, 0x37, 0xe1, 0xec, 0x1b, 0x0a, 0x8c, 0xbf, 0x14, 0x83, 0xa1, 0xe7, 0x47, 0xa0, 0x7f, 0xf4, 0x2d, 0xe8, 0x3f, 0x79, 0x16, 0xea, 0xb6, 0xf5, 0x42, 0xfb, 0xfd, 0x5f, 0x85, 0xba, 0x2f, 0x98, 0xe0, 0xc4, 0xcb, 0x79, 0xe8, 0xf6, 0x61, 0x1f, 0xbf, 0x36, 0x0e, 0xcd, 0x9d, 0xc7, 0xc0, 0x71, 0x7c, 0x19, 0xa6, 0x5e, 0x55, 0xa0, 0x75, 0xff, 0x14, 0x9c, 0x7d, 0x79, 0x0e, 0x2e, 0xbe, 0xfb, 0x31, 0x38, 0xec, 0x0e, 0xa8, 0xb7, 0x39, 0xa0, 0xf5, 0xaf, 0xdf, 0xc4, 0x73, 0x9f, 0x06, 0xef, 0xb3, 0x6f, 0x42, 0xdd, 0x1d, 0x75, 0x30, 0xfe, 0xca, 0x0a, 0xb4, 0x3e, 0x18, 0x84, 0xee, 0x1e, 0x17, 0x34, 0x6f, 0xdb, 0x06, 0xad, 0x07, 0xd1, 0xb6, 0xa1, 0x0e, 0xbc, 0x78, 0x8e, 0xf9, 0x57, 0xb1, 0x5f, 0xa7, 0xfe, 0x0e, 0x7a, 0x7d, 0x41, 0x50, 0x0a, 0x0a, 0xe8, 0xf5, 0x7a, 0x70, 0x1c, 0xfd, 0x29, 0x4c, 0xfd, 0xe4, 0x23, 0x70, 0x3c, 0x3c, 0x0e, 0x75, 0xb6, 0x76, 0x38, 0xf6, 0xed, 0xe3, 0x70, 0xf1, 0x8d, 0x45, 0x98, 0x9a, 0x49, 0x42, 0x73, 0xdf, 0x59, 0x78, 0xe7, 0x57, 0xbf, 0x84, 0xf9, 0x85, 0xf3, 0x70, 0xec, 0x29, 0x6c, 0xcb, 0xc0, 0xcf, 0x61, 0xf9, 0x83, 0x0f, 0x40, 0xbf, 0x75, 0x07, 0x34, 0xda, 0xb6, 0xc1, 0xfc, 0x2b, 0x3f, 0x86, 0xc6, 0x47, 0xfe, 0x13, 0xea, 0x9d, 0x21, 0x18, 0x79, 0xe5, 0xff, 0x60, 0xf0, 0xf0, 0xb7, 0x60, 0x7c, 0x72, 0x0a, 0x86, 0xce, 0x4c, 0x41, 0xdd, 0x5d, 0x5e, 0x68, 0xfc, 0xd3, 0x59, 0xe8, 0x8d, 0x7e, 0x08, 0xcd, 0xbb, 0xbd, 0xd0, 0x8a, 0xf6, 
0x83, 0xff, 0x92, 0x07, 0xbd, 0xa5, 0x1d, 0xba, 0x9f, 0xcd, 0x43, 0x63, 0xd7, 0x3f, 0x42, 0xc7, 0xbd, 0x1d, 0xd0, 0xe8, 0xfc, 0x13, 0xe8, 0xde, 0x3f, 0x04, 0xed, 0x78, 0xfc, 0xd8, 0x74, 0x1e, 0x8e, 0x3d, 0x3f, 0x0f, 0x8e, 0x07, 0x07, 0xa1, 0xfb, 0x3b, 0x6f, 0x82, 0xf7, 0xe1, 0x20, 0x78, 0x8f, 0x4e, 0xc1, 0xe0, 0x53, 0x67, 0xa1, 0xfd, 0xa1, 0x7e, 0x70, 0x1c, 0x5e, 0x82, 0x66, 0x67, 0x37, 0xb4, 0x23, 0x1f, 0xc7, 0xbe, 0x33, 0x0a, 0xa7, 0xff, 0xfe, 0x69, 0x98, 0x7f, 0x5b, 0x81, 0xd3, 0x67, 0xe6, 0x60, 0xf0, 0xf8, 0x10, 0x0c, 0x4d, 0xce, 0x41, 0xeb, 0xfd, 0xdf, 0x80, 0xd3, 0x49, 0x1c, 0x13, 0x3c, 0x57, 0xeb, 0x63, 0x17, 0xe1, 0x9d, 0x0f, 0x14, 0xac, 0x37, 0x0f, 0x43, 0x2f, 0x2f, 0x43, 0xf0, 0x60, 0x3f, 0x74, 0x1c, 0x47, 0x1b, 0x7d, 0x53, 0xd0, 0x7e, 0x12, 0x35, 0xb7, 0xf5, 0x0f, 0xc0, 0xe1, 0x74, 0x42, 0xff, 0xd1, 0x21, 0xe8, 0x3e, 0x7e, 0x11, 0xba, 0x07, 0xc6, 0xe1, 0x9d, 0xf7, 0x50, 0x4f, 0xbb, 0x83, 0x30, 0x3e, 0x3d, 0x07, 0x8d, 0xf7, 0x9e, 0x00, 0xef, 0x5e, 0x2f, 0xcc, 0x9d, 0x7b, 0x05, 0x3a, 0x8e, 0xa2, 0xf6, 0x02, 0x3f, 0x80, 0x8e, 0xbd, 0x83, 0xd0, 0xd1, 0xf1, 0x00, 0x9c, 0x4e, 0x7f, 0x08, 0xbd, 0x0f, 0xf5, 0x42, 0xeb, 0x5d, 0xad, 0xd0, 0x7b, 0xea, 0x22, 0x1c, 0x3b, 0x7e, 0x02, 0xda, 0x0f, 0x4e, 0xc1, 0xc8, 0x8b, 0xe3, 0x30, 0xf7, 0xf2, 0x8f, 0xe0, 0xf4, 0xa9, 0xef, 0xc2, 0xd2, 0x1b, 0x4b, 0x90, 0xff, 0x20, 0x0f, 0x75, 0x5b, 0xb7, 0x83, 0xfe, 0x73, 0x7a, 0x18, 0x3f, 0x7f, 0x05, 0x82, 0x2f, 0xe2, 0xd8, 0x3e, 0x8d, 0x9c, 0x84, 0xcf, 0xe3, 0xf8, 0x61, 0x5f, 0xee, 0xb9, 0x07, 0x1a, 0xed, 0x7f, 0x06, 0x23, 0xc9, 0x25, 0x68, 0xfc, 0xd2, 0x97, 0xa0, 0xf9, 0xaf, 0x96, 0x61, 0xf0, 0x7b, 0x39, 0x68, 0x7c, 0xe0, 0x49, 0xc8, 0x7f, 0x74, 0x19, 0x75, 0x7a, 0x00, 0x06, 0x1f, 0x1b, 0x84, 0xb9, 0x37, 0xf2, 0x30, 0x32, 0x32, 0x02, 0xf5, 0x77, 0xf5, 0x42, 0xb3, 0x67, 0x14, 0x1c, 0x07, 0xc7, 0x61, 0xe9, 0xbf, 0x91, 0xc7, 0xaf, 0x7e, 0x17, 0x46, 0xce, 0x8c, 0x83, 0xf7, 0xb9, 0xf7, 0xa1, 0xae, 0xf1, 0x7e, 0xe8, 0xf0, 0x0c, 0x42, 0xab, 0x6f, 0x1c, 0x3a, 0x4e, 0xfd, 0x2f, 0xd4, 0xdf, 
0x7b, 0x1a, 0x1a, 0x7d, 0xf3, 0xb0, 0x84, 0x73, 0xe4, 0xc4, 0x59, 0x2c, 0x3b, 0xb0, 0x0c, 0x4b, 0x6f, 0x2d, 0xc3, 0xdc, 0xcf, 0x14, 0xe8, 0x7f, 0x11, 0xfb, 0xbf, 0xad, 0x03, 0x5a, 0x0f, 0x2f, 0x43, 0x6b, 0x0f, 0xf2, 0xfd, 0x90, 0x17, 0xf5, 0x82, 0x7a, 0x7a, 0x74, 0x1e, 0xea, 0x7e, 0xaf, 0x11, 0xbc, 0xa7, 0xe6, 0x41, 0x7f, 0x47, 0x3d, 0x38, 0xbe, 0xbd, 0x0c, 0xf3, 0x3f, 0x59, 0x82, 0x63, 0xa7, 0xa6, 0x90, 0x8f, 0x41, 0x68, 0xde, 0x3f, 0x0f, 0xa7, 0x7f, 0x84, 0x5a, 0xdc, 0x3b, 0x04, 0x5e, 0x9f, 0x0f, 0xa6, 0x26, 0x13, 0xd8, 0xa6, 0x20, 0x9c, 0x98, 0x5e, 0x86, 0xf6, 0x07, 0xfb, 0xa1, 0x1f, 0x39, 0x6e, 0xdc, 0xda, 0x08, 0xed, 0xce, 0x76, 0xd4, 0xea, 0x3c, 0x6a, 0xe9, 0x1d, 0xd4, 0x8c, 0x82, 0x1a, 0x19, 0x84, 0x11, 0x1c, 0x9f, 0xea, 0xcf, 0xe1, 0x7c, 0x47, 0x9b, 0x43, 0xcf, 0x3e, 0x0f, 0x5e, 0xe4, 0xc2, 0x23, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0xf0, 0x19, 0xc2, 0x58, 0x2c, 0x16, 0x73, 0xde, 0xec, 0x46, 0xdc, 0xc2, 0x70, 0x02, 0x40, 0xef, 0xcd, 0x6e, 0xc4, 0x2d, 0x0c, 0xc2, 0xdf, 0xf4, 0xcd, 0x6e, 0xc4, 0x2d, 0x8c, 0x7e, 0xe4, 0xcf, 0x72, 0xb3, 0x1b, 0x71, 0x0b, 0x43, 0xcc, 0xdf, 0x6b, 0x46, 0x70, 0xc6, 0xef, 0xd7, 0xed, 0xe3, 0x09, 0x36, 0x7f, 0xe5, 0x86, 0x90, 0xce, 0x54, 0x2a, 0x21, 0x37, 0x60, 0x09, 0x9e, 0x96, 0x65, 0xf5, 0x45, 0x53, 0xec, 0x0b, 0x77, 0xfa, 0x42, 0x0d, 0x6c, 0x47, 0x9b, 0x2d, 0xab, 0x47, 0x3d, 0x95, 0x3b, 0x26, 0x9d, 0xdf, 0x54, 0x2c, 0x8e, 0xa9, 0x90, 0xdf, 0xae, 0x39, 0x5f, 0x5f, 0xc8, 0x3f, 0x63, 0xd2, 0x16, 0x57, 0x2d, 0x69, 0x0c, 0x55, 0x18, 0xf4, 0x78, 0x66, 
0x49, 0xad, 0x75, 0x4f, 0xb8, 0x59, 0xc8, 0xa6, 0x2e, 0x49, 0xa0, 0x48, 0xee, 0x54, 0x81, 0x9e, 0x88, 0xce, 0x5f, 0x43, 0xa7, 0xa4, 0x48, 0xd1, 0x3c, 0xe7, 0x74, 0x9f, 0x57, 0xdf, 0x95, 0x04, 0xc9, 0x1c, 0x9b, 0xc2, 0x36, 0x5a, 0xf5, 0x69, 0x75, 0x1b, 0x96, 0x3d, 0x3a, 0x57, 0x5a, 0xef, 0x25, 0xad, 0xac, 0xd2, 0x4b, 0x49, 0x69, 0xd8, 0x20, 0xc7, 0xf5, 0x69, 0xa7, 0xc7, 0xa3, 0xd3, 0xb3, 0x6c, 0x9b, 0x3e, 0x1d, 0x27, 0xb4, 0x98, 0xac, 0x7a, 0x7d, 0x8e, 0x98, 0x72, 0x62, 0xb5, 0x15, 0xfc, 0xb6, 0x8f, 0xba, 0x93, 0x8a, 0x39, 0x67, 0x5f, 0x49, 0xeb, 0xad, 0x41, 0x8f, 0xc1, 0xea, 0xc6, 0xf3, 0x75, 0x75, 0xb2, 0x16, 0x78, 0xec, 0xf9, 0xb4, 0xa4, 0x28, 0x92, 0xb9, 0xe0, 0x99, 0x71, 0xe9, 0xd3, 0x19, 0x9a, 0xd7, 0xa4, 0xd7, 0x5b, 0x4d, 0xc4, 0xb0, 0x3e, 0x4d, 0x0c, 0x67, 0xd2, 0x7a, 0x17, 0x1d, 0x2e, 0xbf, 0x5e, 0xaf, 0x9f, 0xa4, 0x25, 0xfc, 0xa3, 0x51, 0x29, 0xa9, 0x48, 0xe9, 0xd1, 0x10, 0x39, 0x0f, 0x36, 0xc0, 0x49, 0xb3, 0xe3, 0x69, 0x7d, 0xcf, 0xa6, 0xf2, 0x67, 0x03, 0x15, 0x29, 0xd2, 0x55, 0xa2, 0xbf, 0x94, 0xc4, 0x32, 0xe2, 0x54, 0x21, 0xa1, 0xb4, 0x5a, 0x40, 0xb2, 0x7b, 0x82, 0xee, 0x62, 0x71, 0x30, 0xcb, 0x1e, 0x7f, 0x12, 0xc0, 0x46, 0x8c, 0x28, 0xac, 0x40, 0x04, 0x6b, 0x62, 0xbb, 0xfd, 0x0a, 0xcb, 0xae, 0xc6, 0x2c, 0xd2, 0x4d, 0x3b, 0xd6, 0x8a, 0x11, 0x5b, 0x11, 0x2c, 0x94, 0xc5, 0xf4, 0x7e, 0x66, 0xc1, 0x95, 0xa1, 0x56, 0xb2, 0xaa, 0xc5, 0x51, 0x59, 0x7b, 0xbe, 0x69, 0x8f, 0x0e, 0xcd, 0x8f, 0xd2, 0x56, 0xa6, 0x00, 0xdc, 0xd8, 0x3c, 0x3f, 0x30, 0xc3, 0xa3, 0x68, 0x78, 0x86, 0xe4, 0x8f, 0xe1, 0x99, 0xf2, 0x64, 0xa7, 0x4a, 0x52, 0x8d, 0x0c, 0x63, 0x6a, 0x17, 0x7e, 0x17, 0x48, 0xb6, 0x8c, 0x67, 0xee, 0xdc, 0x54, 0xfe, 0xbc, 0xd8, 0x14, 0xb3, 0x39, 0x49, 0xce, 0x1c, 0xc3, 0xe6, 0x3b, 0x29, 0x13, 0xee, 0xcb, 0x7a, 0xf2, 0x3d, 0x41, 0xfa, 0x6e, 0xc6, 0x9d, 0x74, 0x93, 0xad, 0x3a, 0x4d, 0xb8, 0x30, 0x11, 0xfe, 0x14, 0xe9, 0x17, 0x12, 0x22, 0x2d, 0x7b, 0x42, 0x8c, 0xa8, 0x04, 0x56, 0x97, 0xaa, 0x6d, 0xc3, 0x49, 0x62, 0xc6, 0x87, 0x14, 0x00, 0xeb, 0x15, 0xf2, 0xd7, 0x55, 
0xc6, 0x5f, 0x15, 0xe3, 0x6f, 0x94, 0x98, 0x1c, 0x1d, 0x45, 0x8b, 0xf8, 0x2d, 0x7b, 0x7c, 0x49, 0x7d, 0x93, 0x8d, 0x0e, 0x5a, 0x15, 0x96, 0x25, 0xf4, 0x25, 0xf5, 0x29, 0xbd, 0x54, 0xf0, 0xe8, 0xd0, 0x7c, 0xae, 0xc8, 0x1f, 0x4e, 0x07, 0x1d, 0xe7, 0xaf, 0x09, 0xcf, 0xa7, 0xa3, 0xfc, 0xf1, 0x33, 0x91, 0x66, 0x27, 0xad, 0x39, 0x5b, 0xca, 0x4d, 0xf9, 0x2b, 0x14, 0xf9, 0xeb, 0x42, 0xfe, 0x36, 0x75, 0x06, 0x67, 0x63, 0x59, 0xbb, 0x2c, 0xeb, 0x72, 0x84, 0xb6, 0x04, 0xd3, 0x1f, 0x51, 0xa2, 0x6c, 0x49, 0xb2, 0x36, 0xa2, 0x3e, 0x95, 0xcc, 0x2c, 0x69, 0x8a, 0xa1, 0xda, 0x4e, 0x29, 0x01, 0xf3, 0x98, 0x9d, 0x02, 0x89, 0x62, 0xfa, 0xab, 0xc1, 0x4c, 0x32, 0x2d, 0x0b, 0x84, 0x83, 0x0e, 0x8f, 0x4a, 0xab, 0xa7, 0x99, 0x6a, 0x76, 0x95, 0xfe, 0x64, 0x3d, 0x80, 0x1e, 0xb5, 0x6d, 0x22, 0xc6, 0xf4, 0x78, 0x38, 0xe1, 0xe1, 0x7d, 0xc6, 0x42, 0x0e, 0x72, 0x02, 0x03, 0x76, 0x79, 0xa6, 0xc1, 0xe3, 0x97, 0xb4, 0xfc, 0xad, 0xa5, 0x3f, 0x32, 0x72, 0xc8, 0x9f, 0x8c, 0xa4, 0xbb, 0xfb, 0x49, 0x86, 0xa9, 0x2a, 0xa7, 0xe5, 0xcf, 0x23, 0x6d, 0xb6, 0xfe, 0xd4, 0xc1, 0x39, 0x82, 0xa7, 0xac, 0x61, 0x03, 0xe9, 0xd6, 0xa9, 0x19, 0x16, 0x8f, 0xc9, 0xcc, 0x74, 0xa9, 0x16, 0x35, 0xc5, 0x31, 0xfb, 0x72, 0x90, 0x57, 0xe2, 0x13, 0x35, 0xc3, 0xe7, 0xa9, 0x67, 0x02, 0x0f, 0x8e, 0xb1, 0x6e, 0x72, 0xfd, 0x49, 0x6b, 0xe8, 0x4f, 0xbe, 0x04, 0xd0, 0x4c, 0x92, 0x0d, 0x6e, 0xca, 0x1f, 0x03, 0xd1, 0x56, 0x5c, 0x9e, 0x4d, 0xf3, 0xb1, 0xa0, 0x59, 0x65, 0xf3, 0x57, 0xa3, 0x3f, 0xc2, 0x5f, 0x1f, 0xc9, 0xe7, 0xf3, 0xd7, 0xc7, 0xb4, 0x5b, 0x44, 0x99, 0xfe, 0xac, 0x37, 0x92, 0xae, 0x75, 0x11, 0x06, 0x3a, 0xd6, 0x4e, 0xe0, 0x5d, 0xa5, 0x6d, 0x68, 0xa2, 0x0d, 0xcc, 0x6a, 0xcb, 0xcd, 0xa4, 0xf9, 0xc4, 0x26, 0xe0, 0xfa, 0xcb, 0xaa, 0x8b, 0x6e, 0xb2, 0xe3, 0x2f, 0x66, 0xaf, 0x9e, 0xbf, 0xdc, 0xff, 0x21, 0x1b, 0x66, 0x2a, 0x9f, 0x18, 0x52, 0xc6, 0x2c, 0xc9, 0xba, 0x18, 0x3d, 0x33, 0x61, 0x28, 0xad, 0x8e, 0x69, 0xf9, 0xfc, 0x35, 0x95, 0xf4, 0xd7, 0xa4, 0xf5, 0x7f, 0x0e, 0xda, 0x78, 0x77, 0x83, 0xa6, 0x91, 0xbb, 0xe0, 0x53, 0x9b, 0xbf, 
0x1c, 0xb3, 0x2e, 0xa6, 0x19, 0x12, 0x7f, 0x59, 0xc8, 0x4b, 0x60, 0xd3, 0x52, 0x94, 0x4f, 0x0c, 0x76, 0x2e, 0x3d, 0xd9, 0xe2, 0xa4, 0x8d, 0x63, 0xc4, 0x05, 0x86, 0x59, 0x2d, 0xae, 0x3f, 0x13, 0x0a, 0x2a, 0xd9, 0x93, 0x35, 0x24, 0x08, 0x3f, 0xa1, 0x52, 0x37, 0x9b, 0xd7, 0xd6, 0x9f, 0x27, 0x8b, 0xd5, 0xa2, 0x19, 0x67, 0xa2, 0xe3, 0x08, 0xe7, 0x6f, 0x26, 0x6c, 0xa5, 0xfe, 0xbf, 0x8a, 0x2a, 0x49, 0x1d, 0x1d, 0x8f, 0x4e, 0xaa, 0x88, 0x1f, 0x5a, 0xfd, 0x31, 0xff, 0xc7, 0xe6, 0xef, 0x28, 0x71, 0x99, 0x2e, 0xde, 0xcc, 0x89, 0x4a, 0xff, 0xf7, 0x29, 0xb0, 0x67, 0xb0, 0x92, 0x78, 0xe1, 0x64, 0xfa, 0x9b, 0x2c, 0x36, 0x7e, 0xb8, 0x14, 0x1b, 0x69, 0x7c, 0xa5, 0x63, 0xee, 0xc4, 0x03, 0x49, 0xda, 0xba, 0xa2, 0xd0, 0x12, 0xe9, 0x52, 0x21, 0x3f, 0xf3, 0x7f, 0xea, 0xfc, 0x5d, 0xc3, 0xff, 0xa1, 0x5e, 0x08, 0x5b, 0x2c, 0x66, 0x13, 0xfe, 0x22, 0x2c, 0x78, 0x4a, 0x36, 0xd9, 0x43, 0xfe, 0x77, 0xbc, 0xd1, 0x22, 0x7f, 0x4a, 0xc5, 0xfc, 0xd5, 0xea, 0x8f, 0xf2, 0x97, 0x60, 0xa3, 0xde, 0xa3, 0x6d, 0x65, 0x67, 0xa5, 0xff, 0xdb, 0x6c, 0xfd, 0xd9, 0x2d, 0x71, 0x7a, 0x62, 0x57, 0x90, 0xf1, 0x47, 0x17, 0x04, 0x1e, 0x03, 0xee, 0x1d, 0xf1, 0xf8, 0xb0, 0x0b, 0xf1, 0xcb, 0x29, 0xdc, 0xd2, 0xac, 0x0b, 0x88, 0x5e, 0x64, 0xcd, 0x6d, 0x20, 0x7b, 0xea, 0x42, 0xc5, 0xb3, 0xcf, 0x12, 0x4b, 0xbb, 0xdd, 0x6e, 0x6c, 0xad, 0x12, 0x2a, 0xc5, 0x0f, 0xe4, 0x4f, 0x21, 0xdd, 0xdc, 0xa7, 0xae, 0x22, 0xb8, 0xfe, 0xb0, 0x84, 0xcd, 0x1a, 0xc5, 0xf2, 0x49, 0xca, 0xdf, 0x18, 0xa5, 0xcf, 0xdc, 0x64, 0xe0, 0x67, 0x8d, 0x15, 0xf9, 0x93, 0xd4, 0xc4, 0x6a, 0xfd, 0x91, 0x38, 0xc5, 0x26, 0x49, 0x9e, 0x06, 0xb9, 0xe4, 0x30, 0x36, 0x32, 0x75, 0xb9, 0x8b, 0xd6, 0x28, 0x30, 0x67, 0xfe, 0xa9, 0xe8, 0x6f, 0xd6, 0x62, 0x66, 0xe3, 0x96, 0x46, 0xe5, 0x50, 0xfe, 0xe2, 0x74, 0xc0, 0x2c, 0xb8, 0xf7, 0x84, 0x67, 0x46, 0x52, 0x47, 0x12, 0xdb, 0xe8, 0x62, 0x35, 0xe4, 0x3c, 0x1e, 0x8a, 0x12, 0x62, 0x54, 0xfd, 0xd1, 0x5c, 0x93, 0xdd, 0x8e, 0xb5, 0x93, 0xfe, 0x92, 0x4c, 0xaa, 0x99, 0xa6, 0x3d, 0x0d, 0x68, 0xc4, 0x4c, 0x16, 0x93, 0xaa, 0xfe, 0x08, 
0x4c, 0x58, 0xbe, 0x99, 0xf6, 0x8e, 0x84, 0x6f, 0xbd, 0x93, 0x5d, 0xdf, 0x60, 0x51, 0xc5, 0xad, 0x5e, 0xfa, 0x10, 0xfd, 0x45, 0x69, 0x22, 0x5e, 0xee, 0xff, 0x08, 0x7f, 0x64, 0x1d, 0x4d, 0xa7, 0x3b, 0xfa, 0xbf, 0x69, 0xee, 0x68, 0x3d, 0x9e, 0x34, 0x4d, 0x17, 0xd4, 0x51, 0x30, 0x6d, 0xba, 0xfe, 0xe4, 0x1e, 0x36, 0x8f, 0xdc, 0x36, 0x7a, 0x01, 0x45, 0xe2, 0xaf, 0x42, 0xfa, 0x6c, 0x72, 0xb1, 0x39, 0x80, 0x5f, 0x29, 0xd2, 0x80, 0xa0, 0x5e, 0xf5, 0xe5, 0xa4, 0x0e, 0x51, 0xab, 0x49, 0xa3, 0x3f, 0x1d, 0x3b, 0x30, 0xc9, 0x26, 0xac, 0x3a, 0x7f, 0x91, 0x1e, 0x20, 0xab, 0xff, 0x84, 0x6a, 0x94, 0x2c, 0xd6, 0x71, 0x7d, 0xe8, 0xb1, 0xb3, 0x8b, 0x35, 0x19, 0x69, 0xa9, 0xf6, 0x78, 0xc8, 0x70, 0x44, 0x54, 0xd3, 0x9d, 0x25, 0xef, 0x4a, 0xe3, 0x2f, 0x3d, 0x42, 0x02, 0xf5, 0xa5, 0x60, 0xd9, 0xfa, 0x8f, 0xae, 0x80, 0xe8, 0x6a, 0xc7, 0xc2, 0x88, 0xa6, 0xbf, 0x7a, 0x24, 0x24, 0x7a, 0x82, 0x02, 0x56, 0x4c, 0x92, 0x0b, 0x11, 0x9f, 0x56, 0xce, 0x9b, 0x82, 0x08, 0xa5, 0x4f, 0xef, 0xe5, 0xe1, 0x8b, 0xae, 0xff, 0xdc, 0x91, 0x19, 0x3f, 0x5d, 0xe8, 0xd9, 0xe9, 0xd0, 0x02, 0x52, 0x6b, 0xc7, 0x26, 0x27, 0x71, 0x9d, 0xe6, 0x8b, 0x44, 0x22, 0x55, 0x11, 0xaf, 0xc4, 0xa2, 0xb5, 0xaa, 0xbf, 0xa0, 0xbe, 0xc7, 0x30, 0x2b, 0xcb, 0x2b, 0x66, 0x26, 0x5e, 0xad, 0xfe, 0x94, 0xf0, 0x8c, 0xff, 0x32, 0x31, 0x1a, 0x75, 0xda, 0x9d, 0x24, 0xf4, 0x90, 0x75, 0xc7, 0x4a, 0xd4, 0x8b, 0x57, 0xaa, 0xf6, 0x3c, 0xeb, 0x3d, 0x39, 0x05, 0x59, 0x72, 0xce, 0x38, 0x65, 0x16, 0xc2, 0x93, 0x39, 0xbf, 0xdd, 0x5f, 0x25, 0x53, 0x5a, 0xc0, 0x3d, 0xdd, 0x40, 0x63, 0xf3, 0x11, 0x4f, 0xb9, 0xfe, 0x20, 0x95, 0xb0, 0x67, 0xd1, 0xa0, 0x42, 0xa6, 0x3d, 0x16, 0xe8, 0x2a, 0xcc, 0xca, 0x21, 0x17, 0xe3, 0xb5, 0x40, 0xe7, 0x93, 0xd3, 0x9e, 0xd0, 0xab, 0x42, 0xdd, 0x2c, 0xd0, 0xf1, 0x8e, 0x3a, 0x8b, 0x12, 0x27, 0xfa, 0x8b, 0xaa, 0xbe, 0x9d, 0x9e, 0x99, 0x74, 0x3e, 0xd9, 0x9e, 0xe4, 0x7e, 0x5d, 0xe3, 0xa9, 0x63, 0x25, 0xfd, 0xcd, 0x46, 0xc9, 0x55, 0x0c, 0x75, 0x63, 0x64, 0x1d, 0xa3, 0x89, 0x1f, 0xee, 0x2e, 0x66, 0x2b, 0x39, 0xcc, 0x6b, 0xd1, 0xd5, 
0xd8, 0x14, 0xe1, 0x85, 0x5e, 0xf4, 0x10, 0xaf, 0xd6, 0x40, 0x1c, 0x88, 0x62, 0xee, 0x52, 0xc8, 0x1c, 0x97, 0x27, 0xd4, 0x60, 0xb5, 0x8f, 0xfa, 0x3f, 0x37, 0x6b, 0x0a, 0xd3, 0xaf, 0x36, 0xfe, 0x76, 0xf1, 0x72, 0xd4, 0xdb, 0xe8, 0xc8, 0xc8, 0x48, 0xc4, 0x4e, 0x12, 0xaf, 0xd2, 0x29, 0x7f, 0xdc, 0x2b, 0x41, 0x97, 0x6e, 0x33, 0xf9, 0x23, 0xeb, 0xe3, 0xa4, 0xaf, 0x94, 0x26, 0xfa, 0xcb, 0xb2, 0xbe, 0x26, 0xd9, 0x74, 0xdd, 0x17, 0x63, 0x3d, 0x48, 0x4e, 0x04, 0x19, 0x25, 0x1a, 0xfe, 0x8a, 0xfa, 0x73, 0xf1, 0xbc, 0x64, 0x8e, 0x5e, 0xc2, 0x26, 0x8b, 0xeb, 0x97, 0x78, 0x96, 0x5e, 0x31, 0x2b, 0x36, 0x13, 0x33, 0xe3, 0xa6, 0xd7, 0x1a, 0x2b, 0xea, 0xd5, 0x6a, 0x3b, 0x5d, 0x2a, 0x1b, 0xa2, 0x9c, 0x32, 0x32, 0xad, 0x83, 0xa3, 0x3c, 0x1a, 0xeb, 0xa8, 0xfe, 0x9a, 0x32, 0x2c, 0x49, 0x0d, 0x6b, 0xe2, 0x6f, 0xd4, 0xc9, 0x08, 0x32, 0x53, 0x83, 0x9e, 0xc4, 0x7e, 0xd6, 0xca, 0x2e, 0xba, 0x0e, 0x25, 0xfc, 0xf5, 0xb3, 0xa0, 0x28, 0x45, 0x3c, 0x9b, 0x09, 0xca, 0x9f, 0x66, 0x84, 0x74, 0xe1, 0x4c, 0xb8, 0x6f, 0x76, 0x3a, 0xe6, 0xea, 0xa4, 0xe1, 0x90, 0x40, 0x5e, 0x69, 0xea, 0x74, 0x75, 0xda, 0x18, 0xc9, 0xd9, 0x4c, 0x26, 0xe3, 0x08, 0x67, 0x08, 0x30, 0x10, 0x34, 0xe0, 0x1e, 0x65, 0xc0, 0xe4, 0xcc, 0xa7, 0xe2, 0xae, 0x94, 0x8d, 0xd5, 0x51, 0xf5, 0x97, 0xcd, 0x38, 0x22, 0x1e, 0xbf, 0xad, 0x33, 0x1e, 0xc3, 0xb2, 0x72, 0xb6, 0x3a, 0x6e, 0xcd, 0x85, 0xf8, 0x69, 0x22, 0x13, 0x9d, 0x2e, 0x6b, 0xcc, 0x3b, 0xc3, 0x92, 0x76, 0x4b, 0x2c, 0xee, 0x1a, 0x9e, 0xb0, 0xb0, 0xc0, 0x31, 0x66, 0x1b, 0x8e, 0xc7, 0x53, 0x79, 0x3b, 0x5f, 0xff, 0x19, 0x46, 0xad, 0xf1, 0x6a, 0x16, 0x75, 0xec, 0xe1, 0x8c, 0x83, 0x9c, 0xcf, 0x99, 0xc9, 0x4c, 0xca, 0x0d, 0xe1, 0x94, 0xab, 0x33, 0xdf, 0xa7, 0x76, 0x64, 0x3a, 0x86, 0x95, 0x6a, 0x58, 0x57, 0xfc, 0xd8, 0x3c, 0xbb, 0x29, 0x82, 0x39, 0xc5, 0x5e, 0x6c, 0x12, 0x88, 0x0b, 0x57, 0xd6, 0xf8, 0xc1, 0x59, 0x2e, 0x0f, 0x5a, 0x9f, 0x24, 0x84, 0x95, 0xaa, 0xa8, 0x32, 0xa9, 0xac, 0x5d, 0x6e, 0xb4, 0xc2, 0x66, 0xf9, 0x31, 0x9a, 0x2a, 0xae, 0xff, 0xd6, 0x3b, 0x7d, 0x45, 0x23, 0x57, 0x15, 
0xfb, 0x14, 0xae, 0x3c, 0x32, 0x44, 0xe3, 0xe1, 0x90, 0xce, 0x60, 0x49, 0x5d, 0x8f, 0xa3, 0x58, 0xb3, 0x85, 0xaa, 0xfe, 0x7e, 0x53, 0x94, 0xae, 0x7f, 0x3f, 0xbb, 0xb0, 0x97, 0x2e, 0x1d, 0xae, 0x5d, 0xea, 0xfe, 0x74, 0x34, 0x1a, 0x5e, 0x23, 0xbb, 0x42, 0x7f, 0xd7, 0x8b, 0xd2, 0xf5, 0x87, 0x9a, 0x11, 0x5d, 0xf3, 0x7c, 0x37, 0x15, 0x7e, 0xab, 0xca, 0x5f, 0xd5, 0xc6, 0x85, 0x2b, 0xeb, 0x26, 0xd7, 0x24, 0xea, 0x86, 0xe9, 0x4f, 0xaa, 0xe4, 0xef, 0x06, 0x0d, 0xcc, 0x0d, 0xc5, 0x6c, 0xb6, 0xda, 0x15, 0xbd, 0xe4, 0x4a, 0x65, 0xfa, 0x36, 0x2e, 0x5b, 0x01, 0xf5, 0x42, 0xad, 0x02, 0x37, 0x52, 0x7f, 0x39, 0x6d, 0xc6, 0x8d, 0x32, 0x7c, 0xa3, 0x21, 0x9b, 0x82, 0xd7, 0xe5, 0x6a, 0x67, 0xaa, 0x63, 0xb1, 0x35, 0xd6, 0x07, 0x7e, 0x49, 0x92, 0x6e, 0x8c, 0xfe, 0x24, 0xa9, 0x8c, 0xbf, 0x99, 0xd8, 0x9a, 0xe7, 0xfb, 0x6d, 0x83, 0x6c, 0xb7, 0xdb, 0x4d, 0x1b, 0x17, 0xfb, 0xd4, 0xec, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x5c, 0x37, 0x82, 0xf6, 0x90, 0xcf, 0xe9, 0x9c, 0x2a, 0x7f, 0xa9, 0xd0, 0x66, 0x4c, 0xad, 0xca, 0x9c, 0x52, 0x13, 0xb8, 0x4d, 0xb1, 0x2f, 0x6d, 0x4d, 0x35, 0x67, 0xbd, 0x97, 0x73, 0x8d, 0x9c, 0xf5, 0x8a, 0xac, 0x55, 0xbd, 0x7f, 0xe5, 0x8c, 0xef, 0x8c, 0x8a, 0xd2, 0x5e, 0x65, 0xce, 0xea, 0x23, 0xc5, 0x12, 0xdb, 0x3e, 0xda, 0xf7, 0x1b, 0xfd, 0xa4, 0x15, 0x5c, 0xc9, 0xa5, 0xdd, 0x92, 0xa2, 0xd0, 0x1f, 0xe8, 0x8b, 0x2f, 0x2d, 0x2a, 0x92, 0xab, 0xa1, 0x68, 0xb6, 0x0d, 0x4a, 0xf3, 0x42, 0x1b, 0x64, 0x5c, 0xc5, 0x48, 0x45, 0x69, 0x50, 0x94, 0xba, 0x3b, 0x2f, 0x5c, 0xa8, 0xfd, 0x0b, 0xba, 0xd5, 0x5e, 0xa8, 0x25, 0xfb, 0x17, 0x6a, 0xd9, 0x86, 0x19, 0xf8, 0xbe, 0xc0, 0xde, 0x9a, 0xd7, 0x05, 0x56, 0xb4, 0x96, 0x16, 0xa8, 0xfd, 0xc1, 0xfc, 0x0f, 0x1f, 0x3f, 0xf9, 0xcb, 0xeb, 0x64, 0xcf, 0x17, 0x73, 0xaf, 0xd3, 0xd0, 0x5b, 0x09, 0x75, 0x8f, 0xd4, 0xee, 0x26, 0x1b, 0xa2, 0x76, 0xf7, 0x7c, 0x71, 0x4f, 0xcd, 0xac, 0xad, 0x48, 0x14, 0xf7, 0x6a, 0xe9, 0x17, 0x25, 0xf2, 0x9b, 0xcf, 0x5d, 0xfb, 0x4f, 0x25, 0x9e, 0x44, 0x6a, 0x43, 0x6d, 0xdd, 0x22, 0xa8, 0xbb, 0x30, 0xf0, 
0xfe, 0xc0, 0xc0, 0xfc, 0xc0, 0xc0, 0xd6, 0x01, 0xc4, 0xce, 0x79, 0xdc, 0xdd, 0x49, 0xf6, 0x71, 0x6f, 0x60, 0x7e, 0x27, 0x49, 0xe1, 0x8b, 0x95, 0xa0, 0xaf, 0xad, 0xb4, 0xe4, 0xb9, 0x81, 0x81, 0xdd, 0x58, 0x6e, 0x9e, 0xb2, 0x38, 0xff, 0xdc, 0xd2, 0xb5, 0xb1, 0xd7, 0xd0, 0x23, 0x6d, 0xdc, 0xb0, 0x5b, 0x06, 0x77, 0x9e, 0xbb, 0x82, 0xa0, 0x1f, 0x57, 0xbe, 0x4e, 0xdf, 0x6c, 0xbf, 0x98, 0x49, 0x73, 0xaf, 0x68, 0xf2, 0x68, 0x89, 0x73, 0x57, 0xce, 0x91, 0x17, 0x92, 0xbe, 0xbb, 0xf6, 0x87, 0x27, 0xaf, 0xc5, 0x13, 0x1a, 0xf4, 0x37, 0xbb, 0xcb, 0x37, 0x14, 0xca, 0x8e, 0x73, 0x0b, 0x5b, 0x16, 0xf6, 0xec, 0x79, 0x61, 0xcf, 0x0b, 0x0b, 0xaf, 0xd1, 0x6d, 0x0f, 0x49, 0x63, 0x72, 0x0f, 0xfb, 0xc4, 0xed, 0x2e, 0x9a, 0xfd, 0x1a, 0x49, 0xd3, 0x4c, 0x5a, 0x60, 0x61, 0xcb, 0x96, 0x2d, 0x84, 0x50, 0x54, 0x65, 0xed, 0xbd, 0xf6, 0x8d, 0x79, 0xe3, 0xd8, 0x65, 0xde, 0xb8, 0x4d, 0xb7, 0x16, 0x76, 0x7c, 0x7d, 0xfb, 0x75, 0xe1, 0xf0, 0xf6, 0x05, 0xa4, 0xf1, 0xf6, 0x2b, 0x57, 0x50, 0x83, 0xa7, 0x3f, 0x69, 0x20, 0xf1, 0x76, 0x49, 0xbf, 0x2d, 0xbe, 0xaf, 0x08, 0x65, 0xcb, 0x1d, 0xd7, 0x85, 0x43, 0x77, 0x1c, 0xba, 0x6f, 0xfb, 0xc2, 0x0b, 0x5b, 0x6e, 0xc7, 0x79, 0xbc, 0xf5, 0xd7, 0x9f, 0x88, 0xbe, 0x6c, 0x7b, 0x97, 0x04, 0xc9, 0x9b, 0xdd, 0xe1, 0x1b, 0x8d, 0x1d, 0x7b, 0x0e, 0x70, 0xb4, 0xe1, 0x76, 0x55, 0xb4, 0x95, 0x76, 0xda, 0x5e, 0x7f, 0xfd, 0x75, 0x24, 0x71, 0xfb, 0xf6, 0x17, 0x5e, 0xc3, 0x59, 0xfc, 0xcf, 0x9f, 0xe4, 0x9f, 0x1c, 0x3b, 0xd2, 0x66, 0xf7, 0x6f, 0x9f, 0xfe, 0x00, 0x7e, 0x71, 0x87, 0xf1, 0x77, 0x8d, 0x15, 0x08, 0x94, 0x27, 0xbf, 0x6f, 0xac, 0x2c, 0xb2, 0x78, 0xc0, 0x88, 0x34, 0xbe, 0x8e, 0x0c, 0x1e, 0x7e, 0xed, 0xf6, 0x73, 0x3b, 0x1f, 0xdf, 0xd8, 0x07, 0x36, 0x74, 0x46, 0xa3, 0xf4, 0x4e, 0xbb, 0x6b, 0x83, 0x3b, 0x66, 0x71, 0xfa, 0x7c, 0x85, 0x7c, 0xfc, 0xb3, 0xcb, 0xfc, 0x23, 0xc6, 0xc0, 0x62, 0x00, 0x29, 0xc3, 0xcf, 0x45, 0xfa, 0x69, 0x5c, 0x34, 0x92, 0xdd, 0x45, 0x9e, 0x63, 0x5c, 0xe4, 0x7b, 0xa5, 0xcd, 0x48, 0x8a, 0x20, 0x83, 0x07, 0x08, 0x83, 0x0b, 0x5b, 0xae, 0xbc, 0xff, 0x95, 
0x0d, 0xa3, 0xb0, 0x2d, 0xad, 0xff, 0xb2, 0x59, 0x2a, 0x13, 0x60, 0x57, 0x76, 0x2a, 0x11, 0x1a, 0x0b, 0x85, 0xa6, 0x59, 0x52, 0x9a, 0xce, 0x76, 0x8c, 0x8d, 0x85, 0xc6, 0x7c, 0xa5, 0x25, 0x4e, 0xd4, 0xa2, 0x0e, 0x8c, 0x1c, 0x6a, 0x2a, 0x72, 0x9f, 0xda, 0xb5, 0x42, 0xaa, 0xe1, 0x8b, 0xa1, 0x13, 0xd2, 0x59, 0x67, 0xc2, 0x8f, 0x55, 0xfd, 0xc5, 0xbc, 0x50, 0x28, 0x52, 0xd1, 0x4d, 0x62, 0x1d, 0xb3, 0x55, 0xeb, 0xf9, 0x82, 0x81, 0x24, 0xd9, 0x2d, 0xab, 0x5d, 0x05, 0x67, 0x82, 0x55, 0x25, 0x16, 0x88, 0x19, 0xda, 0x26, 0x73, 0x81, 0x9a, 0x55, 0x1b, 0x98, 0xee, 0xa7, 0x85, 0xc6, 0x2c, 0x15, 0x96, 0xb7, 0x2c, 0xb6, 0xcc, 0x2d, 0xb6, 0x04, 0x16, 0xe7, 0x8c, 0x73, 0x8b, 0xb8, 0x05, 0x5a, 0x02, 0x01, 0x7c, 0x2f, 0x06, 0xe6, 0x16, 0x49, 0xb2, 0x25, 0x30, 0x47, 0x3e, 0xf9, 0x41, 0xfc, 0xa4, 0xc7, 0x28, 0xd3, 0x28, 0xc3, 0x36, 0x46, 0xe0, 0xb9, 0x47, 0x37, 0xa0, 0x6f, 0x8c, 0xdc, 0x15, 0xfc, 0x65, 0xb7, 0xa4, 0x15, 0xa0, 0x7b, 0x17, 0xbb, 0x5b, 0x71, 0x8c, 0x91, 0xaa, 0x44, 0x0a, 0x94, 0x2d, 0x93, 0x7a, 0x4b, 0x93, 0x94, 0x2b, 0xfb, 0x57, 0x9b, 0xb1, 0x38, 0xcf, 0x8f, 0x45, 0x12, 0x65, 0xc3, 0xd5, 0x03, 0x97, 0x76, 0xf9, 0x56, 0xfd, 0x03, 0x8f, 0xbf, 0xa2, 0x97, 0xaa, 0x75, 0xbe, 0x06, 0x08, 0x4f, 0xb3, 0xc5, 0x7f, 0x5c, 0xdb, 0x12, 0xcd, 0xc9, 0x38, 0x7f, 0x34, 0x3b, 0xc1, 0x29, 0x0f, 0xcd, 0xd2, 0xa1, 0xb4, 0x55, 0x58, 0x96, 0xda, 0x16, 0x91, 0xa2, 0x16, 0xca, 0x8e, 0x71, 0xae, 0xa5, 0x05, 0xc9, 0x6a, 0x09, 0xb4, 0x50, 0xe2, 0x8c, 0xc8, 0x26, 0x39, 0x64, 0x2c, 0x52, 0x48, 0x8f, 0x52, 0x2a, 0x17, 0x89, 0x0e, 0x8d, 0x28, 0xc1, 0xfb, 0x16, 0x5e, 0xbb, 0x72, 0x7a, 0xe4, 0xea, 0xfc, 0x35, 0xb9, 0x3a, 0xad, 0xae, 0x74, 0xbb, 0x94, 0xd4, 0x10, 0x28, 0x15, 0xce, 0xd3, 0x2e, 0xe9, 0x58, 0x5e, 0xb2, 0xb7, 0x8a, 0xde, 0xfd, 0x37, 0xdb, 0xae, 0xea, 0x91, 0xb3, 0x34, 0x1b, 0x9c, 0xa5, 0x7b, 0x26, 0x7e, 0xd7, 0x5a, 0x2a, 0x62, 0x98, 0xe5, 0x66, 0xe9, 0x03, 0x70, 0x39, 0xc5, 0x9d, 0xed, 0x2f, 0xf6, 0x5f, 0xe6, 0x98, 0xa9, 0xd4, 0x5f, 0x84, 0xde, 0x25, 0x34, 0xcb, 0x6f, 0xb2, 0x72, 0x58, 0x74, 
0xd4, 0xe8, 0x30, 0x6b, 0xc9, 0x14, 0x17, 0xba, 0xcc, 0x5f, 0x7e, 0x7a, 0x57, 0x5b, 0x61, 0x8a, 0xde, 0x2a, 0xac, 0x63, 0xed, 0x71, 0xf6, 0xb1, 0x66, 0x5c, 0xaa, 0xb0, 0x0c, 0x3b, 0xe7, 0x5a, 0x8c, 0x84, 0xab, 0x16, 0x26, 0xb7, 0x45, 0xc2, 0x18, 0x61, 0xd0, 0x48, 0xb8, 0x63, 0x94, 0x19, 0x8b, 0x6f, 0xb6, 0x51, 0x0a, 0x71, 0xa6, 0x07, 0x08, 0x81, 0x87, 0x50, 0x81, 0x27, 0xaf, 0x4a, 0x9f, 0xc1, 0x35, 0x9c, 0x4a, 0x59, 0xf7, 0x63, 0x04, 0xd1, 0x5c, 0x8b, 0x4b, 0x55, 0xbb, 0x68, 0xab, 0x67, 0xf8, 0x94, 0xb2, 0xb0, 0x1e, 0xca, 0x69, 0x7e, 0x94, 0x91, 0x64, 0xea, 0x4b, 0xf8, 0x0c, 0x63, 0x0d, 0xf4, 0xd9, 0x19, 0x46, 0x60, 0x67, 0x95, 0xa1, 0x95, 0x1d, 0x9a, 0xd1, 0x7d, 0x4d, 0xa7, 0xd3, 0xd9, 0xa0, 0x6b, 0x2a, 0xcb, 0xfb, 0x6f, 0xea, 0xd3, 0xf9, 0xd9, 0xe6, 0xaf, 0xf0, 0x98, 0x96, 0x48, 0x83, 0x5c, 0xb2, 0x0e, 0x79, 0xce, 0x1f, 0xb9, 0x27, 0x12, 0x94, 0x02, 0xaf, 0x2f, 0xdb, 0x69, 0x65, 0x9d, 0x8e, 0xdc, 0x41, 0x5a, 0x1c, 0xe0, 0x06, 0x5a, 0x23, 0xce, 0x9e, 0x2d, 0x94, 0x9d, 0xab, 0x5c, 0xb1, 0xd4, 0x46, 0xb9, 0xe2, 0x1c, 0x51, 0x95, 0x2d, 0x32, 0x32, 0x71, 0xe3, 0x74, 0xd1, 0xcf, 0x40, 0x0b, 0xd7, 0x1f, 0x27, 0x31, 0x40, 0xdd, 0x60, 0x1b, 0x2a, 0x70, 0xcb, 0xff, 0x5c, 0x35, 0x84, 0xe4, 0xad, 0xb1, 0xe6, 0x58, 0xca, 0x15, 0xc5, 0x08, 0x5c, 0x12, 0x60, 0x92, 0x4f, 0x29, 0x3b, 0xe7, 0xcf, 0x1b, 0x61, 0x71, 0x9c, 0x5d, 0xa4, 0x64, 0x98, 0xc1, 0x7d, 0x89, 0x2a, 0x6f, 0x26, 0xec, 0xcd, 0xfa, 0x09, 0x81, 0x26, 0x72, 0x9b, 0x25, 0xb8, 0x0a, 0x3e, 0xfa, 0xe4, 0x8c, 0xac, 0xcb, 0xf6, 0x5a, 0x2c, 0x96, 0xde, 0x3c, 0x24, 0x77, 0x59, 0xc6, 0xf8, 0x8d, 0xcb, 0x2b, 0x11, 0x0b, 0x83, 0xb3, 0xe2, 0x4a, 0xd1, 0xdb, 0xcb, 0xac, 0x73, 0x2f, 0x90, 0xb3, 0xf8, 0x29, 0x7f, 0x6c, 0x48, 0xaa, 0x76, 0xb1, 0x1b, 0x5f, 0xed, 0xfd, 0xd3, 0xac, 0xf6, 0x14, 0xe5, 0xaf, 0x8a, 0xd1, 0x6a, 0xa2, 0x8c, 0xd5, 0x84, 0xd8, 0x43, 0x9d, 0x13, 0xb0, 0x0a, 0x5b, 0x02, 0x8c, 0x3d, 0x42, 0x10, 0x9b, 0xa8, 0xb8, 0x17, 0x50, 0x69, 0x22, 0x07, 0x56, 0xeb, 0x8f, 0x6d, 0xc4, 0x0d, 0x22, 0x81, 0xdb, 0x17, 0x6e, 0xff, 
0xdb, 0xab, 0xd0, 0x67, 0x4a, 0xa5, 0x7a, 0x9a, 0xde, 0x4a, 0xc5, 0xd3, 0x65, 0x0e, 0x50, 0x99, 0x66, 0x13, 0x56, 0x7d, 0x1a, 0x10, 0x5d, 0x92, 0x5c, 0xec, 0xa1, 0xb5, 0x8f, 0xca, 0xcf, 0x94, 0x38, 0x3f, 0x15, 0xd9, 0x65, 0xb1, 0x39, 0x22, 0x21, 0xc2, 0x99, 0x9f, 0xf8, 0xc6, 0x74, 0x61, 0x85, 0x91, 0xe5, 0x8f, 0xd4, 0x84, 0xc3, 0x61, 0xaf, 0x03, 0xa0, 0x90, 0xaf, 0xf2, 0xb3, 0xf2, 0x63, 0x11, 0x6f, 0x4d, 0x8d, 0x37, 0x5c, 0xe3, 0x75, 0x56, 0xfc, 0xca, 0x13, 0xe6, 0xfc, 0x75, 0xb2, 0x64, 0x93, 0x85, 0xd1, 0x41, 0x9f, 0x18, 0x83, 0x08, 0x6b, 0x89, 0xa7, 0xc1, 0x59, 0x43, 0x4c, 0x86, 0xc3, 0xf4, 0x52, 0x49, 0x1d, 0xe0, 0x20, 0x21, 0x33, 0x69, 0xe0, 0x45, 0xd6, 0xb8, 0x88, 0x7a, 0xc4, 0x48, 0xb9, 0x33, 0x6a, 0x94, 0x85, 0x19, 0x8c, 0xc1, 0x16, 0x23, 0xe1, 0x72, 0x4e, 0xa5, 0x93, 0xd2, 0xd6, 0xc2, 0x4a, 0x2d, 0x92, 0x32, 0x18, 0x9c, 0x0f, 0xb4, 0x1d, 0xda, 0xbe, 0xe5, 0x5b, 0x57, 0xe1, 0xcf, 0xd0, 0xd9, 0x3c, 0x6a, 0xcb, 0xf5, 0x58, 0xd3, 0x18, 0x81, 0x93, 0x45, 0xf9, 0x2b, 0x7c, 0xc2, 0xce, 0xf2, 0x16, 0x65, 0x78, 0x0f, 0x53, 0xe4, 0x98, 0x93, 0x3e, 0x43, 0x2b, 0xf7, 0xf9, 0x7a, 0x70, 0xd1, 0x9d, 0xaa, 0x0a, 0x3b, 0x0a, 0xf4, 0x20, 0xb9, 0x53, 0x3b, 0xca, 0xbd, 0xbd, 0x2e, 0xc6, 0x80, 0x7a, 0xcd, 0x84, 0x1d, 0xbb, 0xc8, 0x7c, 0x6c, 0xa8, 0x4e, 0x11, 0x5c, 0x26, 0xef, 0x8a, 0x69, 0x86, 0xd6, 0x65, 0xd5, 0x3a, 0xa2, 0x7a, 0x7a, 0x8c, 0x26, 0xf3, 0x34, 0xa5, 0xba, 0x8e, 0x1c, 0x37, 0x19, 0xa3, 0xbf, 0x12, 0x29, 0xdc, 0x23, 0x93, 0xc7, 0x27, 0x21, 0x15, 0x62, 0x4f, 0x0a, 0x57, 0xad, 0xa6, 0x0f, 0x94, 0xb6, 0x80, 0x51, 0x9d, 0xaa, 0x34, 0x82, 0x90, 0x8f, 0x40, 0x31, 0x8a, 0x68, 0xf4, 0x47, 0x75, 0xc9, 0xb2, 0xb0, 0xc8, 0x22, 0x0d, 0x2f, 0x24, 0x88, 0xdc, 0x77, 0x78, 0xe1, 0x2a, 0x13, 0x38, 0x9c, 0x6a, 0x7a, 0x26, 0xf3, 0xcc, 0x44, 0xbd, 0xde, 0x5c, 0x36, 0x81, 0x2d, 0x11, 0x1a, 0x03, 0x55, 0x97, 0xf4, 0x4c, 0x96, 0xf1, 0x47, 0x5c, 0xd2, 0x70, 0x82, 0x7a, 0xee, 0x59, 0x1f, 0xe3, 0x56, 0xca, 0x84, 0x6b, 0x0c, 0xa4, 0xfd, 0x33, 0xee, 0x52, 0x58, 0x0c, 0x95, 0x3a, 0x20, 0x4d, 
0x66, 0x1c, 0xe7, 0x09, 0xe3, 0xd9, 0x75, 0x57, 0x98, 0x36, 0x6e, 0x9d, 0x07, 0xa1, 0xbb, 0xb3, 0x6c, 0xfe, 0x86, 0xcb, 0x5a, 0x52, 0xf1, 0xfb, 0x46, 0x6f, 0x84, 0x69, 0x8e, 0x34, 0x62, 0x9a, 0x95, 0x9f, 0x3d, 0xb2, 0x96, 0xf1, 0x3d, 0x45, 0xfd, 0x51, 0x95, 0xb5, 0xd0, 0x65, 0x8c, 0xaa, 0xbf, 0xa2, 0x22, 0x55, 0xfd, 0x19, 0x79, 0x1e, 0x9d, 0xe3, 0x64, 0x2d, 0x73, 0x60, 0x19, 0x67, 0xf0, 0xe7, 0xd7, 0xe7, 0xef, 0xad, 0x98, 0xad, 0x66, 0x32, 0x9c, 0xbb, 0x3b, 0x8e, 0x2b, 0x18, 0xcd, 0x0c, 0x0e, 0x47, 0x98, 0x24, 0xf8, 0x6d, 0xcb, 0x79, 0xf6, 0x2c, 0x38, 0x7d, 0xfc, 0xcf, 0x6b, 0xa0, 0x2e, 0xce, 0xae, 0xae, 0x59, 0x92, 0x11, 0x47, 0xd5, 0x0c, 0xef, 0x7e, 0x17, 0x8f, 0x96, 0x3a, 0x4d, 0x07, 0xdc, 0xd3, 0x8e, 0xb0, 0x93, 0x04, 0x88, 0xca, 0xc5, 0x59, 0x89, 0x3f, 0xfe, 0x70, 0xfb, 0x28, 0x4b, 0xa6, 0x74, 0x6c, 0xbc, 0x59, 0x79, 0xd5, 0x75, 0x58, 0xcb, 0xeb, 0xa8, 0x1e, 0x99, 0x3c, 0xb4, 0xc3, 0x9f, 0x4c, 0xd7, 0xad, 0xf9, 0x03, 0xdc, 0x4e, 0xad, 0x77, 0x6b, 0x99, 0x53, 0x43, 0x09, 0xe5, 0x4c, 0x13, 0x7b, 0x35, 0x25, 0xd4, 0xa0, 0xdc, 0x42, 0xa3, 0x08, 0xce, 0xe0, 0x85, 0x3f, 0x5a, 0x97, 0x3e, 0x39, 0x56, 0xed, 0xb0, 0x44, 0x2c, 0xcf, 0x54, 0x5b, 0xd3, 0x18, 0x41, 0xa4, 0xe2, 0xef, 0xe1, 0x99, 0x72, 0x97, 0x94, 0xe3, 0xeb, 0x95, 0x27, 0x50, 0x4f, 0xd9, 0x0e, 0x9a, 0x28, 0x2d, 0x82, 0xf5, 0x85, 0x49, 0x2a, 0x80, 0x42, 0xc9, 0xad, 0xdb, 0xb5, 0x33, 0xd4, 0x5c, 0xf5, 0x8c, 0x77, 0xc5, 0x2e, 0xb3, 0xa7, 0x6a, 0xd6, 0x42, 0x13, 0xb7, 0xce, 0x1f, 0xb2, 0xb6, 0xf2, 0x24, 0x9b, 0x8e, 0x61, 0x4b, 0xc9, 0x75, 0x68, 0x50, 0x33, 0x5d, 0x8c, 0x39, 0x13, 0x7c, 0x81, 0xb4, 0xf6, 0xf8, 0x5c, 0xd0, 0x04, 0x57, 0x1e, 0x39, 0x5a, 0x68, 0x28, 0x61, 0x81, 0x39, 0xc0, 0xdf, 0xc6, 0x62, 0x58, 0x69, 0x09, 0x18, 0x35, 0x14, 0x06, 0x02, 0x6d, 0x7f, 0xb9, 0xfd, 0xc9, 0x75, 0xf9, 0x33, 0xa5, 0x9a, 0x6a, 0x0a, 0xe7, 0x23, 0xe1, 0x89, 0x94, 0x8b, 0x0a, 0x50, 0x55, 0x60, 0xbe, 0x97, 0xcd, 0x89, 0x19, 0x1d, 0x85, 0xea, 0x00, 0x32, 0xe4, 0xa1, 0x89, 0x31, 0x1a, 0x0e, 0x2e, 0x17, 0x5b, 0xa8, 0x58, 0xbc, 
0x09, 0x92, 0x85, 0x8b, 0x45, 0x29, 0xc2, 0xd6, 0x3d, 0xa6, 0xb2, 0xb9, 0x9a, 0x2e, 0xd8, 0xbc, 0x06, 0xfb, 0x1a, 0xf1, 0xb1, 0x33, 0x6b, 0xf0, 0x6b, 0xac, 0xdb, 0xd9, 0xc9, 0xf8, 0x2d, 0xe3, 0xfc, 0xc9, 0x3a, 0x07, 0x8f, 0xc6, 0x0d, 0x3a, 0x15, 0x74, 0x4c, 0x1d, 0x7c, 0x82, 0x78, 0x73, 0xa3, 0x09, 0x5e, 0xbe, 0x42, 0xa1, 0x1c, 0x77, 0x2e, 0xf2, 0x90, 0x1b, 0x60, 0x34, 0x1a, 0xf9, 0x64, 0xe5, 0xe1, 0x57, 0x15, 0x5c, 0xf1, 0x7b, 0xae, 0x18, 0x80, 0x89, 0x33, 0x9c, 0x23, 0x21, 0xe4, 0xc7, 0xdf, 0x5b, 0x97, 0xbf, 0x99, 0x54, 0xce, 0x72, 0xfe, 0xcc, 0x79, 0x4b, 0xee, 0x6e, 0x8c, 0x20, 0xee, 0x2e, 0x45, 0x0d, 0x21, 0x36, 0xde, 0xea, 0x0a, 0xd4, 0x60, 0x9f, 0x0b, 0xd4, 0xbb, 0xcb, 0x9a, 0x18, 0x3a, 0xd1, 0x4b, 0x25, 0x69, 0x72, 0x93, 0x0b, 0x15, 0xe6, 0xd6, 0xcb, 0xa7, 0x92, 0xb5, 0xca, 0x36, 0x49, 0x7c, 0xa4, 0xa9, 0x42, 0x44, 0xc3, 0xa5, 0xe5, 0xf6, 0x1a, 0xf0, 0xad, 0xd3, 0x12, 0xba, 0x2e, 0xcc, 0x5b, 0xfc, 0x15, 0xc5, 0xe5, 0xe9, 0x35, 0x7f, 0xc3, 0xac, 0x0b, 0xb4, 0xa8, 0xdc, 0xb4, 0x14, 0x99, 0x2a, 0xb1, 0x56, 0xcc, 0x53, 0xbf, 0xf0, 0x13, 0x6b, 0xb0, 0x17, 0xa5, 0x33, 0xb0, 0x7c, 0xe8, 0xe8, 0xba, 0xd7, 0xc0, 0xfe, 0xd4, 0x13, 0xd3, 0xfd, 0x06, 0x5f, 0xc4, 0x31, 0x31, 0xec, 0xba, 0xd4, 0x4e, 0x66, 0x30, 0x53, 0x4e, 0xd3, 0x74, 0x48, 0xf3, 0x07, 0x2c, 0x8a, 0x7f, 0xa3, 0x02, 0x2f, 0x37, 0x8f, 0x44, 0x7e, 0x4d, 0xd7, 0x7b, 0x1a, 0x85, 0x75, 0x66, 0xa9, 0x24, 0x65, 0xbc, 0x7e, 0xb0, 0xf0, 0xb0, 0x58, 0xd1, 0x95, 0x58, 0x24, 0xdf, 0x9b, 0x30, 0x69, 0x9c, 0x26, 0x43, 0x3c, 0xb2, 0x62, 0x22, 0xe6, 0x4b, 0xcd, 0xd3, 0x9e, 0xcc, 0x40, 0xcb, 0x8c, 0x5a, 0xd4, 0x96, 0xd0, 0xbf, 0xea, 0x81, 0x1f, 0xe4, 0xd9, 0x2c, 0xc8, 0x45, 0x8a, 0xd9, 0xac, 0x92, 0x87, 0xfc, 0x10, 0xb2, 0x06, 0x7f, 0x3b, 0xbe, 0xaf, 0xa5, 0x2e, 0xc0, 0xc3, 0xeb, 0x9c, 0x3a, 0xa1, 0x99, 0x36, 0xcb, 0x3c, 0xa0, 0xb1, 0x85, 0xd1, 0xca, 0x60, 0x0c, 0x2c, 0xbf, 0xb4, 0x37, 0xe8, 0x59, 0x07, 0x7d, 0x29, 0x5b, 0xc4, 0x37, 0x96, 0x40, 0x01, 0xc6, 0xd0, 0x03, 0x9a, 0xdd, 0xea, 0x22, 0xa6, 0x27, 0x32, 0xc6, 0xff, 
0xd6, 0x85, 0x0e, 0xaf, 0x17, 0xc8, 0x1f, 0x27, 0xa1, 0x49, 0x74, 0x71, 0xa9, 0x69, 0xdf, 0xff, 0xb7, 0x73, 0xfe, 0x31, 0x6d, 0x5c, 0x77, 0x00, 0x2f, 0x23, 0x2c, 0xcd, 0x0d, 0x29, 0x6c, 0x66, 0x4d, 0xd5, 0x45, 0xaa, 0xa9, 0x5b, 0x9b, 0x90, 0x46, 0x4a, 0x9b, 0x55, 0x48, 0x07, 0x9c, 0xab, 0x35, 0x5b, 0x07, 0x0d, 0x14, 0x95, 0x0e, 0xea, 0x0a, 0xe1, 0x4c, 0xb3, 0xbc, 0x5a, 0x49, 0x6b, 0x05, 0x7b, 0x5a, 0xb4, 0x2d, 0xd5, 0xa4, 0xfc, 0x31, 0x6d, 0x19, 0x71, 0xb4, 0x69, 0x6b, 0x94, 0x36, 0x26, 0xaa, 0x32, 0xc9, 0x57, 0x8d, 0x06, 0x31, 0x1b, 0xbb, 0x06, 0x2a, 0x36, 0xa0, 0x56, 0x7a, 0xaa, 0x2a, 0x57, 0x1b, 0x12, 0x95, 0xe9, 0xb6, 0x2e, 0x51, 0x40, 0x5a, 0x16, 0xab, 0x62, 0x33, 0xd1, 0xc0, 0xdb, 0xf7, 0xfb, 0x7e, 0x9c, 0xcf, 0x10, 0x08, 0x99, 0xba, 0xfe, 0xf5, 0xfd, 0xbc, 0x77, 0xef, 0xee, 0x7d, 0xdf, 0xf7, 0x7d, 0xef, 0xf9, 0xeb, 0xf7, 0xee, 0xde, 0x3b, 0xff, 0x60, 0xfe, 0xb3, 0xf4, 0xb0, 0x6a, 0x8d, 0xf5, 0x05, 0xbc, 0x59, 0x8b, 0xd9, 0x46, 0xdf, 0xda, 0x85, 0x94, 0xcb, 0x09, 0xd3, 0xc4, 0x94, 0xf8, 0x71, 0xb5, 0xc9, 0x4b, 0x95, 0x6e, 0x1d, 0x6c, 0x07, 0x92, 0x72, 0x89, 0x92, 0x0c, 0xb0, 0x93, 0xa9, 0xec, 0x64, 0x6c, 0xa1, 0x76, 0xd7, 0x25, 0x27, 0xbf, 0x5e, 0xf4, 0xa9, 0x58, 0x08, 0xad, 0x51, 0x62, 0xfc, 0x0d, 0xd6, 0xe5, 0x3f, 0x9b, 0xa0, 0x38, 0xc9, 0xaa, 0xa8, 0xb7, 0xf8, 0x10, 0xa2, 0x56, 0xdc, 0x26, 0xe4, 0x3c, 0x1a, 0x73, 0x3c, 0xc3, 0xaf, 0x6f, 0xd3, 0x72, 0xd0, 0x4e, 0x97, 0x7c, 0x86, 0xd2, 0x69, 0x51, 0x30, 0x7d, 0xe4, 0xed, 0xbf, 0xff, 0xee, 0xe6, 0x46, 0xfe, 0x53, 0x1f, 0x6e, 0xa8, 0x2c, 0x2c, 0x06, 0xfd, 0x95, 0xcd, 0xdd, 0x30, 0x87, 0x1e, 0xf4, 0xf0, 0x6b, 0x60, 0x7a, 0x65, 0x28, 0xc2, 0x2e, 0x69, 0x7d, 0xa9, 0x40, 0x11, 0xe7, 0xfc, 0xa3, 0x95, 0xfc, 0x45, 0x68, 0x30, 0xe6, 0x7a, 0xf9, 0xca, 0xc9, 0xd2, 0xd4, 0x0e, 0x3e, 0x7f, 0xc5, 0xc9, 0xa2, 0xbc, 0x2d, 0xf6, 0xac, 0x7d, 0x19, 0x0d, 0xbe, 0xe6, 0x22, 0xce, 0xa3, 0x75, 0xeb, 0xdc, 0x79, 0x30, 0x12, 0x62, 0x4b, 0x8a, 0x88, 0x70, 0x91, 0x62, 0xc0, 0x1a, 0x63, 0xd4, 0x59, 0x69, 0xe3, 0x93, 0x48, 0xfe, 0x9f, 
0x15, 0xe2, 0x91, 0x44, 0x06, 0x5a, 0xc2, 0x56, 0x20, 0x11, 0xf6, 0x90, 0x60, 0x9b, 0x78, 0x83, 0x53, 0x41, 0x26, 0x8e, 0xf8, 0x59, 0xa3, 0x6c, 0xeb, 0x9f, 0xa5, 0xed, 0x39, 0xb2, 0x7f, 0xba, 0xe4, 0x18, 0xcb, 0xe8, 0x45, 0x2f, 0x09, 0xef, 0x1d, 0xd9, 0x2f, 0x9d, 0xc6, 0xae, 0x79, 0xa5, 0x0a, 0x15, 0xd3, 0xd3, 0x9d, 0x9d, 0x47, 0x9e, 0x7b, 0x63, 0x23, 0xf7, 0xf5, 0xa5, 0xba, 0x1b, 0x22, 0x05, 0x45, 0xd1, 0xef, 0x77, 0x36, 0x0c, 0x5d, 0xe8, 0x81, 0x4b, 0x20, 0x38, 0xd0, 0xbb, 0xb2, 0x92, 0x9e, 0x2c, 0xda, 0x78, 0xab, 0x83, 0x11, 0x1f, 0x83, 0xaf, 0x2b, 0xe0, 0x92, 0x54, 0x1d, 0xe1, 0x13, 0xb2, 0x1b, 0xa5, 0x26, 0xc6, 0xf8, 0x02, 0x03, 0x7f, 0x84, 0x2a, 0xe7, 0x3d, 0xeb, 0x46, 0x92, 0xd7, 0x17, 0xba, 0x9c, 0xc5, 0x99, 0x88, 0xe5, 0x19, 0xd8, 0x5d, 0x75, 0xc6, 0x0e, 0xab, 0xf5, 0x8c, 0xe2, 0xe6, 0x59, 0xb1, 0xb6, 0x65, 0x9a, 0xf2, 0x91, 0x44, 0x4a, 0xb6, 0x04, 0x17, 0x35, 0xcc, 0xad, 0x5c, 0x0c, 0xab, 0x1a, 0xdf, 0x8e, 0xd0, 0x92, 0x18, 0xe5, 0x17, 0xd6, 0xf9, 0xef, 0xc5, 0xe9, 0x4d, 0xd8, 0x5f, 0xb1, 0x7f, 0xbd, 0x4c, 0x38, 0xae, 0xa2, 0x02, 0x52, 0x4c, 0x3a, 0xaf, 0x6c, 0x32, 0x7f, 0x71, 0xdd, 0xa8, 0xb4, 0x2d, 0x27, 0x95, 0x44, 0x71, 0xc7, 0x8d, 0xa1, 0x8e, 0x1e, 0x1c, 0xc2, 0x5d, 0xe0, 0x41, 0xb8, 0x4b, 0xf0, 0x56, 0x87, 0xd9, 0xa2, 0x61, 0xb2, 0x3b, 0xcb, 0x9f, 0xa2, 0xe8, 0xb8, 0x72, 0xf7, 0xe1, 0x95, 0xac, 0x2f, 0x68, 0xfa, 0x61, 0x25, 0xce, 0x06, 0x4f, 0xc6, 0x96, 0x66, 0xb7, 0x45, 0x66, 0x77, 0x68, 0xdd, 0xeb, 0xf0, 0xf6, 0x86, 0x7c, 0x1a, 0xf6, 0xd3, 0xa2, 0xac, 0x08, 0x93, 0xa5, 0x0b, 0xcc, 0xfa, 0xb6, 0x38, 0x9f, 0x75, 0x27, 0x87, 0xf8, 0xd9, 0x0c, 0xb1, 0xb6, 0x65, 0x5d, 0xb5, 0xde, 0xcd, 0x5b, 0xe2, 0x9f, 0x94, 0xb0, 0x4b, 0xc0, 0x64, 0x65, 0x81, 0x8f, 0x08, 0x26, 0x72, 0xb9, 0x7b, 0xb9, 0xff, 0x46, 0xd7, 0x9e, 0x76, 0xe5, 0x6b, 0x7f, 0x01, 0x77, 0xbc, 0x72, 0xe4, 0x8b, 0x47, 0x3a, 0x65, 0x97, 0xea, 0x14, 0xfe, 0x61, 0x61, 0x83, 0x23, 0xb9, 0x03, 0x0e, 0x9f, 0x3a, 0xf5, 0xc0, 0x86, 0xfe, 0xeb, 0x6b, 0x8e, 0xf5, 0xda, 0x94, 0x5c, 0x32, 0xe8, 0x8f, 0x5c, 
0x76, 0x4d, 0x82, 0x03, 0xf7, 0x41, 0x17, 0x04, 0x0f, 0x0e, 0x67, 0xd9, 0x55, 0x0e, 0xe6, 0x10, 0x6c, 0x44, 0xc8, 0x75, 0x85, 0x02, 0xb7, 0x17, 0x5f, 0xc8, 0x60, 0xc3, 0x35, 0x06, 0xc7, 0x2c, 0x76, 0xf0, 0x6b, 0x63, 0x2a, 0x04, 0xba, 0x4b, 0x11, 0xb9, 0xf6, 0x4f, 0x63, 0x19, 0x06, 0x54, 0xc1, 0xbb, 0x92, 0xc7, 0x88, 0x64, 0x15, 0x34, 0xea, 0x83, 0xec, 0x0a, 0x93, 0xa5, 0xb9, 0x75, 0x8f, 0xa1, 0xf1, 0x87, 0x51, 0xdc, 0xb3, 0x69, 0x39, 0x09, 0xaa, 0x43, 0x23, 0xd5, 0x59, 0xfe, 0x48, 0x42, 0xe3, 0x73, 0xd3, 0x15, 0xfc, 0x9c, 0x06, 0xae, 0xd1, 0xf2, 0x0d, 0x76, 0xa3, 0x30, 0x7d, 0xd7, 0xa4, 0xce, 0x2f, 0xa1, 0x7e, 0x2c, 0xe3, 0x01, 0xcf, 0x01, 0xbb, 0x77, 0xc0, 0x05, 0x4f, 0xfe, 0xa8, 0xd6, 0x5b, 0xfb, 0xa5, 0xf3, 0xe8, 0x0d, 0xe6, 0x92, 0x8a, 0x0a, 0xd1, 0xbb, 0xa6, 0x6f, 0x71, 0xc4, 0x54, 0xa6, 0x57, 0x2b, 0x2a, 0x56, 0x51, 0x71, 0x61, 0x61, 0xe1, 0xb1, 0xc7, 0x36, 0x59, 0x7f, 0x64, 0x2f, 0xf9, 0xfc, 0x8a, 0xaa, 0x2e, 0xeb, 0x5a, 0xef, 0x92, 0xeb, 0x89, 0xfa, 0xe1, 0x29, 0xe8, 0x82, 0x57, 0x3d, 0x9e, 0x9e, 0x2c, 0xbf, 0x06, 0x69, 0xde, 0x34, 0x06, 0x8f, 0x11, 0x16, 0x43, 0x2a, 0xed, 0x9d, 0x8c, 0xc0, 0x5c, 0x24, 0x83, 0x93, 0x91, 0x34, 0xfe, 0x1f, 0x52, 0x7a, 0x90, 0xff, 0xe2, 0x2f, 0xa3, 0xd6, 0x43, 0x21, 0xdc, 0x16, 0x59, 0x2e, 0x06, 0x37, 0xf2, 0x34, 0xaf, 0x0b, 0x2a, 0x69, 0x4c, 0xbc, 0x53, 0x87, 0xf8, 0x8d, 0x21, 0xb3, 0x04, 0x6e, 0x49, 0xb3, 0x72, 0xae, 0xd2, 0xe5, 0x16, 0xd6, 0xbb, 0x78, 0x5e, 0x4e, 0x82, 0xa6, 0x40, 0x27, 0x3d, 0xa5, 0xf1, 0x77, 0xd2, 0xef, 0xe5, 0xfa, 0x60, 0x0a, 0xab, 0x0e, 0xbb, 0xf9, 0x32, 0x48, 0x43, 0x99, 0xf9, 0x1e, 0xf6, 0x1d, 0x82, 0x42, 0x50, 0xe4, 0x67, 0x66, 0xf1, 0x7c, 0xc5, 0xf4, 0xb1, 0x6b, 0x9f, 0xfb, 0xee, 0xdb, 0xdf, 0x3f, 0xd6, 0xf8, 0x68, 0x45, 0x39, 0xab, 0xb7, 0x38, 0x2e, 0x97, 0xad, 0x2e, 0xb4, 0xb6, 0x46, 0x5f, 0x7f, 0xed, 0xcc, 0xc6, 0xfe, 0x73, 0x74, 0x87, 0xee, 0x0f, 0xa8, 0xe0, 0xc0, 0x44, 0xdc, 0xb9, 0xe4, 0x1a, 0xea, 0x18, 0xae, 0x9e, 0xda, 0x07, 0x83, 0xb8, 0x47, 0xe3, 0x8f, 0x8d, 0x13, 0x5e, 0xc6, 0x55, 0xb1, 
0x2e, 0xb3, 0x43, 0xd7, 0xf4, 0x14, 0x43, 0x95, 0x8b, 0xec, 0x16, 0xdc, 0x5c, 0xe7, 0xf5, 0x76, 0x4d, 0x8a, 0x79, 0x98, 0xaa, 0xe1, 0xc0, 0x8f, 0x15, 0xf9, 0x38, 0x6a, 0xf6, 0x76, 0xf1, 0x9a, 0xde, 0xae, 0x29, 0x93, 0x6d, 0xf2, 0x1f, 0xc6, 0x2e, 0x79, 0xcb, 0xf0, 0xb8, 0xb9, 0x75, 0x15, 0xec, 0x5d, 0x83, 0xd8, 0xcb, 0xfd, 0xd7, 0x57, 0x8d, 0x36, 0x06, 0x45, 0x4b, 0x82, 0xd5, 0x60, 0xe1, 0xa5, 0xa9, 0x29, 0xd8, 0x61, 0x1d, 0xf9, 0x06, 0x1f, 0x42, 0x41, 0x75, 0x87, 0xf8, 0x65, 0x59, 0x46, 0xf3, 0xf2, 0xbf, 0x3e, 0x32, 0x79, 0xab, 0x73, 0xe1, 0x9d, 0xda, 0xf3, 0x4f, 0x5e, 0x59, 0x7d, 0xe5, 0xe5, 0x97, 0x1b, 0x3f, 0xbf, 0xba, 0xba, 0x5a, 0xc1, 0x7a, 0x16, 0xee, 0xf9, 0xb6, 0xb0, 0x6a, 0x61, 0xa1, 0x62, 0x81, 0xc9, 0x17, 0x40, 0xbc, 0x80, 0xb4, 0x46, 0x67, 0x27, 0x0e, 0x7c, 0x65, 0x63, 0xf7, 0xc1, 0xa2, 0xa9, 0xc1, 0x48, 0xe4, 0xec, 0x76, 0x55, 0x29, 0xc4, 0x9d, 0xcd, 0xb1, 0xa1, 0xdd, 0xf5, 0x3d, 0xd0, 0xa0, 0x7f, 0xf6, 0xc4, 0x79, 0xab, 0x03, 0x1e, 0x06, 0x8c, 0x5f, 0xfe, 0xb8, 0x08, 0x46, 0xb7, 0x67, 0x5b, 0xd1, 0x17, 0x67, 0x03, 0xb1, 0x4f, 0xf5, 0x1b, 0x72, 0x72, 0xab, 0x2a, 0x93, 0x50, 0xd4, 0xe5, 0x12, 0xfe, 0xf3, 0x79, 0x24, 0x3d, 0x36, 0xdd, 0x7e, 0x34, 0x95, 0x4a, 0xe1, 0x26, 0xe7, 0x79, 0x01, 0x4f, 0x19, 0x57, 0xc5, 0xc3, 0xd0, 0xa3, 0x53, 0x2c, 0xdb, 0x35, 0x5a, 0xe4, 0xff, 0x92, 0x76, 0x01, 0x73, 0x83, 0x61, 0xf1, 0x01, 0x40, 0x26, 0x95, 0xc1, 0x98, 0xc9, 0xd8, 0x50, 0x0c, 0xc3, 0xba, 0x24, 0x36, 0x27, 0x90, 0xea, 0x65, 0xcf, 0x1a, 0x7e, 0xb3, 0xd0, 0x79, 0xed, 0xc9, 0x57, 0xbc, 0xe7, 0x17, 0x8e, 0xed, 0x39, 0xfc, 0xe8, 0x8b, 0x87, 0x17, 0xee, 0x04, 0xe8, 0x7b, 0xd1, 0x9d, 0x13, 0xcf, 0x3c, 0xf3, 0xe0, 0x66, 0xfe, 0x33, 0x5c, 0x4e, 0xff, 0xb2, 0x6a, 0xb7, 0xe7, 0x94, 0x82, 0xd6, 0x7b, 0xf9, 0x27, 0xdd, 0x43, 0x17, 0xea, 0xa1, 0x0f, 0x0e, 0x8b, 0x77, 0x3d, 0x59, 0x77, 0xb0, 0x0e, 0xa3, 0xe8, 0x7f, 0x99, 0xea, 0x83, 0x90, 0xc3, 0x87, 0x98, 0x4a, 0xd9, 0xa7, 0x12, 0x29, 0x55, 0x77, 0xc2, 0xb8, 0xaf, 0xab, 0xfb, 0xd8, 0xe0, 0xd3, 0x8a, 0x48, 0xdd, 0x41, 0x11, 
0x86, 0x35, 0xdb, 0xba, 0x0f, 0x40, 0x1c, 0x75, 0x75, 0xac, 0x4c, 0x24, 0x83, 0xd2, 0xfa, 0xf0, 0x55, 0x26, 0x19, 0x29, 0xf2, 0x49, 0xe4, 0x36, 0xcc, 0x0c, 0x86, 0xf3, 0x6b, 0xea, 0x2b, 0x83, 0x50, 0x47, 0x0e, 0x90, 0xb2, 0x56, 0xe4, 0x94, 0xfa, 0x3a, 0x8e, 0x3c, 0xfb, 0xe0, 0x73, 0xad, 0xef, 0xed, 0x69, 0x3d, 0xd5, 0x78, 0xfe, 0x4a, 0xe3, 0x7b, 0xad, 0x87, 0x1b, 0xaf, 0xb4, 0x72, 0x06, 0x2c, 0x29, 0xee, 0xc4, 0x51, 0x39, 0x03, 0xd1, 0xd9, 0x9d, 0x13, 0x1f, 0x8c, 0x7f, 0xf4, 0x78, 0xfb, 0x66, 0xfe, 0xb3, 0x5f, 0x6a, 0x36, 0x82, 0xd0, 0x01, 0xa1, 0x07, 0x26, 0xc2, 0x11, 0xdf, 0x92, 0x6b, 0xdb, 0xe4, 0xee, 0xfa, 0xfa, 0xfa, 0x70, 0x58, 0xc5, 0x37, 0xd6, 0x21, 0x86, 0x9e, 0x11, 0x4f, 0xb2, 0xf7, 0xb9, 0x03, 0x33, 0x3d, 0x06, 0x38, 0x50, 0x57, 0xcd, 0xee, 0x94, 0xb1, 0x27, 0x0b, 0x5a, 0x35, 0xd3, 0xeb, 0xce, 0x26, 0xec, 0xa0, 0x96, 0x32, 0xcc, 0x41, 0x3b, 0x9c, 0xf7, 0xab, 0x99, 0x12, 0xcc, 0x8a, 0xa3, 0x7a, 0xaa, 0x8c, 0x62, 0x96, 0x5b, 0xff, 0x17, 0xcf, 0x8e, 0x18, 0x0a, 0xcb, 0xc6, 0x58, 0x2e, 0xcf, 0x5b, 0x52, 0x22, 0xd9, 0x83, 0x6d, 0xd0, 0x84, 0xd9, 0x94, 0x48, 0x52, 0x29, 0x55, 0xd1, 0x9d, 0x53, 0x6b, 0xf8, 0xf9, 0x40, 0xf4, 0x9d, 0xef, 0x44, 0x4f, 0x79, 0xef, 0xde, 0x93, 0x3e, 0x76, 0x38, 0x5a, 0xfb, 0xa7, 0x81, 0x81, 0xe8, 0x40, 0x14, 0x18, 0x60, 0x1b, 0x0b, 0x48, 0x94, 0x6d, 0x66, 0xca, 0x75, 0xc0, 0x7b, 0xc7, 0xc7, 0x7f, 0x3d, 0x33, 0xf3, 0xc9, 0x66, 0xee, 0xeb, 0xeb, 0x8b, 0xdf, 0x38, 0x0d, 0x77, 0x10, 0x7b, 0xca, 0x9e, 0x5b, 0x0e, 0xfa, 0xb3, 0xce, 0x50, 0x43, 0xac, 0x1b, 0xe6, 0x11, 0x79, 0x4d, 0x5f, 0xc4, 0x95, 0x41, 0xfd, 0x70, 0x0f, 0x86, 0xa2, 0x51, 0xc0, 0x95, 0x41, 0x72, 0x88, 0x65, 0x77, 0x1b, 0x95, 0xa3, 0x86, 0x3f, 0x00, 0x95, 0xb0, 0xed, 0x6a, 0x52, 0xf7, 0xe7, 0x77, 0xb3, 0x82, 0xe1, 0x6e, 0x2d, 0xaf, 0xe3, 0xe7, 0x14, 0x1a, 0xcb, 0x61, 0xbc, 0x10, 0xd6, 0xf4, 0x00, 0x47, 0x11, 0x5b, 0x40, 0xe9, 0x60, 0x85, 0x32, 0x19, 0xae, 0x74, 0x27, 0x98, 0xf5, 0x6e, 0x2e, 0xf2, 0x15, 0x6d, 0x4c, 0xab, 0x99, 0x15, 0x6b, 0xf1, 0xc4, 0x62, 0xc0, 0x82, 0x12, 
0xe8, 0x00, 0x69, 0x7d, 0x38, 0xab, 0x2b, 0x8b, 0x01, 0xfe, 0x89, 0xca, 0x22, 0x46, 0xdd, 0xe6, 0x8f, 0x77, 0xf0, 0xd6, 0x4a, 0xcb, 0x3d, 0xc3, 0xf7, 0x46, 0x67, 0xef, 0xf9, 0x73, 0xf4, 0xb1, 0xc6, 0x7b, 0x7e, 0xf0, 0x8f, 0xda, 0x6f, 0xce, 0xd6, 0x3e, 0x35, 0x3b, 0x1b, 0xdd, 0x39, 0x1b, 0xc5, 0x84, 0xa5, 0x6c, 0xdb, 0x69, 0xee, 0x67, 0x4b, 0xdb, 0xce, 0x89, 0x89, 0x13, 0x1f, 0x8c, 0x3f, 0x32, 0xb3, 0xeb, 0xc3, 0x4f, 0x36, 0xed, 0x7e, 0xd0, 0xeb, 0x1b, 0x9a, 0x0d, 0x3d, 0x69, 0x07, 0x07, 0xaa, 0xc9, 0xc5, 0x42, 0xd8, 0xed, 0xdc, 0xb1, 0xd4, 0xe0, 0xfa, 0x38, 0x9f, 0x65, 0x5f, 0x96, 0xd5, 0xe5, 0x94, 0xac, 0x98, 0xcd, 0x42, 0xd6, 0xd6, 0xc0, 0xb3, 0x0f, 0xbb, 0xfd, 0x91, 0xac, 0xe6, 0xd7, 0x71, 0x3d, 0x15, 0x2c, 0xf8, 0x6d, 0x59, 0xa1, 0x36, 0x19, 0x0b, 0x1b, 0xf1, 0xac, 0x06, 0x75, 0xcd, 0xd9, 0xda, 0x90, 0xdf, 0x9d, 0x15, 0xdf, 0xe0, 0xcd, 0x62, 0xc4, 0xcc, 0xa1, 0xee, 0xc9, 0x32, 0xdc, 0xe5, 0xd6, 0x9d, 0x06, 0xcb, 0xe6, 0x7d, 0x2c, 0x97, 0x2f, 0xf2, 0x4a, 0x26, 0x36, 0x7c, 0x92, 0xfd, 0x84, 0x10, 0x67, 0x35, 0xb1, 0x8b, 0x67, 0x4b, 0xad, 0x30, 0xf9, 0xe3, 0xc4, 0x89, 0x13, 0xf7, 0x36, 0x1e, 0x78, 0xe8, 0xda, 0x53, 0x07, 0x1e, 0x7a, 0xeb, 0x9e, 0x37, 0xae, 0xbd, 0x36, 0x31, 0x71, 0x1c, 0xe2, 0x89, 0x13, 0x13, 0x98, 0x4e, 0x40, 0x80, 0x23, 0xcc, 0xc0, 0xfe, 0x38, 0x66, 0x27, 0x58, 0x72, 0xe2, 0x38, 0x30, 0xfe, 0xc3, 0x47, 0x66, 0x3e, 0xbc, 0x7e, 0xfd, 0x57, 0xcf, 0x6e, 0xee, 0x3e, 0x58, 0xf4, 0xb8, 0x7c, 0xda, 0x7d, 0x0e, 0x7b, 0x2a, 0x05, 0x17, 0xc1, 0x64, 0xb0, 0xa0, 0x19, 0x11, 0x58, 0x2b, 0xe8, 0x40, 0x42, 0x4f, 0x28, 0x0d, 0x31, 0x57, 0xcc, 0xe5, 0x72, 0x69, 0x98, 0x49, 0xe8, 0x01, 0x9f, 0x0b, 0xf3, 0xb1, 0x58, 0xcc, 0x59, 0x38, 0xa4, 0x85, 0xc3, 0x79, 0xbf, 0x3f, 0x9f, 0x4f, 0xe8, 0x4e, 0xa6, 0x84, 0xf1, 0xd5, 0xc5, 0x84, 0xce, 0x02, 0xd4, 0xe1, 0xb2, 0x1b, 0xac, 0x66, 0x81, 0xc5, 0x04, 0x33, 0x92, 0x00, 0xab, 0x52, 0x9f, 0x19, 0x77, 0x69, 0x4c, 0x45, 0x0f, 0x8e, 0xb0, 0x6c, 0x6c, 0x14, 0xaa, 0x80, 0xb6, 0xee, 0x66, 0xc5, 0x36, 0x66, 0xa0, 0x84, 0x1e, 0x78, 
0x15, 0xeb, 0x09, 0x53, 0x52, 0xa8, 0x27, 0xfc, 0x3e, 0xb3, 0x15, 0xfc, 0xdc, 0xc0, 0x33, 0xe3, 0xe3, 0xdb, 0xb7, 0xdf, 0xfd, 0xd7, 0x03, 0xb5, 0x07, 0xf7, 0xd4, 0x3e, 0xfd, 0xfc, 0x8f, 0x7f, 0x3f, 0xfe, 0x9f, 0xf1, 0xed, 0x2c, 0xa0, 0x7c, 0x9c, 0xb3, 0x7d, 0x9c, 0xe7, 0x2d, 0x61, 0x1c, 0x7d, 0x07, 0xce, 0x6b, 0xaa, 0xaa, 0xea, 0x7f, 0xf3, 0xec, 0xed, 0xfc, 0xd7, 0x17, 0x69, 0x70, 0xe6, 0x83, 0x49, 0xfb, 0x4d, 0xb8, 0x43, 0xaa, 0x0e, 0x65, 0xb1, 0x90, 0x0f, 0x67, 0x23, 0x95, 0x91, 0xd1, 0xd3, 0xbd, 0x4e, 0xe7, 0x88, 0x58, 0x33, 0xf9, 0xd8, 0x3a, 0x75, 0xc4, 0x29, 0x72, 0xb8, 0xec, 0x32, 0x0a, 0x4a, 0x00, 0x07, 0x8e, 0xee, 0xf6, 0xed, 0xf0, 0xf9, 0xca, 0xf4, 0x2c, 0xf5, 0x4c, 0x09, 0x70, 0x1a, 0x2d, 0x58, 0xcd, 0xf8, 0x42, 0xa6, 0x0e, 0x2b, 0x30, 0xab, 0x8c, 0x58, 0xb4, 0x30, 0x33, 0xc2, 0x2a, 0x4b, 0x03, 0x25, 0x1d, 0x96, 0x9c, 0x2e, 0x09, 0xd7, 0xf0, 0xc0, 0x49, 0xe4, 0xf9, 0xda, 0x6f, 0x7c, 0x34, 0x33, 0x33, 0xf3, 0xf8, 0xb7, 0x0e, 0x3e, 0x3e, 0x73, 0x52, 0x30, 0x73, 0x72, 0x17, 0x26, 0x10, 0x67, 0xc4, 0x9e, 0xb1, 0x0b, 0xf7, 0xbb, 0x4e, 0xee, 0xda, 0xf5, 0xe1, 0x9b, 0xe0, 0xbc, 0xb1, 0xb6, 0xb1, 0xb1, 0x73, 0xb7, 0x19, 0xbd, 0x48, 0xc6, 0xb7, 0x14, 0xf1, 0x2f, 0x3a, 0x60, 0x08, 0x43, 0x1f, 0x54, 0x61, 0x14, 0x07, 0x0b, 0x05, 0x7f, 0x3e, 0xac, 0x69, 0xf1, 0xfb, 0xb5, 0x78, 0x3c, 0x1b, 0x8f, 0xe3, 0x18, 0x89, 0x23, 0xb0, 0x63, 0x41, 0x63, 0x49, 0x56, 0x03, 0x25, 0x5e, 0xc8, 0x02, 0x26, 0x71, 0xbe, 0x69, 0x59, 0x8c, 0x9c, 0x38, 0xaf, 0xcb, 0x6a, 0x6b, 0x62, 0xcf, 0x07, 0x39, 0xd3, 0x81, 0x2d, 0xce, 0xe5, 0x68, 0xa2, 0x94, 0xc3, 0x53, 0xb3, 0xb3, 0xf0, 0x63, 0x8d, 0xcb, 0x85, 0xd4, 0x34, 0x19, 0xe7, 0xdf, 0xf3, 0x67, 0x8a, 0xbc, 0x09, 0xdc, 0x24, 0x6e, 0x5f, 0x68, 0x42, 0xaa, 0x9a, 0x7e, 0xba, 0xef, 0xe9, 0xdf, 0x7e, 0xf9, 0x67, 0xfb, 0xfe, 0xf6, 0x00, 0x1c, 0x5b, 0xc0, 0x4c, 0x3f, 0xa4, 0x18, 0x44, 0x14, 0x0a, 0x55, 0x55, 0x63, 0x63, 0x6d, 0x2d, 0x40, 0x5b, 0xff, 0xfc, 0xed, 0xdd, 0x87, 0x33, 0xe1, 0xa5, 0x08, 0xf4, 0xc0, 0x17, 0xe0, 0x7e, 0x80, 0xa3, 0x18, 0x5c, 
0xb8, 0xac, 0x2c, 0x2e, 0xe2, 0x97, 0x4e, 0x74, 0x19, 0xe4, 0xce, 0x14, 0x94, 0x95, 0x95, 0x44, 0x96, 0x02, 0xab, 0x8a, 0x69, 0x44, 0x96, 0x94, 0xb2, 0x42, 0x8d, 0x7f, 0x69, 0x46, 0x2f, 0xb3, 0x2c, 0x4f, 0x1c, 0x2c, 0x55, 0xb4, 0x9c, 0xbb, 0xd4, 0x9e, 0xf2, 0x56, 0xc9, 0xfd, 0x7d, 0x2d, 0x7b, 0x31, 0xec, 0x6d, 0x69, 0x79, 0xf6, 0x17, 0x0f, 0xef, 0xfe, 0xf8, 0xf4, 0xb7, 0x5b, 0x78, 0x76, 0x2f, 0xd0, 0x32, 0x0f, 0xc7, 0x2c, 0xd3, 0x22, 0xa5, 0x98, 0xe5, 0x85, 0x50, 0x3a, 0x3f, 0x7f, 0x06, 0xb6, 0xb6, 0xaa, 0xb6, 0xad, 0xb8, 0x0f, 0xff, 0x36, 0x65, 0xa9, 0x37, 0xac, 0x2f, 0xab, 0x2f, 0xe0, 0x34, 0xf7, 0x28, 0x73, 0xa1, 0x9a, 0x53, 0x73, 0x39, 0x47, 0x4e, 0xa2, 0xb2, 0xd4, 0xa1, 0x62, 0x44, 0xb1, 0x03, 0x82, 0xa9, 0xa0, 0x8a, 0xd4, 0x54, 0xe7, 0x4a, 0x0e, 0x2e, 0xc2, 0x52, 0x5e, 0x43, 0x15, 0xaa, 0x2a, 0x2b, 0x74, 0x70, 0x3d, 0x59, 0x0b, 0x04, 0x0e, 0x07, 0x2f, 0xc9, 0x99, 0x12, 0xeb, 0x69, 0x1d, 0xaa, 0x45, 0xb5, 0x74, 0x6a, 0x69, 0xd7, 0x61, 0x36, 0x43, 0x18, 0x7d, 0xbf, 0xa6, 0x7d, 0xae, 0x66, 0xae, 0xa6, 0xbd, 0x46, 0x32, 0x27, 0xe2, 0xfb, 0xb8, 0x13, 0x39, 0xa4, 0x9d, 0xeb, 0xb4, 0xf3, 0x80, 0xc7, 0xed, 0xed, 0xed, 0x67, 0xce, 0xcc, 0xb7, 0x8d, 0xb5, 0x6c, 0xf5, 0x2b, 0xd0, 0x19, 0xb8, 0x06, 0x1a, 0xfe, 0x20, 0x4e, 0xa4, 0x99, 0x07, 0x53, 0x70, 0x3b, 0xde, 0x9c, 0xdb, 0x95, 0xdf, 0x46, 0xef, 0x16, 0xe2, 0xad, 0x5a, 0xdc, 0x22, 0x67, 0xcf, 0xde, 0x84, 0xc0, 0x92, 0xa3, 0x17, 0x59, 0x6a, 0x4a, 0xf8, 0xc1, 0xcd, 0x8b, 0x67, 0x21, 0xf0, 0x1c, 0x66, 0x70, 0x8f, 0xe9, 0xc5, 0x8b, 0x73, 0x73, 0x73, 0x35, 0x67, 0xf6, 0xb6, 0x55, 0xbd, 0xbb, 0x45, 0xef, 0x21, 0xf9, 0x86, 0x50, 0x24, 0x6c, 0x0b, 0x2e, 0x3b, 0xd4, 0xff, 0xcb, 0xab, 0xf9, 0xac, 0xb9, 0x69, 0x3a, 0x8f, 0x7b, 0x4c, 0x64, 0xce, 0x96, 0x49, 0x85, 0xd2, 0xcd, 0xb3, 0xd6, 0x80, 0xee, 0x43, 0xef, 0x8d, 0x35, 0xbd, 0x70, 0x7b, 0xaf, 0x59, 0xc7, 0x70, 0x64, 0xc9, 0x17, 0x81, 0x39, 0x5d, 0x50, 0x59, 0x4e, 0xb2, 0x41, 0xe1, 0x60, 0x83, 0x18, 0x70, 0xb0, 0xc1, 0x2c, 0xf2, 0xa5, 0xc0, 0x4a, 0x73, 0xa6, 0xd0, 0xc4, 0xac, 
0x67, 0x26, 0x0e, 0xa1, 0x9a, 0x53, 0x73, 0x52, 0xd7, 0x61, 0x4a, 0xf0, 0x00, 0xab, 0x38, 0x2c, 0xd6, 0xb8, 0x80, 0x1f, 0x9a, 0x96, 0x73, 0x66, 0x2b, 0x54, 0x8b, 0x75, 0xa6, 0x21, 0x54, 0x1d, 0x66, 0x4b, 0xde, 0xc7, 0x2e, 0xc4, 0x37, 0x33, 0xe2, 0x60, 0x9e, 0x9b, 0x2b, 0x15, 0x48, 0x6a, 0x4a, 0x7b, 0x8c, 0xed, 0x35, 0x30, 0x72, 0xdb, 0xaa, 0x9a, 0xe6, 0xef, 0xf8, 0x87, 0x5c, 0xc9, 0xde, 0xa5, 0x1d, 0xce, 0xa2, 0x96, 0xb7, 0x15, 0x12, 0x41, 0xcb, 0x97, 0x20, 0xb7, 0x8c, 0xbe, 0xbe, 0xd2, 0x46, 0x46, 0xee, 0xd0, 0xf8, 0x1d, 0xa9, 0xeb, 0xf7, 0xbd, 0x6b, 0xde, 0x27, 0x24, 0x2d, 0xb7, 0x08, 0x82, 0x79, 0x79, 0x34, 0xcf, 0xf4, 0xda, 0xda, 0xda, 0xc6, 0xaa, 0x9a, 0xae, 0xb7, 0x7c, 0xf5, 0x4e, 0xbd, 0xc7, 0xfa, 0x60, 0x21, 0x12, 0x0a, 0x8d, 0x38, 0x2b, 0x8b, 0x45, 0xb7, 0x81, 0x13, 0x01, 0x43, 0xce, 0x06, 0x3e, 0x2d, 0x8c, 0x72, 0x63, 0x9f, 0xa6, 0x69, 0xc1, 0xbf, 0xff, 0xd0, 0xdf, 0xcf, 0x26, 0x21, 0xfd, 0xa5, 0xd8, 0x6f, 0x39, 0x6a, 0x12, 0xbb, 0xd2, 0x6c, 0x45, 0x6a, 0x0b, 0xae, 0x7f, 0xfd, 0xfa, 0x58, 0xfb, 0xff, 0xfe, 0x23, 0xc2, 0xd4, 0x62, 0xc2, 0xaf, 0xb9, 0xc1, 0x83, 0x88, 0xbb, 0x28, 0x30, 0x64, 0x14, 0x9b, 0x9b, 0x97, 0xb9, 0x65, 0x21, 0x1e, 0x72, 0x05, 0x77, 0x49, 0xc0, 0x0f, 0x4c, 0x1b, 0x5c, 0x2c, 0x8d, 0x18, 0x56, 0xa9, 0xd0, 0x75, 0x5b, 0xaa, 0x95, 0xb4, 0xdc, 0xb2, 0xcc, 0xd2, 0x02, 0xf7, 0x9a, 0x16, 0x71, 0xfd, 0xef, 0xfd, 0xf2, 0xc1, 0x73, 0x4d, 0xe7, 0x9a, 0xca, 0x58, 0x93, 0x5d, 0xc7, 0x39, 0x8c, 0xe7, 0xb8, 0xde, 0xb9, 0xa6, 0xfe, 0xb6, 0x96, 0x33, 0x73, 0x9f, 0xc5, 0xdf, 0x19, 0x13, 0x04, 0x41, 0x10, 0x04, 0x41, 0x10, 0x04, 0x41, 0x10, 0x04, 0x41, 0x10, 0x04, 0x41, 0x10, 0x04, 0x41, 0x10, 0x04, 0x41, 0x10, 0x04, 0x41, 0x10, 0x04, 0x41, 0x10, 0x04, 0x41, 0x10, 0x04, 0x41, 0x10, 0x04, 0x41, 0x10, 0x04, 0x41, 0x10, 0x04, 0x41, 0x10, 0x04, 0x41, 0x10, 0x04, 0x41, 0x10, 0x04, 0x41, 0x10, 0x04, 0x41, 0x10, 0x04, 0x41, 0x10, 0x04, 0x41, 0x10, 0x04, 0x41, 0x10, 0x04, 0x41, 0x10, 0x04, 0x41, 0x10, 0x04, 0x41, 0x10, 0x04, 0x41, 0x10, 0x04, 0x41, 0x10, 0x5b, 
0xe6, 0xbf, 0x9f, 0xe5, 0x54, 0xe2, 0x36, 0x30, 0x01, 0x00,
gpl-2.0
Xxskyl3rxX/linux
arch/cris/arch-v32/kernel/process.c
399
4660
/* * Copyright (C) 2000-2003 Axis Communications AB * * Authors: Bjorn Wesen (bjornw@axis.com) * Mikael Starvik (starvik@axis.com) * Tobias Anderberg (tobiasa@axis.com), CRISv32 port. * * This file handles the architecture-dependent parts of process handling.. */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/fs.h> #include <hwregs/reg_rdwr.h> #include <hwregs/reg_map.h> #include <hwregs/timer_defs.h> #include <hwregs/intr_vect_defs.h> #include <linux/ptrace.h> extern void stop_watchdog(void); /* We use this if we don't have any better idle routine. */ void default_idle(void) { local_irq_enable(); /* Halt until exception. */ __asm__ volatile("halt"); } /* * Free current thread data structures etc.. */ extern void deconfigure_bp(long pid); void exit_thread(void) { deconfigure_bp(current->pid); } /* * If the watchdog is enabled, disable interrupts and enter an infinite loop. * The watchdog will reset the CPU after 0.1s. If the watchdog isn't enabled * then enable it and wait. */ extern void arch_enable_nmi(void); void hard_reset_now(void) { /* * Don't declare this variable elsewhere. We don't want any other * code to know about it than the watchdog handler in entry.S and * this code, implementing hard reset through the watchdog. */ #if defined(CONFIG_ETRAX_WATCHDOG) extern int cause_of_death; #endif printk("*** HARD RESET ***\n"); local_irq_disable(); #if defined(CONFIG_ETRAX_WATCHDOG) cause_of_death = 0xbedead; #else { reg_timer_rw_wd_ctrl wd_ctrl = {0}; stop_watchdog(); wd_ctrl.key = 16; /* Arbitrary key. */ wd_ctrl.cnt = 1; /* Minimum time. */ wd_ctrl.cmd = regk_timer_start; arch_enable_nmi(); REG_WR(timer, regi_timer0, rw_wd_ctrl, wd_ctrl); } #endif while (1) ; /* Wait for reset. */ } /* * Return saved PC of a blocked thread. */ unsigned long thread_saved_pc(struct task_struct *t) { return task_pt_regs(t)->erp; } /* * Setup the child's kernel stack with a pt_regs and call switch_stack() on it. 
* It will be unnested during _resume and _ret_from_sys_call when the new thread * is scheduled. * * Also setup the thread switching structure which is used to keep * thread-specific data during _resumes. */ extern asmlinkage void ret_from_fork(void); extern asmlinkage void ret_from_kernel_thread(void); int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, struct task_struct *p) { struct pt_regs *childregs = task_pt_regs(p); struct switch_stack *swstack = ((struct switch_stack *) childregs) - 1; /* * Put the pt_regs structure at the end of the new kernel stack page and * fix it up. Note: the task_struct doubles as the kernel stack for the * task. */ if (unlikely(p->flags & PF_KTHREAD)) { memset(swstack, 0, sizeof(struct switch_stack) + sizeof(struct pt_regs)); swstack->r1 = usp; swstack->r2 = arg; childregs->ccs = 1 << (I_CCS_BITNR + CCS_SHIFT); swstack->return_ip = (unsigned long) ret_from_kernel_thread; p->thread.ksp = (unsigned long) swstack; p->thread.usp = 0; return 0; } *childregs = *current_pt_regs(); /* Struct copy of pt_regs. */ childregs->r10 = 0; /* Child returns 0 after a fork/clone. */ /* Set a new TLS ? * The TLS is in $mof because it is the 5th argument to sys_clone. */ if (p->mm && (clone_flags & CLONE_SETTLS)) { task_thread_info(p)->tls = childregs->mof; } /* Put the switch stack right below the pt_regs. */ /* Parameter to ret_from_sys_call. 0 is don't restart the syscall. */ swstack->r9 = 0; /* * We want to return into ret_from_sys_call after the _resume. * ret_from_fork will call ret_from_sys_call. */ swstack->return_ip = (unsigned long) ret_from_fork; /* Fix the user-mode and kernel-mode stackpointer. 
*/ p->thread.usp = usp ?: rdusp(); p->thread.ksp = (unsigned long) swstack; return 0; } unsigned long get_wchan(struct task_struct *p) { /* TODO */ return 0; } #undef last_sched #undef first_sched void show_regs(struct pt_regs * regs) { unsigned long usp = rdusp(); show_regs_print_info(KERN_DEFAULT); printk("ERP: %08lx SRP: %08lx CCS: %08lx USP: %08lx MOF: %08lx\n", regs->erp, regs->srp, regs->ccs, usp, regs->mof); printk(" r0: %08lx r1: %08lx r2: %08lx r3: %08lx\n", regs->r0, regs->r1, regs->r2, regs->r3); printk(" r4: %08lx r5: %08lx r6: %08lx r7: %08lx\n", regs->r4, regs->r5, regs->r6, regs->r7); printk(" r8: %08lx r9: %08lx r10: %08lx r11: %08lx\n", regs->r8, regs->r9, regs->r10, regs->r11); printk("r12: %08lx r13: %08lx oR10: %08lx\n", regs->r12, regs->r13, regs->orig_r10); }
gpl-2.0
cphelps76/DEMENTED_kernel_grouper
arch/openrisc/mm/tlb.c
655
5080
/* * OpenRISC tlb.c * * Linux architectural port borrowing liberally from similar works of * others. All original copyrights apply as per the original source * declaration. * * Modifications for the OpenRISC architecture: * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> * Copyright (C) 2010-2011 Julius Baxter <julius.baxter@orsoc.se> * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/init.h> #include <asm/system.h> #include <asm/segment.h> #include <asm/tlbflush.h> #include <asm/pgtable.h> #include <asm/mmu_context.h> #include <asm/spr_defs.h> #define NO_CONTEXT -1 #define NUM_DTLB_SETS (1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> \ SPR_DMMUCFGR_NTS_OFF)) #define NUM_ITLB_SETS (1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> \ SPR_IMMUCFGR_NTS_OFF)) #define DTLB_OFFSET(addr) (((addr) >> PAGE_SHIFT) & (NUM_DTLB_SETS-1)) #define ITLB_OFFSET(addr) (((addr) >> PAGE_SHIFT) & (NUM_ITLB_SETS-1)) /* * Invalidate all TLB entries. * * This comes down to setting the 'valid' bit for all xTLBMR registers to 0. * Easiest way to accomplish this is to just zero out the xTLBMR register * completely. * */ void flush_tlb_all(void) { int i; unsigned long num_tlb_sets; /* Determine number of sets for IMMU. */ /* FIXME: Assumption is I & D nsets equal. 
*/ num_tlb_sets = NUM_ITLB_SETS; for (i = 0; i < num_tlb_sets; i++) { mtspr_off(SPR_DTLBMR_BASE(0), i, 0); mtspr_off(SPR_ITLBMR_BASE(0), i, 0); } } #define have_dtlbeir (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_TEIRI) #define have_itlbeir (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_TEIRI) /* * Invalidate a single page. This is what the xTLBEIR register is for. * * There's no point in checking the vma for PAGE_EXEC to determine whether it's * the data or instruction TLB that should be flushed... that would take more * than the few instructions that the following compiles down to! * * The case where we don't have the xTLBEIR register really only works for * MMU's with a single way and is hard-coded that way. */ #define flush_dtlb_page_eir(addr) mtspr(SPR_DTLBEIR, addr) #define flush_dtlb_page_no_eir(addr) \ mtspr_off(SPR_DTLBMR_BASE(0), DTLB_OFFSET(addr), 0); #define flush_itlb_page_eir(addr) mtspr(SPR_ITLBEIR, addr) #define flush_itlb_page_no_eir(addr) \ mtspr_off(SPR_ITLBMR_BASE(0), ITLB_OFFSET(addr), 0); void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) { if (have_dtlbeir) flush_dtlb_page_eir(addr); else flush_dtlb_page_no_eir(addr); if (have_itlbeir) flush_itlb_page_eir(addr); else flush_itlb_page_no_eir(addr); } void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { int addr; bool dtlbeir; bool itlbeir; dtlbeir = have_dtlbeir; itlbeir = have_itlbeir; for (addr = start; addr < end; addr += PAGE_SIZE) { if (dtlbeir) flush_dtlb_page_eir(addr); else flush_dtlb_page_no_eir(addr); if (itlbeir) flush_itlb_page_eir(addr); else flush_itlb_page_no_eir(addr); } } /* * Invalidate the selected mm context only. * * FIXME: Due to some bug here, we're flushing everything for now. * This should be changed to loop over over mm and call flush_tlb_range. */ void flush_tlb_mm(struct mm_struct *mm) { /* Was seeing bugs with the mm struct passed to us. Scrapped most of this function. 
*/ /* Several architctures do this */ flush_tlb_all(); } /* called in schedule() just before actually doing the switch_to */ void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *next_tsk) { /* remember the pgd for the fault handlers * this is similar to the pgd register in some other CPU's. * we need our own copy of it because current and active_mm * might be invalid at points where we still need to derefer * the pgd. */ current_pgd = next->pgd; /* We don't have context support implemented, so flush all * entries belonging to previous map */ if (prev != next) flush_tlb_mm(prev); } /* * Initialize the context related info for a new mm_struct * instance. */ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { mm->context = NO_CONTEXT; return 0; } /* called by __exit_mm to destroy the used MMU context if any before * destroying the mm itself. this is only called when the last user of the mm * drops it. */ void destroy_context(struct mm_struct *mm) { flush_tlb_mm(mm); } /* called once during VM initialization, from init.c */ void __init tlb_init(void) { /* Do nothing... */ /* invalidate the entire TLB */ /* flush_tlb_all(); */ }
gpl-2.0
adeepv/android-kernel-zte-v9a
crypto/sha512_generic.c
2703
9037
/* SHA-512 code by Jean-Luc Cooke <jlcooke@certainkey.com>
 *
 * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
 * Copyright (c) 2003 Kyle McMartin <kyle@debian.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 */
#include <crypto/internal/hash.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/crypto.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <linux/percpu.h>
#include <asm/byteorder.h>

/* Per-CPU message schedule W[0..79] -- keeps the 640-byte scratch area
 * off the (small) kernel stack.  Safe because sha512_transform pins the
 * CPU with get_cpu_var()/put_cpu_var(). */
static DEFINE_PER_CPU(u64[80], msg_schedule);

/* SHA-384/512 logical functions, as defined in FIPS 180-4. */
static inline u64 Ch(u64 x, u64 y, u64 z)
{
        return z ^ (x & (y ^ z));
}

static inline u64 Maj(u64 x, u64 y, u64 z)
{
        return (x & y) | (z & (x | y));
}

/* 64-bit rotate right; y must be in 1..63 (all call sites comply). */
static inline u64 RORu64(u64 x, u64 y)
{
        return (x >> y) | (x << (64 - y));
}

/* Round constants K[0..79] (FIPS 180-4 section 4.2.3). */
static const u64 sha512_K[80] = {
        0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL,
        0xe9b5dba58189dbbcULL, 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL,
        0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL, 0xd807aa98a3030242ULL,
        0x12835b0145706fbeULL, 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
        0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, 0x9bdc06a725c71235ULL,
        0xc19bf174cf692694ULL, 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL,
        0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL, 0x2de92c6f592b0275ULL,
        0x4a7484aa6ea6e483ULL, 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
        0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL, 0xb00327c898fb213fULL,
        0xbf597fc7beef0ee4ULL, 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL,
        0x06ca6351e003826fULL, 0x142929670a0e6e70ULL, 0x27b70a8546d22ffcULL,
        0x2e1b21385c26c926ULL, 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
        0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, 0x81c2c92e47edaee6ULL,
        0x92722c851482353bULL, 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL,
        0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL, 0xd192e819d6ef5218ULL,
        0xd69906245565a910ULL, 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
        0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, 0x2748774cdf8eeb99ULL,
        0x34b0bcb5e19b48a8ULL, 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL,
        0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL, 0x748f82ee5defb2fcULL,
        0x78a5636f43172f60ULL, 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
        0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, 0xbef9a3f7b2c67915ULL,
        0xc67178f2e372532bULL, 0xca273eceea26619cULL, 0xd186b8c721c0c207ULL,
        0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL, 0x06f067aa72176fbaULL,
        0x0a637dc5a2c898a6ULL, 0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
        0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, 0x3c9ebe0a15c9bebcULL,
        0x431d67c49c100d4cULL, 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL,
        0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL,
};

#define e0(x)       (RORu64(x,28) ^ RORu64(x,34) ^ RORu64(x,39))
#define e1(x)       (RORu64(x,14) ^ RORu64(x,18) ^ RORu64(x,41))
#define s0(x)       (RORu64(x, 1) ^ RORu64(x, 8) ^ (x >> 7))
#define s1(x)       (RORu64(x,19) ^ RORu64(x,61) ^ (x >> 6))

/* Load big-endian input word I into the message schedule. */
static inline void LOAD_OP(int I, u64 *W, const u8 *input)
{
        W[I] = __be64_to_cpu( ((__be64*)(input))[I] );
}

/* Extend the message schedule: W[I] from earlier words (FIPS 180-4). */
static inline void BLEND_OP(int I, u64 *W)
{
        W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
}

/* Process one 128-byte block, updating the eight-word chaining state. */
static void
sha512_transform(u64 *state, const u8 *input)
{
        u64 a, b, c, d, e, f, g, h, t1, t2;

        int i;
        u64 *W = get_cpu_var(msg_schedule);

        /* load the input */
        for (i = 0; i < 16; i++)
                LOAD_OP(i, W, input);

        for (i = 16; i < 80; i++) {
                BLEND_OP(i, W);
        }

        /* load the state into our registers */
        a=state[0];   b=state[1];   c=state[2];   d=state[3];
        e=state[4];   f=state[5];   g=state[6];   h=state[7];

        /* now iterate; eight rounds unrolled per loop pass */
        for (i=0; i<80; i+=8) {
                t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i  ] + W[i  ];
                t2 = e0(a) + Maj(a,b,c);    d+=t1;    h=t1+t2;
                t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[i+1];
                t2 = e0(h) + Maj(h,a,b);    c+=t1;    g=t1+t2;
                t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[i+2];
                t2 = e0(g) + Maj(g,h,a);    b+=t1;    f=t1+t2;
                t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[i+3];
                t2 = e0(f) + Maj(f,g,h);    a+=t1;    e=t1+t2;
                t1 = d + e1(a) + Ch(a,b,c) + sha512_K[i+4] + W[i+4];
                t2 = e0(e) + Maj(e,f,g);    h+=t1;    d=t1+t2;
                t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[i+5];
                t2 = e0(d) + Maj(d,e,f);    g+=t1;    c=t1+t2;
                t1 = b + e1(g) + Ch(g,h,a) + sha512_K[i+6] + W[i+6];
                t2 = e0(c) + Maj(c,d,e);    f+=t1;    b=t1+t2;
                t1 = a + e1(f) + Ch(f,g,h) + sha512_K[i+7] + W[i+7];
                t2 = e0(b) + Maj(b,c,d);    e+=t1;    a=t1+t2;
        }

        state[0] += a; state[1] += b; state[2] += c; state[3] += d;
        state[4] += e; state[5] += f; state[6] += g; state[7] += h;

        /* erase our data */
        a = b = c = d = e = f = g = h = t1 = t2 = 0;
        memset(W, 0, sizeof(__get_cpu_var(msg_schedule)));
        put_cpu_var(msg_schedule);
}

/* Initialize a SHA-512 digest context with the standard IV. */
static int
sha512_init(struct shash_desc *desc)
{
        struct sha512_state *sctx = shash_desc_ctx(desc);
        sctx->state[0] = SHA512_H0;
        sctx->state[1] = SHA512_H1;
        sctx->state[2] = SHA512_H2;
        sctx->state[3] = SHA512_H3;
        sctx->state[4] = SHA512_H4;
        sctx->state[5] = SHA512_H5;
        sctx->state[6] = SHA512_H6;
        sctx->state[7] = SHA512_H7;
        sctx->count[0] = sctx->count[1] = 0;

        return 0;
}

/* SHA-384 shares the SHA-512 machinery but starts from a different IV. */
static int
sha384_init(struct shash_desc *desc)
{
        struct sha512_state *sctx = shash_desc_ctx(desc);
        sctx->state[0] = SHA384_H0;
        sctx->state[1] = SHA384_H1;
        sctx->state[2] = SHA384_H2;
        sctx->state[3] = SHA384_H3;
        sctx->state[4] = SHA384_H4;
        sctx->state[5] = SHA384_H5;
        sctx->state[6] = SHA384_H6;
        sctx->state[7] = SHA384_H7;
        sctx->count[0] = sctx->count[1] = 0;

        return 0;
}

/* Feed len bytes of data into the hash, buffering partial blocks. */
static int
sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len)
{
        struct sha512_state *sctx = shash_desc_ctx(desc);

        unsigned int i, index, part_len;

        /* Compute number of bytes mod 128 */
        index = sctx->count[0] & 0x7f;

        /*
         * Update the 128-bit byte counter.
         *
         * BUGFIX: the old test `if (!(sctx->count[0] += len))' only
         * propagated the carry into count[1] when the sum wrapped to
         * exactly zero; any other 64-bit overflow silently lost the
         * carry, corrupting the length encoded into the final padding.
         * After an unsigned overflow the result is smaller than either
         * addend, so compare the new count against len instead.
         */
        sctx->count[0] += len;
        if (sctx->count[0] < len)
                sctx->count[1]++;

        part_len = 128 - index;

        /* Transform as many times as possible. */
        if (len >= part_len) {
                memcpy(&sctx->buf[index], data, part_len);
                sha512_transform(sctx->state, sctx->buf);

                for (i = part_len; i + 127 < len; i+=128)
                        sha512_transform(sctx->state, &data[i]);

                index = 0;
        } else {
                i = 0;
        }

        /* Buffer remaining input */
        memcpy(&sctx->buf[index], &data[i], len - i);

        return 0;
}

/* Pad, append the bit length and emit the 64-byte SHA-512 digest. */
static int
sha512_final(struct shash_desc *desc, u8 *hash)
{
        struct sha512_state *sctx = shash_desc_ctx(desc);
        static const u8 padding[128] = { 0x80, };
        __be64 *dst = (__be64 *)hash;
        __be64 bits[2];
        unsigned int index, pad_len;
        int i;

        /* Save number of bits */
        bits[1] = cpu_to_be64(sctx->count[0] << 3);
        bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);

        /* Pad out to 112 mod 128. */
        index = sctx->count[0] & 0x7f;
        pad_len = (index < 112) ? (112 - index) : ((128+112) - index);
        sha512_update(desc, padding, pad_len);

        /* Append length (before padding) */
        sha512_update(desc, (const u8 *)bits, sizeof(bits));

        /* Store state in digest */
        for (i = 0; i < 8; i++)
                dst[i] = cpu_to_be64(sctx->state[i]);

        /* Zeroize sensitive information. */
        memset(sctx, 0, sizeof(struct sha512_state));

        return 0;
}

/* SHA-384 digest: run SHA-512's finalization, keep the first 48 bytes. */
static int sha384_final(struct shash_desc *desc, u8 *hash)
{
        u8 D[64];

        sha512_final(desc, D);

        memcpy(hash, D, 48);
        /* Zeroize the discarded tail of the SHA-512 state. */
        memset(D, 0, 64);

        return 0;
}

static struct shash_alg sha512 = {
        .digestsize     =       SHA512_DIGEST_SIZE,
        .init           =       sha512_init,
        .update         =       sha512_update,
        .final          =       sha512_final,
        .descsize       =       sizeof(struct sha512_state),
        .base           =       {
                .cra_name       =       "sha512",
                .cra_flags      =       CRYPTO_ALG_TYPE_SHASH,
                .cra_blocksize  =       SHA512_BLOCK_SIZE,
                .cra_module     =       THIS_MODULE,
        }
};

static struct shash_alg sha384 = {
        .digestsize     =       SHA384_DIGEST_SIZE,
        .init           =       sha384_init,
        .update         =       sha512_update,
        .final          =       sha384_final,
        .descsize       =       sizeof(struct sha512_state),
        .base           =       {
                .cra_name       =       "sha384",
                .cra_flags      =       CRYPTO_ALG_TYPE_SHASH,
                .cra_blocksize  =       SHA384_BLOCK_SIZE,
                .cra_module     =       THIS_MODULE,
        }
};

static int __init sha512_generic_mod_init(void)
{
        int ret = 0;

        if ((ret = crypto_register_shash(&sha384)) < 0)
                goto out;
        if ((ret = crypto_register_shash(&sha512)) < 0)
                crypto_unregister_shash(&sha384);
out:
        return ret;
}

static void __exit sha512_generic_mod_fini(void)
{
        crypto_unregister_shash(&sha384);
        crypto_unregister_shash(&sha512);
}

module_init(sha512_generic_mod_init);
module_exit(sha512_generic_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms");

MODULE_ALIAS("sha384");
MODULE_ALIAS("sha512");
gpl-2.0